repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
MissionCriticalCloud/marvin | marvin/cloudstackAPI/updatePhysicalNetwork.py | 1 | 2102 | """Updates a physical network"""
from baseCmd import *
from baseResponse import *
class updatePhysicalNetworkCmd(baseCmd):
    """Request object for the async updatePhysicalNetwork API call."""
    typeInfo = {}

    def __init__(self):
        # Async call: the API returns a job id to be polled for completion.
        self.isAsync = "true"
        # physical network id (required)
        self.id = None
        # the speed for the physical network [1G/10G]
        self.networkspeed = None
        # Enabled/Disabled
        self.state = None
        # Tag the physical network
        self.tags = []
        # the VLAN for the physical network
        self.vlan = None
        # Register the wire type of every request parameter.
        for param, wire_type in (('id', 'uuid'),
                                 ('networkspeed', 'string'),
                                 ('state', 'string'),
                                 ('tags', 'list'),
                                 ('vlan', 'string')):
            self.typeInfo[param] = wire_type
        self.required = ["id", ]
class updatePhysicalNetworkResponse(baseResponse):
    """Response object for updatePhysicalNetwork.

    Fields: id (uuid of the network), broadcastdomainrange, domainid
    (owner's domain), isolationmethods, name, networkspeed, state,
    tags (comma separated), vlan, zoneid.
    """
    typeInfo = {}

    def __init__(self):
        # Every response field is transported as a plain string and
        # starts out unset until the response is parsed.
        for field in ('id', 'broadcastdomainrange', 'domainid',
                      'isolationmethods', 'name', 'networkspeed',
                      'state', 'tags', 'vlan', 'zoneid'):
            setattr(self, field, None)
            self.typeInfo[field] = 'string'
| apache-2.0 |
opencomputeproject/onie | contrib/oce/modules/tftphpa.py | 11 | 1536 |
# Defaults for launching the HPA TFTP daemon during a test run.

# No configuration file is generated for this service — presumably everything
# is driven by the command template below (TODO confirm against the runner).
DEFAULT_CONF_FILENAME = None
# Directories created under the test directory: log output and the serve root.
DEFAULT_DIRS = ['logs', 'tftp-root']
# Daemon binary and the unprivileged identity passed to it.
DEFAULT_CMD_BINARY = 'in.tftpd'
DEFAULT_CMD_USER = 'nobody'
DEFAULT_CMD_GROUP = 'nogroup'
# Jinja2 template for the shell snippet that prepares log/serve-root
# permissions and then starts the daemon; rendered by build_cmd().
DEFAULT_CMD_TEMPLATE = \
'''
touch {{ log_file }}
sudo chown {{ user }}:{{ group }} {{ log_file }}
chmod -R 777 {{ tftp_root }}
sudo chown -R {{ user }}:{{ group }} {{ tftp_root }}
sudo {{ binary }} {{ options }} {{ tftp_root }}
'''
def build_cmd(output, test_args):
    """Render the TFTP daemon start-up snippet and write it to *output*.

    :param output: file-like object the rendered shell snippet is written to.
    :param test_args: dict of test settings. Must contain 'test_dir'; may
        override 'tftp_binary', 'tftp_user', 'tftp_group', and may supply
        'host_ipv4_addr' for the daemon's listen address.
    """
    import os.path
    from jinja2 import Template

    values = {}
    # NOTE(review): log name 'tftpdhpa.log' vs module 'tftphpa' looks like a
    # typo, but it is runtime data — kept as-is for compatibility.
    values['log_file'] = os.path.join(test_args['test_dir'], 'logs', 'tftpdhpa.log')
    values['tftp_root'] = os.path.join(test_args['test_dir'], 'tftp-root')
    # Fall back to the module defaults when the test args do not override.
    values['binary'] = test_args.get('tftp_binary', DEFAULT_CMD_BINARY)
    values['user'] = test_args.get('tftp_user', DEFAULT_CMD_USER)
    values['group'] = test_args.get('tftp_group', DEFAULT_CMD_GROUP)

    args = ['--foreground', '-vvv', '-p', '-s']
    if 'host_ipv4_addr' in test_args:
        args.append('--address {0}'.format(test_args['host_ipv4_addr']))
    else:
        # Placeholder the operator must replace by hand.
        args.append('--address HOST_IP')
    args.append('--user {0[user]}'.format(values))
    values['options'] = ' '.join(args)

    output.write(Template(DEFAULT_CMD_TEMPLATE).render(values))
| gpl-2.0 |
andmos/ansible | test/units/modules/network/f5/test_bigip_gtm_wide_ip.py | 21 | 13120 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_gtm_wide_ip import ApiParameters
from library.modules.bigip_gtm_wide_ip import ModuleParameters
from library.modules.bigip_gtm_wide_ip import ModuleManager
from library.modules.bigip_gtm_wide_ip import ArgumentSpec
from library.modules.bigip_gtm_wide_ip import UntypedManager
from library.modules.bigip_gtm_wide_ip import TypedManager
from library.module_utils.network.f5.common import F5ModuleError
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_gtm_wide_ip import ApiParameters
from ansible.modules.network.f5.bigip_gtm_wide_ip import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_wide_ip import ModuleManager
from ansible.modules.network.f5.bigip_gtm_wide_ip import ArgumentSpec
from ansible.modules.network.f5.bigip_gtm_wide_ip import UntypedManager
from ansible.modules.network.f5.bigip_gtm_wide_ip import TypedManager
from ansible.module_utils.network.f5.common import F5ModuleError
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Return fixture *name*, parsed as JSON when possible, with caching."""
    path = os.path.join(fixture_path, name)
    try:
        # Cache hit: reuse the previously parsed value.
        return fixture_data[path]
    except KeyError:
        pass
    with open(path) as handle:
        contents = handle.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON — keep the raw text.
        pass
    fixture_data[path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Checks for the module-side and API-side parameter adapters."""

    def test_module_parameters(self):
        params = ModuleParameters(params=dict(
            name='foo.baz.bar',
            pool_lb_method='round-robin',
        ))
        assert params.name == 'foo.baz.bar'
        assert params.pool_lb_method == 'round-robin'

    def test_module_pools(self):
        params = ModuleParameters(params=dict(
            pools=[dict(name='foo', ratio='100')]
        ))
        assert len(params.pools) == 1

    def test_api_parameters(self):
        # API payloads use camelCase keys; the adapter maps them back.
        params = ApiParameters(params=dict(
            name='foo.baz.bar',
            poolLbMode='round-robin'
        ))
        assert params.name == 'foo.baz.bar'
        assert params.pool_lb_method == 'round-robin'

    def test_api_pools(self):
        params = ApiParameters(params=load_fixture('load_gtm_wide_ip_with_pools.json'))
        assert len(params.pools) == 1
        pool = params.pools[0]
        assert 'name' in pool
        assert 'ratio' in pool
        assert pool['name'] == '/Common/baz'
        assert pool['ratio'] == 10

    def test_module_not_fqdn_name(self):
        # A bare hostname is rejected as soon as .name is evaluated.
        with pytest.raises(F5ModuleError) as excinfo:
            params = ModuleParameters(params=dict(
                name='foo',
                lb_method='round-robin'
            ))
            assert params.name == 'foo'
        assert 'The provided name must be a valid FQDN' in str(excinfo)
class TestUntypedManager(unittest.TestCase):
    """Exercises wide-ip creation through the pre-12.x (untyped) manager."""

    def setUp(self):
        self.spec = ArgumentSpec()
        # Patch module_provisioned at whichever import path is in use.
        try:
            self.p1 = patch('library.modules.bigip_gtm_wide_ip.module_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = True
        except Exception:
            self.p1 = patch('ansible.modules.network.f5.bigip_gtm_wide_ip.module_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = True

    def tearDown(self):
        self.p1.stop()

    def test_create_wideip(self, *args):
        set_module_args(dict(
            name='foo.baz.bar',
            lb_method='round-robin',
            password='password',
            server='localhost',
            user='admin'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Stub out all device I/O on the concrete manager...
        manager = UntypedManager(module=module, params=module.params)
        manager.exists = Mock(return_value=False)
        manager.create_on_device = Mock(return_value=True)

        # ...and steer the dispatching ModuleManager onto it.
        dispatcher = ModuleManager(module=module)
        dispatcher.version_is_less_than_12 = Mock(return_value=True)
        dispatcher.get_manager = Mock(return_value=manager)

        results = dispatcher.exec_module()

        assert results['changed'] is True
        assert results['name'] == 'foo.baz.bar'
        assert results['state'] == 'present'
        assert results['lb_method'] == 'round-robin'
class TestTypedManager(unittest.TestCase):
    """Exercises wide-ip create/update through the 12.x+ (typed) manager.

    Every test drives ModuleManager.exec_module() with device I/O stubbed
    out on a TypedManager; the shared harness lives in _run_module() so
    each test states only its inputs and assertions.
    """

    def setUp(self):
        self.spec = ArgumentSpec()
        # Patch module_provisioned at whichever import path is in use.
        try:
            self.p1 = patch('library.modules.bigip_gtm_wide_ip.module_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = True
        except Exception:
            self.p1 = patch('ansible.modules.network.f5.bigip_gtm_wide_ip.module_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = True

    def tearDown(self):
        self.p1.stop()

    def _run_module(self, module_args, exists=False, current=None, updatable=False):
        """Run exec_module() against a stubbed TypedManager; return results.

        :param module_args: dict handed to set_module_args().
        :param exists: what TypedManager.exists() should report.
        :param current: when given, returned by read_current_from_device().
        :param updatable: when True, update_on_device() is stubbed out too.
        """
        set_module_args(module_args)
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager.
        tm = TypedManager(module=module, params=module.params)
        tm.exists = Mock(return_value=exists)
        tm.create_on_device = Mock(return_value=True)
        if current is not None:
            tm.read_current_from_device = Mock(return_value=current)
        if updatable:
            tm.update_on_device = Mock(return_value=True)

        # Override methods to force specific logic in the module to happen.
        mm = ModuleManager(module=module)
        mm.version_is_less_than_12 = Mock(return_value=False)
        mm.get_manager = Mock(return_value=tm)
        return mm.exec_module()

    def test_create_wideip(self, *args):
        results = self._run_module(dict(
            name='foo.baz.bar',
            lb_method='round-robin',
            type='a',
            password='password',
            server='localhost',
            user='admin'
        ))
        assert results['changed'] is True
        assert results['name'] == 'foo.baz.bar'
        assert results['state'] == 'present'
        assert results['lb_method'] == 'round-robin'

    def test_create_wideip_deprecated_lb_method1(self, *args):
        # Legacy underscore spelling is normalized to the dashed form.
        results = self._run_module(dict(
            name='foo.baz.bar',
            lb_method='round_robin',
            type='a',
            password='password',
            server='localhost',
            user='admin'
        ))
        assert results['changed'] is True
        assert results['name'] == 'foo.baz.bar'
        assert results['state'] == 'present'
        assert results['lb_method'] == 'round-robin'

    def test_create_wideip_deprecated_lb_method2(self, *args):
        # Legacy underscore spelling is normalized to the dashed form.
        results = self._run_module(dict(
            name='foo.baz.bar',
            lb_method='global_availability',
            type='a',
            password='password',
            server='localhost',
            user='admin'
        ))
        assert results['changed'] is True
        assert results['name'] == 'foo.baz.bar'
        assert results['state'] == 'present'
        assert results['lb_method'] == 'global-availability'

    def test_create_wideip_with_pool(self, *args):
        results = self._run_module(dict(
            name='foo.baz.bar',
            lb_method='round-robin',
            type='a',
            pools=[
                dict(
                    name='foo',
                    ratio=10
                )
            ],
            password='password',
            server='localhost',
            user='admin'
        ))
        assert results['changed'] is True
        assert results['name'] == 'foo.baz.bar'
        assert results['state'] == 'present'
        assert results['lb_method'] == 'round-robin'

    def test_create_wideip_with_pool_idempotent(self, *args):
        # Desired state matches the device fixture exactly: no change.
        current = ApiParameters(params=load_fixture('load_gtm_wide_ip_with_pools.json'))
        results = self._run_module(dict(
            name='foo.bar.com',
            lb_method='round-robin',
            type='a',
            pools=[
                dict(
                    name='baz',
                    ratio=10
                )
            ],
            password='password',
            server='localhost',
            user='admin'
        ), exists=True, current=current)
        assert results['changed'] is False

    def test_update_wideip_with_pool(self, *args):
        # An extra pool relative to the fixture forces an update.
        current = ApiParameters(params=load_fixture('load_gtm_wide_ip_with_pools.json'))
        results = self._run_module(dict(
            name='foo.bar.com',
            lb_method='round-robin',
            type='a',
            pools=[
                dict(
                    name='baz',
                    ratio=10
                ),
                dict(
                    name='alec',
                    ratio=100
                )
            ],
            password='password',
            server='localhost',
            user='admin'
        ), exists=True, current=current, updatable=True)
        assert results['changed'] is True
        assert 'pools' in results
| gpl-3.0 |
miptliot/edx-platform | cms/djangoapps/contentstore/tests/tests.py | 6 | 15352 | """
This test file will test registration, login, activation, and session activity timeouts
"""
import datetime
import time
import unittest
import mock
from ddt import data, ddt, unpack
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from freezegun import freeze_time
from pytz import UTC
from contentstore.models import PushNotificationConfig
from contentstore.tests.test_course_settings import CourseTestCase
from contentstore.tests.utils import AjaxEnabledTestClient, parse_json, registration, user
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class ContentStoreTestCase(ModuleStoreTestCase):
    """Shared helpers for studio auth tests: account creation, activation
    and login exercised against the real views."""

    def _login(self, email, password):
        """
        Login. View should always return 200. The success/fail is in the
        returned json
        """
        response = self.client.post(
            reverse('login_post'),
            {'email': email, 'password': password}
        )
        self.assertEqual(response.status_code, 200)
        return response

    def login(self, email, password):
        """Login, check that it worked."""
        response = self._login(email, password)
        self.assertTrue(parse_json(response)['success'])
        return response

    def _create_account(self, username, email, password):
        """Try to create an account. No error checking"""
        payload = {
            'username': username,
            'email': email,
            'password': password,
            'location': 'home',
            'language': 'Franglish',
            'name': 'Fred Weasley',
            'terms_of_service': 'true',
            'honor_code': 'true',
        }
        return self.client.post('/create_account', payload)

    def create_account(self, username, email, password):
        """Create the account and check that it worked"""
        response = self._create_account(username, email, password)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(parse_json(response)['success'], True)
        # Check both that the user is created, and inactive
        self.assertFalse(user(email).is_active)
        return response

    def _activate_user(self, email):
        """Look up the activation key for the user, then hit the activate view.
        No error checking"""
        activation_key = registration(email).activation_key
        # and now we try to activate
        return self.client.get(reverse('activate', kwargs={'key': activation_key}))

    def activate_user(self, email):
        """Activate the account and verify the user is actually active."""
        response = self._activate_user(email)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(user(email).is_active)
class AuthTestCase(ContentStoreTestCase):
"""Check that various permissions-related things work"""
CREATE_USER = False
ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']
def setUp(self):
super(AuthTestCase, self).setUp()
self.email = 'a@b.com'
self.pw = 'xyz'
self.username = 'testuser'
self.client = AjaxEnabledTestClient()
# clear the cache so ratelimiting won't affect these tests
cache.clear()
def check_page_get(self, url, expected):
resp = self.client.get_html(url)
self.assertEqual(resp.status_code, expected)
return resp
def test_public_pages_load(self):
"""Make sure pages that don't require login load without error."""
pages = (
reverse('login'),
reverse('signup'),
)
for page in pages:
print "Checking '{0}'".format(page)
self.check_page_get(page, 200)
def test_create_account_errors(self):
# No post data -- should fail
resp = self.client.post('/create_account', {})
self.assertEqual(resp.status_code, 400)
data = parse_json(resp)
self.assertEqual(data['success'], False)
def test_create_account(self):
self.create_account(self.username, self.email, self.pw)
self.activate_user(self.email)
def test_create_account_username_already_exists(self):
User.objects.create_user(self.username, self.email, self.pw)
resp = self._create_account(self.username, "abc@def.com", "password")
# we have a constraint on unique usernames, so this should fail
self.assertEqual(resp.status_code, 400)
def test_create_account_pw_already_exists(self):
User.objects.create_user(self.username, self.email, self.pw)
resp = self._create_account("abcdef", "abc@def.com", self.pw)
# we can have two users with the same password, so this should succeed
self.assertEqual(resp.status_code, 200)
@unittest.skipUnless(settings.SOUTH_TESTS_MIGRATE, "South migrations required")
def test_create_account_email_already_exists(self):
User.objects.create_user(self.username, self.email, self.pw)
resp = self._create_account("abcdef", self.email, "password")
# This is tricky. Django's user model doesn't have a constraint on
# unique email addresses, but we *add* that constraint during the
# migration process:
# see common/djangoapps/student/migrations/0004_add_email_index.py
#
# The behavior we *want* is for this account creation request
# to fail, due to this uniqueness constraint, but the request will
# succeed if the migrations have not run.
self.assertEqual(resp.status_code, 400)
def test_login(self):
self.create_account(self.username, self.email, self.pw)
# Not activated yet. Login should fail.
resp = self._login(self.email, self.pw)
data = parse_json(resp)
self.assertFalse(data['success'])
self.activate_user(self.email)
# Now login should work
self.login(self.email, self.pw)
def test_login_ratelimited(self):
# try logging in 30 times, the default limit in the number of failed
# login attempts in one 5 minute period before the rate gets limited
for i in xrange(30):
resp = self._login(self.email, 'wrong_password{0}'.format(i))
self.assertEqual(resp.status_code, 200)
resp = self._login(self.email, 'wrong_password')
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertFalse(data['success'])
self.assertIn('Too many failed login attempts.', data['value'])
@override_settings(MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED=3)
@override_settings(MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS=2)
def test_excessive_login_failures(self):
# try logging in 3 times, the account should get locked for 3 seconds
# note we want to keep the lockout time short, so we don't slow down the tests
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': True}):
self.create_account(self.username, self.email, self.pw)
self.activate_user(self.email)
for i in xrange(3):
resp = self._login(self.email, 'wrong_password{0}'.format(i))
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertFalse(data['success'])
self.assertIn(
'Email or password is incorrect.',
data['value']
)
# now the account should be locked
resp = self._login(self.email, 'wrong_password')
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertFalse(data['success'])
self.assertIn(
'This account has been temporarily locked due to excessive login failures. Try again later.',
data['value']
)
with freeze_time('2100-01-01'):
self.login(self.email, self.pw)
# make sure the failed attempt counter gets reset on successful login
resp = self._login(self.email, 'wrong_password')
self.assertEqual(resp.status_code, 200)
data = parse_json(resp)
self.assertFalse(data['success'])
# account should not be locked out after just one attempt
self.login(self.email, self.pw)
# do one more login when there is no bad login counter row at all in the database to
# test the "ObjectNotFound" case
self.login(self.email, self.pw)
def test_login_link_on_activation_age(self):
self.create_account(self.username, self.email, self.pw)
# we want to test the rendering of the activation page when the user isn't logged in
self.client.logout()
resp = self._activate_user(self.email)
self.assertEqual(resp.status_code, 200)
# check the the HTML has links to the right login page. Note that this is merely a content
# check and thus could be fragile should the wording change on this page
expected = 'You can now <a href="' + reverse('login') + '">sign in</a>.'
self.assertIn(expected, resp.content.decode('utf-8'))
def test_private_pages_auth(self):
"""Make sure pages that do require login work."""
auth_pages = (
'/home/',
)
# These are pages that should just load when the user is logged in
# (no data needed)
simple_auth_pages = (
'/home/',
)
# need an activated user
self.test_create_account()
# Create a new session
self.client = AjaxEnabledTestClient()
# Not logged in. Should redirect to login.
print 'Not logged in'
for page in auth_pages:
print "Checking '{0}'".format(page)
self.check_page_get(page, expected=302)
# Logged in should work.
self.login(self.email, self.pw)
print 'Logged in'
for page in simple_auth_pages:
print "Checking '{0}'".format(page)
self.check_page_get(page, expected=200)
def test_index_auth(self):
# not logged in. Should return a redirect.
resp = self.client.get_html('/home/')
self.assertEqual(resp.status_code, 302)
# Logged in should work.
@override_settings(SESSION_INACTIVITY_TIMEOUT_IN_SECONDS=1)
def test_inactive_session_timeout(self):
"""
Verify that an inactive session times out and redirects to the
login page
"""
self.create_account(self.username, self.email, self.pw)
self.activate_user(self.email)
self.login(self.email, self.pw)
# make sure we can access courseware immediately
course_url = '/home/'
resp = self.client.get_html(course_url)
self.assertEquals(resp.status_code, 200)
# then wait a bit and see if we get timed out
time.sleep(2)
resp = self.client.get_html(course_url)
# re-request, and we should get a redirect to login page
self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL + '?next=/home/')
@mock.patch.dict(settings.FEATURES, {"ALLOW_PUBLIC_ACCOUNT_CREATION": False})
def test_signup_button_index_page(self):
"""
Navigate to the home page and check the Sign Up button is hidden when ALLOW_PUBLIC_ACCOUNT_CREATION flag
is turned off
"""
response = self.client.get(reverse('homepage'))
self.assertNotIn('<a class="action action-signup" href="/signup">Sign Up</a>', response.content)
@mock.patch.dict(settings.FEATURES, {"ALLOW_PUBLIC_ACCOUNT_CREATION": False})
def test_signup_button_login_page(self):
"""
Navigate to the login page and check the Sign Up button is hidden when ALLOW_PUBLIC_ACCOUNT_CREATION flag
is turned off
"""
response = self.client.get(reverse('login'))
self.assertNotIn('<a class="action action-signup" href="/signup">Sign Up</a>', response.content)
@mock.patch.dict(settings.FEATURES, {"ALLOW_PUBLIC_ACCOUNT_CREATION": False})
def test_signup_link_login_page(self):
"""
Navigate to the login page and check the Sign Up link is hidden when ALLOW_PUBLIC_ACCOUNT_CREATION flag
is turned off
"""
response = self.client.get(reverse('login'))
self.assertNotIn('<a href="/signup" class="action action-signin">Don't have a Studio Account? Sign up!</a>',
response.content)
class ForumTestCase(CourseTestCase):
    def setUp(self):
        """ Creates the test course. """
        super(ForumTestCase, self).setUp()
        self.course = CourseFactory.create(org='testX', number='727', display_name='Forum Course')

    def test_blackouts(self):
        """Posting is allowed outside blackout windows and blocked inside."""
        now = datetime.datetime.now(UTC)

        def set_blackouts(windows):
            # The model stores each (start, end) pair as ISO-formatted strings.
            self.course.discussion_blackouts = [
                (start.isoformat(), end.isoformat()) for start, end in windows
            ]

        # Both windows entirely in the past or future: posting allowed.
        set_blackouts([
            (now - datetime.timedelta(days=14), now - datetime.timedelta(days=11)),
            (now + datetime.timedelta(days=24), now + datetime.timedelta(days=30)),
        ])
        self.assertTrue(self.course.forum_posts_allowed)

        # First window spans "now": posting blocked.
        set_blackouts([
            (now - datetime.timedelta(days=14), now + datetime.timedelta(days=2)),
            (now + datetime.timedelta(days=24), now + datetime.timedelta(days=30)),
        ])
        self.assertFalse(self.course.forum_posts_allowed)

        # test if user gives empty blackout date it should return true for forum_posts_allowed
        self.course.discussion_blackouts = [[]]
        self.assertTrue(self.course.forum_posts_allowed)
@ddt
class CourseKeyVerificationTestCase(CourseTestCase):
    def setUp(self):
        """
        Create test course.
        """
        super(CourseKeyVerificationTestCase, self).setUp()
        self.course = CourseFactory.create(org='edX', number='test_course_key', display_name='Test Course')

    @data(('edX/test_course_key/Test_Course', 200), ('garbage:edX+test_course_key+Test_Course', 404))
    @unpack
    def test_course_key_decorator(self, course_key, status_code):
        """
        Tests for the ensure_valid_course_key decorator.
        """
        url = '/import/{course_key}'.format(course_key=course_key)
        resp = self.client.get_html(url)
        self.assertEqual(resp.status_code, status_code)

        # The format string had lost its {filename} placeholder, so the
        # 'filename' keyword below was silently unused; restore it.
        url = '/import_status/{course_key}/{filename}'.format(
            course_key=course_key,
            filename='xyz.tar.gz'
        )
        resp = self.client.get_html(url)
        self.assertEqual(resp.status_code, status_code)
class PushNotificationConfigTestCase(TestCase):
    """
    Tests PushNotificationConfig.
    """
    def test_notifications_defaults(self):
        # With no config row saved, notifications are off.
        self.assertFalse(PushNotificationConfig.is_enabled())

    def test_notifications_enabled(self):
        # Saving an enabled row turns the feature on.
        config = PushNotificationConfig(enabled=True)
        config.save()
        self.assertTrue(PushNotificationConfig.is_enabled())
| agpl-3.0 |
rlugojr/django | django/utils/safestring.py | 54 | 2473 | """
Functions for working with "safe strings": strings that can be displayed safely
without further escaping in HTML. Marking something as a "safe string" means
that the producer of the string has already turned characters that should not
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
"""
from django.utils.functional import Promise, wraps
class SafeData:
    """Mixin marking its content as needing no further escaping for HTML output."""
    def __html__(self):
        """
        Return the html representation of a string for interoperability.

        This allows other template engines to understand Django's SafeData.
        """
        return self
class SafeBytes(bytes, SafeData):
    """
    A bytes subclass that has been specifically marked as "safe" (requires no
    further escaping) for HTML output purposes.

    Kept in Django 2.0 for usage by apps supporting Python 2. Shouldn't be used
    in Django anymore.
    """
    def __add__(self, rhs):
        """
        Concatenating a safe byte string with another safe byte string or safe
        string is safe. Otherwise, the result is no longer safe.
        """
        result = super().__add__(rhs)
        if isinstance(rhs, SafeText):
            return SafeText(result)
        if isinstance(rhs, SafeBytes):
            return SafeBytes(result)
        # rhs was not marked safe, so the concatenation is not either.
        return result
class SafeText(str, SafeData):
    """
    A str subclass that has been specifically marked as "safe" for HTML output
    purposes.
    """
    def __add__(self, rhs):
        """
        Concatenating a safe string with another safe byte string or
        safe string is safe. Otherwise, the result is no longer safe.
        """
        result = super().__add__(rhs)
        return SafeText(result) if isinstance(rhs, SafeData) else result

    def __str__(self):
        return self


SafeString = SafeText
def _safety_decorator(safety_marker, func):
@wraps(func)
def wrapped(*args, **kwargs):
return safety_marker(func(*args, **kwargs))
return wrapped
def mark_safe(s):
    """
    Explicitly mark a string as safe for (HTML) output purposes. The returned
    object can be used everywhere a string is appropriate.

    If used on a method as a decorator, mark the returned data as safe.

    Can be called multiple times on a single string.
    """
    # Already safe (ours or another engine's __html__ protocol): pass through.
    if hasattr(s, '__html__'):
        return s
    # Plain and lazy strings are wrapped directly.
    if isinstance(s, (str, Promise)):
        return SafeText(s)
    # Callables get decorated so their return values are marked safe.
    if callable(s):
        return _safety_decorator(mark_safe, s)
    # Anything else is stringified first.
    return SafeText(str(s))
| bsd-3-clause |
tuxskar/trending-highlighter | socketIO_app.py | 1 | 2327 | #!/usr/bin/env python
from flask import Flask, render_template, request
from flask_assets import Environment
from controller import get_rooms, get_word_counter_processed, get_room_name, get_username, process_word_cnts
from flask_socketio import SocketIO, join_room, send, emit
# Set this variable to "threading", "eventlet" or "gevent" to test the
# different async modes, or leave it set to None for the application to choose
# the best option based on installed packages.
from models import Message, MessagesCollection, get_users, SENTENCES
# Flask application plus the Socket.IO and asset layers on top of it.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — fine for a demo, replace in production.
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
assets = Environment(app)
@app.route('/', defaults=dict(room=None))
@app.route('/<room>')
def index(room):
    # Same template for every room; the room path segment is handled
    # client-side (the view itself never reads `room`).
    return render_template('index.html', async_mode=socketio.async_mode)
@socketio.on('join')
def join(message):
    """Put the socket in its room and send back the room's current state."""
    room, room_name = get_room_name(message)
    join_room(room)
    send({
        'roomMessages': MessagesCollection.to_json(room),
        'username': get_username(),
        'users': get_users(),
        'words': get_word_counter_processed(room),
        'sentences': SENTENCES[room],
        'room': room,
        'roomName': room_name,
    })
    emit_user_count(room)
@socketio.on('disconnect')
def on_disconnect():
    """Re-broadcast user counts, not counting the departing socket."""
    emit_user_count(exclude_sid=request.sid)
def emit_user_count(room=None, exclude_sid=None):
    """Emit the live user count to *room*, or to every room when None."""
    for current_room in get_rooms(room):
        sids = socketio.server.manager.rooms.get('/', {}).get(current_room, [])
        connected = [sid for sid in sids if sid != exclude_sid]
        emit('userCnt', dict(cnt=len(connected)), room=current_room)
@socketio.on('newMsg')
def new_msg(user_message):
    """Store an incoming chat message and broadcast it plus updated word stats."""
    sent_msg = user_message.get('data')
    username = user_message.get('username')
    new_message = Message(msg=sent_msg, username=username)
    room = user_message['room']
    process_word_cnts(sent_msg, room)
    if room not in MessagesCollection:
        MessagesCollection[room] = []
    MessagesCollection[room].append(new_message)
    # Keep only the 30 most recent messages.  The previous slice [:30] kept
    # the 30 *oldest*, so every message after the cap was appended and then
    # immediately discarded.
    MessagesCollection[room] = MessagesCollection[room][-30:]
    emit('newMsg', new_message.to_json(), room=room)
    emit('newWordUpdate', get_word_counter_processed(room), room=room)
def get_cnt_users(room):
    # Number of sockets currently joined to *room* on the default namespace.
    # NOTE(review): unlike emit_user_count there is no default here, so an
    # unknown room yields len(None) -> TypeError; confirm callers guarantee
    # the room exists.
    return len(socketio.server.manager.rooms.get('/', {}).get(room))


if __name__ == '__main__':
    # Dev entry point: serve on all interfaces with debug enabled.
    socketio.run(app, debug=True, host='0.0.0.0')
| mit |
protochron/aurora | src/main/python/apache/aurora/admin/host_maintenance.py | 3 | 10365 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from threading import Event
from twitter.common import log
from twitter.common.quantity import Amount, Time
from apache.aurora.admin.admin_util import format_sla_results, print_results
from apache.aurora.client.api import AuroraClientAPI
from apache.aurora.client.base import DEFAULT_GROUPING, check_and_log_response, group_hosts
from gen.apache.aurora.api.ttypes import Hosts, MaintenanceMode
class HostMaintenance(object):
  """Submit requests to the scheduler to put hosts into and out of maintenance
  mode so they can be operated upon without causing LOST tasks.
  Aurora provides a two-tiered concept of Maintenance. The first step is to initiate maintenance,
  which will ask the Aurora scheduler to de-prioritize scheduling on a large set of hosts (the ones
  that will be operated upon during this maintenance window). Once all hosts have been tagged in
  this manner, the operator can begin draining individual machines, which will have all user-tasks
  killed and rescheduled. When the tasks get placed onto a new machine, the scheduler will first
  look for hosts that do not have the maintenance tag, which will help decrease churn and prevent a
  task from being constantly killed as its hosts go down from underneath it.
  """
  # Jobs with fewer instances than this are exempt from the SLA uptime check.
  SLA_MIN_JOB_INSTANCE_COUNT = 20
  # Interval between maintenance-status polls while waiting for hosts to drain.
  STATUS_POLL_INTERVAL = Amount(5, Time.SECONDS)
  # Upper bound on the cumulative time spent waiting for hosts to reach DRAINED.
  MAX_STATUS_WAIT = Amount(5, Time.MINUTES)
  @classmethod
  def iter_batches(cls, hostnames, grouping_function=DEFAULT_GROUPING):
    # Yield one Hosts batch per host group, in deterministic (sorted by group
    # name) order, so maintenance proceeds one group at a time.
    groups = group_hosts(hostnames, grouping_function)
    groups = sorted(groups.items(), key=lambda v: v[0])
    for group in groups:
      yield Hosts(group[1])
  def __init__(self, cluster, verbosity, wait_event=None):
    # ``wait_event`` is an injection point (e.g. for tests): setting it aborts
    # the drain-status polling loop early.
    self._client = AuroraClientAPI(cluster, verbosity == 'verbose')
    self._wait_event = wait_event or Event()
  def _drain_hosts(self, drainable_hosts):
    """Drains tasks from the specified hosts.
    This will move active tasks on these hosts to the DRAINING state, causing them to be
    rescheduled elsewhere.
    :param drainable_hosts: Hosts that are in maintenance mode and ready to be drained
    :type drainable_hosts: gen.apache.aurora.ttypes.Hosts
    :rtype: set of host names failed to drain
    """
    check_and_log_response(self._client.drain_hosts(drainable_hosts))
    drainable_hostnames = [hostname for hostname in drainable_hosts.hostNames]
    total_wait = self.STATUS_POLL_INTERVAL
    not_drained_hostnames = set(drainable_hostnames)
    # Poll until every host reports DRAINED, the wait event fires, or the
    # cumulative wait exceeds MAX_STATUS_WAIT.
    while not self._wait_event.is_set() and not_drained_hostnames:
      log.info('Waiting for hosts to be in DRAINED: %s' % not_drained_hostnames)
      self._wait_event.wait(self.STATUS_POLL_INTERVAL.as_(Time.SECONDS))
      statuses = self.check_status(list(not_drained_hostnames))
      not_drained_hostnames = set(h[0] for h in statuses if h[1] != 'DRAINED')
      total_wait += self.STATUS_POLL_INTERVAL
      if not_drained_hostnames and total_wait > self.MAX_STATUS_WAIT:
        # Give up on the stragglers; they are reported back to the caller.
        log.warning('Failed to move all hosts into DRAINED within %s:\n%s' %
            (self.MAX_STATUS_WAIT,
            '\n'.join("\tHost:%s\tStatus:%s" % h for h in sorted(statuses) if h[1] != 'DRAINED')))
        break
    return not_drained_hostnames
  def _complete_maintenance(self, drained_hosts):
    """End the maintenance status for a given set of hosts.
    :param drained_hosts: Hosts that are drained and finished being operated upon
    :type drained_hosts: gen.apache.aurora.ttypes.Hosts
    """
    check_and_log_response(self._client.end_maintenance(drained_hosts))
    # Verify the scheduler actually cleared maintenance mode for each host.
    resp = self._client.maintenance_status(drained_hosts)
    for host_status in resp.result.maintenanceStatusResult.statuses:
      if host_status.mode != MaintenanceMode.NONE:
        log.warning('%s is DRAINING or in DRAINED' % host_status.host)
  def _check_sla(self, hostnames, grouping_function, percentage, duration):
    """Check if the provided list of hosts passes the job uptime SLA check.
    This is an all-or-nothing check, meaning that all provided hosts must pass their job
    SLA check for the maintenance to proceed.
    :param hostnames: list of host names to check SLA for
    :type hostnames: list of strings
    :param grouping_function: grouping function to apply to the given hosts
    :type grouping_function: function
    :param percentage: SLA uptime percentage override
    :type percentage: float
    :param duration: SLA uptime duration override
    :type duration: twitter.common.quantity.Amount
    :rtype: set of unsafe hosts
    """
    vector = self._client.sla_get_safe_domain_vector(self.SLA_MIN_JOB_INSTANCE_COUNT, hostnames)
    host_groups = vector.probe_hosts(
      percentage,
      duration.as_(Time.SECONDS),
      grouping_function)
    unsafe_hostnames = set()
    # Given that maintenance is performed 1 group at a time, any result longer than 1 group
    # should be considered a batch failure.
    if host_groups:
      if len(host_groups) > 1:
        log.error('Illegal multiple groups detected in SLA results. Skipping hosts: %s' % hostnames)
        return set(hostnames)
      results, unsafe_hostnames = format_sla_results(host_groups, unsafe_only=True)
      if results:
        print_results(results)
        return unsafe_hostnames
    return unsafe_hostnames
  def end_maintenance(self, hostnames):
    """Pull a list of hostnames out of maintenance mode.
    :param hostnames: List of hosts to operate upon
    :type hostnames: list of strings
    """
    self._complete_maintenance(Hosts(set(hostnames)))
  def start_maintenance(self, hostnames):
    """Put a list of hostnames into maintenance mode, to de-prioritize scheduling.
    This is part of two-phase draining- tasks will still be running on these hosts until
    drain_hosts is called upon them.
    :param hostnames: List of hosts to set for initial maintenance
    :type hostnames: list of strings
    :rtype: list of hostnames with the maintenance mode set
    """
    resp = self._client.start_maintenance(Hosts(set(hostnames)))
    check_and_log_response(resp)
    # Hosts the scheduler does not know about are silently dropped from the
    # result; warn so the operator notices the discrepancy.
    result = [host_status.host for host_status in resp.result.startMaintenanceResult.statuses]
    if len(result) != len(hostnames):
      log.warning('Skipping maintenance for unknown hosts: %s' % (set(hostnames) - set(result)))
    return result
  def _operate_on_hosts(self, drained_hosts, callback):
    """Perform a given operation on a list of hosts that are ready for maintenance.
    :param drained_hosts: Hosts that have been drained (via _drain_hosts)
    :type drained_hosts: list of strings
    :param callback: Function to call one hostname at a time
    :type callback: function
    """
    for hostname in drained_hosts:
      callback(hostname)
  def perform_maintenance(self, hostnames, grouping_function=DEFAULT_GROUPING,
                          percentage=None, duration=None, output_file=None, callback=None):
    """Put hosts into maintenance mode and drain them.
    Walk through the process of putting hosts into maintenance and draining them of tasks. The hosts
    will remain in maintenance mode upon completion.
    :param hostnames: A list of hostnames to operate upon
    :type hostnames: list of strings
    :param grouping_function: How to split up the hostname into groups
    :type grouping_function: function
    :param percentage: SLA percentage to use
    :type percentage: float
    :param duration: SLA duration to use
    :type duration: twitter.common.quantity.Time
    :param output_file: file to write hosts that were not drained due to failed SLA check
    :type output_file: string
    :param callback: Function to call once hosts are drained
    :type callback: function
    :rtype: set of host names that were successfully drained
    """
    hostnames = self.start_maintenance(hostnames)
    not_drained_hostnames = set()
    # Process one group at a time so only a bounded slice of the cluster is
    # in maintenance simultaneously.
    for hosts in self.iter_batches(hostnames, grouping_function):
      log.info('Beginning SLA check for %s' % hosts.hostNames)
      unsafe_hostnames = self._check_sla(
          list(hosts.hostNames),
          grouping_function,
          percentage,
          duration)
      if unsafe_hostnames:
        log.warning('Some hosts did not pass SLA check and will not be drained! '
                    'Skipping hosts: %s' % unsafe_hostnames)
        not_drained_hostnames |= unsafe_hostnames
        drainable_hostnames = hosts.hostNames - unsafe_hostnames
        if not drainable_hostnames:
          continue
        # Shrink the batch to only the hosts that passed the SLA check.
        hosts = Hosts(drainable_hostnames)
      else:
        log.info('All hosts passed SLA check.')
      not_drained_hostnames |= self._drain_hosts(hosts)
      if callback:
        self._operate_on_hosts(hosts.hostNames - not_drained_hostnames, callback)
    if not_drained_hostnames:
      # Report (and optionally persist) the hosts that were skipped or that
      # failed to drain, so the operator can retry them.
      output = '\n'.join(list(not_drained_hostnames))
      log.info('The following hosts WERE NOT DRAINED due to failed SLA check or external failures:')
      print(output)
      if output_file:
        try:
          with open(output_file, 'w') as fp:
            fp.write(output)
            fp.write('\n')
            log.info('Written unsafe host names into: %s' % output_file)
        except IOError as e:
          log.error('Failed to write into the output file: %s' % e)
    return set(hostnames) - not_drained_hostnames
  def check_status(self, hostnames):
    """Query the scheduler to determine the maintenance status for a list of hostnames
    :param hostnames: Hosts to query for
    :type hostnames: list of strings
    :rtype: list of 2-tuples, hostname and MaintenanceMode
    """
    resp = self._client.maintenance_status(Hosts(set(hostnames)))
    check_and_log_response(resp)
    statuses = []
    for host_status in resp.result.maintenanceStatusResult.statuses:
      # Translate the thrift enum value into its symbolic name (e.g. 'DRAINED').
      statuses.append((host_status.host, MaintenanceMode._VALUES_TO_NAMES[host_status.mode]))
    return statuses
| apache-2.0 |
refeed/coala | coalib/bearlib/aspects/base.py | 12 | 2094 | from coalib.bearlib.languages import Language
from .taste import TasteError
class aspectbase:
    """
    Base class for aspectclasses with common features for their instances.
    Derived classes must use
    :class:`coalib.bearlib.aspects.meta.aspectclass` as metaclass.
    This is automatically handled by
    :meth:`coalib.bearlib.aspects.meta.aspectclass.subaspect` decorator.
    """
    def __init__(self, language, **taste_values):
        """
        Instantiate an aspectclass with specific `taste_values`,
        including parent tastes.
        Given tastes must be available for the given `language`,
        which must be a language identifier supported by
        :class:`coalib.bearlib.languages.Language`.
        All taste values will be casted to the related taste cast types.
        Non-given available tastes will get their default values.
        """
        # bypass self.__setattr__
        self.__dict__['language'] = Language[language]
        for name, taste in type(self).tastes.items():
            if taste.languages and language not in taste.languages:
                # Taste is not applicable to this language: explicitly passing
                # a value for it is an error; otherwise it is silently skipped
                # (so self.tastes will not contain it).
                if name in taste_values:
                    raise TasteError('%s.%s is not available for %s.' % (
                        type(self).__qualname__, name, language))
            else:
                # Applicable taste: store the given value or the default.
                # NOTE(review): the cast mentioned in the docstring presumably
                # happens in the Taste machinery reached via setattr -- confirm.
                setattr(self, name, taste_values.get(name, taste.default))
    def __eq__(self, other):
        # Instances are equal iff they are the *same* aspectclass with
        # identical taste values (language is deliberately not compared here).
        return type(self) is type(other) and self.tastes == other.tastes
    @property
    def tastes(self):
        """
        Get a dictionary of all taste names mapped to their specific values,
        including parent tastes.
        """
        # Only tastes actually set on this instance are included.
        return {name: self.__dict__[name] for name in type(self).tastes
                if name in self.__dict__}
    def __setattr__(self, name, value):
        """
        Don't allow attribute manipulations after instantiation of
        aspectclasses.
        """
        # Only declared tastes may ever be assigned through normal attribute
        # access; everything else must go through __dict__ (see __init__).
        if name not in type(self).tastes:
            raise AttributeError(
                "can't set attributes of aspectclass instances")
        super().__setattr__(name, value)
| agpl-3.0 |
masaori335/hyper | test/test_socket.py | 3 | 7857 | # -*- coding: utf-8 -*-
"""
test/socket
~~~~~~~~~~~
Test the BufferedSocket implementation in hyper.
"""
import pytest
import hyper.common.bufsocket
from hyper.common.bufsocket import BufferedSocket
from hyper.common.exceptions import ConnectionResetError, LineTooLongError
# Patch the select method in bufsocket to make sure that it always returns
# the dummy socket as readable.
def dummy_select(a, b, c, d):
    # Stand-in for select.select: report every watched object as readable and
    # ignore the write-set, error-set and timeout arguments entirely.
    readable = a
    return readable
class TestBufferedSocket(object):
    """
    Tests of the hyper BufferedSocket object.

    Most tests monkeypatch ``select.select`` with ``dummy_select`` so the
    DummySocket always appears readable and no call ever blocks.
    """
    def test_can_create_buffered_sockets(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        assert b is not None
        assert b._buffer_size == 1000
    def test_can_send_on_buffered_socket(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        b.send(b'test data')
        # Sends pass straight through to the underlying socket unmodified.
        assert len(s.outbound_packets) == 1
        assert s.outbound_packets[0] == b'test data'
    def test_receive_single_packet(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        s.inbound_packets.append(b'test data')
        d = b.recv(100).tobytes()
        assert d == b'test data'
    def test_receive_multiple_packets_one_at_a_time(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        s.inbound_packets = [b'Here', b'begins', b'the', b'test', b'data']
        d = b''
        for _ in range(5):
            d += b.recv(100).tobytes()
        assert d == b'Herebeginsthetestdata'
    def test_receive_small_packets(self, monkeypatch):
        # NOTE(review): identical to test_receive_multiple_packets_one_at_a_time
        # above -- presumably meant to exercise a different read size; confirm.
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        s.inbound_packets = [b'Here', b'begins', b'the', b'test', b'data']
        d = b''
        for _ in range(5):
            d += b.recv(100).tobytes()
        assert d == b'Herebeginsthetestdata'
    def test_receive_multiple_packets_at_once(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        # read_count=3 lets one socket read consume up to three packets.
        s.inbound_packets = [b'Here', b'begins', b'the', b'test', b'data', b'!']
        s.read_count = 3
        d = b''
        for _ in range(22):
            d += b.recv(1).tobytes()
        assert d == b'Herebeginsthetestdata!'
    def test_filling_the_buffer(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        # Two packets together exceed the 1000-byte buffer, forcing a refill.
        s.inbound_packets = [
            b'a' * 1000,
            b'a' * 800,
        ]
        d = b''
        for _ in range(2):
            d += b.recv(900).tobytes()
        assert d == (b'a' * 1800)
    def test_oversized_read(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        # Asking for more than is available returns what there is, not an error.
        s.inbound_packets.append(b'a' * 600)
        d = b.recv(1200).tobytes()
        assert d == b'a' * 600
    def test_readline_from_buffer(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        # Preload the internal buffer directly; no socket reads are needed.
        one = b'hi there\r\n'
        two = b'this is another line\r\n'
        three = b'\r\n'
        combined = b''.join([one, two, three])
        b._buffer_view[0:len(combined)] = combined
        b._bytes_in_buffer += len(combined)
        assert b.readline().tobytes() == one
        assert b.readline().tobytes() == two
        assert b.readline().tobytes() == three
    def test_readline_from_socket(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        # Deliver the lines in 5-byte fragments so readline must keep reading.
        one = b'hi there\r\n'
        two = b'this is another line\r\n'
        three = b'\r\n'
        combined = b''.join([one, two, three])
        for i in range(0, len(combined), 5):
            s.inbound_packets.append(combined[i:i+5])
        assert b.readline().tobytes() == one
        assert b.readline().tobytes() == two
        assert b.readline().tobytes() == three
    def test_readline_both(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        # First half pre-buffered, second half delivered via the socket.
        one = b'hi there\r\n'
        two = b'this is another line\r\n'
        three = b'\r\n'
        combined = b''.join([one, two, three])
        split_index = int(len(combined) / 2)
        b._buffer_view[0:split_index] = combined[0:split_index]
        b._bytes_in_buffer += split_index
        for i in range(split_index, len(combined), 5):
            s.inbound_packets.append(combined[i:i+5])
        assert b.readline().tobytes() == one
        assert b.readline().tobytes() == two
        assert b.readline().tobytes() == three
    def test_socket_error_on_readline(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        # An empty socket read (0 bytes) during readline means the peer closed.
        with pytest.raises(ConnectionResetError):
            b.readline()
    def test_socket_readline_too_long(self, monkeypatch):
        monkeypatch.setattr(
            hyper.common.bufsocket.select, 'select', dummy_select
        )
        s = DummySocket()
        b = BufferedSocket(s)
        # A full buffer with no line terminator must raise, not loop forever.
        b._buffer_view[0:b._buffer_size] = b'0' * b._buffer_size
        b._bytes_in_buffer = b._buffer_size
        with pytest.raises(LineTooLongError):
            b.readline()
    def test_socket_fill_basic(self):
        s = DummySocket()
        b = BufferedSocket(s)
        s.inbound_packets = [b'Here', b'begins', b'the']
        assert not len(b.buffer)
        # Each fill() pulls exactly one packet (read_count defaults to 1).
        b.fill()
        assert len(b.buffer) == 4
        b.fill()
        assert len(b.buffer) == 10
        b.fill()
        assert len(b.buffer) == 13
    def test_socket_fill_resizes_if_needed(self):
        s = DummySocket()
        b = BufferedSocket(s)
        s.inbound_packets = [b'Here']
        # Push the read index to the end of the buffer to force a compaction.
        b._index = 1000
        assert not len(b.buffer)
        b.fill()
        assert len(b.buffer) == 4
        assert b._index == 0
    def test_socket_fill_raises_connection_errors(self):
        s = DummySocket()
        b = BufferedSocket(s)
        # An empty DummySocket returns 0 bytes, which fill() treats as EOF.
        with pytest.raises(ConnectionResetError):
            b.fill()
    def test_advancing_sockets(self):
        s = DummySocket()
        b = BufferedSocket(s)
        b._buffer_view[0:5] = b'abcde'
        b._bytes_in_buffer += 5
        assert len(b.buffer) == 5
        # advance_buffer consumes bytes from the front of the buffer.
        b.advance_buffer(3)
        assert len(b.buffer) == 2
        assert b.buffer.tobytes() == b'de'
class DummySocket(object):
    """In-memory stand-in for a socket, used by the BufferedSocket tests.

    Reads are served from ``inbound_packets`` (a FIFO of byte strings) and
    writes are captured in ``outbound_packets``. ``read_count`` controls how
    many queued packets a single ``recv_into`` call may consume.
    """
    def __init__(self):
        self.inbound_packets = []
        self.outbound_packets = []
        self.read_count = 1
    def recv_into(self, buffer):
        """Copy up to ``read_count`` queued packets into *buffer*.

        Returns the number of bytes written. An exhausted queue simply ends
        the read early (mirroring a short ``recv``); it is not an error.
        """
        written = 0
        for _ in range(self.read_count):
            if not self.inbound_packets:
                break
            packet = self.inbound_packets.pop(0)
            buffer[written:written + len(packet)] = packet
            written += len(packet)
        return written
    def send(self, data):
        """Record *data* as if it had been written to the wire."""
        self.outbound_packets.append(data)
| mit |
googleads/google-ads-python | google/ads/googleads/v7/services/services/conversion_adjustment_upload_service/transports/__init__.py | 2 | 1141 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ConversionAdjustmentUploadServiceTransport
from .grpc import ConversionAdjustmentUploadServiceGrpcTransport
# Compile a registry of transports, keyed by transport name. gRPC is the only
# transport registered for this service.
_transport_registry = (
    OrderedDict()
) # type: Dict[str, Type[ConversionAdjustmentUploadServiceTransport]]
_transport_registry["grpc"] = ConversionAdjustmentUploadServiceGrpcTransport
# Public re-exports of this package.
__all__ = (
    "ConversionAdjustmentUploadServiceTransport",
    "ConversionAdjustmentUploadServiceGrpcTransport",
)
| apache-2.0 |
IMCG/priter | src/contrib/hod/hodlib/Common/miniHTMLParser.py | 182 | 1402 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import urllib, urlparse, re
from HTMLParser import HTMLParser
class miniHTMLParser( HTMLParser ):
  """Minimal HTML link crawler: collects every <a href> reachable from a page.

  NOTE(review): ``viewedQueue`` and ``instQueue`` are *class-level* lists, so
  all instances share the same crawl state; this looks intentional for a
  single-crawler script, but confirm before reusing the class elsewhere.
  """
  viewedQueue = []
  instQueue = []
  def setBaseUrl(self, url):
    # Base URL against which relative hrefs are resolved.
    self.baseUrl = url
  def getNextLink( self ):
    # Pop the next unvisited link (FIFO); None signals the crawl is complete.
    if self.instQueue == []:
      return None
    else:
      return self.instQueue.pop(0)
  def handle_starttag( self, tag, attrs ):
    # Only anchor tags are of interest.
    if tag == 'a':
      # assumes href is the first attribute of the <a> tag -- TODO confirm
      newstr = urlparse.urljoin(self.baseUrl, str(attrs[0][1]))
      # Skip mailto: links.
      if re.search('mailto', newstr) != None:
        return
      # Enqueue each URL only once.
      if (newstr in self.viewedQueue) == False:
        self.instQueue.append( newstr )
        self.viewedQueue.append( newstr )
| apache-2.0 |
doug-fish/horizon | openstack_dashboard/dashboards/project/network_topology/routers/tables.py | 73 | 1051 | # Copyright 2013 NTT Innovation Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.dashboards.project.routers import tables
class DeleteRouter(tables.DeleteRouter):
    # Reuse the routers panel's delete action, but return to the network
    # topology router view after the deletion completes.
    redirect_url = "horizon:project:network_topology:router"
class RoutersTable(tables.RoutersTable):
    # Same routers table as the routers panel, restricted to the delete row
    # action for use inside the network-topology dashboard.
    class Meta(object):
        name = "Routers"
        verbose_name = _("Routers")
        status_columns = ["status"]
        row_actions = (DeleteRouter,)
| apache-2.0 |
cloudbau/glance | glance/registry/client/v1/api.py | 1 | 6835 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry's Client API
"""
import json
import os
from oslo.config import cfg
from glance.common import exception
import glance.openstack.common.log as logging
from glance.registry.client.v1 import client
LOG = logging.getLogger(__name__)
registry_client_ctx_opts = [
cfg.BoolOpt('send_identity_headers', default=False,
help=_("Whether to pass through headers containing user "
"and tenant information when making requests to "
"the registry. This allows the registry to use the "
"context middleware without the keystoneclients' "
"auth_token middleware, removing calls to the keystone "
"auth service. It is recommended that when using this "
"option, secure communication between glance api and "
"glance registry is ensured by means other than "
"auth_token middleware.")),
]
CONF = cfg.CONF
CONF.register_opts(registry_client_ctx_opts)
_registry_client = 'glance.registry.client'
CONF.import_opt('registry_client_protocol', _registry_client)
CONF.import_opt('registry_client_key_file', _registry_client)
CONF.import_opt('registry_client_cert_file', _registry_client)
CONF.import_opt('registry_client_ca_file', _registry_client)
CONF.import_opt('registry_client_insecure', _registry_client)
CONF.import_opt('registry_client_timeout', _registry_client)
CONF.import_opt('use_user_token', _registry_client)
CONF.import_opt('admin_user', _registry_client)
CONF.import_opt('admin_password', _registry_client)
CONF.import_opt('admin_tenant_name', _registry_client)
CONF.import_opt('auth_url', _registry_client)
CONF.import_opt('auth_strategy', _registry_client)
CONF.import_opt('auth_region', _registry_client)
CONF.import_opt('metadata_encryption_key', 'glance.common.config')
_CLIENT_CREDS = None
_CLIENT_HOST = None
_CLIENT_PORT = None
_CLIENT_KWARGS = {}
# AES key used to encrypt 'location' metadata
_METADATA_ENCRYPTION_KEY = None
def configure_registry_client():
    """
    Sets up a registry client for use in registry lookups.

    Populates the module-level connection globals (_CLIENT_HOST, _CLIENT_PORT,
    _CLIENT_KWARGS, _METADATA_ENCRYPTION_KEY) from configuration; must be
    called before get_registry_client().

    :raises exception.BadRegistryConnectionConfiguration: if the registry
        host/port options are missing or invalid.
    """
    global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT, _METADATA_ENCRYPTION_KEY
    try:
        host, port = CONF.registry_host, CONF.registry_port
    except cfg.ConfigFileValueError:
        msg = _("Configuration option was not valid")
        LOG.error(msg)
        raise exception.BadRegistryConnectionConfiguration(reason=msg)
    except IndexError:
        msg = _("Could not find required configuration option")
        LOG.error(msg)
        raise exception.BadRegistryConnectionConfiguration(reason=msg)
    _CLIENT_HOST = host
    _CLIENT_PORT = port
    _METADATA_ENCRYPTION_KEY = CONF.metadata_encryption_key
    _CLIENT_KWARGS = {
        'use_ssl': CONF.registry_client_protocol.lower() == 'https',
        'key_file': CONF.registry_client_key_file,
        'cert_file': CONF.registry_client_cert_file,
        'ca_file': CONF.registry_client_ca_file,
        'insecure': CONF.registry_client_insecure,
        'timeout': CONF.registry_client_timeout,
    }
    # Without per-user tokens, requests authenticate as the service admin.
    if not CONF.use_user_token:
        configure_registry_admin_creds()
def configure_registry_admin_creds():
    """Populate _CLIENT_CREDS with service-admin credentials.

    Used when ``use_user_token`` is disabled, so registry requests are made
    with the configured admin identity instead of the caller's token.
    """
    global _CLIENT_CREDS
    # A configured auth URL implies keystone; otherwise fall back to the
    # explicitly configured auth strategy.
    if CONF.auth_url or os.getenv('OS_AUTH_URL'):
        strategy = 'keystone'
    else:
        strategy = CONF.auth_strategy
    _CLIENT_CREDS = {
        'user': CONF.admin_user,
        'password': CONF.admin_password,
        'username': CONF.admin_user,
        'tenant': CONF.admin_tenant_name,
        'auth_url': CONF.auth_url,
        'strategy': strategy,
        'region': CONF.auth_region,
    }
def get_registry_client(cxt):
    """Build a RegistryClient for the given request context.

    ``configure_registry_client()`` must have been called first to populate
    the module-level connection settings.
    """
    global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT
    global _METADATA_ENCRYPTION_KEY
    kwargs = _CLIENT_KWARGS.copy()
    if CONF.use_user_token:
        # Forward the caller's auth token so the registry sees the original
        # user's credentials.
        kwargs['auth_tok'] = cxt.auth_tok
    if _CLIENT_CREDS:
        kwargs['creds'] = _CLIENT_CREDS
    if CONF.send_identity_headers:
        # Pass identity explicitly so the registry can build a request context
        # without running the keystone auth_token middleware (see the
        # send_identity_headers option help text above).
        identity_headers = {
            'X-User-Id': cxt.user,
            'X-Tenant-Id': cxt.tenant,
            'X-Roles': ','.join(cxt.roles),
            'X-Identity-Status': 'Confirmed',
            'X-Service-Catalog': json.dumps(cxt.service_catalog),
        }
        kwargs['identity_headers'] = identity_headers
    return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT,
                                 _METADATA_ENCRYPTION_KEY, **kwargs)
def get_images_list(context, **kwargs):
    """Return a brief image listing from the registry."""
    c = get_registry_client(context)
    return c.get_images(**kwargs)
def get_images_detail(context, **kwargs):
    """Return a detailed image listing from the registry."""
    c = get_registry_client(context)
    return c.get_images_detailed(**kwargs)
def get_image_metadata(context, image_id):
    """Return the registry's metadata record for a single image."""
    c = get_registry_client(context)
    return c.get_image(image_id)
def add_image_metadata(context, image_meta):
    """Register a new image's metadata with the registry."""
    LOG.debug(_("Adding image metadata..."))
    c = get_registry_client(context)
    return c.add_image(image_meta)
def update_image_metadata(context, image_id, image_meta,
                          purge_props=False):
    """Update an image's metadata in the registry.

    ``purge_props`` is passed through to the registry client; presumably it
    drops properties absent from image_meta -- confirm in RegistryClient.
    """
    LOG.debug(_("Updating image metadata for image %s..."), image_id)
    c = get_registry_client(context)
    return c.update_image(image_id, image_meta, purge_props)
def delete_image_metadata(context, image_id):
    """Delete an image's metadata record from the registry."""
    LOG.debug(_("Deleting image metadata for image %s..."), image_id)
    c = get_registry_client(context)
    return c.delete_image(image_id)
def get_image_members(context, image_id):
    """Return the membership list of an image."""
    c = get_registry_client(context)
    return c.get_image_members(image_id)
def get_member_images(context, member_id):
    """Return the images shared with a given member."""
    c = get_registry_client(context)
    return c.get_member_images(member_id)
def replace_members(context, image_id, member_data):
    """Replace an image's entire membership list."""
    c = get_registry_client(context)
    return c.replace_members(image_id, member_data)
def add_member(context, image_id, member_id, can_share=None):
    """Add a member to an image, optionally granting re-share rights."""
    c = get_registry_client(context)
    return c.add_member(image_id, member_id, can_share=can_share)
def delete_member(context, image_id, member_id):
    """Remove a member from an image."""
    c = get_registry_client(context)
    return c.delete_member(image_id, member_id)
| apache-2.0 |
madscatt/zazzie_1.5 | trunk/sassie/simulate/monte_carlo/extensions/pairs/setup_pairs.py | 4 | 1040 | '''
SASSIE Copyright (C) 2011 Joseph E. Curtis
This program comes with ABSOLUTELY NO WARRANTY;
This is free software, and you are welcome to redistribute it under certain
conditions; see http://www.gnu.org/licenses/gpl-3.0.html for details.
'''
# System imports
from distutils.core import *
from distutils import sysconfig
# Third-party modules - we depend on numpy for everything
import numpy
from numpy.distutils.core import Extension, setup
# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = numpy.get_include()
except AttributeError:
    # Very old numpy releases exposed the include path under a different name.
    numpy_include = numpy.get_numpy_include()
# simple extension module: Fortran source built via numpy.distutils (f2py).
pairs = Extension(name="pairs",sources=['./pairs.f'],
                   include_dirs = [numpy_include],
                   )
# NumyTypemapTests setup
setup( name = "PAIRS",
       description = "Module sets up overlap array",
       author = "Joseph E. Curtis",
       version = "0.1",
       ext_modules = [pairs]
       )
| gpl-3.0 |
zahodi/ansible | lib/ansible/modules/cloud/vmware/vmware_vm_facts.py | 48 | 3199 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: vmware_vm_facts
short_description: Return basic facts pertaining to a vSphere virtual machine guest
description:
- Return basic facts pertaining to a vSphere virtual machine guest
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Gather all registered virtual machines
local_action:
module: vmware_vm_facts
hostname: esxi_or_vcenter_ip_or_hostname
username: username
password: password
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getallvms.py
def get_all_virtual_machines(content):
    """Collect basic facts for every registered virtual machine.

    Returns a dict keyed by VM name, each value holding the guest OS name,
    power state, and IP address ("" when no address is reported).
    """
    facts = {}
    for vm in get_all_objs(content, [vim.VirtualMachine]):
        summary = vm.summary
        guest = summary.guest
        ip_address = ""
        if guest is not None and guest.ipAddress is not None:
            ip_address = guest.ipAddress
        facts[summary.config.name] = {
            "guest_fullname": summary.config.guestFullName,
            "power_state": summary.runtime.powerState,
            "ip_address": ip_address,
        }
    return facts
def main():
    """Module entry point: connect to vSphere and return VM facts."""
    argument_spec = vmware_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    # Fail fast if the pyVmomi dependency is missing on the target host.
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    try:
        content = connect_to_api(module)
        _virtual_machines = get_all_virtual_machines(content)
        module.exit_json(changed=False, virtual_machines=_virtual_machines)
    # Surface vSphere API faults with their own message text; fall back to a
    # generic string for anything else.
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| gpl-3.0 |
createwindow/pjsip-blf | tests/pjsua/mod_pres.py | 59 | 2860 | # $Id: mod_pres.py 2078 2008-06-27 21:12:12Z nanang $
import time
import imp
import sys
import inc_const as const
from inc_cfg import *
# Load configuration
# ARGS[1] is the per-test config module path supplied by the test driver.
cfg_file = imp.load_source("cfg_file", ARGS[1])
# Test body function
def test_func(t):
	"""Two-instance presence and IM scenario driven through the pjsua CLI.

	Both pjsua instances add each other as buddies, exchange presence
	state (online/offline toggles and an "On the phone" activity) and
	finally exchange an instant message, verifying every step through
	expected console output.
	"""
	u1 = t.process[0]
	uri1 = cfg_file.test_param.inst_params[0].uri
	# account index that receives the incoming subscription ("-1" = not known yet)
	acc1 = "-1"
	u2 = t.process[1]
	uri2 = cfg_file.test_param.inst_params[1].uri
	acc2 = "-1"
	# if have_reg then wait for couple of seconds for PUBLISH
	# to complete (just in case PUBLISH is used)
	if u1.inst_param.have_reg:
		time.sleep(1)
	if u2.inst_param.have_reg:
		time.sleep(1)
	# U1 adds U2 as buddy
	u1.send("+b")
	u1.send(uri2)
	u1.expect("Subscription state changed NULL --> SENT")
	u1.expect("Presence subscription.*is ACCEPTED")
	if not u2.inst_param.have_publish:
		# Process incoming SUBSCRIBE in U2
		# Finds out which account gets the subscription in U2
		line = u2.expect("pjsua_pres.*subscription.*using account")
		acc2 = line.split("using account ")[1]
	# wait until we've got Online notification
	u1.expect(uri2 + ".*Online")
	# Synchronize stdout
	u1.sync_stdout()
	u2.sync_stdout()
	# U2 adds U1 as buddy
	u2.send("+b")
	u2.send(uri1)
	u2.expect("Subscription state changed NULL --> SENT")
	u2.expect("Presence subscription.*is ACCEPTED")
	if not u1.inst_param.have_publish:
		# Process incoming SUBSCRIBE in U1
		# Finds out which account gets the subscription in U1
		line = u1.expect("pjsua_pres.*subscription.*using account")
		acc1 = line.split("using account ")[1]
	# wait until we've got Online notification
	u2.expect(uri1 + ".*Online")
	# Synchronize stdout
	u1.sync_stdout()
	u2.sync_stdout()
	# Set current account in both U1 and U2 so later commands
	# (status toggles, IM) act on the account that owns the subscription
	if acc1!="-1":
		u1.send(">")
		u1.send(acc1)
		u1.expect("Current account changed")
	if acc2!="-1":
		u2.send(">")
		u2.send(acc2)
		u2.expect("Current account changed")
	# Synchronize stdout
	u1.sync_stdout()
	u2.sync_stdout()
	# u2 toggles online status
	u2.send("t")
	u1.expect(uri2 + ".*status.*Offline")
	u2.expect("offline")
	# Synchronize stdout
	u1.sync_stdout()
	u2.sync_stdout()
	# u1 toggles online status
	u1.send("t")
	u2.expect(uri1 + ".*status.*Offline")
	u1.expect("offline")
	# Synchronize stdout
	u1.sync_stdout()
	u2.sync_stdout()
	# u2 set online status to On the phone
	u2.send("T")
	u2.send("3")
	u1.expect(uri2 + ".*status.*On the phone")
	u2.expect("On the phone")
	# Synchronize stdout
	u1.sync_stdout()
	u2.sync_stdout()
	# Synchronize stdout
	# NOTE(review): this sync block is duplicated in the original script;
	# harmless, kept as-is.
	u1.sync_stdout()
	u2.sync_stdout()
	# U1 send IM
	im_text = "Hello World from U1"
	u1.send("i")
	u1.send(uri2)
	u2.expect(" is typing")
	u1.send(im_text)
	u1.expect(im_text+".*delivered successfully")
	u2.expect("MESSAGE from.*"+im_text)
	# Synchronize stdout
	u1.sync_stdout()
	u2.sync_stdout()
# Here where it all comes together
# Expose the scenario to the pjsua test driver: attach the body function
# above to the test parameters loaded from the config module.
test = cfg_file.test_param
test.test_func = test_func
| gpl-2.0 |
Akylas/CouchPotatoServer | libs/werkzeug/test.py | 13 | 32877 | # -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import urlparse
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from cStringIO import StringIO
from cookielib import CookieJar
from urllib2 import Request as U2Request
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri, _unquote
from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
from werkzeug.utils import dump_cookie
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
                            boundary=None, charset='utf-8'):
    """Encode a dict of values (either strings or file descriptors or
    :class:`FileStorage` objects.) into a multipart encoded string stored
    in a file descriptor.

    :param values: dict or :class:`MultiDict` of form fields and files.
    :param use_tempfile: spill the body to a :class:`TemporaryFile` once it
                         grows beyond `threshold` bytes.
    :param threshold: byte size after which the in-memory buffer is swapped
                      for a temporary file.
    :param boundary: explicit multipart boundary; generated when `None`.
    :param charset: charset used to encode unicode field values.
    :return: tuple ``(stream, length, boundary)`` with the stream rewound
             to position 0.
    """
    if boundary is None:
        boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
    # Mutable closure state shared with write(): [stream, total_length,
    # on_disk].  A list is used because Python 2 has no `nonlocal`.
    _closure = [StringIO(), 0, False]
    if use_tempfile:
        def write(string):
            stream, total_length, on_disk = _closure
            if on_disk:
                stream.write(string)
            else:
                length = len(string)
                if length + _closure[1] <= threshold:
                    stream.write(string)
                else:
                    # The in-memory buffer grew past the threshold: move
                    # everything written so far into a real temporary file.
                    new_stream = TemporaryFile('wb+')
                    new_stream.write(stream.getvalue())
                    new_stream.write(string)
                    _closure[0] = new_stream
                    _closure[2] = True
                _closure[1] = total_length + length
    else:
        write = _closure[0].write
    if not isinstance(values, MultiDict):
        values = MultiDict(values)
    # NOTE: iterate with a distinct name -- the original code rebound
    # `values` inside this loop, shadowing the argument.
    for key, field_values in values.iterlists():
        for value in field_values:
            write('--%s\r\nContent-Disposition: form-data; name="%s"' %
                  (boundary, key))
            reader = getattr(value, 'read', None)
            if reader is not None:
                # File-like value: emit filename/content-type headers and
                # stream the payload out in chunks.
                filename = getattr(value, 'filename',
                                   getattr(value, 'name', None))
                content_type = getattr(value, 'content_type', None)
                if content_type is None:
                    content_type = filename and \
                        mimetypes.guess_type(filename)[0] or \
                        'application/octet-stream'
                if filename is not None:
                    write('; filename="%s"\r\n' % filename)
                else:
                    write('\r\n')
                write('Content-Type: %s\r\n\r\n' % content_type)
                while 1:
                    chunk = reader(16384)
                    if not chunk:
                        break
                    write(chunk)
            else:
                # Plain form field: encode unicode with the given charset.
                if isinstance(value, unicode):
                    value = value.encode(charset)
                write('\r\n\r\n' + str(value))
            write('\r\n')
    write('--%s--\r\n' % boundary)
    length = int(_closure[0].tell())
    _closure[0].seek(0)
    return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
    """Buffered sibling of `stream_encode_multipart`: encodes *values*
    entirely in memory and returns ``(boundary, data)`` where *data* is
    the complete multipart body as a bytestring.
    """
    stream, length, used_boundary = stream_encode_multipart(
        values, use_tempfile=False, boundary=boundary, charset=charset)
    data = stream.read()
    return used_boundary, data
def File(fd, filename=None, mimetype=None):
    """Deprecated backwards-compatibility shim around :class:`FileStorage`."""
    from warnings import warn
    message = ('werkzeug.test.File is deprecated, use the '
               'EnvironBuilder or FileStorage instead')
    warn(DeprecationWarning(message))
    return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
class _TestCookieResponse(object):
    """Minimal stand-in for ``httplib.HTTPResponse`` so that cookielib can
    read Set-Cookie headers out of our test responses.
    """

    def __init__(self, headers):
        self.headers = _TestCookieHeaders(headers)

    def info(self):
        # cookielib calls info() to obtain the header container
        return self.headers
class _TestCookieJar(CookieJar):
    """A cookielib.CookieJar modified to inject and read cookie headers from
    and to wsgi environments, and wsgi application responses.
    """
    def inject_wsgi(self, environ):
        """Inject the cookies as client headers into the server's wsgi
        environment.
        """
        cvals = []
        for cookie in self:
            cvals.append('%s=%s' % (cookie.name, cookie.value))
        if cvals:
            # only set the header when there is at least one cookie;
            # an empty HTTP_COOKIE value would be meaningless
            environ['HTTP_COOKIE'] = '; '.join(cvals)
    def extract_wsgi(self, environ, headers):
        """Extract the server's set-cookie headers as cookies into the
        cookie jar.
        """
        # cookielib needs a response-like and a request-like object;
        # both are faked here from the WSGI environ and response headers.
        self.extract_cookies(
            _TestCookieResponse(headers),
            U2Request(get_current_url(environ)),
        )
def _iter_data(data):
    """Yield ``(key, value)`` pairs for every entry in *data*.

    Accepts either a :class:`MultiDict` (every value of every key is
    yielded) or a plain dict whose values may be lists.  Used to iterate
    over the data passed to the :class:`EnvironBuilder`.
    """
    if isinstance(data, MultiDict):
        for key, value_list in data.iterlists():
            for value in value_list:
                yield key, value
        return
    for key, value_or_list in data.iteritems():
        if isinstance(value_or_list, list):
            for value in value_or_list:
                yield key, value
        else:
            yield key, value_or_list
class EnvironBuilder(object):
    """This class can be used to conveniently create a WSGI environment
    for testing purposes. It can be used to quickly create WSGI environments
    or request objects from arbitrary data.
    The signature of this class is also used in some other places as of
    Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
    :meth:`Client.open`). Because of this most of the functionality is
    available through the constructor alone.
    Files and regular form data can be manipulated independently of each
    other with the :attr:`form` and :attr:`files` attributes, but are
    passed with the same argument to the constructor: `data`.
    `data` can be any of these values:
    - a `str`: If it's a string it is converted into a :attr:`input_stream`,
      the :attr:`content_length` is set and you have to provide a
      :attr:`content_type`.
    - a `dict`: If it's a dict the keys have to be strings and the values
      any of the following objects:
      - a :class:`file`-like object. These are converted into
        :class:`FileStorage` objects automatically.
      - a tuple. The :meth:`~FileMultiDict.add_file` method is called
        with the tuple items as positional arguments.
    .. versionadded:: 0.6
       `path` and `base_url` can now be unicode strings that are encoded using
       the :func:`iri_to_uri` function.
    :param path: the path of the request. In the WSGI environment this will
                 end up as `PATH_INFO`. If the `query_string` is not defined
                 and there is a question mark in the `path` everything after
                 it is used as query string.
    :param base_url: the base URL is a URL that is used to extract the WSGI
                     URL scheme, host (server name + server port) and the
                     script root (`SCRIPT_NAME`).
    :param query_string: an optional string or dict with URL parameters.
    :param method: the HTTP method to use, defaults to `GET`.
    :param input_stream: an optional input stream. Do not specify this and
                         `data`. As soon as an input stream is set you can't
                         modify :attr:`args` and :attr:`files` unless you
                         set the :attr:`input_stream` to `None` again.
    :param content_type: The content type for the request. As of 0.5 you
                         don't have to provide this when specifying files
                         and form data via `data`.
    :param content_length: The content length for the request. You don't
                           have to specify this when providing data via
                           `data`.
    :param errors_stream: an optional error stream that is used for
                          `wsgi.errors`. Defaults to :data:`stderr`.
    :param multithread: controls `wsgi.multithread`. Defaults to `False`.
    :param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
    :param run_once: controls `wsgi.run_once`. Defaults to `False`.
    :param headers: an optional list or :class:`Headers` object of headers.
    :param data: a string or dict of form data. See explanation above.
    :param environ_base: an optional dict of environment defaults.
    :param environ_overrides: an optional dict of environment overrides.
    :param charset: the charset used to encode unicode data.
    """

    #: the server protocol to use. defaults to HTTP/1.1
    server_protocol = 'HTTP/1.1'

    #: the wsgi version to use. defaults to (1, 0)
    wsgi_version = (1, 0)

    #: the default request class for :meth:`get_request`
    request_class = BaseRequest

    def __init__(self, path='/', base_url=None, query_string=None,
                 method='GET', input_stream=None, content_type=None,
                 content_length=None, errors_stream=None, multithread=False,
                 multiprocess=False, run_once=False, headers=None, data=None,
                 environ_base=None, environ_overrides=None, charset='utf-8'):
        # a query string embedded in the path wins when no explicit
        # `query_string` argument was given
        if query_string is None and '?' in path:
            path, query_string = path.split('?', 1)
        self.charset = charset
        if isinstance(path, unicode):
            path = iri_to_uri(path, charset)
        self.path = path
        if base_url is not None:
            if isinstance(base_url, unicode):
                base_url = iri_to_uri(base_url, charset)
            else:
                base_url = url_fix(base_url, charset)
        self.base_url = base_url
        if isinstance(query_string, basestring):
            self.query_string = query_string
        else:
            if query_string is None:
                query_string = MultiDict()
            elif not isinstance(query_string, MultiDict):
                query_string = MultiDict(query_string)
            self.args = query_string
        self.method = method
        if headers is None:
            headers = Headers()
        elif not isinstance(headers, Headers):
            headers = Headers(headers)
        self.headers = headers
        self.content_type = content_type
        if errors_stream is None:
            errors_stream = sys.stderr
        self.errors_stream = errors_stream
        self.multithread = multithread
        self.multiprocess = multiprocess
        self.run_once = run_once
        self.environ_base = environ_base
        self.environ_overrides = environ_overrides
        self.input_stream = input_stream
        self.content_length = content_length
        self.closed = False
        if data:
            if input_stream is not None:
                raise TypeError('can\'t provide input stream and data')
            if isinstance(data, basestring):
                self.input_stream = StringIO(data)
                if self.content_length is None:
                    self.content_length = len(data)
            else:
                # dict/MultiDict data: split entries into files and form
                for key, value in _iter_data(data):
                    if isinstance(value, (tuple, dict)) or \
                       hasattr(value, 'read'):
                        self._add_file_from_data(key, value)
                    else:
                        self.form.setlistdefault(key).append(value)

    def _add_file_from_data(self, key, value):
        """Called in the EnvironBuilder to add files from the data dict."""
        if isinstance(value, tuple):
            self.files.add_file(key, *value)
        elif isinstance(value, dict):
            from warnings import warn
            warn(DeprecationWarning('it\'s no longer possible to pass dicts '
                                    'as `data`. Use tuples or FileStorage '
                                    'objects instead'), stacklevel=2)
            value = dict(value)
            mimetype = value.pop('mimetype', None)
            if mimetype is not None:
                value['content_type'] = mimetype
            self.files.add_file(key, **value)
        else:
            self.files.add_file(key, value)

    def _get_base_url(self):
        return urlparse.urlunsplit((self.url_scheme, self.host,
            self.script_root, '', '')).rstrip('/') + '/'

    def _set_base_url(self, value):
        if value is None:
            # default: plain http on localhost with an empty script root
            # (the original code assigned `scheme = 'http'` twice here)
            scheme = 'http'
            netloc = 'localhost'
            script_root = ''
        else:
            scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(value)
            if qs or anchor:
                raise ValueError('base url must not contain a query string '
                                 'or fragment')
        self.script_root = script_root.rstrip('/')
        self.host = netloc
        self.url_scheme = scheme

    base_url = property(_get_base_url, _set_base_url, doc='''
        The base URL is a URL that is used to extract the WSGI
        URL scheme, host (server name + server port) and the
        script root (`SCRIPT_NAME`).''')
    del _get_base_url, _set_base_url

    def _get_content_type(self):
        ct = self.headers.get('Content-Type')
        if ct is None and not self._input_stream:
            # auto-detect a sensible default for body-carrying methods
            if self.method in ('POST', 'PUT', 'PATCH'):
                if self._files:
                    return 'multipart/form-data'
                return 'application/x-www-form-urlencoded'
            return None
        return ct

    def _set_content_type(self, value):
        if value is None:
            self.headers.pop('Content-Type', None)
        else:
            self.headers['Content-Type'] = value

    content_type = property(_get_content_type, _set_content_type, doc='''
        The content type for the request. Reflected from and to the
        :attr:`headers`. Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.''')
    del _get_content_type, _set_content_type

    def _get_content_length(self):
        return self.headers.get('Content-Length', type=int)

    def _set_content_length(self, value):
        if value is None:
            self.headers.pop('Content-Length', None)
        else:
            self.headers['Content-Length'] = str(value)

    content_length = property(_get_content_length, _set_content_length, doc='''
        The content length as integer. Reflected from and to the
        :attr:`headers`. Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.''')
    del _get_content_length, _set_content_length

    def form_property(name, storage, doc):
        # helper executed at class-creation time to build the lazily
        # initialized `form` / `files` properties
        key = '_' + name
        def getter(self):
            if self._input_stream is not None:
                raise AttributeError('an input stream is defined')
            rv = getattr(self, key)
            if rv is None:
                rv = storage()
                setattr(self, key, rv)
            return rv
        def setter(self, value):
            self._input_stream = None
            setattr(self, key, value)
        return property(getter, setter, doc)

    form = form_property('form', MultiDict, doc='''
        A :class:`MultiDict` of form values.''')
    files = form_property('files', FileMultiDict, doc='''
        A :class:`FileMultiDict` of uploaded files. You can use the
        :meth:`~FileMultiDict.add_file` method to add new files to the
        dict.''')
    del form_property

    def _get_input_stream(self):
        return self._input_stream

    def _set_input_stream(self, value):
        self._input_stream = value
        self._form = self._files = None

    input_stream = property(_get_input_stream, _set_input_stream, doc='''
        An optional input stream. If you set this it will clear
        :attr:`form` and :attr:`files`.''')
    del _get_input_stream, _set_input_stream

    def _get_query_string(self):
        if self._query_string is None:
            if self._args is not None:
                return url_encode(self._args, charset=self.charset)
            return ''
        return self._query_string

    def _set_query_string(self, value):
        self._query_string = value
        self._args = None

    query_string = property(_get_query_string, _set_query_string, doc='''
        The query string. If you set this to a string :attr:`args` will
        no longer be available.''')
    del _get_query_string, _set_query_string

    def _get_args(self):
        if self._query_string is not None:
            raise AttributeError('a query string is defined')
        if self._args is None:
            self._args = MultiDict()
        return self._args

    def _set_args(self, value):
        self._query_string = None
        self._args = value

    args = property(_get_args, _set_args, doc='''
        The URL arguments as :class:`MultiDict`.''')
    del _get_args, _set_args

    @property
    def server_name(self):
        """The server name (read-only, use :attr:`host` to set)"""
        return self.host.split(':', 1)[0]

    @property
    def server_port(self):
        """The server port as integer (read-only, use :attr:`host` to set)"""
        pieces = self.host.split(':', 1)
        if len(pieces) == 2 and pieces[1].isdigit():
            return int(pieces[1])
        elif self.url_scheme == 'https':
            return 443
        return 80

    def __del__(self):
        try:
            self.close()
        except Exception:
            pass

    def close(self):
        """Closes all files. If you put real :class:`file` objects into the
        :attr:`files` dict you can call this method to automatically close
        them all in one go.
        """
        if self.closed:
            return
        try:
            files = self.files.itervalues()
        except AttributeError:
            files = ()
        for f in files:
            try:
                f.close()
            except Exception:
                # best-effort cleanup: ignore individual close failures
                pass
        self.closed = True

    def get_environ(self):
        """Return the built environ."""
        input_stream = self.input_stream
        content_length = self.content_length
        content_type = self.content_type
        if input_stream is not None:
            # measure the remaining stream length without consuming it
            start_pos = input_stream.tell()
            input_stream.seek(0, 2)
            end_pos = input_stream.tell()
            input_stream.seek(start_pos)
            content_length = end_pos - start_pos
        elif content_type == 'multipart/form-data':
            values = CombinedMultiDict([self.form, self.files])
            input_stream, content_length, boundary = \
                stream_encode_multipart(values, charset=self.charset)
            content_type += '; boundary="%s"' % boundary
        elif content_type == 'application/x-www-form-urlencoded':
            values = url_encode(self.form, charset=self.charset)
            content_length = len(values)
            input_stream = StringIO(values)
        else:
            input_stream = _empty_stream
        result = {}
        if self.environ_base:
            result.update(self.environ_base)
        def _path_encode(x):
            if isinstance(x, unicode):
                x = x.encode(self.charset)
            return _unquote(x)
        result.update({
            'REQUEST_METHOD':       self.method,
            'SCRIPT_NAME':          _path_encode(self.script_root),
            'PATH_INFO':            _path_encode(self.path),
            'QUERY_STRING':         self.query_string,
            'SERVER_NAME':          self.server_name,
            'SERVER_PORT':          str(self.server_port),
            'HTTP_HOST':            self.host,
            'SERVER_PROTOCOL':      self.server_protocol,
            'CONTENT_TYPE':         content_type or '',
            'CONTENT_LENGTH':       str(content_length or '0'),
            'wsgi.version':         self.wsgi_version,
            'wsgi.url_scheme':      self.url_scheme,
            'wsgi.input':           input_stream,
            'wsgi.errors':          self.errors_stream,
            'wsgi.multithread':     self.multithread,
            'wsgi.multiprocess':    self.multiprocess,
            'wsgi.run_once':        self.run_once
        })
        # request headers are translated into CGI-style HTTP_* keys
        for key, value in self.headers.to_list(self.charset):
            result['HTTP_%s' % key.upper().replace('-', '_')] = value
        if self.environ_overrides:
            result.update(self.environ_overrides)
        return result

    def get_request(self, cls=None):
        """Returns a request with the data. If the request class is not
        specified :attr:`request_class` is used.
        :param cls: The request wrapper to use.
        """
        if cls is None:
            cls = self.request_class
        return cls(self.get_environ())
class ClientRedirectError(Exception):
    """Raised by :class:`Client` when ``follow_redirects=True`` and a
    redirect loop is detected.
    """
class Client(object):
    """This class allows to send requests to a wrapped application.
    The response wrapper can be a class or factory function that takes
    three arguments: app_iter, status and headers. The default response
    wrapper just returns a tuple.
    Example::
        class ClientResponse(BaseResponse):
            ...
        client = Client(MyApplication(), response_wrapper=ClientResponse)
    The use_cookies parameter indicates whether cookies should be stored and
    sent for subsequent requests. This is True by default, but passing False
    will disable this behaviour.
    If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True` as if not no external redirects
    are allowed.
    .. versionadded:: 0.5
       `use_cookies` is new in this version. Older versions did not provide
       builtin cookie support.
    """
    def __init__(self, application, response_wrapper=None, use_cookies=True,
                 allow_subdomain_redirects=False):
        self.application = application
        self.response_wrapper = response_wrapper
        # cookie_jar is None when cookie handling is disabled
        if use_cookies:
            self.cookie_jar = _TestCookieJar()
        else:
            self.cookie_jar = None
        self.allow_subdomain_redirects = allow_subdomain_redirects
    def set_cookie(self, server_name, key, value='', max_age=None,
                   expires=None, path='/', domain=None, secure=None,
                   httponly=False, charset='utf-8'):
        """Sets a cookie in the client's cookie jar. The server name
        is required and has to match the one that is also passed to
        the open call.
        """
        assert self.cookie_jar is not None, 'cookies disabled'
        header = dump_cookie(key, value, max_age, expires, path, domain,
                             secure, httponly, charset)
        # fake a response from `server_name` so the jar stores the cookie
        environ = create_environ(path, base_url='http://' + server_name)
        headers = [('Set-Cookie', header)]
        self.cookie_jar.extract_wsgi(environ, headers)
    def delete_cookie(self, server_name, key, path='/', domain=None):
        """Deletes a cookie in the test client."""
        # an expired cookie with max_age=0 removes the stored one
        self.set_cookie(server_name, key, expires=0, max_age=0,
                        path=path, domain=domain)
    def run_wsgi_app(self, environ, buffered=False):
        """Runs the wrapped WSGI app with the given environment."""
        if self.cookie_jar is not None:
            self.cookie_jar.inject_wsgi(environ)
        rv = run_wsgi_app(self.application, environ, buffered=buffered)
        if self.cookie_jar is not None:
            # rv is (app_iter, status, headers); headers carry Set-Cookie
            self.cookie_jar.extract_wsgi(environ, rv[2])
        return rv
    def resolve_redirect(self, response, new_location, environ, buffered=False):
        """Resolves a single redirect and triggers the request again
        directly on this redirect client.
        """
        scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(new_location)
        base_url = urlparse.urlunsplit((scheme, netloc, '', '', '')).rstrip('/') + '/'
        # compare hostname parts to decide whether the redirect target is
        # the same host (or an allowed subdomain) as the original request
        cur_server_name = netloc.split(':', 1)[0].split('.')
        real_server_name = get_host(environ).rsplit(':', 1)[0].split('.')
        if self.allow_subdomain_redirects:
            allowed = cur_server_name[-len(real_server_name):] == real_server_name
        else:
            allowed = cur_server_name == real_server_name
        if not allowed:
            raise RuntimeError('%r does not support redirect to '
                               'external targets' % self.__class__)
        # For redirect handling we temporarily disable the response
        # wrapper. This is not threadsafe but not a real concern
        # since the test client must not be shared anyways.
        old_response_wrapper = self.response_wrapper
        self.response_wrapper = None
        try:
            return self.open(path=script_root, base_url=base_url,
                             query_string=qs, as_tuple=True,
                             buffered=buffered)
        finally:
            self.response_wrapper = old_response_wrapper
    def open(self, *args, **kwargs):
        """Takes the same arguments as the :class:`EnvironBuilder` class with
        some additions: You can provide a :class:`EnvironBuilder` or a WSGI
        environment as only argument instead of the :class:`EnvironBuilder`
        arguments and two optional keyword arguments (`as_tuple`, `buffered`)
        that change the type of the return value or the way the application is
        executed.
        .. versionchanged:: 0.5
           If a dict is provided as file in the dict for the `data` parameter
           the content type has to be called `content_type` now instead of
           `mimetype`. This change was made for consistency with
           :class:`werkzeug.FileWrapper`.
           The `follow_redirects` parameter was added to :func:`open`.
        Additional parameters:
        :param as_tuple: Returns a tuple in the form ``(environ, result)``
        :param buffered: Set this to True to buffer the application run.
                         This will automatically close the application for
                         you as well.
        :param follow_redirects: Set this to True if the `Client` should
                                 follow HTTP redirects.
        """
        as_tuple = kwargs.pop('as_tuple', False)
        buffered = kwargs.pop('buffered', False)
        follow_redirects = kwargs.pop('follow_redirects', False)
        environ = None
        if not kwargs and len(args) == 1:
            # single positional argument: either a prepared builder or a
            # ready-made WSGI environ dict
            if isinstance(args[0], EnvironBuilder):
                environ = args[0].get_environ()
            elif isinstance(args[0], dict):
                environ = args[0]
        if environ is None:
            builder = EnvironBuilder(*args, **kwargs)
            try:
                environ = builder.get_environ()
            finally:
                builder.close()
        response = self.run_wsgi_app(environ, buffered=buffered)
        # handle redirects
        redirect_chain = []
        while 1:
            status_code = int(response[1].split(None, 1)[0])
            if status_code not in (301, 302, 303, 305, 307) \
               or not follow_redirects:
                break
            new_location = Headers.linked(response[2])['location']
            new_redirect_entry = (new_location, status_code)
            # a repeated (location, status) pair means we are looping
            if new_redirect_entry in redirect_chain:
                raise ClientRedirectError('loop detected')
            redirect_chain.append(new_redirect_entry)
            environ, response = self.resolve_redirect(response, new_location,
                                                      environ, buffered=buffered)
        if self.response_wrapper is not None:
            response = self.response_wrapper(*response)
        if as_tuple:
            return environ, response
        return response
    def get(self, *args, **kw):
        """Like open but method is enforced to GET."""
        kw['method'] = 'GET'
        return self.open(*args, **kw)
    def patch(self, *args, **kw):
        """Like open but method is enforced to PATCH."""
        kw['method'] = 'PATCH'
        return self.open(*args, **kw)
    def post(self, *args, **kw):
        """Like open but method is enforced to POST."""
        kw['method'] = 'POST'
        return self.open(*args, **kw)
    def head(self, *args, **kw):
        """Like open but method is enforced to HEAD."""
        kw['method'] = 'HEAD'
        return self.open(*args, **kw)
    def put(self, *args, **kw):
        """Like open but method is enforced to PUT."""
        kw['method'] = 'PUT'
        return self.open(*args, **kw)
    def delete(self, *args, **kw):
        """Like open but method is enforced to DELETE."""
        kw['method'] = 'DELETE'
        return self.open(*args, **kw)
    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.application
        )
def create_environ(*args, **kwargs):
    """Create a new WSGI environ dict based on the values passed.  The first
    parameter should be the path of the request which defaults to '/'.  The
    second one can either be an absolute path (in that case the host is
    localhost:80) or a full path to the request with scheme, netloc port and
    the path to the script.
    This accepts the same arguments as the :class:`EnvironBuilder`
    constructor.
    .. versionchanged:: 0.5
       This function is now a thin wrapper over :class:`EnvironBuilder` which
       was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
       and `charset` parameters were added.
    """
    builder = EnvironBuilder(*args, **kwargs)
    try:
        environ = builder.get_environ()
    finally:
        # always release any file handles the builder may hold
        builder.close()
    return environ
def run_wsgi_app(app, environ, buffered=False):
    """Return a tuple in the form (app_iter, status, headers) of the
    application output. This works best if you pass it an application that
    returns an iterator all the time.
    Sometimes applications may use the `write()` callable returned
    by the `start_response` function. This tries to resolve such edge
    cases automatically. But if you don't get the expected output you
    should set `buffered` to `True` which enforces buffering.
    If passed an invalid WSGI application the behavior of this function is
    undefined. Never pass non-conforming WSGI applications to this function.
    :param app: the application to execute.
    :param buffered: set to `True` to enforce buffering.
    :return: tuple in the form ``(app_iter, status, headers)``
    """
    environ = _get_environ(environ)
    # filled by start_response: [status, headers]
    response = []
    # data the application pushed through the legacy write() callable
    buffer = []
    def start_response(status, headers, exc_info=None):
        if exc_info is not None:
            # Python 2 three-argument raise: re-raise with the original traceback
            raise exc_info[0], exc_info[1], exc_info[2]
        response[:] = [status, headers]
        return buffer.append
    app_iter = app(environ, start_response)
    # when buffering we emit the close call early and convert the
    # application iterator into a regular list
    if buffered:
        close_func = getattr(app_iter, 'close', None)
        try:
            app_iter = list(app_iter)
        finally:
            if close_func is not None:
                close_func()
    # otherwise we iterate the application iter until we have
    # a response, chain the already received data with the already
    # collected data and wrap it in a new `ClosingIterator` if
    # we have a close callable.
    else:
        while not response:
            buffer.append(app_iter.next())
        if buffer:
            close_func = getattr(app_iter, 'close', None)
            app_iter = chain(buffer, app_iter)
            if close_func is not None:
                app_iter = ClosingIterator(app_iter, close_func)
    return app_iter, response[0], response[1]
| gpl-3.0 |
foreverfaint/scrapy | scrapy/commands/runspider.py | 8 | 3114 | import sys
import os
from importlib import import_module
from scrapy.utils.spider import iter_spider_classes
from scrapy.command import ScrapyCommand
from scrapy.exceptions import UsageError
from scrapy.utils.conf import arglist_to_dict
def _import_file(filepath):
abspath = os.path.abspath(filepath)
dirname, file = os.path.split(abspath)
fname, fext = os.path.splitext(file)
if fext != '.py':
raise ValueError("Not a Python source file: %s" % abspath)
if dirname:
sys.path = [dirname] + sys.path
try:
module = import_module(fname)
finally:
if dirname:
sys.path.pop(0)
return module
class Command(ScrapyCommand):
    """``scrapy runspider``: run a spider from a single .py file without a project."""
    # this command deliberately works outside of a scrapy project
    requires_project = False
    def syntax(self):
        return "[options] <spider_file>"
    def short_desc(self):
        return "Run a self-contained spider (without creating a project)"
    def long_desc(self):
        return "Run the spider defined in the given file"
    def add_options(self, parser):
        """Register -a (spider args), -o (output file) and -t (output format)."""
        ScrapyCommand.add_options(self, parser)
        parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE", \
            help="set spider argument (may be repeated)")
        parser.add_option("-o", "--output", metavar="FILE", \
            help="dump scraped items into FILE (use - for stdout)")
        parser.add_option("-t", "--output-format", metavar="FORMAT", default="jsonlines", \
            help="format to use for dumping items with -o (default: %default)")
    def process_options(self, args, opts):
        """Validate the parsed options and push feed settings into overrides."""
        ScrapyCommand.process_options(self, args, opts)
        try:
            opts.spargs = arglist_to_dict(opts.spargs)
        except ValueError:
            raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)
        if opts.output:
            # '-' means export the scraped items to stdout
            if opts.output == '-':
                self.settings.overrides['FEED_URI'] = 'stdout:'
            else:
                self.settings.overrides['FEED_URI'] = opts.output
            valid_output_formats = self.settings['FEED_EXPORTERS'].keys() + self.settings['FEED_EXPORTERS_BASE'].keys()
            if opts.output_format not in valid_output_formats:
                raise UsageError('Invalid/unrecognized output format: %s, Expected %s' % (opts.output_format,valid_output_formats))
            self.settings.overrides['FEED_FORMAT'] = opts.output_format
    def run(self, args, opts):
        """Import the spider file, instantiate the last spider class found
        in it and run it through the crawler process.
        """
        if len(args) != 1:
            raise UsageError()
        filename = args[0]
        if not os.path.exists(filename):
            raise UsageError("File not found: %s\n" % filename)
        try:
            module = _import_file(filename)
        except (ImportError, ValueError) as e:
            raise UsageError("Unable to load %r: %s\n" % (filename, e))
        spclasses = list(iter_spider_classes(module))
        if not spclasses:
            raise UsageError("No spider found in file: %s\n" % filename)
        # when several spiders are defined, the last one in the file wins
        spider = spclasses.pop()(**opts.spargs)
        crawler = self.crawler_process.create_crawler()
        crawler.crawl(spider)
        self.crawler_process.start()
| bsd-3-clause |
kevinr/750book-web | 750book-web-env/lib/python2.7/site-packages/celery/loaders/base.py | 2 | 6936 | # -*- coding: utf-8 -*-
"""
celery.loaders.base
~~~~~~~~~~~~~~~~~~~
Loader base class.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import importlib
import os
import re
import traceback
import warnings
from anyjson import deserialize
from datetime import datetime
from ..datastructures import DictAttribute
from ..exceptions import ImproperlyConfigured
from ..utils import (cached_property, get_cls_by_name,
import_from_cwd as _import_from_cwd)
from ..utils.functional import maybe_list
from ..utils.encoding import safe_str
BUILTIN_MODULES = frozenset(["celery.task"])
ERROR_ENVVAR_NOT_SET = (
"""The environment variable %r is not set,
and as such the configuration could not be loaded.
Please set this variable and make it point to
a configuration module.""")
class BaseLoader(object):
"""The base class for loaders.
Loaders handles,
* Reading celery client/worker configurations.
* What happens when a task starts?
See :meth:`on_task_init`.
* What happens when the worker starts?
See :meth:`on_worker_init`.
* What modules are imported to find tasks?
"""
builtin_modules = BUILTIN_MODULES
configured = False
error_envvar_not_set = ERROR_ENVVAR_NOT_SET
override_backends = {}
worker_initialized = False
_conf = None
def __init__(self, app=None, **kwargs):
from ..app import app_or_default
self.app = app_or_default(app)
self.task_modules = set()
def now(self):
return datetime.utcnow()
def on_task_init(self, task_id, task):
"""This method is called before a task is executed."""
pass
def on_process_cleanup(self):
"""This method is called after a task is executed."""
pass
def on_worker_init(self):
"""This method is called when the worker (:program:`celeryd`)
starts."""
pass
def on_worker_process_init(self):
"""This method is called when a child process starts."""
pass
def import_task_module(self, module):
self.task_modules.add(module)
return self.import_from_cwd(module)
def import_module(self, module, package=None):
return importlib.import_module(module, package=package)
def import_from_cwd(self, module, imp=None, package=None):
return _import_from_cwd(module,
self.import_module if imp is None else imp,
package=package)
def import_default_modules(self):
imports = set(maybe_list(self.conf.get("CELERY_IMPORTS") or ()))
return [self.import_task_module(module)
for module in imports | self.builtin_modules]
def init_worker(self):
if not self.worker_initialized:
self.worker_initialized = True
self.on_worker_init()
def init_worker_process(self):
self.on_worker_process_init()
def config_from_envvar(self, variable_name, silent=False):
module_name = os.environ.get(variable_name)
if not module_name:
if silent:
return False
raise ImproperlyConfigured(self.error_envvar_not_set % module_name)
return self.config_from_object(module_name, silent=silent)
def config_from_object(self, obj, silent=False):
if isinstance(obj, basestring):
try:
if "." in obj:
obj = get_cls_by_name(obj, imp=self.import_from_cwd)
else:
obj = self.import_from_cwd(obj)
except (ImportError, AttributeError):
if silent:
return False
raise
if not hasattr(obj, "__getitem__"):
obj = DictAttribute(obj)
self._conf = obj
return True
def cmdline_config_parser(self, args, namespace="celery",
re_type=re.compile(r"\((\w+)\)"),
extra_types={"json": deserialize},
override_types={"tuple": "json",
"list": "json",
"dict": "json"}):
from ..app.defaults import Option, NAMESPACES
namespace = namespace.upper()
typemap = dict(Option.typemap, **extra_types)
def getarg(arg):
"""Parse a single configuration definition from
the command line."""
## find key/value
# ns.key=value|ns_key=value (case insensitive)
key, value = arg.split('=', 1)
key = key.upper().replace(".", "_")
## find namespace.
# .key=value|_key=value expands to default namespace.
if key[0] == '_':
ns, key = namespace, key[1:]
else:
# find namespace part of key
ns, key = key.split('_', 1)
ns_key = (ns and ns + "_" or "") + key
# (type)value makes cast to custom type.
cast = re_type.match(value)
if cast:
type_ = cast.groups()[0]
type_ = override_types.get(type_, type_)
value = value[len(cast.group()):]
value = typemap[type_](value)
else:
try:
value = NAMESPACES[ns][key].to_python(value)
except ValueError, exc:
# display key name in error message.
raise ValueError("%r: %s" % (ns_key, exc))
return ns_key, value
return dict(map(getarg, args))
def mail_admins(self, subject, body, fail_silently=False,
sender=None, to=None, host=None, port=None,
user=None, password=None, timeout=None,
use_ssl=False, use_tls=False):
try:
message = self.mail.Message(sender=sender, to=to,
subject=safe_str(subject),
body=safe_str(body))
mailer = self.mail.Mailer(host=host, port=port,
user=user, password=password,
timeout=timeout, use_ssl=use_ssl,
use_tls=use_tls)
mailer.send(message)
except Exception, exc:
if not fail_silently:
raise
warnings.warn(self.mail.SendmailWarning(
"Mail could not be sent: %r %r\n%r" % (
exc, {"To": to, "Subject": subject},
traceback.format_stack())))
@property
def conf(self):
"""Loader configuration."""
if self._conf is None:
self._conf = self.read_configuration()
return self._conf
@cached_property
def mail(self):
return self.import_module("celery.utils.mail")
| mit |
hhorii/cassandra | pylib/cqlshlib/helptopics.py | 91 | 4532 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CQL3HelpTopics(object):
    """Maps ``HELP <topic>`` names onto the anchor names of the matching
    sections in the CQL3 documentation.

    Every ``help_*`` method names one topic; aliases are plain class
    attributes bound to an existing method so ``dir()`` still finds them.
    """

    def get_help_topics(self):
        """List every available topic name (``help_`` methods, unprefixed)."""
        return [t[5:] for t in dir(self) if t.startswith('help_')]

    def get_help_topic(self, topic):
        """Resolve *topic* (case-insensitive) to its documentation anchor."""
        return getattr(self, 'help_' + topic.lower())()

    # --- data types ---
    def help_types(self): return 'types'
    def help_timestamp(self): return 'usingtimestamps'
    def help_date(self): return 'usingdates'
    def help_time(self): return 'usingtime'
    def help_blob(self): return 'constants'
    def help_uuid(self): return 'constants'
    def help_boolean(self): return 'constants'
    def help_int(self): return 'constants'
    def help_counter(self): return 'counters'
    def help_text(self): return 'constants'
    help_ascii = help_text

    # --- data manipulation ---
    def help_use(self): return 'useStmt'
    def help_insert(self): return 'insertStmt'
    def help_update(self): return 'updateStmt'
    def help_delete(self): return 'deleteStmt'
    def help_select(self): return 'selectStmt'
    def help_json(self): return 'json'
    def help_select_json(self): return 'selectJson'
    def help_insert_json(self): return 'insertJson'
    def help_batch(self): return 'batchStmt'
    help_begin = help_batch
    help_apply = help_batch

    # --- schema definition ---
    def help_create_keyspace(self): return 'createKeyspaceStmt'
    def help_alter_keyspace(self): return 'alterKeyspaceStmt'
    def help_drop_keyspace(self): return 'dropKeyspaceStmt'
    def help_create_table(self): return 'createTableStmt'
    help_create_columnfamily = help_create_table
    def help_alter_table(self): return 'alterTableStmt'
    def help_drop_table(self): return 'dropTableStmt'
    help_drop_columnfamily = help_drop_table
    def help_create_index(self): return 'createIndexStmt'
    def help_drop_index(self): return 'dropIndexStmt'
    def help_truncate(self): return 'truncateStmt'
    def help_create_type(self): return 'createTypeStmt'
    def help_alter_type(self): return 'alterTypeStmt'
    def help_drop_type(self): return 'dropTypeStmt'

    # --- functions and aggregates ---
    def help_create_function(self): return 'createFunctionStmt'
    def help_drop_function(self): return 'dropFunctionStmt'
    def help_functions(self): return 'functions'
    def help_create_aggregate(self): return 'createAggregateStmt'
    def help_drop_aggregate(self): return 'dropAggregateStmt'
    def help_aggregates(self): return 'aggregates'

    # --- triggers and materialized views ---
    def help_create_trigger(self): return 'createTriggerStmt'
    def help_drop_trigger(self): return 'dropTriggerStmt'
    def help_create_materialized_view(self): return 'createMVStmt'
    def help_alter_materialized_view(self): return 'alterMVStmt'
    def help_drop_materialized_view(self): return 'dropMVStmt'
    def help_keywords(self): return 'appendixA'

    # --- users, roles and permissions ---
    def help_create_user(self): return 'createUserStmt'
    def help_alter_user(self): return 'alterUserStmt'
    def help_drop_user(self): return 'dropUserStmt'
    def help_list_users(self): return 'listUsersStmt'
    def help_create_role(self): return 'createRoleStmt'
    def help_drop_role(self): return 'dropRoleStmt'
    def help_list_roles(self): return 'listRolesStmt'
    def help_permissions(self): return 'permissions'
    def help_list_permissions(self): return 'listPermissionsStmt'
    def help_grant(self): return 'grantRoleStmt'
    def help_revoke(self): return 'revokeRoleStmt'
| apache-2.0 |
Lyrositor/moul-scripts | Python/system/encodings/hex_codec.py | 528 | 2309 | """ Python 'hex_codec' Codec - 2-digit hex content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs, binascii
### Codec APIs
def hex_encode(input, errors='strict'):
    """Hex-encode *input* and return a tuple of
    (output object, length consumed).

    Only the 'strict' error handling mode is supported by this codec.
    """
    assert errors == 'strict'
    encoded = binascii.b2a_hex(input)
    return (encoded, len(input))
def hex_decode(input, errors='strict'):
    """Decode hex-encoded *input* and return a tuple of
    (output object, length consumed).

    *input* must expose the read-buffer interface; strings, buffer
    objects and memory mapped files all qualify.  Only the 'strict'
    error handling mode is supported by this codec.
    """
    assert errors == 'strict'
    decoded = binascii.a2b_hex(input)
    return (decoded, len(input))
class Codec(codecs.Codec):
    """Stateless encoder/decoder pair for the hex codec."""

    def encode(self, input, errors='strict'):
        """Delegate to the module-level hex_encode()."""
        return hex_encode(input, errors)

    def decode(self, input, errors='strict'):
        """Delegate to the module-level hex_decode()."""
        return hex_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental hex encoder; only 'strict' error handling is supported."""

    def encode(self, input, final=False):
        assert self.errors == 'strict'
        # Hex encoding is stateless, so each chunk can be converted alone.
        encoded = binascii.b2a_hex(input)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental hex decoder; only 'strict' error handling is supported."""

    def decode(self, input, final=False):
        assert self.errors == 'strict'
        # Hex decoding is stateless, so each chunk can be converted alone.
        decoded = binascii.a2b_hex(input)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for the hex codec; behavior comes entirely from Codec."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for the hex codec; behavior comes entirely from Codec."""
### encodings module API
def getregentry():
    """Return the CodecInfo entry used to register the 'hex' codec."""
    info = codecs.CodecInfo(
        name='hex',
        encode=hex_encode,
        decode=hex_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
    return info
| gpl-3.0 |
synergeticsedx/deployment-wipro | openedx/core/lib/block_structure/factory.py | 7 | 3851 | """
Module for factory class for BlockStructure objects.
"""
from .block_structure import BlockStructureModulestoreData, BlockStructureBlockData
class BlockStructureFactory(object):
    """
    Factory class for BlockStructure objects.
    """
    @classmethod
    def create_from_modulestore(cls, root_block_usage_key, modulestore):
        """
        Build a block structure by walking the modulestore from
        root_block_usage_key.

        Arguments:
            root_block_usage_key (UsageKey) - usage key of the root of
                the structure to build.
            modulestore (ModuleStoreRead) - store holding the xBlock
                data for every block reachable from the root.

        Returns:
            BlockStructureModulestoreData - structure populated with the
            instantiated xBlocks reachable from root_block_usage_key.

        Raises:
            xmodule.modulestore.exceptions.ItemNotFoundError if no block
            exists for root_block_usage_key in the modulestore.
        """
        block_structure = BlockStructureModulestoreData(root_block_usage_key)
        seen = set()

        def _visit(xblock):
            """
            Depth-first walk adding the xBlock and its descendants.
            """
            # A block can be reached along several paths (DAGs), so
            # skip anything already recorded.
            if xblock.location in seen:
                return
            seen.add(xblock.location)
            block_structure._add_xblock(xblock.location, xblock)  # pylint: disable=protected-access
            for child in xblock.get_children():
                block_structure._add_relation(xblock.location, child.location)  # pylint: disable=protected-access
                _visit(child)

        _visit(modulestore.get_item(root_block_usage_key, depth=None, lazy=False))
        return block_structure

    @classmethod
    def create_from_cache(cls, root_block_usage_key, block_structure_cache):
        """
        Return the cached block structure rooted at root_block_usage_key,
        or None when the cache holds no entry for it.

        root_block_usage_key must equal the key previously passed to
        serialize_to_cache.

        Arguments:
            root_block_usage_key (UsageKey) - root of the structure to
                deserialize.
            block_structure_cache (BlockStructureCache) - cache to read
                from.
        """
        return block_structure_cache.get(root_block_usage_key)

    @classmethod
    def create_new(cls, root_block_usage_key, block_relations, transformer_data, block_data_map):
        """
        Assemble a BlockStructureBlockData directly from its parts.
        """
        structure = BlockStructureBlockData(root_block_usage_key)
        structure.transformer_data = transformer_data
        structure._block_relations = block_relations  # pylint: disable=protected-access
        structure._block_data_map = block_data_map  # pylint: disable=protected-access
        return structure
| agpl-3.0 |
lizan/envoy | tools/type_whisperer/type_whisperer.py | 3 | 2450 | # protoc plugin to map from FileDescriptorProtos to a tools.type_whisperer.Types
# proto. This is the type information for a single .proto, consumed by
# typedb_gen.py.
from tools.api_proto_plugin import plugin
from tools.api_proto_plugin import visitor
from tools.type_whisperer.types_pb2 import Types
from udpa.annotations import migrate_pb2
from udpa.annotations import status_pb2
class TypeWhispererVisitor(visitor.Visitor):
    """Visitor to compute type information from a FileDescriptor proto.

    See visitor.Visitor for visitor method docs comments.
    """

    def __init__(self):
        super(TypeWhispererVisitor, self).__init__()
        self._types = Types()

    def VisitService(self, service_proto, type_context):
        # Services carry no type information of interest here.
        pass

    def VisitEnum(self, enum_proto, type_context):
        desc = self._types.types[type_context.name]
        # Any deprecated enum value forces an upgrade in the next version.
        desc.next_version_upgrade = any(v.options.deprecated for v in enum_proto.value)
        desc.deprecated_type = type_context.deprecated

    def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):
        desc = self._types.types[type_context.name]
        desc.map_entry = msg_proto.options.map_entry
        desc.deprecated_type = type_context.deprecated
        # Fully-qualified field type names (leading '.') are dependencies.
        deps = {f.type_name[1:] for f in msg_proto.field if f.type_name.startswith('.')}
        if any(f.options.deprecated for f in msg_proto.field):
            desc.next_version_upgrade = True
        desc.type_dependencies.extend(deps)

    def VisitFile(self, file_proto, type_context, services, msgs, enums):
        move_to = ''
        if file_proto.options.HasExtension(migrate_pb2.file_migrate):
            move_to = file_proto.options.Extensions[migrate_pb2.file_migrate].move_to_package
        for type_desc in self._types.types.values():
            type_desc.qualified_package = file_proto.package
            type_desc.proto_path = file_proto.name
            type_desc.active = file_proto.options.Extensions[
                status_pb2.file_status].package_version_status == status_pb2.ACTIVE
            if move_to:
                type_desc.next_version_package = move_to
                type_desc.next_version_upgrade = True
        # Return in text proto format. This makes things easier to debug, these
        # don't need to be compact as they are only interim build artifacts.
        return str(self._types)
def Main():
    """Entry point: run the proto plugin with the type-whisperer visitor."""
    output = plugin.DirectOutputDescriptor('.types.pb_text', TypeWhispererVisitor)
    plugin.Plugin([output])


if __name__ == '__main__':
    Main()
| apache-2.0 |
SatSim/linux-xlnx-xenomai | tools/perf/tests/attr.py | 1266 | 9424 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
    """Raised when a test description does not match the observed events."""

    def __init__(self, test, msg):
        self.test = test
        self.msg = msg

    def getMsg(self):
        return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
    """Raised when the command under test is unsupported on this host."""

    def __init__(self, test):
        self.test = test

    def getMsg(self):
        return '\'%s\'' % self.test.path
class Event(dict):
    """A single perf_event_attr record, keyed by attr field name.

    Values come from parsed test description sections; `terms` lists
    every attr field that takes part in expected-vs-observed comparison.
    """
    terms = [
        'cpu',
        'flags',
        'type',
        'size',
        'config',
        'sample_period',
        'sample_type',
        'read_format',
        'disabled',
        'inherit',
        'pinned',
        'exclusive',
        'exclude_user',
        'exclude_kernel',
        'exclude_hv',
        'exclude_idle',
        'mmap',
        'comm',
        'freq',
        'inherit_stat',
        'enable_on_exec',
        'task',
        'watermark',
        'precise_ip',
        'mmap_data',
        'sample_id_all',
        'exclude_host',
        'exclude_guest',
        'exclude_callchain_kernel',
        'exclude_callchain_user',
        'wakeup_events',
        'bp_type',
        'config1',
        'config2',
        'branch_sample_type',
        'sample_regs_user',
        'sample_stack_user',
    ]

    def add(self, data):
        """Merge (key, value) pairs into this event."""
        for key, val in data:
            log.debug(" %s = %s" % (key, val))
            self[key] = val

    def __init__(self, name, data, base):
        """Build event `name`: parent (`base`) settings first, then the
        section's own `data` on top."""
        log.debug(" Event %s" % name)
        self.name = name
        self.group = ''
        self.add(base)
        self.add(data)

    def compare_data(self, a, b):
        """Compare two field values; each side may hold multiple
        '|'-separated alternatives and '*' matches anything."""
        # Allow multiple values in assignment separated by '|'
        a_list = a.split('|')
        b_list = b.split('|')
        for a_item in a_list:
            for b_item in b_list:
                if (a_item == b_item):
                    return True
                elif (a_item == '*') or (b_item == '*'):
                    return True
        return False

    def equal(self, other):
        """True when every term in `terms` matches between the events."""
        for t in Event.terms:
            # BUGFIX: check the term's presence *before* reading values.
            # The original formatted self[t]/other[t] for the debug
            # message first, raising KeyError for a missing term and
            # making the membership guard below unreachable.
            if t not in self or t not in other:
                return False
            log.debug(" [%s] %s %s" % (t, self[t], other[t]))
            if not self.compare_data(self[t], other[t]):
                return False
        return True

    def diff(self, other):
        """Log a warning for every term whose values do not match."""
        for t in Event.terms:
            if t not in self or t not in other:
                continue
            if not self.compare_data(self[t], other[t]):
                log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
class Test(object):
    """One attr test: runs a perf command with PERF_TEST_ATTR pointing at
    a temporary directory, then compares the event files written there
    against the expectations loaded from the test description file."""
    def __init__(self, path, options):
        """Load the [config] section and the expected events from the
        test description file at `path`."""
        parser = ConfigParser.SafeConfigParser()
        parser.read(path)
        log.warning("running '%s'" % path)
        self.path = path
        self.test_dir = options.test_dir
        self.perf = options.perf
        self.command = parser.get('config', 'command')
        self.args = parser.get('config', 'args')
        # 'ret' is optional and defaults to success (0).
        try:
            self.ret = parser.get('config', 'ret')
        except:
            self.ret = 0
        self.expect = {}
        self.result = {}
        log.debug(" loading expected events");
        self.load_events(path, self.expect)
    def is_event(self, name):
        """Return True when section `name` describes an event record."""
        if name.find("event") == -1:
            return False
        else:
            return True
    def load_events(self, path, events):
        """Parse every event section in `path` into the `events` dict,
        loading a parent ('event:base') section first as a base."""
        parser_event = ConfigParser.SafeConfigParser()
        parser_event.read(path)
        # The event record section header contains 'event' word,
        # optionaly followed by ':' allowing to load 'parent
        # event' first as a base
        for section in filter(self.is_event, parser_event.sections()):
            parser_items = parser_event.items(section);
            base_items = {}
            # Read parent event if there's any
            if (':' in section):
                base = section[section.index(':') + 1:]
                parser_base = ConfigParser.SafeConfigParser()
                parser_base.read(self.test_dir + '/' + base)
                base_items = parser_base.items('event')
            e = Event(section, parser_items, base_items)
            events[section] = e
    def run_cmd(self, tempdir):
        """Run the configured perf command with PERF_TEST_ATTR set to
        `tempdir`; run() later reads the event files from there.
        Raises Unsup when the exit status differs from 'ret'."""
        cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
              self.perf, self.command, tempdir, self.args)
        ret = os.WEXITSTATUS(os.system(cmd))
        log.info(" '%s' ret %d " % (cmd, ret))
        if ret != int(self.ret):
            raise Unsup(self)
    def compare(self, expect, result):
        """Match every event in `expect` against `result`, then verify
        that group leader relationships line up.  Raises Fail on any
        mismatch."""
        match = {}
        log.debug(" compare");
        # For each expected event find all matching
        # events in result. Fail if there's not any.
        for exp_name, exp_event in expect.items():
            exp_list = []
            log.debug(" matching [%s]" % exp_name)
            for res_name, res_event in result.items():
                log.debug(" to [%s]" % res_name)
                if (exp_event.equal(res_event)):
                    exp_list.append(res_name)
                    log.debug(" ->OK")
                else:
                    log.debug(" ->FAIL");
            log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not any matching event - fail
            if (not exp_list):
                exp_event.diff(res_event)
                raise Fail(self, 'match failure');
            match[exp_name] = exp_list
        # For each defined group in the expected events
        # check we match the same group in the result.
        for exp_name, exp_event in expect.items():
            group = exp_event.group
            if (group == ''):
                continue
            for res_name in match[exp_name]:
                res_group = result[res_name].group
                if res_group not in match[group]:
                    raise Fail(self, 'group failure')
                log.debug(" group: [%s] matches group leader %s" %
                          (exp_name, str(match[group])))
        log.debug(" matched")
    def resolve_groups(self, events):
        """Translate each event's numeric 'group_fd' into the name of
        its group leader event (the one whose 'fd' matches)."""
        for name, event in events.items():
            group_fd = event['group_fd'];
            if group_fd == '-1':
                continue;
            for iname, ievent in events.items():
                if (ievent['fd'] == group_fd):
                    event.group = iname
                    log.debug('[%s] has group leader [%s]' % (name, iname))
                    break;
    def run(self):
        """Execute the test end to end inside a temporary directory,
        which is always removed afterwards."""
        tempdir = tempfile.mkdtemp();
        try:
            # run the test script
            self.run_cmd(tempdir);
            # load events expectation for the test
            log.debug(" loading result events");
            for f in glob.glob(tempdir + '/event*'):
                self.load_events(f, self.result);
            # resolve group_fd to event names
            self.resolve_groups(self.expect);
            self.resolve_groups(self.result);
            # do the expectation - results matching - both ways
            self.compare(self.expect, self.result)
            self.compare(self.result, self.expect)
        finally:
            # cleanup
            shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
    """Configure the module-level 'test' logger; the -v count selects
    the verbosity (1=WARNING, 2=INFO, 3+=DEBUG, else CRITICAL)."""
    global log
    thresholds = {1: logging.WARNING, 2: logging.INFO}
    level = thresholds.get(verbose, logging.CRITICAL)
    if verbose >= 3:
        level = logging.DEBUG
    log = logging.getLogger('test')
    log.setLevel(level)
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('%(message)s'))
    log.addHandler(handler)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
    """Command-line entry point: parse the options and run selected tests.

    Exits with -1 when the arguments are wrong, when no test directory is
    given, or when a test raises Fail; exits 0 on success.
    """
    parser = optparse.OptionParser(usage=USAGE)
    parser.add_option("-t", "--test",
                      action="store", type="string", dest="test")
    parser.add_option("-d", "--test-dir",
                      action="store", type="string", dest="test_dir")
    parser.add_option("-p", "--perf",
                      action="store", type="string", dest="perf")
    parser.add_option("-v", "--verbose",
                      action="count", dest="verbose")

    options, args = parser.parse_args()
    if args:
        parser.error('FAILED wrong arguments %s' % ' '.join(args))
        # unreachable: parser.error() raises SystemExit; kept for safety
        return -1

    setup_log(options.verbose)

    if not options.test_dir:
        # print statements are Python-2-only; the parenthesized form below
        # is valid on both Python 2 and 3
        print('FAILED no -d option specified')
        sys.exit(-1)

    if not options.test:
        # default glob pattern: run every test file
        options.test = 'test*'

    try:
        run_tests(options)
    # 'except X, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.x
    except Fail as obj:
        print("FAILED %s" % obj.getMsg())
        sys.exit(-1)

    sys.exit(0)
# Allow the harness to be executed directly as a script.
if __name__ == '__main__':
    main()
| gpl-2.0 |
enriquesanchezb/practica_utad_2016 | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euctwfreq.py | 3133 | 34872 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ratio = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# Threshold distribution ratio used by the EUC-TW analyser; per the table
# above, roughly 25% of the ideal ratio for the 512 most frequent chars.
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75

# Char to FreqOrder table ,
# Number of frequency ranks that matter for detection; entries in
# EUCTWCharToFreqOrder at or beyond this rank are ignored (see the
# "no interest for detection" marker inside the table below).
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
| apache-2.0 |
babycaseny/meld | help/C/buildwebhelp.py | 1 | 2578 | #! /usr/bin/python3
import glob
import os
import subprocess
import sys
from bs4 import BeautifulSoup
JEKYLL_HEADER = """---
layout: help
title: Meld - Help
---
"""
SCSS_HEADER = """
#help-content {
border-left: solid 1px #e0e0df;
border-right: solid 1px #e0e0df;
background-color: #ffffff;
}
#help-content div.body {
border: none !important; }
#help-content div.headbar {
margin: 10px !important;
}
#help-content div.footbar {
margin: 10px !important;
}
#help-content {
.title {
line-height: 1em;
}
h1 {
font-family: sans-serif;
font-weight: bold;
text-shadow: none;
color: black;
}
h2 {
font-family: sans-serif;
text-shadow: none;
color: black;
}
"""
SCSS_FOOTER = """
}
"""
def munge_html(filename):
    """Rewrite a gnome-doc HTML file in place as a Jekyll page.

    Extracts the contents of the <body> element with BeautifulSoup and
    prepends the Jekyll front-matter header.  Exits with status 1 if
    `filename` does not exist.
    """
    if not os.path.exists(filename):
        print("File not found: " + filename, file=sys.stderr)
        sys.exit(1)

    # Read and write as UTF-8 explicitly; the platform default locale
    # encoding can mangle non-ASCII help content on some systems.
    with open(filename, encoding="utf-8") as f:
        contents = f.read()

    soup = BeautifulSoup(contents, "lxml")
    body = "".join([str(tag) for tag in soup.body])
    body = JEKYLL_HEADER + body

    print("Rewriting " + filename)
    with open(filename, "w", encoding="utf-8") as f:
        f.write(body)
def munge_css(filename):
    """Scope a gnome-doc CSS file under #help-content, in place.

    Wraps the file's contents in SCSS_HEADER/SCSS_FOOTER and recompiles
    the result with scss via sassify().  Exits with status 1 if
    `filename` does not exist.
    """
    if not os.path.exists(filename):
        print("File not found: " + filename, file=sys.stderr)
        sys.exit(1)

    # Read and write as UTF-8 explicitly instead of relying on the
    # locale default encoding.
    with open(filename, encoding="utf-8") as f:
        contents = f.read()

    contents = SCSS_HEADER + contents + SCSS_FOOTER
    new_css = sassify(contents)

    print("Rewriting " + filename)
    with open(filename, 'w', encoding="utf-8") as f:
        f.write(new_css)
def sassify(scss_string):
    """Compile an SCSS string to CSS using the external `scss` tool.

    Returns the compiled CSS.  Raises subprocess.CalledProcessError if
    scss exits non-zero; the previous implementation ignored the exit
    status and silently returned partial or empty output.
    """
    result = subprocess.run(
        ['scss', '-s'],
        input=scss_string,
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True,
    )
    return result.stdout
if __name__ == "__main__":
    # Refuse to clobber the output of a previous run.
    if os.path.exists('html'):
        print("Refusing to overwrite existing html/ folder", file=sys.stderr)
        sys.exit(1)

    # Generate stock CSS and HTML from the Mallard .page sources.
    print("Generating CSS with gnome-doc-tool...", file=sys.stderr)
    subprocess.check_call(['gnome-doc-tool', 'css'])
    print("Generating HTML with gnome-doc-tool...", file=sys.stderr)
    subprocess.check_call(['gnome-doc-tool', 'html', '-c', 'index.css',
                           '--copy-graphics', '*.page'])

    # Post-process each generated page for Jekyll and move it into html/.
    os.mkdir('html')
    for filename in glob.glob('*.html'):
        munge_html(filename)
        os.rename(filename, os.path.join('html', filename))

    # Scope/recompile the stylesheet and move it alongside the pages.
    munge_css('index.css')
    os.rename('index.css', os.path.join('html', 'index.css'))

    print("Embeddable documentation written to html/", file=sys.stderr)
| gpl-2.0 |
ansible/ansible | lib/ansible/plugins/lookup/items.py | 29 | 1804 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
name: items
author: Michael DeHaan
version_added: historical
short_description: list of items
description:
- this lookup returns a list of items given to it, if any of the top level items is also a list it will flatten it, but it will not recurse
notes:
- this is the standard lookup used for loops in most examples
- check out the 'flattened' lookup for recursive flattening
- if you do not want flattening nor any other transformation look at the 'list' lookup.
options:
_terms:
description: list of items
required: True
"""
EXAMPLES = """
- name: "loop through list"
debug:
msg: "An item: {{ item }}"
with_items:
- 1
- 2
- 3
- name: add several users
user:
name: "{{ item }}"
groups: "wheel"
state: present
with_items:
- testuser1
- testuser2
- name: "loop through list from a variable"
debug:
msg: "An item: {{ item }}"
with_items: "{{ somelist }}"
- name: more complex items to add several users
user:
name: "{{ item.name }}"
uid: "{{ item.uid }}"
groups: "{{ item.groups }}"
state: present
with_items:
- { name: testuser1, uid: 1002, groups: "wheel, staff" }
- { name: testuser2, uid: 1003, groups: staff }
"""
RETURN = """
_raw:
description:
- once flattened list
type: list
"""
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
    """Lookup that returns its terms as a single flattened list."""

    def run(self, terms, **kwargs):
        # Flatten one level of nesting so a list of lists becomes a
        # single flat list (the standard with_items behaviour).
        flattened = self._flatten(terms)
        return flattened
| gpl-3.0 |
KuroeKurose/gem5 | src/arch/x86/isa/insts/general_purpose/control_transfer/jump.py | 30 | 5493 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# Copyright (c) 2012-2013 AMD
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop JMP_I
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t1
limm t2, imm
wrip t1, t2
};
def macroop JMP_R
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
wripi reg, 0
};
def macroop JMP_M
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
ld t1, seg, sib, disp
wripi t1, 0
};
def macroop JMP_P
{
# Make the default data size of jumps 64 bits in 64 bit mode
.adjust_env oszIn64Override
rdip t7
ld t1, seg, riprel, disp
wripi t1, 0
};
def macroop JMP_FAR_M
{
limm t1, 0, dataSize=8
limm t2, 0, dataSize=8
lea t1, seg, sib, disp, dataSize=asz
ld t2, seg, [1, t0, t1], dsz
ld t1, seg, [1, t0, t1]
br rom_label("jmpFarWork")
};
def macroop JMP_FAR_P
{
limm t1, 0, dataSize=8
limm t2, 0, dataSize=8
rdip t7, dataSize=asz
lea t1, seg, riprel, disp, dataSize=asz
ld t2, seg, [1, t0, t1], dsz
ld t1, seg, [1, t0, t1]
br rom_label("jmpFarWork")
};
def macroop JMP_FAR_I
{
# Put the whole far pointer into a register.
limm t2, imm, dataSize=8
# Figure out the width of the offset.
limm t3, dsz, dataSize=8
slli t3, t3, 3, dataSize=8
# Get the offset into t1.
mov t1, t0, t2
# Get the selector into t2.
srl t2, t2, t3, dataSize=8
mov t2, t0, t2, dataSize=2
br rom_label("jmpFarWork")
};
def rom
{
extern jmpFarWork:
# t1 has the offset and t2 has the new selector.
# This is intended to run in protected mode.
andi t0, t2, 0xFC, flags=(EZF,), dataSize=2
fault "std::make_shared<GeneralProtection>(0)", flags=(CEZF,)
andi t3, t2, 0xF8, dataSize=8
andi t0, t2, 0x4, flags=(EZF,), dataSize=2
br rom_local_label("farJmpGlobalDescriptor"), flags=(CEZF,)
ld t4, tsl, [1, t0, t3], dataSize=8, addressSize=8, atCPL0=True
br rom_local_label("farJmpProcessDescriptor")
farJmpGlobalDescriptor:
ld t4, tsg, [1, t0, t3], dataSize=8, addressSize=8, atCPL0=True
farJmpProcessDescriptor:
rcri t0, t4, 13, flags=(ECF,), dataSize=2
br rom_local_label("farJmpSystemDescriptor"), flags=(nCECF,)
chks t2, t4, CSCheck, dataSize=8
wrdl cs, t4, t2, dataSize=4
wrsel cs, t2, dataSize=4
wrip t0, t1, dataSize=4
eret
farJmpSystemDescriptor:
panic "Far jumps to system descriptors aren't implemented"
eret
};
def macroop JMP_FAR_REAL_M
{
lea t1, seg, sib, disp, dataSize=asz
ld t2, seg, [1, t0, t1], dsz
ld t1, seg, [1, t0, t1]
zexti t3, t1, 15, dataSize=8
slli t3, t3, 4, dataSize=8
wrsel cs, t1, dataSize=2
wrbase cs, t3, dataSize=8
wrip t0, t2, dataSize=asz
};
def macroop JMP_FAR_REAL_P
{
panic "Real mode far jump executed in 64 bit mode!"
};
def macroop JMP_FAR_REAL_I
{
# Put the whole far pointer into a register.
limm t2, imm, dataSize=8
# Figure out the width of the offset.
limm t3, dsz, dataSize=8
slli t3, t3, 3, dataSize=8
# Get the selector into t1.
srl t1, t2, t3, dataSize=8
mov t1, t0, t1, dataSize=2
# And get the offset into t2
mov t2, t0, t2
slli t3, t1, 4, dataSize=8
wrsel cs, t1, dataSize=2
wrbase cs, t3, dataSize=8
wrip t0, t2, dataSize=asz
};
'''
| bsd-3-clause |
nordri/check_domains | lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/package_index.py | 59 | 30447 | """PyPI and direct package downloading"""
import cStringIO
import httplib
import os.path
import random
import re
import shutil
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
from pkg_resources import *
from distutils import log
from distutils.errors import DistutilsError
try:
from hashlib import md5
except ImportError:
from md5 import md5
from fnmatch import translate
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$')
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
PYPI_MD5 = re.compile(
'<a href="([^"#]+)">([^<]+)</a>\n\s+\\(<a (?:title="MD5 hash"\n\s+)'
'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
'interpret_distro_name',
]
_SOCKET_TIMEOUT = 15
def parse_bdist_wininst(name):
    """Return (base, py_version, platform) parsed from a bdist_wininst
    installer filename, or (None, None, None) if `name` is not a
    recognized ``.exe`` name.
    """
    lower = name.lower()
    if not lower.endswith('.exe'):
        return None, None, None
    # Unversioned win32 installer: "<base>.win32.exe"
    if lower.endswith('.win32.exe'):
        return name[:-10], None, 'win32'
    # Versioned win32 installer: "<base>.win32-pyX.Y.exe" (16 trailing chars)
    if lower.startswith('.win32-py', -16):
        return name[:-16], name[-7:-4], 'win32'
    # Unversioned win-amd64 installer: "<base>.win-amd64.exe"
    if lower.endswith('.win-amd64.exe'):
        return name[:-14], None, 'win-amd64'
    # Versioned win-amd64 installer: "<base>.win-amd64-pyX.Y.exe"
    if lower.startswith('.win-amd64-py', -20):
        return name[:-20], name[-7:-4], 'win-amd64'
    return None, None, None
def egg_info_for_url(url):
    """Return (basename, fragment) for a distribution download URL."""
    parsed = urlparse.urlparse(url)
    fragment = parsed[5]
    # Final path component, with %xx escapes decoded.
    base = urllib2.unquote(parsed[2].split('/')[-1])
    # An unquoted '#' in the decoded name overrides the parsed fragment.
    if '#' in base:
        base, fragment = base.split('#', 1)
    return base, fragment
def distros_for_url(url, metadata=None):
    """Yield egg or source distribution objects that might be found at a URL"""
    base, fragment = egg_info_for_url(url)
    # Distributions implied by the filename part of the URL.
    for dist in distros_for_location(url, base, metadata):
        yield dist
    if not fragment:
        return
    # A "#egg=name-version" fragment marks a checkout distribution.
    match = EGG_FRAGMENT.match(fragment)
    if match:
        egg_name = match.group(1)
        for dist in interpret_distro_name(
                url, egg_name, metadata, precedence=CHECKOUT_DIST):
            yield dist
def distros_for_location(location, basename, metadata=None):
    """Yield egg or source distribution objects based on basename"""
    if basename.endswith('.egg.zip'):
        # Zipped egg: treat it as the bare .egg name.
        basename = basename[:-4]
    if basename.endswith('.egg') and '-' in basename:
        # Egg filenames encode name/version unambiguously.
        return [Distribution.from_location(location, basename, metadata)]
    if basename.endswith('.exe'):
        win_base, py_ver, platform = parse_bdist_wininst(basename)
        if win_base is not None:
            return interpret_distro_name(
                location, win_base, metadata, py_ver, BINARY_DIST, platform
            )
    # Source archives (.zip, .tgz, ...): strip the extension and guess
    # at the possible name/version splits.
    for ext in EXTENSIONS:
        if basename.endswith(ext):
            stem = basename[:-len(ext)]
            return interpret_distro_name(location, stem, metadata)
    return []  # no recognized extension
def distros_for_filename(filename, metadata=None):
    """Yield possible egg or source distribution objects based on a filename"""
    location = normalize_path(filename)
    basename = os.path.basename(filename)
    return distros_for_location(location, basename, metadata)
def interpret_distro_name(location, basename, metadata,
                          py_version=None, precedence=SOURCE_DIST,
                          platform=None):
    """Generate alternative interpretations of a source distro name.

    Because names like "adns-python-1.1.0" are ambiguous as to where the
    project name ends and the version begins, yield one Distribution per
    possible split; spurious splits compare lower than real version
    numbers and are effectively ignored downstream.

    Note: if `location` is a filesystem filename, call
    ``pkg_resources.normalize_path()`` on it before passing it here.
    """
    parts = basename.split('-')
    if not py_version:
        # A 5-char "py2.X" chunk after the first two parts marks a
        # bdist_dumb, not an sdist -- yield nothing in that case.
        for chunk in parts[2:]:
            if len(chunk) == 5 and chunk.startswith('py2.'):
                return
    for split in range(1, len(parts) + 1):
        name = '-'.join(parts[:split])
        version = '-'.join(parts[split:])
        yield Distribution(
            location, metadata, name, version,
            py_version=py_version, precedence=precedence,
            platform=platform
        )
REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
def find_external_links(url, page):
    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
    # Scan anchor tags whose rel attribute lists homepage/download.
    # NOTE(review): the `in rels` tests rely on Python 2's map()
    # returning a list; under Python 3 the first test would consume
    # the iterator.
    for match in REL.finditer(page):
        tag, rel = match.groups()
        rels = map(str.strip, rel.lower().split(','))
        if 'homepage' in rels or 'download' in rels:
            for match in HREF.finditer(tag):
                yield urlparse.urljoin(url, htmldecode(match.group(1)))

    # PyPI also renders these links as table rows; yield the first href
    # following each header cell.
    for tag in ("<th>Home Page", "<th>Download URL"):
        pos = page.find(tag)
        if pos!=-1:
            match = HREF.search(page,pos)
            if match:
                yield urlparse.urljoin(url, htmldecode(match.group(1)))
user_agent = "Python-urllib/%s distribute/%s" % (
sys.version[:3], require('distribute')[0].version
)
class PackageIndex(Environment):
"""A distribution index that scans web pages for download URLs"""
def __init__(self, index_url="http://pypi.python.org/simple", hosts=('*',),
*args, **kw
):
Environment.__init__(self,*args,**kw)
self.index_url = index_url + "/"[:not index_url.endswith('/')]
self.scanned_urls = {}
self.fetched_urls = {}
self.package_pages = {}
self.allows = re.compile('|'.join(map(translate,hosts))).match
self.to_scan = []
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
map(self.add, dists)
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
if f is None: return
self.fetched_urls[url] = self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
if isinstance(f, urllib2.HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
f.close()
for match in HREF.finditer(page):
link = urlparse.urljoin(base, htmldecode(match.group(1)))
self.process_url(link)
if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
page = self.process_index(url, page)
def process_filename(self, fn, nested=False):
# process filenames or directories
if not os.path.exists(fn):
self.warn("Not found: %s", fn)
return
if os.path.isdir(fn) and not nested:
path = os.path.realpath(fn)
for item in os.listdir(path):
self.process_filename(os.path.join(path,item), True)
dists = distros_for_filename(fn)
if dists:
self.debug("Found: %s", fn)
map(self.add, dists)
def url_ok(self, url, fatal=False):
s = URL_SCHEME(url)
if (s and s.group(1).lower()=='file') or self.allows(urlparse.urlparse(url)[1]):
return True
msg = "\nLink to % s ***BLOCKED*** by --allow-hosts\n"
if fatal:
raise DistutilsError(msg % url)
else:
self.warn(msg, url)
def scan_egg_links(self, search_path):
for item in search_path:
if os.path.isdir(item):
for entry in os.listdir(item):
if entry.endswith('.egg-link'):
self.scan_egg_link(item, entry)
def scan_egg_link(self, path, entry):
lines = filter(None, map(str.strip, open(os.path.join(path, entry))))
if len(lines)==2:
for dist in find_distributions(os.path.join(path, lines[0])):
dist.location = os.path.join(path, *lines)
dist.precedence = SOURCE_DIST
self.add(dist)
def process_index(self,url,page):
"""Process the contents of a PyPI page"""
def scan(link):
# Process a URL to see if it's for a package page
if link.startswith(self.index_url):
parts = map(
urllib2.unquote, link[len(self.index_url):].split('/')
)
if len(parts)==2 and '#' not in parts[1]:
# it's a package page, sanitize and index it
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(),{})[link] = True
return to_filename(pkg), to_filename(ver)
return None, None
# process an index page into the package-page index
for match in HREF.finditer(page):
try:
scan( urlparse.urljoin(url, htmldecode(match.group(1))) )
except ValueError:
pass
pkg, ver = scan(url) # ensure this page is in the page index
if pkg:
# process individual package page
for new_url in find_external_links(url, page):
# Process the found URL
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url+='#egg=%s-%s' % (pkg,ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1,3,2), page
)
else:
return "" # no sense double-scanning non-package pages
def need_version_info(self, url):
self.scan_all(
"Page at %s links to .py file(s) without version info; an index "
"scan is required.", url
)
def scan_all(self, msg=None, *args):
if self.index_url not in self.fetched_urls:
if msg: self.warn(msg,*args)
self.info(
"Scanning index of all packages (this may take a while)"
)
self.scan_url(self.index_url)
def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.unsafe_name+'/')
if not self.package_pages.get(requirement.key):
# Fall back to safe version of the name
self.scan_url(self.index_url + requirement.project_name+'/')
if not self.package_pages.get(requirement.key):
# We couldn't find the target package, so search the index page too
self.not_found_in_index(requirement)
for url in list(self.package_pages.get(requirement.key,())):
# scan each page that might be related to the desired package
self.scan_url(url)
def obtain(self, requirement, installer=None):
self.prescan(); self.find_packages(requirement)
for dist in self[requirement.key]:
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement,installer)
def check_md5(self, cs, info, filename, tfp):
if re.match('md5=[0-9a-f]{32}$', info):
self.debug("Validating md5 checksum for %s", filename)
if cs.hexdigest()<>info[4:]:
tfp.close()
os.unlink(filename)
raise DistutilsError(
"MD5 validation failed for "+os.path.basename(filename)+
"; possible download problem?"
)
def add_find_links(self, urls):
"""Add `urls` to the list that will be prescanned for searches"""
for url in urls:
if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
):
# then go ahead and process it now
self.scan_url(url)
else:
# otherwise, defer retrieval till later
self.to_scan.append(url)
def prescan(self):
"""Scan urls scheduled for prescanning (e.g. --find-links)"""
if self.to_scan:
map(self.scan_url, self.to_scan)
self.to_scan = None # from now on, go ahead and process immediately
def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (self.warn,
"Couldn't find index page for %r (maybe misspelled?)")
meth(msg, requirement.unsafe_name)
self.scan_all()
def download(self, spec, tmpdir):
"""Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object). If it is the URL
of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
automatically created alongside the downloaded file.
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method returns the location of
a matching distribution (possibly after downloading it to `tmpdir`).
If `spec` is a locally existing file or directory name, it is simply
returned unchanged. If `spec` is a URL, it is downloaded to a subpath
of `tmpdir`, and the local filename is returned. Various errors may be
raised if a problem occurs during downloading.
"""
if not isinstance(spec,Requirement):
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
found = self._download_url(scheme.group(1), spec, tmpdir)
base, fragment = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found,fragment,tmpdir)
return found
elif os.path.exists(spec):
# Existing file or directory, just return it
return spec
else:
try:
spec = Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" %
(spec,)
)
return getattr(self.fetch_distribution(spec, tmpdir),'location',None)
def fetch_distribution(self,
requirement, tmpdir, force_scan=False, source=False, develop_ok=False,
local_index=None
):
"""Obtain a distribution suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
If necessary, or if the `force_scan` flag is set, the requirement is
searched for in the (online) package index as well as the locally
installed packages. If a distribution matching `requirement` is found,
the returned distribution's ``location`` is the value you would have
gotten from calling the ``download()`` method with the matching
distribution's URL or filename. If no matching distribution is found,
``None`` is returned.
If the `source` flag is set, only source distributions and source
checkout links will be considered. Unless the `develop_ok` flag is
set, development and system eggs (i.e., those using the ``.egg-info``
format) will be ignored.
"""
# process a Requirement
self.info("Searching for %s", requirement)
skipped = {}
dist = None
def find(req, env=None):
if env is None:
env = self
# Find a matching distribution; may be called more than once
for dist in env[req.key]:
if dist.precedence==DEVELOP_DIST and not develop_ok:
if dist not in skipped:
self.warn("Skipping development or system egg: %s",dist)
skipped[dist] = 1
continue
if dist in req and (dist.precedence<=SOURCE_DIST or not source):
self.info("Best match: %s", dist)
return dist.clone(
location=self.download(dist.location, tmpdir)
)
if force_scan:
self.prescan()
self.find_packages(requirement)
dist = find(requirement)
if local_index is not None:
dist = dist or find(requirement, local_index)
if dist is None and self.to_scan is not None:
self.prescan()
dist = find(requirement)
if dist is None and not force_scan:
self.find_packages(requirement)
dist = find(requirement)
if dist is None:
self.warn(
"No local packages or download links found for %s%s",
(source and "a source distribution of " or ""),
requirement,
)
return dist
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
"""Obtain a file suitable for fulfilling `requirement`
DEPRECATED; use the ``fetch_distribution()`` method now instead. For
backward compatibility, this routine is identical but returns the
``location`` of the downloaded distribution instead of a distribution
object.
"""
dist = self.fetch_distribution(requirement,tmpdir,force_scan,source)
if dist is not None:
return dist.location
return None
def gen_setup(self, filename, fragment, tmpdir):
match = EGG_FRAGMENT.match(fragment)
dists = match and [d for d in
interpret_distro_name(filename, match.group(1), None) if d.version
] or []
if len(dists)==1: # unambiguous ``#egg`` fragment
basename = os.path.basename(filename)
# Make sure the file has been downloaded to the temp dir.
if os.path.dirname(filename) != tmpdir:
dst = os.path.join(tmpdir, basename)
from setuptools.command.easy_install import samefile
if not samefile(filename, dst):
shutil.copy2(filename, dst)
filename=dst
file = open(os.path.join(tmpdir, 'setup.py'), 'w')
file.write(
"from setuptools import setup\n"
"setup(name=%r, version=%r, py_modules=[%r])\n"
% (
dists[0].project_name, dists[0].version,
os.path.splitext(basename)[0]
)
)
file.close()
return filename
elif match:
raise DistutilsError(
"Can't unambiguously interpret project/version identifier %r; "
"any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment,dists)
)
else:
raise DistutilsError(
"Can't process plain .py files without an '#egg=name-version'"
" suffix to enable automatic setup script generation."
)
dl_blocksize = 8192
def _download_to(self, url, filename):
self.info("Downloading %s", url)
# Download the file
fp, tfp, info = None, None, None
try:
if '#' in url:
url, info = url.split('#', 1)
fp = self.open_url(url)
if isinstance(fp, urllib2.HTTPError):
raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code,fp.msg)
)
cs = md5()
headers = fp.info()
blocknum = 0
bs = self.dl_blocksize
size = -1
if "content-length" in headers:
# Some servers return multiple Content-Length headers :(
content_length = headers.get("Content-Length")
size = int(content_length)
self.reporthook(url, filename, blocknum, bs, size)
tfp = open(filename,'wb')
while True:
block = fp.read(bs)
if block:
cs.update(block)
tfp.write(block)
blocknum += 1
self.reporthook(url, filename, blocknum, bs, size)
else:
break
if info: self.check_md5(cs, info, filename, tfp)
return headers
finally:
if fp: fp.close()
if tfp: tfp.close()
def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
def open_url(self, url, warning=None):
if url.startswith('file:'):
return local_open(url)
try:
return open_with_auth(url)
except (ValueError, httplib.InvalidURL), v:
msg = ' '.join([str(arg) for arg in v.args])
if warning:
self.warn(warning, msg)
else:
raise DistutilsError('%s %s' % (url, msg))
except urllib2.HTTPError, v:
return v
except urllib2.URLError, v:
if warning:
self.warn(warning, v.reason)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v.reason))
except httplib.BadStatusLine, v:
if warning:
self.warn(warning, v.line)
else:
raise DistutilsError('%s returned a bad status line. '
'The server might be down, %s' % \
(url, v.line))
except httplib.HTTPException, v:
if warning:
self.warn(warning, v)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v))
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
#
name = filter(None,urlparse.urlparse(url)[2].split('/'))
if name:
name = name[-1]
while '..' in name:
name = name.replace('..','.').replace('\\','_')
else:
name = "__downloaded__" # default if URL has no path contents
if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
filename = os.path.join(tmpdir,name)
# Download the file
#
if scheme=='svn' or scheme.startswith('svn+'):
return self._download_svn(url, filename)
elif scheme=='file':
return urllib.url2pathname(urlparse.urlparse(url)[2])
else:
self.url_ok(url, True) # raises error if not allowed
return self._attempt_download(url, filename)
    def scan_url(self, url):
        """Process ``url`` for links; the ``True`` flag presumably forces
        retrieval regardless of fetch filters — confirm against
        ``process_url``'s signature."""
        self.process_url(url, True)
def _attempt_download(self, url, filename):
headers = self._download_to(url, filename)
if 'html' in headers.get('content-type','').lower():
return self._download_html(url, headers, filename)
else:
return filename
def _download_html(self, url, headers, filename):
file = open(filename)
for line in file:
if line.strip():
# Check for a subversion index page
if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
# it's a subversion index page:
file.close()
os.unlink(filename)
return self._download_svn(url, filename)
break # not an index page
file.close()
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at "+url)
def _download_svn(self, url, filename):
url = url.split('#',1)[0] # remove any fragment for svn's sake
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout -q %s %s" % (url, filename))
return filename
    def debug(self, msg, *args):
        """Log ``msg`` at debug level via the distutils-style ``log`` module."""
        log.debug(msg, *args)
    def info(self, msg, *args):
        """Log ``msg`` at info level."""
        log.info(msg, *args)
    def warn(self, msg, *args):
        """Log ``msg`` at warning level."""
        log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# reference, a hexadecimal numeric reference, or a named reference).
# ``entity_sub(repl, text)`` substitutes every such entity in ``text``.
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
    """Convert an integer code point to a character.

    Non-integer values (already-decoded strings) are passed through
    unchanged.  Code points above 255 need ``unichr`` because Python 2's
    ``chr`` only covers a single byte.
    """
    if isinstance(c, int):
        return unichr(c) if c > 255 else chr(c)
    return c
def decode_entity(match):
    """Replacement callback for ``entity_sub``.

    ``match.group(1)`` is the entity body: ``#xHH`` (hex), ``#NN``
    (decimal), or a named entity.  Unknown names are left as the original
    matched text.
    """
    ref = match.group(1)
    if ref.startswith('#x'):
        codepoint = int(ref[2:], 16)
    elif ref.startswith('#'):
        codepoint = int(ref[1:])
    else:
        # Imported lazily, only when a named entity is actually seen.
        from htmlentitydefs import name2codepoint
        codepoint = name2codepoint.get(ref, match.group(0))
    return uchr(codepoint)
def htmldecode(text):
    """Decode HTML entities in the given text."""
    # Delegates to ``entity_sub`` with ``decode_entity`` as the replacement
    # function; handles decimal, hexadecimal and named entity references.
    return entity_sub(decode_entity, text)
def socket_timeout(timeout=15):
    """Decorator factory: run the wrapped callable under a socket
    default timeout of ``timeout`` seconds, restoring the previous
    default afterwards (even on error)."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            saved = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            try:
                return func(*args, **kwargs)
            finally:
                # Restore whatever timeout was in effect before the call.
                socket.setdefaulttimeout(saved)
        return wrapper
    return decorator
def open_with_auth(url):
"""Open a urllib2 request, handling HTTP authentication"""
scheme, netloc, path, params, query, frag = urlparse.urlparse(url)
if scheme in ('http', 'https'):
auth, host = urllib2.splituser(netloc)
else:
auth = None
if auth:
auth = "Basic " + urllib2.unquote(auth).encode('base64').strip()
new_url = urlparse.urlunparse((scheme,host,path,params,query,frag))
request = urllib2.Request(new_url)
request.add_header("Authorization", auth)
else:
request = urllib2.Request(url)
request.add_header('User-Agent', user_agent)
fp = urllib2.urlopen(request)
if auth:
# Put authentication info back into request URL if same host,
# so that links found on the page will work
s2, h2, path2, param2, query2, frag2 = urlparse.urlparse(fp.url)
if s2==scheme and h2==host:
fp.url = urlparse.urlunparse((s2,netloc,path2,param2,query2,frag2))
return fp
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
    """Return ``url`` unchanged; retained only for backward compatibility."""
    return url
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urlparse.urlparse(url)
filename = urllib.url2pathname(path)
if os.path.isfile(filename):
return urllib2.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
if f=='index.html':
fp = open(os.path.join(filename,f),'rb')
body = fp.read()
fp.close()
break
elif os.path.isdir(os.path.join(filename,f)):
f+='/'
files.append("<a href=%r>%s</a>" % (f,f))
else:
body = ("<html><head><title>%s</title>" % url) + \
"</head><body>%s</body></html>" % '\n'.join(files)
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
return urllib2.HTTPError(url, status, message,
{'content-type':'text/html'}, cStringIO.StringIO(body))
# this line is a kludge to keep the trailing blank lines for pje's editor | gpl-3.0 |
Elettronik/SickRage | lib/tornado/process.py | 19 | 12762 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with multiple processes, including both forking
the server into multiple processes and managing subprocesses.
"""
from __future__ import absolute_import, division, print_function
import errno
import os
import signal
import subprocess
import sys
import time
from binascii import hexlify
from tornado.concurrent import Future
from tornado import ioloop
from tornado.iostream import PipeIOStream
from tornado.log import gen_log
from tornado.platform.auto import set_close_exec
from tornado import stack_context
from tornado.util import errno_from_exception, PY3
try:
import multiprocessing
except ImportError:
# Multiprocessing is not available on Google App Engine.
multiprocessing = None
if PY3:
long = int
# Re-export this exception for convenience.
try:
CalledProcessError = subprocess.CalledProcessError
except AttributeError:
# The subprocess module exists in Google App Engine, but is empty.
# This module isn't very useful in that case, but it should
# at least be importable.
if 'APPENGINE_RUNTIME' not in os.environ:
raise
def cpu_count():
"""Returns the number of processors on this machine."""
if multiprocessing is None:
return 1
try:
return multiprocessing.cpu_count()
except NotImplementedError:
pass
try:
return os.sysconf("SC_NPROCESSORS_CONF")
except (AttributeError, ValueError):
pass
gen_log.error("Could not detect number of processors; assuming 1")
return 1
def _reseed_random():
    """Re-seed the ``random`` module (called in each child after a fork
    so children don't share an identical PRNG state)."""
    if 'random' not in sys.modules:
        # The application never imported random; nothing to re-seed.
        return
    import random
    # If os.urandom is available, this method does the same thing as
    # random.seed (at least as of python 2.6). If os.urandom is not
    # available, we mix in the pid in addition to a timestamp.
    try:
        seed = long(hexlify(os.urandom(16)), 16)
    except NotImplementedError:
        seed = int(time.time() * 1000) ^ os.getpid()
    random.seed(seed)
def _pipe_cloexec():
    """Create an OS pipe whose two ends are both marked close-on-exec,
    so they are not leaked into spawned child processes."""
    read_fd, write_fd = os.pipe()
    for fd in (read_fd, write_fd):
        set_close_exec(fd)
    return read_fd, write_fd
# Task id assigned in each child process by fork_processes(); stays None
# in the parent and in processes not created by fork_processes().
_task_id = None
def fork_processes(num_processes, max_restarts=100):
    """Starts multiple worker processes.

    If ``num_processes`` is None or <= 0, we detect the number of cores
    available on this machine and fork that number of child
    processes. If ``num_processes`` is given and > 0, we fork that
    specific number of sub-processes.

    Since we use processes and not threads, there is no shared memory
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the ``autoreload=True`` option to `tornado.web.Application`
    which defaults to True when ``debug=True``).
    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``fork_processes``.

    In each child process, ``fork_processes`` returns its *task id*, a
    number between 0 and ``num_processes``. Processes that exit
    abnormally (due to a signal or non-zero exit status) are restarted
    with the same id (up to ``max_restarts`` times). In the parent
    process, ``fork_processes`` returns None if all child processes
    have exited normally, but will otherwise only exit by throwing an
    exception.
    """
    global _task_id
    assert _task_id is None
    if num_processes is None or num_processes <= 0:
        num_processes = cpu_count()
    if ioloop.IOLoop.initialized():
        raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
                           "has already been initialized. You cannot call "
                           "IOLoop.instance() before calling start_processes()")
    gen_log.info("Starting %d processes", num_processes)
    children = {}  # pid -> task id, for children owned by this parent
    def start_child(i):
        # Fork one worker; returns the task id in the child, None in the
        # parent (mirroring os.fork's pid == 0 convention).
        pid = os.fork()
        if pid == 0:
            # child process
            _reseed_random()
            global _task_id
            _task_id = i
            return i
        else:
            children[pid] = i
            return None
    for i in range(num_processes):
        id = start_child(i)
        if id is not None:
            return id  # we are in a child: hand the task id to the caller
    num_restarts = 0
    # Parent: supervise children, restarting any that die abnormally.
    while children:
        try:
            pid, status = os.wait()
        except OSError as e:
            # wait() can be interrupted by the SIGCHLD handler; retry.
            if errno_from_exception(e) == errno.EINTR:
                continue
            raise
        if pid not in children:
            continue  # not one of ours (e.g. a subprocess.Popen child)
        id = children.pop(pid)
        if os.WIFSIGNALED(status):
            gen_log.warning("child %d (pid %d) killed by signal %d, restarting",
                            id, pid, os.WTERMSIG(status))
        elif os.WEXITSTATUS(status) != 0:
            gen_log.warning("child %d (pid %d) exited with status %d, restarting",
                            id, pid, os.WEXITSTATUS(status))
        else:
            gen_log.info("child %d (pid %d) exited normally", id, pid)
            continue
        num_restarts += 1
        if num_restarts > max_restarts:
            raise RuntimeError("Too many child restarts, giving up")
        new_id = start_child(id)
        if new_id is not None:
            return new_id  # we are in a freshly restarted child
    # All child processes exited cleanly, so exit the master process
    # instead of just returning to right after the call to
    # fork_processes (which will probably just start up another IOLoop
    # unless the caller checks the return value).
    sys.exit(0)
def task_id():
    """Returns the current task id, if any.

    Returns None if this process was not created by `fork_processes`.
    """
    # Reading a module-level name needs no ``global`` declaration.
    return _task_id
class Subprocess(object):
    """Wraps ``subprocess.Popen`` with IOStream support.

    The constructor is the same as ``subprocess.Popen`` with the following
    additions:

    * ``stdin``, ``stdout``, and ``stderr`` may have the value
      ``tornado.process.Subprocess.STREAM``, which will make the corresponding
      attribute of the resulting Subprocess a `.PipeIOStream`.
    * A new keyword argument ``io_loop`` may be used to pass in an IOLoop.

    The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and
    ``wait_for_exit`` methods do not work on Windows. There is
    therefore no reason to use this class instead of
    ``subprocess.Popen`` on that platform.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    # Sentinel passed as stdin/stdout/stderr to request a PipeIOStream.
    STREAM = object()
    # True once the process-wide SIGCHLD handler has been installed.
    _initialized = False
    # pid -> Subprocess instances waiting for an exit notification.
    _waiting = {}  # type: ignore
    def __init__(self, *args, **kwargs):
        self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current()
        # All FDs we create should be closed on error; those in to_close
        # should be closed in the parent process on success.
        pipe_fds = []
        to_close = []
        if kwargs.get('stdin') is Subprocess.STREAM:
            # Child reads its stdin from in_r; parent writes via self.stdin.
            in_r, in_w = _pipe_cloexec()
            kwargs['stdin'] = in_r
            pipe_fds.extend((in_r, in_w))
            to_close.append(in_r)
            self.stdin = PipeIOStream(in_w, io_loop=self.io_loop)
        if kwargs.get('stdout') is Subprocess.STREAM:
            # Child writes its stdout to out_w; parent reads via self.stdout.
            out_r, out_w = _pipe_cloexec()
            kwargs['stdout'] = out_w
            pipe_fds.extend((out_r, out_w))
            to_close.append(out_w)
            self.stdout = PipeIOStream(out_r, io_loop=self.io_loop)
        if kwargs.get('stderr') is Subprocess.STREAM:
            err_r, err_w = _pipe_cloexec()
            kwargs['stderr'] = err_w
            pipe_fds.extend((err_r, err_w))
            to_close.append(err_w)
            self.stderr = PipeIOStream(err_r, io_loop=self.io_loop)
        try:
            self.proc = subprocess.Popen(*args, **kwargs)
        except:
            # Popen failed: close every fd we created before re-raising.
            for fd in pipe_fds:
                os.close(fd)
            raise
        # Success: the parent keeps only its own ends of the pipes.
        for fd in to_close:
            os.close(fd)
        for attr in ['stdin', 'stdout', 'stderr', 'pid']:
            if not hasattr(self, attr):  # don't clobber streams set above
                setattr(self, attr, getattr(self.proc, attr))
        self._exit_callback = None
        self.returncode = None
    def set_exit_callback(self, callback):
        """Runs ``callback`` when this process exits.

        The callback takes one argument, the return code of the process.

        This method uses a ``SIGCHLD`` handler, which is a global setting
        and may conflict if you have other libraries trying to handle the
        same signal.  If you are using more than one ``IOLoop`` it may
        be necessary to call `Subprocess.initialize` first to designate
        one ``IOLoop`` to run the signal handlers.

        In many cases a close callback on the stdout or stderr streams
        can be used as an alternative to an exit callback if the
        signal handler is causing a problem.
        """
        self._exit_callback = stack_context.wrap(callback)
        Subprocess.initialize(self.io_loop)
        Subprocess._waiting[self.pid] = self
        # The child may already have exited before registration; reap now.
        Subprocess._try_cleanup_process(self.pid)
    def wait_for_exit(self, raise_error=True):
        """Returns a `.Future` which resolves when the process exits.

        Usage::

            ret = yield proc.wait_for_exit()

        This is a coroutine-friendly alternative to `set_exit_callback`
        (and a replacement for the blocking `subprocess.Popen.wait`).

        By default, raises `subprocess.CalledProcessError` if the process
        has a non-zero exit status. Use ``wait_for_exit(raise_error=False)``
        to suppress this behavior and return the exit status without raising.

        .. versionadded:: 4.2
        """
        future = Future()
        def callback(ret):
            if ret != 0 and raise_error:
                # Unfortunately we don't have the original args any more.
                future.set_exception(CalledProcessError(ret, None))
            else:
                future.set_result(ret)
        self.set_exit_callback(callback)
        return future
    @classmethod
    def initialize(cls, io_loop=None):
        """Initializes the ``SIGCHLD`` handler.

        The signal handler is run on an `.IOLoop` to avoid locking issues.
        Note that the `.IOLoop` used for signal handling need not be the
        same one used by individual Subprocess objects (as long as the
        ``IOLoops`` are each running in separate threads).

        .. versionchanged:: 4.1
           The ``io_loop`` argument is deprecated.
        """
        if cls._initialized:
            return
        if io_loop is None:
            io_loop = ioloop.IOLoop.current()
        # Keep the previous handler so uninitialize() can restore it.
        cls._old_sigchld = signal.signal(
            signal.SIGCHLD,
            lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
        cls._initialized = True
    @classmethod
    def uninitialize(cls):
        """Removes the ``SIGCHLD`` handler."""
        if not cls._initialized:
            return
        signal.signal(signal.SIGCHLD, cls._old_sigchld)
        cls._initialized = False
    @classmethod
    def _cleanup(cls):
        # SIGCHLD handler body: try to reap every registered child.
        for pid in list(cls._waiting.keys()):  # make a copy
            cls._try_cleanup_process(pid)
    @classmethod
    def _try_cleanup_process(cls, pid):
        try:
            # WNOHANG: don't block if this particular child hasn't exited.
            ret_pid, status = os.waitpid(pid, os.WNOHANG)
        except OSError as e:
            if errno_from_exception(e) == errno.ECHILD:
                return
            # NOTE(review): an OSError other than ECHILD falls through to
            # the ret_pid check below with ret_pid unbound (NameError) —
            # presumably unreachable in practice; verify.
        if ret_pid == 0:
            return
        assert ret_pid == pid
        subproc = cls._waiting.pop(pid)
        # Deliver the result on the subprocess's own IOLoop, signal-safely.
        subproc.io_loop.add_callback_from_signal(
            subproc._set_returncode, status)
    def _set_returncode(self, status):
        # Translate the raw os.wait() status into Popen's returncode
        # convention: negative signal number, or the exit status.
        if os.WIFSIGNALED(status):
            self.returncode = -os.WTERMSIG(status)
        else:
            assert os.WIFEXITED(status)
            self.returncode = os.WEXITSTATUS(status)
        # We've taken over wait() duty from the subprocess.Popen
        # object. If we don't inform it of the process's return code,
        # it will log a warning at destruction in python 3.6+.
        self.proc.returncode = self.returncode
        if self._exit_callback:
            callback = self._exit_callback
            self._exit_callback = None
            callback(self.returncode)
| gpl-3.0 |
jackjennings/Mechanic | src/lib/site-packages/requests/packages/chardet/mbcssm.py | 1783 | 19590 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
BIG5_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
Big5CharLenTable = (0, 1, 1, 2, 0)
# Packaged state-machine description consumed by the Big5 prober.
Big5SMModel = dict(classTable=BIG5_cls,
                   classFactor=5,
                   stateTable=BIG5_st,
                   charLenTable=Big5CharLenTable,
                   name='Big5')
# CP949
CP949_cls = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_st = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
# Packaged state-machine description consumed by the CP949 prober.
CP949SMModel = dict(classTable=CP949_cls,
                    classFactor=10,
                    stateTable=CP949_st,
                    charLenTable=CP949CharLenTable,
                    name='CP949')
# EUC-JP
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
# Packaged state-machine description consumed by the EUC-JP prober.
EUCJPSMModel = dict(classTable=EUCJP_cls,
                    classFactor=6,
                    stateTable=EUCJP_st,
                    charLenTable=EUCJPCharLenTable,
                    name='EUC-JP')
# EUC-KR
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
EUCKRCharLenTable = (0, 1, 2, 0)
# Packaged state-machine description consumed by the EUC-KR prober.
EUCKRSMModel = dict(classTable=EUCKR_cls,
                    classFactor=4,
                    stateTable=EUCKR_st,
                    charLenTable=EUCKRCharLenTable,
                    name='EUC-KR')
# EUC-TW
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
# Packaged state-machine description consumed by the EUC-TW prober.
EUCTWSMModel = dict(classTable=EUCTW_cls,
                    classFactor=7,
                    stateTable=EUCTW_st,
                    charLenTable=EUCTWCharLenTable,
                    name='x-euc-tw')
# GB2312
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
# Packaged state-machine description consumed by the GB2312 prober.
GB2312SMModel = dict(classTable=GB2312_cls,
                     classFactor=7,
                     stateTable=GB2312_st,
                     charLenTable=GB2312CharLenTable,
                     name='GB2312')
# Shift_JIS
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in sjis encoding, but some pages do
#contain such bytes. We need to be more forgiving of errors.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
# Packaged state-machine description consumed by the Shift_JIS prober.
SJISSMModel = dict(classTable=SJIS_cls,
                   classFactor=6,
                   stateTable=SJIS_st,
                   charLenTable=SJISCharLenTable,
                   name='Shift_JIS')
# UCS2-BE
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
# Packaged state-machine description consumed by the UTF-16BE prober.
UCS2BESMModel = dict(classTable=UCS2BE_cls,
                     classFactor=6,
                     stateTable=UCS2BE_st,
                     charLenTable=UCS2BECharLenTable,
                     name='UTF-16BE')
# UCS2-LE
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
# Packaged state-machine description consumed by the UTF-16LE prober.
UCS2LESMModel = dict(classTable=UCS2LE_cls,
                     classFactor=6,
                     stateTable=UCS2LE_st,
                     charLenTable=UCS2LECharLenTable,
                     name='UTF-16LE')
# UTF-8
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
# Packaged state-machine description consumed by the UTF-8 prober.
UTF8SMModel = dict(classTable=UTF8_cls,
                   classFactor=16,
                   stateTable=UTF8_st,
                   charLenTable=UTF8CharLenTable,
                   name='UTF-8')
| mit |
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/genericpath.py | 36 | 4364 | """
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile',
'samestat']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
    """Test whether a path exists.  Returns False for broken symbolic links.

    Also returns False instead of raising for paths the OS cannot represent
    at all (e.g. strings with an embedded null byte), matching the behavior
    CPython adopted in bpo-33721.
    """
    try:
        os.stat(path)
    except (OSError, ValueError):
        # ValueError: malformed path such as one containing a NUL byte.
        return False
    return True
# This follows symbolic links, so both islink() and isfile() can be true
# for the same path on systems that support symlinks
def isfile(path):
    """Test whether a path is a regular file.

    Returns False (rather than raising) for nonexistent paths and for paths
    the OS rejects outright, e.g. ones with an embedded null byte.
    """
    try:
        st = os.stat(path)
    except (OSError, ValueError):
        return False
    return stat.S_ISREG(st.st_mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
    """Return true if the pathname refers to an existing directory.

    Returns False (rather than raising) for nonexistent paths and for paths
    the OS rejects outright, e.g. ones with an embedded null byte.
    """
    try:
        st = os.stat(s)
    except (OSError, ValueError):
        return False
    return stat.S_ISDIR(st.st_mode)
# Thin delegating wrappers over os.stat() attributes; each raises OSError for
# a nonexistent path, exactly as os.stat() does.

def getsize(filename):
    """Return the size of a file, reported by os.stat()."""
    return os.stat(filename).st_size


def getmtime(filename):
    """Return the last modification time of a file, reported by os.stat()."""
    return os.stat(filename).st_mtime


def getatime(filename):
    """Return the last access time of a file, reported by os.stat()."""
    return os.stat(filename).st_atime


def getctime(filename):
    """Return the metadata change time of a file, reported by os.stat()."""
    return os.stat(filename).st_ctime
# Return the longest prefix of all list elements.
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    if not m:
        return ''
    # The lexicographic minimum and maximum bracket every element, so their
    # shared prefix is the shared prefix of the whole list.
    lo = min(m)
    hi = max(m)
    match_len = 0
    for a, b in zip(lo, hi):
        if a != b:
            break
        match_len += 1
    return lo[:match_len]
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
    """Test whether two stat buffers reference the same file"""
    # Same inode number on the same device uniquely identifies a file.
    if s1.st_ino != s2.st_ino:
        return False
    return s1.st_dev == s2.st_dev
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
    """Test whether two pathnames reference the same actual file"""
    # Stat both paths (following symlinks) and compare identity.
    return samestat(os.stat(f1), os.stat(f2))
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
    """Test whether two open file objects reference the same file"""
    # fstat() the descriptors and compare device/inode identity.
    return samestat(os.fstat(fp1), os.fstat(fp2))
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
# NOTE: This code must work for text and bytes strings.
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex:filenameIndex+1] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, p[:0]
def _check_arg_types(funcname, *args):
hasstr = hasbytes = False
for s in args:
if isinstance(s, str):
hasstr = True
elif isinstance(s, bytes):
hasbytes = True
else:
raise TypeError('%s() argument must be str or bytes, not %r' %
(funcname, s.__class__.__name__)) from None
if hasstr and hasbytes:
raise TypeError("Can't mix strings and bytes in path components") from None
| gpl-3.0 |
npe9/depot_tools | third_party/gsutil/oauth2_plugin/oauth2_client.py | 45 | 22215 | # Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An OAuth2 client library.
This library provides a client implementation of the OAuth2 protocol (see
http://code.google.com/apis/accounts/docs/OAuth2.html).
**** Experimental API ****
This module is experimental and is subject to modification or removal without
notice.
"""
# This implementation is inspired by the implementation in
# http://code.google.com/p/google-api-python-client/source/browse/oauth2client/,
# with the following main differences:
# - This library uses the fancy_urllib monkey patch for urllib to correctly
# implement SSL certificate validation.
# - This library does not assume that client code is using the httplib2 library
# to make HTTP requests.
# - This library implements caching of access tokens independent of refresh
# tokens (in the python API client oauth2client, there is a single class that
# encapsulates both refresh and access tokens).
import cgi
import datetime
import errno
from hashlib import sha1
import logging
import os
import tempfile
import threading
import urllib
import urllib2
import urlparse
from boto import cacerts
from third_party import fancy_urllib
try:
import json
except ImportError:
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson as json
except ImportError:
# Try for simplejson
import simplejson as json
LOG = logging.getLogger('oauth2_client')
# Lock used for checking/exchanging refresh token, so multithreaded
# operation doesn't attempt concurrent refreshes.
token_exchange_lock = threading.Lock()
# Exception hierarchy for this module: callers can catch Error to handle any
# OAuth2 failure, or one of the subclasses for a specific token-flow failure.
class Error(Exception):
  """Base exception for the OAuth2 module."""
  pass


class AccessTokenRefreshError(Error):
  """Error trying to exchange a refresh token into an access token."""
  pass


class AuthorizationCodeExchangeError(Error):
  """Error trying to exchange an authorization code into a refresh token."""
  pass
class TokenCache(object):
  """Interface for OAuth2 token caches.

  Implementations map a cache key (see RefreshToken.CacheKey) to a cached
  AccessToken value.
  """

  def PutToken(self, key, value):
    # Abstract: store value under key.
    raise NotImplementedError

  def GetToken(self, key):
    # Abstract: return the value cached under key, or None if absent.
    raise NotImplementedError
class NoopTokenCache(TokenCache):
  """A stub implementation of TokenCache that does nothing."""

  def PutToken(self, key, value):
    # Intentionally discard the token; caching is disabled.
    pass

  def GetToken(self, key):
    # Nothing is ever cached, so every lookup misses.
    return None
class InMemoryTokenCache(TokenCache):
  """An in-memory token cache.

  The cache is implemented by a python dict, and inherits the thread-safety
  properties of dict.
  """

  def __init__(self):
    super(InMemoryTokenCache, self).__init__()
    self.cache = dict()  # maps cache key -> AccessToken

  def PutToken(self, key, value):
    LOG.info('InMemoryTokenCache.PutToken: key=%s', key)
    self.cache[key] = value

  def GetToken(self, key):
    value = self.cache.get(key, None)
    LOG.info('InMemoryTokenCache.GetToken: key=%s%s present',
             key, ' not' if value is None else '')
    return value
class FileSystemTokenCache(TokenCache):
  """An implementation of a token cache that persists tokens on disk.

  Each token object in the cache is stored in serialized form in a separate
  file. The cache file's name can be configured via a path pattern that is
  parameterized by the key under which a value is cached and optionally the
  current processes uid as obtained by os.getuid().

  Since file names are generally publicly visible in the system, it is
  important that the cache key does not leak information about the token's
  value. If client code computes cache keys from token values, a
  cryptographically strong one-way function must be used.
  """

  def __init__(self, path_pattern=None):
    """Creates a FileSystemTokenCache.

    Args:
      path_pattern: Optional string argument to specify the path pattern for
          cache files.  The argument should be a path with format placeholders
          '%(key)s' and optionally '%(uid)s'.  If the argument is omitted, the
          default pattern
            <tmpdir>/oauth2client-tokencache.%(uid)s.%(key)s
          is used, where <tmpdir> is replaced with the system temp dir as
          obtained from tempfile.gettempdir().
    """
    super(FileSystemTokenCache, self).__init__()
    self.path_pattern = path_pattern
    if not path_pattern:
      self.path_pattern = os.path.join(
          tempfile.gettempdir(), 'oauth2_client-tokencache.%(uid)s.%(key)s')

  def CacheFileName(self, key):
    """Returns the cache file path for key, substituting uid when available."""
    uid = '_'
    try:
      # os.getuid() doesn't seem to work in Windows
      uid = str(os.getuid())
    except:
      pass
    return self.path_pattern % {'key': key, 'uid': uid}

  def PutToken(self, key, value):
    """Serializes the value to the key's filename.

    To ensure that written tokens aren't leaked to a different users, we
     a) unlink an existing cache file, if any (to ensure we don't fall victim
        to symlink attacks and the like),
     b) create a new file with O_CREAT | O_EXCL (to ensure nobody is trying to
        race us)
    If either of these steps fail, we simply give up (but log a warning). Not
    caching access tokens is not catastrophic, and failure to create a file
    can happen for either of the following reasons:
      - someone is attacking us as above, in which case we want to default to
        safe operation (not write the token);
      - another legitimate process is racing us; in this case one of the two
        will win and write the access token, which is fine;
      - we don't have permission to remove the old file or write to the
        specified directory, in which case we can't recover

    Args:
      key: the refresh_token hash key to store.
      value: the access_token value to serialize.
    """
    cache_file = self.CacheFileName(key)
    LOG.info('FileSystemTokenCache.PutToken: key=%s, cache_file=%s',
             key, cache_file)
    try:
      os.unlink(cache_file)
    except:
      # Ignore failure to unlink the file; if the file exists and can't be
      # unlinked, the subsequent open with O_CREAT | O_EXCL will fail.
      pass

    flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
    # Accommodate Windows; stolen from python2.6/tempfile.py.
    if hasattr(os, 'O_NOINHERIT'):
      flags |= os.O_NOINHERIT
    if hasattr(os, 'O_BINARY'):
      flags |= os.O_BINARY

    try:
      # 0600: owner read/write only, so other users can't read the token.
      fd = os.open(cache_file, flags, 0600)
    except (OSError, IOError), e:
      LOG.warning('FileSystemTokenCache.PutToken: '
                  'Failed to create cache file %s: %s', cache_file, e)
      return
    f = os.fdopen(fd, 'w+b')
    f.write(value.Serialize())
    f.close()

  def GetToken(self, key):
    """Returns a deserialized access token from the key's filename."""
    value = None
    cache_file = self.CacheFileName(key)

    try:
      f = open(cache_file)
      value = AccessToken.UnSerialize(f.read())
      f.close()
    except (IOError, OSError), e:
      # A missing cache file is a normal cache miss; only log other errors.
      if e.errno != errno.ENOENT:
        LOG.warning('FileSystemTokenCache.GetToken: '
                    'Failed to read cache file %s: %s', cache_file, e)
    except Exception, e:
      LOG.warning('FileSystemTokenCache.GetToken: '
                  'Failed to read cache file %s (possibly corrupted): %s',
                  cache_file, e)

    LOG.info('FileSystemTokenCache.GetToken: key=%s%s present (cache_file=%s)',
             key, ' not' if value is None else '', cache_file)
    return value
class OAuth2Provider(object):
  """Encapsulates information about an OAuth2 provider.

  A simple value object; OAuth2Client reads these attributes directly.
  """

  def __init__(self, label, authorization_uri, token_uri):
    """Creates an OAuth2Provider.

    Args:
      label: A string identifying this oauth2 provider, e.g. "Google".
      authorization_uri: The provider's authorization URI.
      token_uri: The provider's token endpoint URI.
    """
    self.label = label
    self.authorization_uri = authorization_uri
    self.token_uri = token_uri
class OAuth2Client(object):
  """An OAuth2 client.

  Wraps a provider's token endpoint: exchanges authorization codes for
  refresh tokens, exchanges refresh tokens for (cached) access tokens, and
  builds authorization URIs.
  """

  def __init__(self, provider, client_id, client_secret,
               url_opener=None,
               proxy=None,
               access_token_cache=None,
               datetime_strategy=datetime.datetime):
    """Creates an OAuth2Client.

    Args:
      provider: The OAuth2Provider provider this client will authenticate
          against.
      client_id: The OAuth2 client ID of this client.
      client_secret: The OAuth2 client secret of this client.
      url_opener: An optional urllib2.OpenerDirector to use for making HTTP
          requests to the OAuth2 provider's token endpoint.  The provided
          url_opener *must* be configured to validate server SSL certificates
          for requests to https connections, and to correctly handle proxying
          of https requests. If this argument is omitted or None, a suitable
          opener based on fancy_urllib is used.
      proxy: An optional string specifying a HTTP proxy to be used, in the form
          '<proxy>:<port>'.  This option is only effective if the url_opener
          has been configured with a fancy_urllib.FancyProxyHandler (this is
          the case for the default url_opener).
      access_token_cache: An optional instance of a TokenCache. If omitted or
          None, an InMemoryTokenCache is used.
      datetime_strategy: datetime module strategy to use.
    """
    self.provider = provider
    self.client_id = client_id
    self.client_secret = client_secret
    # datetime_strategy is used to invoke utcnow() on; it is injected into the
    # constructor for unit testing purposes.
    self.datetime_strategy = datetime_strategy

    self._proxy = proxy

    self.access_token_cache = access_token_cache or InMemoryTokenCache()

    # CA bundle shipped with boto, used to validate the token endpoint's cert.
    self.ca_certs_file = os.path.join(
        os.path.dirname(os.path.abspath(cacerts.__file__)), 'cacerts.txt')

    if url_opener is None:
      # TODO(Google): set user agent?
      url_opener = urllib2.build_opener(
          fancy_urllib.FancyProxyHandler(),
          fancy_urllib.FancyRedirectHandler(),
          fancy_urllib.FancyHTTPSHandler())
    self.url_opener = url_opener

  def _TokenRequest(self, request):
    """Make a request to this client's provider's token endpoint.

    Args:
      request: A dict with the request parameters.

    Returns:
      A tuple (response, error) where,
      - response is the parsed JSON response received from the token endpoint,
        or None if no parseable response was received, and
      - error is None if the request succeeded or
        an Exception if an error occurred.
    """
    body = urllib.urlencode(request)
    LOG.debug('_TokenRequest request: %s', body)
    response = None
    try:
      request = fancy_urllib.FancyRequest(
          self.provider.token_uri, data=body)
      if self._proxy:
        request.set_proxy(self._proxy, 'http')

      request.set_ssl_info(ca_certs=self.ca_certs_file)
      result = self.url_opener.open(request)
      resp_body = result.read()
      LOG.debug('_TokenRequest response: %s', resp_body)
    except urllib2.HTTPError, e:
      # The endpoint may still return a JSON error body; best-effort parse it.
      try:
        response = json.loads(e.read())
      except:
        pass
      return (response, e)

    try:
      response = json.loads(resp_body)
    except ValueError, e:
      return (None, e)

    return (response, None)

  def GetAccessToken(self, refresh_token):
    """Given a RefreshToken, obtains a corresponding access token.

    First, this client's access token cache is checked for an existing,
    not-yet-expired access token for the provided refresh token. If none is
    found, the client obtains a fresh access token for the provided refresh
    token from the OAuth2 provider's token endpoint.

    Args:
      refresh_token: The RefreshToken object which to get an access token for.

    Returns:
      The cached or freshly obtained AccessToken.

    Raises:
      AccessTokenRefreshError if an error occurs.
    """
    # Ensure only one thread at a time attempts to get (and possibly refresh)
    # the access token. This doesn't prevent concurrent refresh attempts across
    # multiple gsutil instances, but at least protects against multiple threads
    # simultaneously attempting to refresh when gsutil -m is used.
    token_exchange_lock.acquire()
    try:
      cache_key = refresh_token.CacheKey()
      LOG.info('GetAccessToken: checking cache for key %s', cache_key)
      access_token = self.access_token_cache.GetToken(cache_key)
      LOG.debug('GetAccessToken: token from cache: %s', access_token)
      if access_token is None or access_token.ShouldRefresh():
        LOG.info('GetAccessToken: fetching fresh access token...')
        access_token = self.FetchAccessToken(refresh_token)
        LOG.debug('GetAccessToken: fresh access token: %s', access_token)
        self.access_token_cache.PutToken(cache_key, access_token)
      return access_token
    finally:
      token_exchange_lock.release()

  def FetchAccessToken(self, refresh_token):
    """Fetches an access token from the provider's token endpoint.

    Given a RefreshToken, fetches an access token from this client's OAuth2
    provider's token endpoint.

    Args:
      refresh_token: The RefreshToken object which to get an access token for.

    Returns:
      The fetched AccessToken.

    Raises:
      AccessTokenRefreshError: if an error occurs.
    """
    request = {
        'grant_type': 'refresh_token',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'refresh_token': refresh_token.refresh_token,
    }
    LOG.debug('FetchAccessToken request: %s', request)
    response, error = self._TokenRequest(request)
    LOG.debug(
        'FetchAccessToken response (error = %s): %s', error, response)

    if error:
      oauth2_error = ''
      # NOTE(review): response['error'] raises KeyError if the provider's
      # error body lacks an 'error' field; response.get('error') would be
      # safer — confirm before changing.
      if response and response['error']:
        oauth2_error = '; OAuth2 error: %s' % response['error']
      raise AccessTokenRefreshError(
          'Failed to exchange refresh token into access token; '
          'request failed: %s%s' % (error, oauth2_error))

    if 'access_token' not in response:
      raise AccessTokenRefreshError(
          'Failed to exchange refresh token into access token; response: %s' %
          response)

    token_expiry = None
    if 'expires_in' in response:
      token_expiry = (
          self.datetime_strategy.utcnow() +
          datetime.timedelta(seconds=int(response['expires_in'])))

    return AccessToken(response['access_token'], token_expiry,
                       datetime_strategy=self.datetime_strategy)

  def GetAuthorizationUri(self, redirect_uri, scopes, extra_params=None):
    """Gets the OAuth2 authorization URI and the specified scope(s).

    Applications should navigate/redirect the user's user agent to this URI.
    The user will be shown an approval UI requesting the user to approve
    access of this client to the requested scopes under the identity of the
    authenticated end user.

    The application should expect the user agent to be redirected to the
    specified redirect_uri after the user's approval/disapproval.

    Installed applications may use the special redirect_uri
    'urn:ietf:wg:oauth:2.0:oob' to indicate that instead of redirecting the
    browser, the user be shown a confirmation page with a verification code.
    The application should query the user for this code.

    Args:
      redirect_uri: Either the string 'urn:ietf:wg:oauth:2.0:oob' for a
          non-web-based application, or a URI that handles the callback from
          the authorization server.
      scopes: A list of strings specifying the OAuth scopes the application
          requests access to.
      extra_params: Optional dictionary of additional parameters to be passed
          to the OAuth2 authorization URI.

    Returns:
      The authorization URI for the specified scopes as a string.
    """
    request = {
        'response_type': 'code',
        'client_id': self.client_id,
        'redirect_uri': redirect_uri,
        'scope': ' '.join(scopes),
    }
    if extra_params:
      request.update(extra_params)
    url_parts = list(urlparse.urlparse(self.provider.authorization_uri))
    # 4 is the index of the query part
    request.update(dict(cgi.parse_qsl(url_parts[4])))
    url_parts[4] = urllib.urlencode(request)
    return urlparse.urlunparse(url_parts)

  def ExchangeAuthorizationCode(self, code, redirect_uri, scopes):
    """Exchanges an authorization code for a refresh token.

    Invokes this client's OAuth2 provider's token endpoint to exchange an
    authorization code into a refresh token.

    Args:
      code: the authorization code.
      redirect_uri: Either the string 'urn:ietf:wg:oauth:2.0:oob' for a
          non-web-based application, or a URI that handles the callback from
          the authorization server.
      scopes: A list of strings specifying the OAuth scopes the application
          requests access to.

    Returns:
      A tuple consisting of the resulting RefreshToken and AccessToken.

    Raises:
      AuthorizationCodeExchangeError: if an error occurs.
    """
    request = {
        'grant_type': 'authorization_code',
        'client_id': self.client_id,
        'client_secret': self.client_secret,
        'code': code,
        'redirect_uri': redirect_uri,
        'scope': ' '.join(scopes),
    }
    LOG.debug('ExchangeAuthorizationCode request: %s', request)
    response, error = self._TokenRequest(request)
    LOG.debug(
        'ExchangeAuthorizationCode response (error = %s): %s',
        error, response)

    if error:
      oauth2_error = ''
      # NOTE(review): same KeyError hazard as in FetchAccessToken if the
      # error body lacks an 'error' field — confirm before changing.
      if response and response['error']:
        oauth2_error = '; OAuth2 error: %s' % response['error']
      raise AuthorizationCodeExchangeError(
          'Failed to exchange refresh token into access token; '
          'request failed: %s%s' % (str(error), oauth2_error))

    if not 'access_token' in response:
      raise AuthorizationCodeExchangeError(
          'Failed to exchange authorization code into access token; '
          'response: %s' % response)

    token_expiry = None
    if 'expires_in' in response:
      token_expiry = (
          self.datetime_strategy.utcnow() +
          datetime.timedelta(seconds=int(response['expires_in'])))
    access_token = AccessToken(response['access_token'], token_expiry,
                               datetime_strategy=self.datetime_strategy)

    refresh_token = None
    refresh_token_string = response.get('refresh_token', None)
    # Cache the access token under the new refresh token's key, guarded by
    # the same lock used in GetAccessToken.
    token_exchange_lock.acquire()
    try:
      if refresh_token_string:
        refresh_token = RefreshToken(self, refresh_token_string)
        self.access_token_cache.PutToken(refresh_token.CacheKey(), access_token)
    finally:
      token_exchange_lock.release()

    return (refresh_token, access_token)
class AccessToken(object):
  """Encapsulates an OAuth2 access token."""

  def __init__(self, token, expiry, datetime_strategy=datetime.datetime):
    # token: the raw access token string; expiry: naive UTC datetime or None
    # (None means "never expires" as far as ShouldRefresh is concerned).
    self.token = token
    self.expiry = expiry
    self.datetime_strategy = datetime_strategy

  @staticmethod
  def UnSerialize(query):
    """Creates an AccessToken object from its serialized form."""

    def GetValue(d, key):
      return (d.get(key, [None]))[0]
    kv = cgi.parse_qs(query)
    # NOTE(review): kv['token'] raises KeyError when the serialized form has
    # no 'token' field at all; only a present-but-empty token returns None
    # here — confirm whether that is intended.
    if not kv['token']:
      return None
    expiry = None
    expiry_tuple = GetValue(kv, 'expiry')
    if expiry_tuple:
      try:
        expiry = datetime.datetime(
            *[int(n) for n in expiry_tuple.split(',')])
      except:
        return None
    return AccessToken(GetValue(kv, 'token'), expiry)

  def Serialize(self):
    """Serializes this object as URI-encoded key-value pairs."""
    # There's got to be a better way to serialize a datetime. Unfortunately,
    # there is no reliable way to convert into a unix epoch.
    kv = {'token': self.token}
    if self.expiry:
      t = self.expiry
      tupl = (t.year, t.month, t.day, t.hour, t.minute, t.second,
              t.microsecond)
      kv['expiry'] = ','.join([str(i) for i in tupl])
    return urllib.urlencode(kv)

  def ShouldRefresh(self, time_delta=300):
    """Whether the access token needs to be refreshed.

    Args:
      time_delta: refresh access token when it expires within time_delta secs.

    Returns:
      True if the token is expired or about to expire, False if the
      token should be expected to work.  Note that the token may still
      be rejected, e.g. if it has been revoked server-side.
    """
    if self.expiry is None:
      return False
    return (self.datetime_strategy.utcnow()
            + datetime.timedelta(seconds=time_delta) > self.expiry)

  def __eq__(self, other):
    return self.token == other.token and self.expiry == other.expiry

  def __ne__(self, other):
    return not self.__eq__(other)

  def __str__(self):
    return 'AccessToken(token=%s, expiry=%sZ)' % (self.token, self.expiry)
class RefreshToken(object):
  """Encapsulates an OAuth2 refresh token."""

  def __init__(self, oauth2_client, refresh_token):
    # oauth2_client: the OAuth2Client used to mint access tokens from this
    # refresh token; refresh_token: the raw refresh token string.
    self.oauth2_client = oauth2_client
    self.refresh_token = refresh_token

  def CacheKey(self):
    """Computes a cache key for this refresh token.

    The cache key is computed as the SHA1 hash of the token, and as such
    satisfies the FileSystemTokenCache requirement that cache keys do not leak
    information about token values.

    Returns:
      A hash key for this refresh token.
    """
    h = sha1()
    h.update(self.refresh_token)
    return h.hexdigest()

  def GetAuthorizationHeader(self):
    """Gets the access token HTTP authorization header value.

    Returns:
      The value of an Authorization HTTP header that authenticates
      requests with an OAuth2 access token based on this refresh token.
    """
    return 'Bearer %s' % self.oauth2_client.GetAccessToken(self).token
| bsd-3-clause |
Stanford-Online/edx-analytics-data-api | analytics_data_api/v0/views/courses.py | 1 | 29691 | import datetime
from itertools import groupby
import warnings
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import connections
from django.db.models import Max
from django.http import Http404
from django.utils.timezone import make_aware, utc
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.views import APIView
from opaque_keys.edx.keys import CourseKey
from analytics_data_api.constants import enrollment_modes
from analytics_data_api.utils import dictfetchall, get_course_report_download_details
from analytics_data_api.v0 import models, serializers
from analytics_data_api.v0.exceptions import ReportFileNotFoundError
from analytics_data_api.v0.views.utils import raise_404_if_none
class BaseCourseView(generics.ListAPIView):
    """Common base for per-course list endpoints.

    Subclasses set `model`, `serializer_class` and `slug`, and implement
    `apply_date_filtering` to interpret the optional start_date/end_date
    query parameters (parsed as UTC).
    """

    # Timezone-aware (UTC) datetime bounds parsed from the query string.
    start_date = None
    end_date = None
    # Course ID captured from the URL kwargs in get().
    course_id = None
    # Short report name embedded in the CSV download filename.
    slug = None
    allow_empty = False

    def get(self, request, *args, **kwargs):
        self.course_id = self.kwargs.get('course_id')
        start_date = request.query_params.get('start_date')
        end_date = request.query_params.get('end_date')
        timezone = utc
        self.start_date = self.parse_date(start_date, timezone)
        self.end_date = self.parse_date(end_date, timezone)
        return super(BaseCourseView, self).get(request, *args, **kwargs)

    def parse_date(self, date, timezone):
        """Parse `date` with settings.DATETIME_FORMAT, falling back to
        settings.DATE_FORMAT; returns an aware datetime, or None if falsy."""
        if date:
            try:
                date = datetime.datetime.strptime(date, settings.DATETIME_FORMAT)
            except ValueError:
                date = datetime.datetime.strptime(date, settings.DATE_FORMAT)
            date = make_aware(date, timezone)
        return date

    def apply_date_filtering(self, queryset):
        # Subclasses restrict `queryset` to the requested date window.
        raise NotImplementedError

    @raise_404_if_none
    def get_queryset(self):
        queryset = self.model.objects.filter(course_id=self.course_id)
        queryset = self.apply_date_filtering(queryset)
        return queryset

    def get_csv_filename(self):
        # e.g. "org-course-run--enrollment-age.csv"
        course_key = CourseKey.from_string(self.course_id)
        course_id = u'-'.join([course_key.org, course_key.course, course_key.run])
        return u'{0}--{1}.csv'.format(course_id, self.slug)

    def finalize_response(self, request, response, *args, **kwargs):
        # Suggest a download filename when the client requested CSV output.
        if request.META.get('HTTP_ACCEPT') == u'text/csv':
            response['Content-Disposition'] = u'attachment; filename={}'.format(self.get_csv_filename())
        return super(BaseCourseView, self).finalize_response(request, response, *args, **kwargs)
# pylint: disable=line-too-long
class CourseActivityWeeklyView(BaseCourseView):
    """
    Get counts of users who performed specific activities in a course.

    **Example request**

        GET /api/v0/courses/{course_id}/activity/

    **Response Values**

        Returns a list of key/value pairs for student activities, as well as
        the interval start and end dates and the course ID.

            * any: The number of unique users who performed any action in the
              course, including actions not counted in other categories in the
              response.
            * attempted_problem: The number of unique users who answered any
              loncapa-based problem in the course.
            * played_video: The number of unique users who started watching any
              video in the course.
            * posted_forum: The number of unique users who created a new post,
              responded to a post, or submitted a comment on any discussion in
              the course.
            * interval_start: The time and date at which data started being
              included in returned values.
            * interval_end: The time and date at which data stopped being
              included in returned values.
            * course_id: The ID of the course for which data is returned.
            * created: The date the counts were computed.

    **Parameters**

        You can specify the start and end dates for the time period for which
        you want to get activity.

        You specify dates in the format: YYYY-mm-ddTtttttt; for example,
        ``2014-12-15T000000``.

        If no start or end dates are specified, the data for the week ending on
        the previous day is returned.

        start_date -- Date after which all data is returned (inclusive).

        end_date -- Date before which all data is returned (exclusive).
    """
    slug = u'engagement-activity'
    model = models.CourseActivityWeekly
    serializer_class = serializers.CourseActivityWeeklySerializer

    def apply_date_filtering(self, queryset):
        if self.start_date or self.end_date:
            # Filter by start/end date
            if self.start_date:
                queryset = queryset.filter(interval_start__gte=self.start_date)

            if self.end_date:
                queryset = queryset.filter(interval_end__lt=self.end_date)
        else:
            # No date filter supplied, so only return data for the latest date
            latest_date = queryset.aggregate(Max('interval_end'))
            if latest_date:
                latest_date = latest_date['interval_end__max']
                queryset = queryset.filter(interval_end=latest_date)

        return queryset

    def get_queryset(self):
        queryset = super(CourseActivityWeeklyView, self).get_queryset()
        # Collapse one-row-per-activity-type into one dict per interval.
        queryset = self.format_data(queryset)
        return queryset

    def _format_activity_type(self, activity_type):
        activity_type = activity_type.lower()

        # The data pipeline stores "any" as "active"; however, the API should
        # display "any".
        if activity_type == 'active':
            activity_type = 'any'

        return activity_type

    def format_data(self, data):
        """
        Group the data by date and combine multiple activity rows into a
        single row/element.

        Arguments
            data (iterable) -- Data to be formatted.
        """
        formatted_data = []

        # NOTE: groupby only merges *adjacent* rows with equal keys, so this
        # relies on `data` already being ordered by course/interval.
        for key, group in groupby(data, lambda x: (x.course_id, x.interval_start, x.interval_end)):
            # Iterate over groups and create a single item with all activity
            # types
            item = {
                u'course_id': key[0],
                u'interval_start': key[1],
                u'interval_end': key[2],
                u'created': None
            }

            for activity in group:
                activity_type = self._format_activity_type(activity.activity_type)
                item[activity_type] = activity.count
                # Report the most recent computation time across the group.
                item[u'created'] = max(activity.created, item[u'created']) if item[u'created'] else activity.created

            formatted_data.append(item)

        return formatted_data
class CourseActivityMostRecentWeekView(generics.RetrieveAPIView):
    """
    Get counts of users who performed specific activities at least once during
    the most recently computed week.

    **Example request**

        GET /api/v0/courses/{course_id}/recent_activity/

    **Response Values**

        Returns a list of key/value pairs for student activities, as well as
        the interval start and end dates and the course ID.

            * activity_type: The type of activity counted. Possible values are:

                * any: The number of unique users who performed any action in
                  the course, including actions not counted in other categories
                  in the response.
                * attempted_problem: The number of unique users who answered
                  any loncapa-based problem in the course.
                * played_video: The number of unique users who started watching
                  any video in the course.
                * posted_forum: The number of unique users who created a new
                  post, responded to a post, or submitted a comment on any
                  discussion in the course.

            * count: The number of unique users who performed the specified
              action.
            * interval_start: The time and date at which data started being
              included in returned values.
            * interval_end: The time and date at which data stopped being
              included in returned values.
            * course_id: The ID of the course for which data is returned.

    **Parameters**

        You can specify the activity type for which you want to get the count.

        activity_type -- The type of activity: any (default),
        attempted_problem, played_video, posted_forum.
    """
    serializer_class = serializers.CourseActivityByWeekSerializer

    # Pipeline name for the "any" activity type (see _format_activity_type).
    DEFAULT_ACTIVITY_TYPE = 'ACTIVE'

    def _format_activity_type(self, activity_type):
        """
        Modify the activity type parameter for use with our data.

        Arguments:
            activity_type (str): String to be formatted
        """
        activity_type = activity_type.upper()

        # The API-facing name "any" maps to the pipeline's "ACTIVE".
        if activity_type == 'ANY':
            activity_type = self.DEFAULT_ACTIVITY_TYPE

        return activity_type

    def _get_activity_type(self):
        """ Retrieve the activity type from the query string. """
        # Support the old label param
        activity_type = self.request.query_params.get('label', None)
        activity_type = activity_type or self.request.query_params.get('activity_type', self.DEFAULT_ACTIVITY_TYPE)
        activity_type = self._format_activity_type(activity_type)
        return activity_type

    def get_object(self):
        """Select the activity report for the given course and activity type."""
        warnings.warn('CourseActivityMostRecentWeekView has been deprecated! Use CourseActivityWeeklyView instead.',
                      DeprecationWarning)

        course_id = self.kwargs.get('course_id')
        activity_type = self._get_activity_type()

        try:
            return models.CourseActivityWeekly.get_most_recent(course_id, activity_type)
        except ObjectDoesNotExist:
            raise Http404
class BaseCourseEnrollmentView(BaseCourseView):
    """Base for enrollment breakdown views: filters on the model's `date`
    field, defaulting to the single most recent date when no range is given."""

    def apply_date_filtering(self, queryset):
        if self.start_date or self.end_date:
            # Filter by start/end date
            if self.start_date:
                queryset = queryset.filter(date__gte=self.start_date)

            if self.end_date:
                queryset = queryset.filter(date__lt=self.end_date)
        else:
            # No date filter supplied, so only return data for the latest date
            latest_date = queryset.aggregate(Max('date'))
            if latest_date:
                latest_date = latest_date['date__max']
                queryset = queryset.filter(date=latest_date)

        return queryset
class CourseEnrollmentByBirthYearView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users by birth year.

    **Example request**

        GET /api/v0/courses/{course_id}/enrollment/birth_year/

    **Response Values**

        Returns an array with a collection for each year in which a user was
        born. Each collection contains:

            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * birth_year: The birth year for which the enrollment count applies.
            * count: The number of users who were born in the specified year.
            * created: The date the count was computed.

    **Parameters**

        You can specify the start and end dates for which to count enrolled
        users. You specify dates in the format: YYYY-mm-dd; for example,
        ``2014-12-15``.

        If no start or end dates are specified, the data for the previous day
        is returned.

        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    slug = u'enrollment-age'
    serializer_class = serializers.CourseEnrollmentByBirthYearSerializer
    model = models.CourseEnrollmentByBirthYear
class CourseEnrollmentByEducationView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users by education level.

    **Example request**

        GET /api/v0/courses/{course_id}/enrollment/education/

    **Response Values**

        Returns a collection for each level of education reported by a user.
        Each collection contains:

            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * education_level: The education level for which the enrollment
              count applies.
            * count: The number of users who reported the specified education
              level.
            * created: The date the count was computed.

    **Parameters**

        You can specify the start and end dates for which to count enrolled
        users. You specify dates in the format: YYYY-mm-dd; for example,
        ``2014-12-15``.

        If no start or end dates are specified, the data for the previous day
        is returned.

        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    slug = u'enrollment-education'
    serializer_class = serializers.CourseEnrollmentByEducationSerializer
    model = models.CourseEnrollmentByEducation
class CourseEnrollmentByGenderView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users by gender.

    **Example request**

        GET /api/v0/courses/{course_id}/enrollment/gender/

    **Response Values**

        Returns the count of each gender specified by users:

            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * female: The count of self-identified female users.
            * male: The count of self-identified male users.
            * other: The count of self-identified other users.
            * unknown: The count of users who did not specify a gender.
            * created: The date the counts were computed.

    **Parameters**

        You can specify the start and end dates for which to count enrolled
        users. You specify dates in the format: YYYY-mm-dd; for example,
        ``2014-12-15``.

        If no start or end dates are specified, the data for the previous day
        is returned.

        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    slug = u'enrollment-gender'
    serializer_class = serializers.CourseEnrollmentByGenderSerializer
    model = models.CourseEnrollmentByGender

    def get_queryset(self):
        """Collapse per-gender rows into one item per (course_id, date)."""
        queryset = super(CourseEnrollmentByGenderView, self).get_queryset()
        formatted_data = []
        # NOTE(review): groupby only merges *adjacent* rows, so this assumes
        # the queryset is ordered by (course_id, date) -- confirm the model's
        # Meta ordering.
        for key, group in groupby(queryset, lambda x: (x.course_id, x.date)):
            # Iterate over groups and create a single item with gender data
            item = {
                u'course_id': key[0],
                u'date': key[1],
                u'created': None,
                u'male': 0,
                u'female': 0,
                u'other': 0,
                u'unknown': 0
            }
            for enrollment in group:
                gender = enrollment.cleaned_gender.lower()
                count = item.get(gender, 0)
                count += enrollment.count
                item[gender] = count
                # Track the most recent computation time across gender rows.
                item[u'created'] = max(enrollment.created, item[u'created']) if item[u'created'] else enrollment.created
            formatted_data.append(item)
        return formatted_data
class CourseEnrollmentView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users.

    **Example request**

        GET /api/v0/courses/{course_id}/enrollment/

    **Response Values**

        Returns the count of enrolled users:

            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * count: The count of enrolled users.
            * created: The date the count was computed.

    **Parameters**

        You can specify the start and end dates for which to count enrolled
        users. You specify dates in the format: YYYY-mm-dd; for example,
        ``2014-12-15``.

        If no start or end dates are specified, the data for the previous day
        is returned.

        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    slug = u'enrollment'
    serializer_class = serializers.CourseEnrollmentDailySerializer
    model = models.CourseEnrollmentDaily
class CourseEnrollmentModeView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users by enrollment mode.

    **Example request**

        GET /api/v0/courses/{course_id}/enrollment/mode/

    **Response Values**

        Returns the counts of users by mode:

            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * count: The count of currently enrolled users.
            * cumulative_count: The cumulative total of all users ever enrolled.
            * created: The date the counts were computed.
            * honor: The number of users currently enrolled in honor code mode.
            * professional: The number of users currently enrolled in professional mode.
            * verified: The number of users currently enrolled in verified mode.

    **Parameters**

        You can specify the start and end dates for which to count enrolled
        users. You specify dates in the format: YYYY-mm-dd; for example,
        ``2014-12-15``.

        If no start or end dates are specified, the data for the previous day
        is returned.

        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    slug = u'enrollment_mode'
    serializer_class = serializers.CourseEnrollmentModeDailySerializer
    model = models.CourseEnrollmentModeDaily

    def get_queryset(self):
        """Collapse per-mode rows into one item per (course_id, date)."""
        queryset = super(CourseEnrollmentModeView, self).get_queryset()
        formatted_data = []
        # NOTE(review): groupby only merges *adjacent* rows, so this assumes
        # the queryset is ordered by (course_id, date) -- confirm.
        for key, group in groupby(queryset, lambda x: (x.course_id, x.date)):
            item = {
                u'course_id': key[0],
                u'date': key[1],
                u'created': None
            }
            total = 0
            cumulative_total = 0
            for enrollment in group:
                mode = enrollment.mode
                item[mode] = enrollment.count
                # Track the most recent computation time across mode rows.
                item[u'created'] = max(enrollment.created, item[u'created']) if item[u'created'] else enrollment.created
                total += enrollment.count
                cumulative_total += enrollment.cumulative_count
            # Merge professional with non verified professional
            item[enrollment_modes.PROFESSIONAL] = \
                item.get(enrollment_modes.PROFESSIONAL, 0) + item.pop(enrollment_modes.PROFESSIONAL_NO_ID, 0)
            item[u'count'] = total
            item[u'cumulative_count'] = cumulative_total
            formatted_data.append(item)
        return formatted_data
# pylint: disable=line-too-long
class CourseEnrollmentByLocationView(BaseCourseEnrollmentView):
    """
    Get the number of enrolled users by location.

    Location is calculated based on the user's IP address. Users whose location
    cannot be determined are counted as having a country.name of UNKNOWN.

    Countries are denoted by their ISO 3166 country code.

    **Example request**

        GET /api/v0/courses/{course_id}/enrollment/location/

    **Response Values**

        Returns counts of enrollments by country:

            * course_id: The ID of the course for which data is returned.
            * date: The date for which the enrollment count was computed.
            * country: Contains the following fields:

                * alpha2: The two-letter country code.
                * alpha3: The three-letter country code.
                * name: The country name.

            * count: The count of users from the country.
            * created: The date the count was computed.

    **Parameters**

        You can specify the start and end dates for which to count enrolled
        users. You specify dates in the format: YYYY-mm-dd; for example,
        ``2014-12-15``.

        If no start or end dates are specified, the data for the previous day
        is returned.

        start_date -- Date after which enrolled students are counted (inclusive).
        end_date -- Date before which enrolled students are counted (exclusive).
    """
    slug = u'enrollment-location'
    serializer_class = serializers.CourseEnrollmentByCountrySerializer
    model = models.CourseEnrollmentByCountry

    def get_queryset(self):
        """Aggregate enrollment counts per (date, country, course)."""
        # Get all of the data from the database
        queryset = super(CourseEnrollmentByLocationView, self).get_queryset()
        items = queryset.all()
        # Data must be sorted in order for groupby to work properly
        items = sorted(items, key=lambda x: x.country.alpha2)
        # Items to be returned by this method
        returned_items = []
        # Group data by date, country, and course ID
        for key, group in groupby(items, lambda x: (x.date, x.country.alpha2, x.course_id)):
            count = 0
            date = key[0]
            country_code = key[1]
            course_id = key[2]
            created = None
            for item in group:
                # Keep the most recent computation time for the group.
                created = max(created, item.created) if created else item.created
                count += item.count
            # pylint: disable=no-value-for-parameter,unexpected-keyword-arg
            returned_items.append(models.CourseEnrollmentByCountry(
                course_id=course_id,
                date=date,
                country_code=country_code,
                count=count,
                created=created
            ))
        # Note: We are returning a list, instead of a queryset. This is
        # acceptable since the consuming code simply expects the returned
        # value to be iterable, not necessarily a queryset.
        return returned_items
# pylint: disable=abstract-method
class ProblemsListView(BaseCourseView):
    """
    Get the problems.

    **Example request**

        GET /api/v0/courses/{course_id}/problems/

    **Response Values**

        Returns a collection of submission counts and part IDs for each
        problem. Each collection contains:

            * module_id: The ID of the problem.
            * total_submissions: Total number of submissions.
            * correct_submissions: Total number of *correct* submissions.
            * part_ids: List of problem part IDs.
    """
    serializer_class = serializers.ProblemSerializer
    allow_empty = False

    @raise_404_if_none
    def get_queryset(self):
        """
        Aggregate the answer distribution into one row per problem module.

        Returns a list of dicts (not a queryset); the decorator raises
        Http404 when no rows exist for the course.
        """
        # last_response_count is the number of submissions for the problem part and must
        # be divided by the number of problem parts to get the problem submission rather
        # than the problem *part* submissions
        aggregation_query = """
            SELECT
                module_id,
                SUM(last_response_count)/COUNT(DISTINCT part_id) AS total_submissions,
                SUM(CASE WHEN correct=1 THEN last_response_count ELSE 0 END)/COUNT(DISTINCT part_id) AS correct_submissions,
                GROUP_CONCAT(DISTINCT part_id) AS part_ids,
                MAX(created) AS created
            FROM answer_distribution
            WHERE course_id = %s
            GROUP BY module_id;
        """
        connection = connections[settings.ANALYTICS_DATABASE]
        with connection.cursor() as cursor:
            if connection.vendor == 'mysql':
                # The default value of group_concat_max_len, 1024, is too low for some course data. Increase this value
                # to its maximum possible value. For more information see
                # http://code.openark.org/blog/mysql/those-oversized-undersized-variables-defaults.
                cursor.execute("SET @@group_concat_max_len = @@max_allowed_packet;")
                cursor.execute("DESCRIBE answer_distribution;")
                column_names = [row[0] for row in cursor.fetchall()]
            # Alternate query for sqlite test database
            else:
                cursor.execute("PRAGMA table_info(answer_distribution)")
                column_names = [row[1] for row in cursor.fetchall()]
            # Older schemas use a column named "count" instead of
            # "last_response_count"; rewrite the query accordingly.
            if u'last_response_count' in column_names:
                cursor.execute(aggregation_query, [self.course_id])
            else:
                cursor.execute(aggregation_query.replace('last_response_count', 'count'), [self.course_id])
            rows = dictfetchall(cursor)
        for row in rows:
            # Convert the comma-separated list into an array of strings.
            row['part_ids'] = row['part_ids'].split(',')
            # Convert the aggregated decimal fields to integers
            row['total_submissions'] = int(row['total_submissions'])
            row['correct_submissions'] = int(row['correct_submissions'])
            # Rather than write custom SQL for the SQLite backend, simply parse the timestamp.
            created = row['created']
            if not isinstance(created, datetime.datetime):
                row['created'] = datetime.datetime.strptime(created, '%Y-%m-%d %H:%M:%S')
        return rows
# pylint: disable=abstract-method
class ProblemsAndTagsListView(BaseCourseView):
    """
    Get the problems with the connected tags.

    **Example request**

        GET /api/v0/courses/{course_id}/problems_and_tags/

    **Response Values**

        Returns a collection of submission counts and tags for each problem.
        Each collection contains:

            * module_id: The ID of the problem.
            * total_submissions: Total number of submissions.
            * correct_submissions: Total number of *correct* submissions.
            * tags: Dictionary that contains pairs "tag key: [tag value 1, tag value 2, ..., tag value N]".
    """
    serializer_class = serializers.ProblemsAndTagsSerializer
    allow_empty = False
    model = models.ProblemsAndTags

    @raise_404_if_none
    def get_queryset(self):
        """
        Return one aggregated entry per problem module.

        Each entry maps tag names to sorted lists of tag values; ``created``
        holds the most recent timestamp seen for the module.
        """
        items = self.model.objects.filter(course_id=self.course_id).all()
        result = {}
        for row in items:
            entry = result.get(row.module_id)
            if entry is None:
                entry = {
                    'module_id': row.module_id,
                    'total_submissions': row.total_submissions,
                    'correct_submissions': row.correct_submissions,
                    'tags': {},
                    'created': row.created
                }
                result[row.module_id] = entry
            elif entry['created'] < row.created:
                entry['created'] = row.created
            entry['tags'].setdefault(row.tag_name, []).append(row.tag_value)
        # Sort each tag-value list once at the end. The original re-sorted the
        # list after every single append inside the loop, which is quadratic
        # in the number of values per tag; the final output is identical.
        for entry in result.values():
            for values in entry['tags'].values():
                values.sort()
        return result.values()
class VideosListView(BaseCourseView):
    """
    Get data for the videos in a course.

    **Example request**

        GET /api/v0/courses/{course_id}/videos/

    **Response Values**

        Returns a collection of video views and metadata for each video.
        For each video, the collection contains the following data:

            * video_id: The ID of the video.
            * encoded_module_id: The encoded module ID.
            * duration: The length of the video in seconds.
            * segment_length: The length of each segment of the video in seconds.
            * users_at_start: The number of viewers at the start of the video.
            * users_at_end: The number of viewers at the end of the video.
            * created: The date the video data was updated.
    """
    serializer_class = serializers.VideoSerializer
    allow_empty = False
    model = models.Video

    def apply_date_filtering(self, queryset):
        """Override: video data is not date-filtered."""
        # no date filtering for videos -- just return the queryset
        return queryset
class ReportDownloadView(APIView):
    """
    Get information needed to download a CSV report.

    **Example request**

        GET /api/v0/courses/{course_id}/reports/{report_name}/

    **Response Values**

        Returns a single object describing the report:

            * course_id: The ID of the course
            * report_name: The name of the report
            * download_url: The Internet location from which the report can be downloaded

        When the storage backend supports it, the object also includes:

            * last_modified: The date the report was last updated
            * expiration_date: The date through which the link will be valid
            * file_size: The size in bytes of the CSV download
    """
    enabled_reports = settings.ENABLED_REPORT_IDENTIFIERS

    def get(self, _request, course_id, report_name):
        """Return download details for an enabled report; error out otherwise."""
        # Guard clause: reject unknown/disabled report names up front.
        if report_name not in self.enabled_reports:
            raise ReportFileNotFoundError(course_id=course_id, report_name=report_name)
        details = get_course_report_download_details(course_id, report_name)
        return Response(details)
| agpl-3.0 |
da1z/intellij-community | python/helpers/py3only/docutils/parsers/rst/languages/pt_br.py | 52 | 3956 | # $Id: pt_br.py 7119 2011-09-02 13:00:23Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Brazilian Portuguese-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Lookup table mapping Brazilian Portuguese directive names to the canonical
# directive names registered in docutils.parsers.rst.directives. Keys are
# matched against document text at runtime and must not be altered.
directives = {
      # language-dependent: fixed
      'aten\u00E7\u00E3o': 'attention',
      'cuidado': 'caution',
      'code (translation required)': 'code',
      'perigo': 'danger',
      'erro': 'error',
      'sugest\u00E3o': 'hint',
      'importante': 'important',
      'nota': 'note',
      'dica': 'tip',
      'aviso': 'warning',
      'exorta\u00E7\u00E3o': 'admonition',
      'barra-lateral': 'sidebar',
      't\u00F3pico': 'topic',
      'bloco-de-linhas': 'line-block',
      'literal-interpretado': 'parsed-literal',
      'rubrica': 'rubric',
      'ep\u00EDgrafo': 'epigraph',
      'destaques': 'highlights',
      'cita\u00E7\u00E3o-destacada': 'pull-quote',
      'compound (translation required)': 'compound',
      'container (translation required)': 'container',
      #'perguntas': 'questions',
      #'qa': 'questions',
      #'faq': 'questions',
      'table (translation required)': 'table',
      'csv-table (translation required)': 'csv-table',
      'list-table (translation required)': 'list-table',
      'meta': 'meta',
      'math (translation required)': 'math',
      #'imagemap': 'imagemap',
      'imagem': 'image',
      'figura': 'figure',
      'inclus\u00E3o': 'include',
      'cru': 'raw',
      'substitui\u00E7\u00E3o': 'replace',
      'unicode': 'unicode',
      'data': 'date',
      'classe': 'class',
      'role (translation required)': 'role',
      'default-role (translation required)': 'default-role',
      'title (translation required)': 'title',
      '\u00EDndice': 'contents',
      'numsec': 'sectnum',
      'numera\u00E7\u00E3o-de-se\u00E7\u00F5es': 'sectnum',
      'header (translation required)': 'header',
      'footer (translation required)': 'footer',
      #u'notas-de-rorap\u00E9': 'footnotes',
      #u'cita\u00E7\u00F5es': 'citations',
      'links-no-rodap\u00E9': 'target-notes',
      'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Brazilian Portuguese name to registered (in directives/__init__.py)
directive name mapping."""

# Lookup table mapping Brazilian Portuguese role names to the canonical
# role names used for interpreted text. Keys are runtime match strings.
roles = {
    # language-dependent: fixed
    'abbrevia\u00E7\u00E3o': 'abbreviation',
    'ab': 'abbreviation',
    'acr\u00F4nimo': 'acronym',
    'ac': 'acronym',
    'code (translation required)': 'code',
    '\u00EDndice-remissivo': 'index',
    'i': 'index',
    'subscrito': 'subscript',
    'sub': 'subscript',
    'sobrescrito': 'superscript',
    'sob': 'superscript',
    'refer\u00EAncia-a-t\u00EDtulo': 'title-reference',
    't\u00EDtulo': 'title-reference',
    't': 'title-reference',
    'refer\u00EAncia-a-pep': 'pep-reference',
    'pep': 'pep-reference',
    'refer\u00EAncia-a-rfc': 'rfc-reference',
    'rfc': 'rfc-reference',
    '\u00EAnfase': 'emphasis',
    'forte': 'strong',
    'literal': 'literal',
    'math (translation required)': 'math', # translation required?
    'refer\u00EAncia-por-nome': 'named-reference',
    'refer\u00EAncia-an\u00F4nima': 'anonymous-reference',
    'refer\u00EAncia-a-nota-de-rodap\u00E9': 'footnote-reference',
    'refer\u00EAncia-a-cita\u00E7\u00E3o': 'citation-reference',
    'refer\u00EAncia-a-substitui\u00E7\u00E3o': 'substitution-reference',
    'alvo': 'target',
    'refer\u00EAncia-a-uri': 'uri-reference',
    'uri': 'uri-reference',
    'url': 'uri-reference',
    'cru': 'raw',}
"""Mapping of Brazilian Portuguese role names to canonical role names
for interpreted text."""
| apache-2.0 |
vanhonit/xmario_center | build/lib.linux-i686-2.7/softwarecenter/ui/qml/pkglist.py | 4 | 7165 | #
# Copyright (C) 2011 Canonical
#
# Authors:
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
from PyQt4 import QtCore
from PyQt4.QtCore import QAbstractListModel, QModelIndex, pyqtSlot
from softwarecenter.db.database import StoreDatabase, Application
from softwarecenter.db.pkginfo import get_pkg_info
from softwarecenter.db.categories import CategoriesParser
from softwarecenter.paths import XAPIAN_BASE_PATH
from softwarecenter.backend import get_install_backend
from softwarecenter.backend.reviews import get_review_loader
class PkgListModel(QAbstractListModel):
    """
    Qt list model exposing software-center package documents to QML.

    Each row corresponds to a xapian document; the role names in COLUMNS are
    made available to QML delegates (e.g. ``_pkgname``, ``_icon``).
    """

    COLUMNS = ('_appname',
               '_pkgname',
               '_icon',
               '_summary',
               '_installed',
               '_description',
               '_ratings_total',
               '_ratings_average',
               '_installremoveprogress')

    def __init__(self, parent=None):
        """Open the xapian store, package cache, install backend and reviews."""
        super(PkgListModel, self).__init__()
        self._docs = []
        roles = dict(enumerate(PkgListModel.COLUMNS))
        self.setRoleNames(roles)
        self._query = ""
        self._category = ""
        pathname = os.path.join(XAPIAN_BASE_PATH, "xapian")
        self.cache = get_pkg_info()
        self.db = StoreDatabase(pathname, self.cache)
        self.db.open(use_axi=False)
        self.backend = get_install_backend()
        self.backend.connect("transaction-progress-changed",
                             self._on_backend_transaction_progress_changed)
        self.reviews = get_review_loader(self.cache)
        # FIXME: get this from a parent
        self._catparser = CategoriesParser(self.db)
        self._categories = self._catparser.parse_applications_menu(
            '/usr/share/app-install')

    # QAbstractListModel code
    def rowCount(self, parent=QModelIndex()):
        return len(self._docs)

    def data(self, index, role):
        """Return the value of the requested role for the row's document."""
        if not index.isValid():
            return None
        doc = self._docs[index.row()]
        role = self.COLUMNS[role]
        pkgname = unicode(self.db.get_pkgname(doc), "utf8", "ignore")
        appname = unicode(self.db.get_appname(doc), "utf8", "ignore")
        if role == "_pkgname":
            return pkgname
        elif role == "_appname":
            return appname
        elif role == "_summary":
            return unicode(self.db.get_summary(doc))
        elif role == "_installed":
            if not pkgname in self.cache:
                return False
            return self.cache[pkgname].is_installed
        elif role == "_description":
            if not pkgname in self.cache:
                return ""
            return self.cache[pkgname].description
        elif role == "_icon":
            iconname = self.db.get_iconname(doc)
            return self._findIcon(iconname)
        elif role == "_ratings_average":
            stats = self.reviews.get_review_stats(Application(appname,
                                                              pkgname))
            if stats:
                return stats.ratings_average
            return 0
        elif role == "_ratings_total":
            stats = self.reviews.get_review_stats(Application(appname,
                                                              pkgname))
            if stats:
                return stats.ratings_total
            return 0
        elif role == "_installremoveprogress":
            if pkgname in self.backend.pending_transactions:
                return self.backend.pending_transactions[pkgname].progress
            return -1
        return None

    # helper
    def _on_backend_transaction_progress_changed(self, backend, pkgname,
                                                 progress):
        column = self.COLUMNS.index("_installremoveprogress")
        # FIXME: instead of the entire model, just find the row that changed
        top = self.createIndex(0, column)
        bottom = self.createIndex(self.rowCount() - 1, column)
        self.dataChanged.emit(top, bottom)

    def _findIcon(self, iconname):
        """
        Return a ``file://`` URI for the app-install icon, or a generic
        fallback icon when no matching file exists.
        """
        path = "/usr/share/icons/Humanity/categories/32/applications-other.svg"
        # Bug fix: the original probed "<name>svg"/"<name>png" (the dot was
        # missing from the first two extensions), so .svg and .png icons were
        # never found and only .xpm worked. Probe "<name>.<ext>" properly.
        for ext in (".svg", ".png", ".xpm"):
            p = "/usr/share/app-install/icons/%s%s" % (iconname, ext)
            if os.path.exists(p):
                path = "file://%s" % p
                break
        return path

    def clear(self):
        """Remove all rows from the model (no-op when already empty)."""
        if self._docs == []:
            return
        self.beginRemoveRows(QModelIndex(), 0, self.rowCount() - 1)
        self._docs = []
        self.endRemoveRows()

    def _runQuery(self, querystr):
        """Replace the model contents with the documents matching querystr."""
        self.clear()
        docs = self.db.get_docs_from_query(
            str(querystr), start=0, end=500, category=self._category)
        self.beginInsertRows(QModelIndex(), 0, len(docs) - 1)
        self._docs = docs
        self.endInsertRows()

    # install/remove interface (for qml)
    @pyqtSlot(str)
    def installPackage(self, pkgname):
        appname = ""
        iconname = ""
        app = Application(appname, pkgname)
        self.backend.install(app, iconname)

    @pyqtSlot(str)
    def removePackage(self, pkgname):
        appname = ""
        iconname = ""
        app = Application(appname, pkgname)
        self.backend.remove(app, iconname)

    # searchQuery property (for qml )
    def getSearchQuery(self):
        return self._query

    def setSearchQuery(self, query):
        self._query = query
        self._runQuery(query)

    searchQueryChanged = QtCore.pyqtSignal()
    searchQuery = QtCore.pyqtProperty(unicode, getSearchQuery, setSearchQuery,
                                      notify=searchQueryChanged)

    # allow to refine searches for specific categories
    @pyqtSlot(str)
    def setCategory(self, catname):
        """Restrict subsequent queries to the named category and re-query."""
        # empty category resets it
        if not catname:
            self._category = None
        else:
            # search for the category
            for cat in self._categories:
                if cat.name == catname:
                    self._category = cat
                    break
            else:
                raise Exception("Can not find category '%s'" % catname)
        # and trigger a query
        self._runQuery(self._query)
if __name__ == "__main__":
    from PyQt4.QtGui import QApplication
    from PyQt4.QtDeclarative import QDeclarativeView
    import sys

    # Manual test harness: show the package list inside a QML view.
    app = QApplication(sys.argv)
    app.cache = get_pkg_info()
    app.cache.open()
    view = QDeclarativeView()
    model = PkgListModel()
    rc = view.rootContext()
    # Expose the model to QML under the name used by AppListView.qml.
    rc.setContextProperty('pkglistmodel', model)
    # load the main QML file into the view
    qmlpath = os.path.join(os.path.dirname(__file__), "AppListView.qml")
    view.setSource(qmlpath)
    # show it
    view.show()
    sys.exit(app.exec_())
| gpl-3.0 |
bitesofcode/projexui | projexui/widgets/xoverlaywidget.py | 2 | 7944 |
from projexui.widgets.xtoolbutton import XToolButton
from projexui import resources
from xqt import QtCore, QtGui
class XOverlayWidget(QtGui.QWidget):
    """
    Semi-transparent overlay that dims its parent widget and hosts an
    optional central widget, behaving like a modal dialog (accept/reject,
    ``exec_``, ``finished`` signal with a 0/1 result code).
    """

    finished = QtCore.Signal(int)

    def __init__(self, parent=None):
        """Build the overlay over *parent* and track its resize events."""
        super(XOverlayWidget, self).__init__(parent)

        # define custom properties
        self._centralWidget = None
        self._result = None
        self._closable = True
        self._closeAlignment = QtCore.Qt.AlignTop | QtCore.Qt.AlignRight
        self._closeButton = XToolButton(self)
        self._closeButton.setShadowed(True)
        self._closeButton.setIcon(QtGui.QIcon(resources.find('img/overlay/close.png')))
        self._closeButton.setIconSize(QtCore.QSize(24, 24))
        self._closeButton.setToolTip('Close')

        # create the coloring for the overlay
        palette = self.palette()
        clr = QtGui.QColor('#222222')
        clr.setAlpha(210)
        palette.setColor(palette.Window, clr)
        self.setPalette(palette)
        self.setAutoFillBackground(True)

        # listen to the parents event filter
        parent.installEventFilter(self)

        # initialize the widget
        self.hide()
        self.move(0, 0)
        self.resize(parent.size())

        self._closeButton.clicked.connect(self.reject)

    def accept(self):
        """
        Accepts this overlay and exits the modal window.
        """
        # close() first: hiding resets the result to 0 (see setVisible),
        # so the accepted result must be set afterwards.
        self.close()
        self.setResult(1)
        self.finished.emit(1)

    def adjustSize(self):
        """
        Adjusts the size of this widget as the parent resizes.
        """
        # adjust the close button
        align = self.closeAlignment()
        if align & QtCore.Qt.AlignTop:
            y = 6
        else:
            y = self.height() - 38
        if align & QtCore.Qt.AlignLeft:
            x = 6
        else:
            x = self.width() - 38
        self._closeButton.move(x, y)

        # adjust the central widget
        widget = self.centralWidget()
        if widget is not None:
            center = self.rect().center()
            widget.move(center.x() - widget.width() / 2, center.y() - widget.height() / 2)

    def closeAlignment(self):
        """
        Returns the alignment for the close button for this overlay widget.

        :return     <QtCore.Qt.Alignment>
        """
        return self._closeAlignment

    def centralWidget(self):
        """
        Returns the central widget for this overlay. If there is one, then it will
        be automatically moved with this object.

        :return     <QtGui.QWidget>
        """
        return self._centralWidget

    def isClosable(self):
        """
        Returns whether or not the user should be able to close this overlay widget.

        :return     <bool>
        """
        return self._closable

    def keyPressEvent(self, event):
        """
        Exits the modal window on an escape press.

        :param      event | <QtCore.QKeyPressEvent>
        """
        if event.key() == QtCore.Qt.Key_Escape:
            self.reject()
        super(XOverlayWidget, self).keyPressEvent(event)

    def eventFilter(self, object, event):
        """
        Resizes this overlay as the widget resizes.

        :param      object | <QtCore.QObject>
                    event  | <QtCore.QEvent>

        :return     <bool>
        """
        if object == self.parent() and event.type() == QtCore.QEvent.Resize:
            self.resize(event.size())
        elif event.type() == QtCore.QEvent.Close:
            # central widget (or parent) closing rejects the overlay
            self.setResult(0)
        return False

    def exec_(self, autodelete=True):
        """Run a local event loop until the overlay is hidden; return result."""
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose, False)
        if self.centralWidget():
            QtCore.QTimer.singleShot(0, self.centralWidget().setFocus)
        loop = QtCore.QEventLoop()
        # NOTE(review): this spins processEvents() in a busy loop rather than
        # blocking in loop.exec_() -- confirm whether this was intentional.
        while self.isVisible() and not QtCore.QCoreApplication.closingDown():
            loop.processEvents()
        if autodelete:
            self.deleteLater()
        return self.result()

    def reject(self):
        """
        Rejects this overlay and exits the modal window.
        """
        self.close()
        self.setResult(0)
        self.finished.emit(0)

    def result(self):
        """
        Returns the result from this overlay widget.

        :return     <int>
        """
        return int(self._result)

    def resizeEvent(self, event):
        """
        Handles a resize event for this overlay, centering the central widget if
        one is found.

        :param      event | <QtCore.QEvent>
        """
        super(XOverlayWidget, self).resizeEvent(event)
        self.adjustSize()

    def setCentralWidget(self, widget):
        """
        Sets the central widget for this overlay to the inputed widget.

        :param      widget | <QtGui.QWidget>
        """
        self._centralWidget = widget
        if widget is not None:
            widget.setParent(self)
            widget.installEventFilter(self)

            # create the drop shadow effect
            effect = QtGui.QGraphicsDropShadowEffect(self)
            effect.setColor(QtGui.QColor('black'))
            effect.setBlurRadius(80)
            effect.setOffset(0, 0)
            widget.setGraphicsEffect(effect)

    def setClosable(self, state):
        """
        Sets whether or not the user should be able to close this overlay widget.

        :param      state | <bool>
        """
        self._closable = state
        if state:
            self._closeButton.show()
        else:
            self._closeButton.hide()

    def setCloseAlignment(self, align):
        """
        Sets the alignment for the close button for this overlay widget.

        :param      align | <QtCore.Qt.Alignment>
        """
        self._closeAlignment = align

    def setResult(self, result):
        """
        Sets the result for this overlay to the inputed value.

        :param      result | <int>
        """
        self._result = result

    def setVisible(self, state):
        """
        Closes this widget and kills the result.
        """
        super(XOverlayWidget, self).setVisible(state)
        # hiding always resets the result; accept() re-sets it afterwards
        if not state:
            self.setResult(0)

    def showEvent(self, event):
        """
        Ensures this widget is the top-most widget for its parent.

        :param      event | <QtCore.QEvent>
        """
        super(XOverlayWidget, self).showEvent(event)

        # raise to the top
        self.raise_()
        self._closeButton.setVisible(self.isClosable())

        widget = self.centralWidget()
        if widget:
            # slide the central widget in from above the overlay
            center = self.rect().center()
            start_x = end_x = center.x() - widget.width() / 2
            start_y = -widget.height()
            end_y = center.y() - widget.height() / 2
            start = QtCore.QPoint(start_x, start_y)
            end = QtCore.QPoint(end_x, end_y)

            # create the movement animation
            anim = QtCore.QPropertyAnimation(self)
            anim.setPropertyName('pos')
            anim.setTargetObject(widget)
            anim.setStartValue(start)
            anim.setEndValue(end)
            anim.setDuration(500)
            anim.setEasingCurve(QtCore.QEasingCurve.InOutQuad)
            anim.finished.connect(anim.deleteLater)
            anim.start()

    @staticmethod
    def modal(widget, parent=None, align=QtCore.Qt.AlignTop | QtCore.Qt.AlignRight, blurred=True):
        """
        Creates a modal dialog for this overlay with the inputed widget. If the user
        accepts the widget, then 1 will be returned, otherwise, 0 will be returned.

        :param      widget | <QtCore.QWidget>

        NOTE(review): the ``blurred`` parameter is currently unused -- confirm
        whether a blur effect was intended here.
        """
        if parent is None:
            parent = QtGui.QApplication.instance().activeWindow()
        overlay = XOverlayWidget(parent)
        overlay.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        overlay.setCentralWidget(widget)
        overlay.setCloseAlignment(align)
        overlay.show()
        return overlay
| lgpl-3.0 |
MTASZTAKI/ApertusVR | plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/deep_mimic/learning/replay_buffer.py | 4 | 10316 | import numpy as np
import copy
from pybullet_utils.logger import Logger
import inspect as inspect
from pybullet_envs.deep_mimic.env.env import Env
import pybullet_utils.math_util as MathUtil
class ReplayBuffer(object):
TERMINATE_KEY = 'terminate'
PATH_START_KEY = 'path_start'
PATH_END_KEY = 'path_end'
def __init__(self, buffer_size):
    """
    Create a circular experience buffer holding at most ``buffer_size`` samples.

    Arguments:
        buffer_size (int): capacity of the ring buffer; must be positive.
    """
    assert buffer_size > 0

    self.buffer_size = buffer_size  # capacity of the ring buffer
    self.total_count = 0  # samples ever stored (monotonically increasing)
    self.buffer_head = 0  # next write position in the ring
    self.buffer_tail = MathUtil.INVALID_IDX  # oldest valid position
    self.num_paths = 0
    self._sample_buffers = dict()
    self.buffers = None  # per-key storage, lazily initialized on first store()

    self.clear()
    return
def sample(self, n):
    """
    Uniformly sample ``n`` buffer indices from the currently valid window.

    Indices marking the end of a path are rejected and redrawn, so terminal
    states are never returned.

    Returns:
        int ndarray of shape (n,).
    """
    curr_size = self.get_current_size()
    assert curr_size > 0

    idx = np.empty(n, dtype=int)
    # makes sure that the end states are not sampled
    for i in range(n):
        while True:
            curr_idx = np.random.randint(0, curr_size, size=1)[0]
            # offset into the live window, then wrap around the ring buffer
            curr_idx += self.buffer_tail
            curr_idx = np.mod(curr_idx, self.buffer_size)
            if not self.is_path_end(curr_idx):
                break
        idx[i] = curr_idx
    return idx
def sample_filtered(self, n, key):
assert key in self._sample_buffers
curr_buffer = self._sample_buffers[key]
idx = curr_buffer.sample(n)
return idx
def count_filtered(self, key):
curr_buffer = self._sample_buffers[key]
return curr_buffer.count
def get(self, key, idx):
return self.buffers[key][idx]
def get_all(self, key):
return self.buffers[key]
def get_idx_filtered(self, key):
assert key in self._sample_buffers
curr_buffer = self._sample_buffers[key]
idx = curr_buffer.slot_to_idx[:curr_buffer.count]
return idx
def get_path_start(self, idx):
return self.buffers[self.PATH_START_KEY][idx]
def get_path_end(self, idx):
return self.buffers[self.PATH_END_KEY][idx]
def get_pathlen(self, idx):
is_array = isinstance(idx, np.ndarray) or isinstance(idx, list)
if not is_array:
idx = [idx]
n = len(idx)
start_idx = self.get_path_start(idx)
end_idx = self.get_path_end(idx)
pathlen = np.empty(n, dtype=int)
for i in range(n):
curr_start = start_idx[i]
curr_end = end_idx[i]
if curr_start < curr_end:
curr_len = curr_end - curr_start
else:
curr_len = self.buffer_size - curr_start + curr_end
pathlen[i] = curr_len
if not is_array:
pathlen = pathlen[0]
return pathlen
def is_valid_path(self, idx):
start_idx = self.get_path_start(idx)
valid = start_idx != MathUtil.INVALID_IDX
return valid
def store(self, path):
start_idx = MathUtil.INVALID_IDX
n = path.pathlength()
if (n > 0):
assert path.is_valid()
if path.check_vals():
if self.buffers is None:
self._init_buffers(path)
idx = self._request_idx(n + 1)
self._store_path(path, idx)
self._add_sample_buffers(idx)
self.num_paths += 1
self.total_count += n + 1
start_idx = idx[0]
else:
Logger.print2('Invalid path data value detected')
return start_idx
def clear(self):
self.buffer_head = 0
self.buffer_tail = MathUtil.INVALID_IDX
self.num_paths = 0
for key in self._sample_buffers:
self._sample_buffers[key].clear()
return
def get_next_idx(self, idx):
next_idx = np.mod(idx + 1, self.buffer_size)
return next_idx
def is_terminal_state(self, idx):
terminate_flags = self.buffers[self.TERMINATE_KEY][idx]
terminate = terminate_flags != Env.Terminate.Null.value
is_end = self.is_path_end(idx)
terminal_state = np.logical_and(terminate, is_end)
return terminal_state
def check_terminal_flag(self, idx, flag):
terminate_flags = self.buffers[self.TERMINATE_KEY][idx]
terminate = terminate_flags == flag.value
return terminate
def is_path_end(self, idx):
is_end = self.buffers[self.PATH_END_KEY][idx] == idx
return is_end
def add_filter_key(self, key):
assert self.get_current_size() == 0
if key not in self._sample_buffers:
self._sample_buffers[key] = SampleBuffer(self.buffer_size)
return
def get_current_size(self):
if self.buffer_tail == MathUtil.INVALID_IDX:
return 0
elif self.buffer_tail < self.buffer_head:
return self.buffer_head - self.buffer_tail
else:
return self.buffer_size - self.buffer_tail + self.buffer_head
def _check_flags(self, key, flags):
return (flags & key) == key
def _add_sample_buffers(self, idx):
flags = self.buffers['flags']
for key in self._sample_buffers:
curr_buffer = self._sample_buffers[key]
filter_idx = [
i for i in idx if (self._check_flags(key, flags[i]) and not self.is_path_end(i))
]
curr_buffer.add(filter_idx)
return
def _free_sample_buffers(self, idx):
for key in self._sample_buffers:
curr_buffer = self._sample_buffers[key]
curr_buffer.free(idx)
return
def _init_buffers(self, path):
self.buffers = dict()
self.buffers[self.PATH_START_KEY] = MathUtil.INVALID_IDX * np.ones(self.buffer_size, dtype=int)
self.buffers[self.PATH_END_KEY] = MathUtil.INVALID_IDX * np.ones(self.buffer_size, dtype=int)
for key in dir(path):
val = getattr(path, key)
if not key.startswith('__') and not inspect.ismethod(val):
if key == self.TERMINATE_KEY:
self.buffers[self.TERMINATE_KEY] = np.zeros(shape=[self.buffer_size], dtype=int)
else:
val_type = type(val[0])
is_array = val_type == np.ndarray
if is_array:
shape = [self.buffer_size, val[0].shape[0]]
dtype = val[0].dtype
else:
shape = [self.buffer_size]
dtype = val_type
self.buffers[key] = np.zeros(shape, dtype=dtype)
return
def _request_idx(self, n):
assert n + 1 < self.buffer_size # bad things can happen if path is too long
remainder = n
idx = []
start_idx = self.buffer_head
while remainder > 0:
end_idx = np.minimum(start_idx + remainder, self.buffer_size)
remainder -= (end_idx - start_idx)
free_idx = list(range(start_idx, end_idx))
self._free_idx(free_idx)
idx += free_idx
start_idx = 0
self.buffer_head = (self.buffer_head + n) % self.buffer_size
return idx
def _free_idx(self, idx):
assert (idx[0] <= idx[-1])
n = len(idx)
if self.buffer_tail != MathUtil.INVALID_IDX:
update_tail = idx[0] <= idx[-1] and idx[0] <= self.buffer_tail and idx[-1] >= self.buffer_tail
update_tail |= idx[0] > idx[-1] and (idx[0] <= self.buffer_tail or
idx[-1] >= self.buffer_tail)
if update_tail:
i = 0
while i < n:
curr_idx = idx[i]
if self.is_valid_path(curr_idx):
start_idx = self.get_path_start(curr_idx)
end_idx = self.get_path_end(curr_idx)
pathlen = self.get_pathlen(curr_idx)
if start_idx < end_idx:
self.buffers[self.PATH_START_KEY][start_idx:end_idx + 1] = MathUtil.INVALID_IDX
self._free_sample_buffers(list(range(start_idx, end_idx + 1)))
else:
self.buffers[self.PATH_START_KEY][start_idx:self.buffer_size] = MathUtil.INVALID_IDX
self.buffers[self.PATH_START_KEY][0:end_idx + 1] = MathUtil.INVALID_IDX
self._free_sample_buffers(list(range(start_idx, self.buffer_size)))
self._free_sample_buffers(list(range(0, end_idx + 1)))
self.num_paths -= 1
i += pathlen + 1
self.buffer_tail = (end_idx + 1) % self.buffer_size
else:
i += 1
else:
self.buffer_tail = idx[0]
return
def _store_path(self, path, idx):
n = path.pathlength()
for key, data in self.buffers.items():
if key != self.PATH_START_KEY and key != self.PATH_END_KEY and key != self.TERMINATE_KEY:
val = getattr(path, key)
val_len = len(val)
assert val_len == n or val_len == n + 1
data[idx[:val_len]] = val
self.buffers[self.TERMINATE_KEY][idx] = path.terminate.value
self.buffers[self.PATH_START_KEY][idx] = idx[0]
self.buffers[self.PATH_END_KEY][idx] = idx[-1]
return
class SampleBuffer(object):
    """Dense index set supporting O(1) add/remove and uniform sampling.

    Maintains a bijection between stored buffer indices and contiguous
    "slots" [0, count): slot_to_idx lists the members densely so sampling is
    a uniform draw over slots; idx_to_slot is the inverse map, with
    MathUtil.INVALID_IDX marking absent entries.
    """

    def __init__(self, size):
        self.idx_to_slot = np.empty(shape=[size], dtype=int)
        self.slot_to_idx = np.empty(shape=[size], dtype=int)
        self.count = 0
        self.clear()
        return

    def clear(self):
        """Remove all entries."""
        self.idx_to_slot.fill(MathUtil.INVALID_IDX)
        self.slot_to_idx.fill(MathUtil.INVALID_IDX)
        self.count = 0
        return

    def is_valid(self, idx):
        # an index is a member iff it has been assigned a slot
        return self.idx_to_slot[idx] != MathUtil.INVALID_IDX

    def get_size(self):
        """Return the capacity (not the current member count)."""
        return self.idx_to_slot.shape[0]

    def add(self, idx):
        """Insert each index in idx; indices already present are ignored."""
        for i in idx:
            if not self.is_valid(i):
                new_slot = self.count
                assert new_slot >= 0
                self.idx_to_slot[i] = new_slot
                self.slot_to_idx[new_slot] = i
                self.count += 1
        return

    def free(self, idx):
        """Remove each index in idx; absent indices are ignored."""
        for i in idx:
            if self.is_valid(i):
                slot = self.idx_to_slot[i]
                last_slot = self.count - 1
                last_idx = self.slot_to_idx[last_slot]

                # swap-with-last keeps the slot array dense; the statement
                # order also handles i occupying the last slot (later writes win)
                self.idx_to_slot[last_idx] = slot
                self.slot_to_idx[slot] = last_idx
                self.idx_to_slot[i] = MathUtil.INVALID_IDX
                self.slot_to_idx[last_slot] = MathUtil.INVALID_IDX

                self.count -= 1
        return

    def sample(self, n):
        """Draw n member indices uniformly with replacement (empty array if no members)."""
        if self.count > 0:
            slots = np.random.randint(0, self.count, size=n)
            idx = self.slot_to_idx[slots]
        else:
            idx = np.empty(shape=[0], dtype=int)
        return idx

    def check_consistency(self):
        """Debug helper: verify the two maps are mutual inverses."""
        valid = True
        if self.count < 0:
            valid = False

        if valid:
            for i in range(self.get_size()):
                if self.is_valid(i):
                    s = self.idx_to_slot[i]
                    if self.slot_to_idx[s] != i:
                        valid = False
                        break

                s2i = self.slot_to_idx[i]
                if s2i != MathUtil.INVALID_IDX:
                    i2s = self.idx_to_slot[s2i]
                    if i2s != i:
                        valid = False
                        break

        # both maps must have the same number of empty entries
        count0 = np.sum(self.idx_to_slot == MathUtil.INVALID_IDX)
        count1 = np.sum(self.slot_to_idx == MathUtil.INVALID_IDX)
        valid &= count0 == count1

        return valid
| mit |
mlperf/inference_results_v0.7 | closed/Nettrix/code/ssd-resnet34/tensorrt/calibrator.py | 12 | 3902 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pycuda.driver as cuda
import pycuda.autoinit
import tensorrt as trt
import os, sys
sys.path.insert(0, os.getcwd())
from code.common import logging
from PIL import Image
class SSDResNet34EntropyCalibrator(trt.IInt8EntropyCalibrator2):
    """INT8 entropy calibrator for SSD-ResNet34.

    Streams preprocessed ``.npy`` image batches from ``data_dir`` to TensorRT
    during INT8 calibration and caches the resulting scale table in
    ``cache_file`` so subsequent builds can skip calibration.
    """

    def __init__(self, data_dir, cache_file, batch_size, max_batches, force_calibration, calib_data_map):
        # Whenever you specify a custom constructor for a TensorRT class,
        # you MUST call the constructor of the parent explicitly.
        trt.IInt8EntropyCalibrator2.__init__(self)

        self.cache_file = cache_file
        self.max_batches = max_batches

        # calib_data_map lists one image basename per line.
        image_list = []
        with open(calib_data_map) as f:
            for line in f:
                image_list.append(line.strip())

        self.shape = (batch_size, 3, 1200, 1200)
        # Device buffer reused for every calibration batch (float32 -> 4 bytes/elem).
        self.device_input = cuda.mem_alloc(trt.volume(self.shape) * 4)
        self.coco_id = 0
        self.force_calibration = force_calibration

        # Create a generator that will give us batches. We can use next() to iterate over the result.
        def load_batches():
            batch_id = 0
            batch_size = self.shape[0]
            batch_data = np.zeros(shape=self.shape, dtype=np.float32)
            while self.coco_id < len(image_list) and batch_id < self.max_batches:
                print("Calibrating with batch {}".format(batch_id))
                batch_id += 1
                end_coco_id = min(self.coco_id + batch_size, len(image_list))
                for i in range(self.coco_id, end_coco_id):
                    batch_data[i - self.coco_id] = np.load(os.path.join(data_dir, image_list[i] + ".npy"))
                self.coco_id = end_coco_id
                # NOTE(review): a final partial batch keeps stale rows from the
                # previous batch in its tail -- confirm this is intended.
                yield batch_data.tobytes()
        self.batches = load_batches()

        # If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
        if not self.force_calibration and os.path.exists(self.cache_file):
            with open(self.cache_file, "rb") as f:
                self.cache = f.read()
        else:
            self.cache = None

    def get_batch_size(self):
        """Return the calibration batch size."""
        return self.shape[0]

    # TensorRT passes along the names of the engine bindings to the get_batch function.
    # You don't necessarily have to use them, but they can be useful to understand the order of
    # the inputs. The bindings list is expected to have the same ordering as 'names'.
    def get_batch(self, names):
        """Copy the next batch to the device; return device pointers or None when done."""
        try:
            # Get a single batch.
            data = next(self.batches)
            # Copy to device, then return a list containing pointers to input device buffers.
            cuda.memcpy_htod(self.device_input, data)
            return [int(self.device_input)]
        except StopIteration:
            # When we're out of batches, we return either [] or None.
            # This signals to TensorRT that there is no calibration data remaining.
            return None

    def read_calibration_cache(self):
        """Return the cached calibration table (bytes), or None to force calibration."""
        return self.cache

    def write_calibration_cache(self, cache):
        """Persist the calibration table produced by TensorRT."""
        with open(self.cache_file, "wb") as f:
            f.write(cache)

    def clear_cache(self):
        """Drop the in-memory cache so the next build recalibrates."""
        self.cache = None
| apache-2.0 |
mohitsethi/packstack | tests/installer/test_validators.py | 24 | 3858 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
from unittest import TestCase
from packstack.installer.validators import *
from ..test_base import PackstackTestCaseMixin
class ValidatorsTestCase(PackstackTestCaseMixin, TestCase):
    """Unit tests for packstack.installer.validators."""

    def setUp(self):
        # Creating a temp directory that can be used by tests
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        # remove the temp directory
        shutil.rmtree(self.tempdir)

    def test_validate_integer(self):
        """Test packstack.installer.validators.validate_integer"""
        validate_integer('1')
        self.assertRaises(ParamValidationError, validate_integer, 'test')

    def test_validate_regexp(self):
        """Test packstack.installer.validators.validate_regexp"""
        # raw strings: '\w' is an invalid escape sequence and a
        # DeprecationWarning on Python 3.6+ (same runtime value).
        validate_regexp('Test_123', options=[r'\w'])
        self.assertRaises(ParamValidationError, validate_regexp,
                          '!#$%', options=[r'\w'])

    def test_validate_port(self):
        """Test packstack.installer.validators.validate_port"""
        validate_port('666')
        self.assertRaises(ParamValidationError, validate_port, 'test')
        self.assertRaises(ParamValidationError, validate_port, '-3')

    def test_validate_not_empty(self):
        """Test packstack.installer.validators.validate_not_empty"""
        validate_not_empty('test')
        validate_not_empty(False)
        self.assertRaises(ParamValidationError, validate_not_empty, '')
        self.assertRaises(ParamValidationError, validate_not_empty, [])
        self.assertRaises(ParamValidationError, validate_not_empty, {})

    def test_validate_options(self):
        """Test packstack.installer.validators.validate_options"""
        validate_options('a', options=['a', 'b'])
        validate_options('b', options=['a', 'b'])
        self.assertRaises(ParamValidationError, validate_options,
                          'c', options=['a', 'b'])

    def test_validate_ip(self):
        """Test packstack.installer.validators.validate_ip"""
        validate_ip('127.0.0.1')
        validate_ip('::1')
        self.assertRaises(ParamValidationError, validate_ip, 'test')

    def test_validate_file(self):
        """Test packstack.installer.validators.validate_file"""
        fname = os.path.join(self.tempdir, '.test_validate_file')
        bad_name = os.path.join(self.tempdir, '.me_no_exists')
        with open(fname, 'w') as f:
            f.write('test')
        validate_file(fname)
        self.assertRaises(ParamValidationError, validate_file, bad_name)

    def test_validate_ping(self):
        """Test packstack.installer.validators.validate_ping"""
        # ping to broadcast fails
        self.assertRaises(ParamValidationError, validate_ping,
                          '255.255.255.255')

    def test_validate_ssh(self):
        """Test packstack.installer.validators.validate_ssh"""
        # ssh to broadcast fails
        self.assertRaises(ParamValidationError, validate_ssh,
                          '255.255.255.255')

    def test_validate_float(self):
        """Test packstack.installer.validators.validate_float"""
        validate_float('5.3')
        self.assertRaises(ParamValidationError, validate_float, 'test')
| apache-2.0 |
shepdelacreme/ansible | test/units/template/test_templar.py | 11 | 17101 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.runtime import Context
from units.compat import unittest
from units.compat.mock import patch
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.template import Templar, AnsibleContext, AnsibleEnvironment
from ansible.utils.unsafe_proxy import AnsibleUnsafe, wrap_var
from units.mock.loader import DictDataLoader
class BaseTemplar(object):
    """Shared fixture: builds a Templar over a canned variable set and a
    fake loader, plus a recursive unsafe-marker checker used by the tests."""

    def setUp(self):
        self.test_vars = {
            'foo': "bar",
            'bam': "{{foo}}",
            'num': 1,
            'var_true': True,
            'var_false': False,
            'var_dict': {'a': "b"},
            'bad_dict': "{a='b'",
            'var_list': [1],
            'recursive': "{{recursive}}",
            'some_var': "blip",
            'some_static_var': "static_blip",
            'some_keyword': "{{ foo }}",
            'some_unsafe_var': wrap_var("unsafe_blip"),
            'some_static_unsafe_var': wrap_var("static_unsafe_blip"),
            'some_unsafe_keyword': wrap_var("{{ foo }}"),
        }
        self.fake_loader = DictDataLoader({"/path/to/my_file.txt": "foo\n"})
        self.templar = Templar(loader=self.fake_loader, variables=self.test_vars)

    def is_unsafe(self, obj):
        """Return True when obj, or anything nested inside it, carries the
        unsafe marker (``__UNSAFE__`` attribute or AnsibleUnsafe type)."""
        if obj is None:
            return False
        if hasattr(obj, '__UNSAFE__') or isinstance(obj, AnsibleUnsafe):
            return True
        if isinstance(obj, dict):
            if any(self.is_unsafe(k) or self.is_unsafe(v) for k, v in obj.items()):
                return True
        if isinstance(obj, list):
            if any(self.is_unsafe(element) for element in obj):
                return True
        return bool(isinstance(obj, string_types) and hasattr(obj, '__UNSAFE__'))
# class used for testing arbitrary objects passed to template
class SomeClass(object):
    # class-level attribute, shared by all instances
    foo = 'bar'

    def __init__(self):
        # instance attribute, distinct from the class attribute above
        self.blip = 'blip'
class SomeUnsafeClass(AnsibleUnsafe):
    # AnsibleUnsafe subclass used to verify that unsafe marking propagates
    # for arbitrary (non-string) objects passed through templating
    def __init__(self):
        super(SomeUnsafeClass, self).__init__()
        self.blip = 'unsafe blip'
class TestTemplarTemplate(BaseTemplar, unittest.TestCase):
    """Tests for Templar.template()/templatable(): bare-variable conversion,
    data conversion, and propagation of the unsafe marker."""

    def test_lookup_jinja_dict_key_in_static_vars(self):
        res = self.templar.template("{'some_static_var': '{{ some_var }}'}",
                                    static_vars=['some_static_var'])
        # self.assertEqual(res['{{ a_keyword }}'], "blip")
        print(res)

    def test_templatable(self):
        res = self.templar.templatable('foo')
        self.assertTrue(res)

    def test_templatable_none(self):
        res = self.templar.templatable(None)
        self.assertTrue(res)

    @patch('ansible.template.Templar.template', side_effect=AnsibleError)
    def test_templatable_exception(self, mock_template):
        # templatable() must swallow template errors and report False
        res = self.templar.templatable('foo')
        self.assertFalse(res)

    def test_template_convert_bare_string(self):
        res = self.templar.template('foo', convert_bare=True)
        self.assertEqual(res, 'bar')

    def test_template_convert_bare_nested(self):
        res = self.templar.template('bam', convert_bare=True)
        self.assertEqual(res, 'bar')

    def test_template_convert_bare_unsafe(self):
        res = self.templar.template('some_unsafe_var', convert_bare=True)
        self.assertEqual(res, 'unsafe_blip')
        # self.assertIsInstance(res, AnsibleUnsafe)
        self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)

    def test_template_convert_bare_filter(self):
        res = self.templar.template('bam|capitalize', convert_bare=True)
        self.assertEqual(res, 'Bar')

    def test_template_convert_bare_filter_unsafe(self):
        res = self.templar.template('some_unsafe_var|capitalize', convert_bare=True)
        self.assertEqual(res, 'Unsafe_blip')
        # self.assertIsInstance(res, AnsibleUnsafe)
        self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)

    def test_template_convert_data(self):
        res = self.templar.template('{{foo}}', convert_data=True)
        self.assertTrue(res)
        self.assertEqual(res, 'bar')

    @patch('ansible.template.safe_eval', side_effect=AnsibleError)
    def test_template_convert_data_template_in_data(self, mock_safe_eval):
        res = self.templar.template('{{bam}}', convert_data=True)
        self.assertTrue(res)
        self.assertEqual(res, 'bar')

    def test_template_convert_data_bare(self):
        res = self.templar.template('bam', convert_data=True)
        self.assertTrue(res)
        self.assertEqual(res, 'bam')

    def test_template_convert_data_to_json(self):
        res = self.templar.template('{{bam|to_json}}', convert_data=True)
        self.assertTrue(res)
        self.assertEqual(res, '"bar"')

    def test_template_convert_data_convert_bare_data_bare(self):
        res = self.templar.template('bam', convert_data=True, convert_bare=True)
        self.assertTrue(res)
        self.assertEqual(res, 'bar')

    def test_template_unsafe_non_string(self):
        unsafe_obj = AnsibleUnsafe()
        res = self.templar.template(unsafe_obj)
        self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)

    def test_template_unsafe_non_string_subclass(self):
        unsafe_obj = SomeUnsafeClass()
        res = self.templar.template(unsafe_obj)
        self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)

    def test_weird(self):
        # malformed jinja delimiters must raise, not crash or pass through
        data = u'''1 2 #}huh{# %}ddfg{% }}dfdfg{{ {%what%} {{#foo#}} {%{bar}%} {#%blip%#} {{asdfsd%} 3 4 {{foo}} 5 6 7'''
        self.assertRaisesRegexp(AnsibleError,
                                'template error while templating string',
                                self.templar.template,
                                data)
class TestTemplarMisc(BaseTemplar, unittest.TestCase):
    """Basic templating behavior, backslash escaping, and jinja2 extension
    configuration."""

    def test_templar_simple(self):

        templar = self.templar

        # test some basic templating
        self.assertEqual(templar.template("{{foo}}"), "bar")
        self.assertEqual(templar.template("{{foo}}\n"), "bar\n")
        self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
        self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar")
        self.assertEqual(templar.template("{{bam}}"), "bar")
        self.assertEqual(templar.template("{{num}}"), 1)
        self.assertEqual(templar.template("{{var_true}}"), True)
        self.assertEqual(templar.template("{{var_false}}"), False)
        self.assertEqual(templar.template("{{var_dict}}"), dict(a="b"))
        self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'")
        self.assertEqual(templar.template("{{var_list}}"), [1])
        self.assertEqual(templar.template(1, convert_bare=True), 1)

        # force errors
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}")
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}")
        self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}")
        self.assertRaises(AnsibleError, templar.template, "{{recursive}}")
        self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}")

        # test with fail_on_undefined=False
        self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}")

        # test set_available_variables()
        templar.set_available_variables(variables=dict(foo="bam"))
        self.assertEqual(templar.template("{{foo}}"), "bam")
        # variables must be a dict() for set_available_variables()
        self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam")

    def test_templar_escape_backslashes(self):
        # Rule of thumb: If escape backslashes is True you should end up with
        # the same number of backslashes as when you started.
        self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar")
        self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar")
        self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar")
        self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar")
        self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t")
        self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t")

    def test_template_jinja2_extensions(self):
        fake_loader = DictDataLoader({})
        templar = Templar(loader=fake_loader)

        old_exts = C.DEFAULT_JINJA2_EXTENSIONS
        try:
            # _get_extensions() should split the comma-separated config value
            C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
            self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
        finally:
            # restore the global so other tests are unaffected
            C.DEFAULT_JINJA2_EXTENSIONS = old_exts
class TestTemplarLookup(BaseTemplar, unittest.TestCase):
    """Tests for Templar._lookup(): plugin resolution, undefined variables,
    wantlist handling, and unsafe-marker propagation through lookups."""

    def test_lookup_missing_plugin(self):
        self.assertRaisesRegexp(AnsibleError,
                                r'lookup plugin \(not_a_real_lookup_plugin\) not found',
                                self.templar._lookup,
                                'not_a_real_lookup_plugin',
                                'an_arg', a_keyword_arg='a_keyword_arg_value')

    def test_lookup_list(self):
        res = self.templar._lookup('list', 'an_arg', 'another_arg')
        self.assertEqual(res, 'an_arg,another_arg')

    def test_lookup_jinja_undefined(self):
        self.assertRaisesRegexp(AnsibleUndefinedVariable,
                                "'an_undefined_jinja_var' is undefined",
                                self.templar._lookup,
                                'list', '{{ an_undefined_jinja_var }}')

    def test_lookup_jinja_defined(self):
        res = self.templar._lookup('list', '{{ some_var }}')
        self.assertTrue(self.is_unsafe(res))
        # self.assertIsInstance(res, AnsibleUnsafe)

    def test_lookup_jinja_dict_string_passed(self):
        self.assertRaisesRegexp(AnsibleError,
                                "with_dict expects a dict",
                                self.templar._lookup,
                                'dict',
                                '{{ some_var }}')

    def test_lookup_jinja_dict_list_passed(self):
        self.assertRaisesRegexp(AnsibleError,
                                "with_dict expects a dict",
                                self.templar._lookup,
                                'dict',
                                ['foo', 'bar'])

    def test_lookup_jinja_kwargs(self):
        res = self.templar._lookup('list', 'blip', random_keyword='12345')
        self.assertTrue(self.is_unsafe(res))
        # self.assertIsInstance(res, AnsibleUnsafe)

    def test_lookup_jinja_list_wantlist(self):
        res = self.templar._lookup('list', '{{ some_var }}', wantlist=True)
        self.assertEqual(res, ["blip"])

    def test_lookup_jinja_list_wantlist_undefined(self):
        self.assertRaisesRegexp(AnsibleUndefinedVariable,
                                "'some_undefined_var' is undefined",
                                self.templar._lookup,
                                'list',
                                '{{ some_undefined_var }}',
                                wantlist=True)

    def test_lookup_jinja_list_wantlist_unsafe(self):
        res = self.templar._lookup('list', '{{ some_unsafe_var }}', wantlist=True)
        for lookup_result in res:
            self.assertTrue(self.is_unsafe(lookup_result))
            # self.assertIsInstance(lookup_result, AnsibleUnsafe)

        # Should this be an AnsibleUnsafe
        # self.assertIsInstance(res, AnsibleUnsafe)

    def test_lookup_jinja_dict(self):
        res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_var }}'})
        # dict KEYS are not templated by the lookup; only values are
        self.assertEqual(res['{{ a_keyword }}'], "blip")
        # TODO: Should this be an AnsibleUnsafe
        # self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
        # self.assertIsInstance(res, AnsibleUnsafe)

    def test_lookup_jinja_dict_unsafe(self):
        res = self.templar._lookup('list', {'{{ some_unsafe_key }}': '{{ some_unsafe_var }}'})
        self.assertTrue(self.is_unsafe(res['{{ some_unsafe_key }}']))
        # self.assertIsInstance(res['{{ some_unsafe_key }}'], AnsibleUnsafe)
        # TODO: Should this be an AnsibleUnsafe
        # self.assertIsInstance(res, AnsibleUnsafe)

    def test_lookup_jinja_dict_unsafe_value(self):
        res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_unsafe_var }}'})
        self.assertTrue(self.is_unsafe(res['{{ a_keyword }}']))
        # self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
        # TODO: Should this be an AnsibleUnsafe
        # self.assertIsInstance(res, AnsibleUnsafe)

    def test_lookup_jinja_none(self):
        res = self.templar._lookup('list', None)
        self.assertIsNone(res)
class TestAnsibleContext(BaseTemplar, unittest.TestCase):
    """Tests for AnsibleContext.resolve(): plain values resolve unmarked,
    wrapped values (including nested list/dict members) stay unsafe."""

    def _context(self, variables=None):
        """Build an AnsibleContext seeded with the given variables."""
        variables = variables or {}

        env = AnsibleEnvironment()
        context = AnsibleContext(env, parent={}, name='some_context',
                                 blocks={})

        for key, value in variables.items():
            context.vars[key] = value

        return context

    def test(self):
        context = self._context()
        self.assertIsInstance(context, AnsibleContext)
        # AnsibleContext must remain a jinja2 runtime Context
        self.assertIsInstance(context, Context)

    def test_resolve_unsafe(self):
        context = self._context(variables={'some_unsafe_key': wrap_var('some_unsafe_string')})
        res = context.resolve('some_unsafe_key')
        # self.assertIsInstance(res, AnsibleUnsafe)
        self.assertTrue(self.is_unsafe(res),
                        'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)

    def test_resolve_unsafe_list(self):
        context = self._context(variables={'some_unsafe_key': [wrap_var('some unsafe string 1')]})
        res = context.resolve('some_unsafe_key')
        # self.assertIsInstance(res[0], AnsibleUnsafe)
        self.assertTrue(self.is_unsafe(res),
                        'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)

    def test_resolve_unsafe_dict(self):
        context = self._context(variables={'some_unsafe_key':
                                           {'an_unsafe_dict': wrap_var('some unsafe string 1')}
                                           })
        res = context.resolve('some_unsafe_key')
        self.assertTrue(self.is_unsafe(res['an_unsafe_dict']),
                        'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res['an_unsafe_dict'])

    def test_resolve(self):
        context = self._context(variables={'some_key': 'some_string'})
        res = context.resolve('some_key')
        self.assertEqual(res, 'some_string')
        # self.assertNotIsInstance(res, AnsibleUnsafe)
        self.assertFalse(self.is_unsafe(res),
                         'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)

    def test_resolve_none(self):
        context = self._context(variables={'some_key': None})
        res = context.resolve('some_key')
        self.assertEqual(res, None)
        # self.assertNotIsInstance(res, AnsibleUnsafe)
        self.assertFalse(self.is_unsafe(res),
                         'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
| gpl-3.0 |
aleaxit/pysolper | permit/lib/dist/werkzeug/contrib/iterio.py | 25 | 8299 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.iterio
~~~~~~~~~~~~~~~~~~~~~~~
This module implements a :class:`IterIO` that converts an iterator into
a stream object and the other way round. Converting streams into
iterators requires the `greenlet`_ module.
To convert an iterator into a stream all you have to do is to pass it
directly to the :class:`IterIO` constructor. In this example we pass it
a newly created generator::
def foo():
yield "something\n"
yield "otherthings"
stream = IterIO(foo())
print stream.read() # read the whole iterator
The other way round works a bit different because we have to ensure that
the code execution doesn't take place yet. An :class:`IterIO` call with a
callable as first argument does two things. The function itself is passed
an :class:`IterIO` stream it can feed. The object returned by the
:class:`IterIO` constructor on the other hand is not an stream object but
an iterator::
def foo(stream):
stream.write("some")
stream.write("thing")
stream.flush()
stream.write("otherthing")
iterator = IterIO(foo)
print iterator.next() # prints something
print iterator.next() # prints otherthing
iterator.next() # raises StopIteration
.. _greenlet: http://codespeak.net/py/dist/greenlet.html
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from py.magic import greenlet
except:
greenlet = None
class IterIO(object):
    """Instances of this object implement an interface compatible with the
    standard Python :class:`file` object. Streams are either read-only or
    write-only depending on how the object is created.

    This base class defines the common file API: every operation that the
    chosen direction does not support raises ``IOError(EBADF)``; the
    subclasses (IterI for writing, IterO for reading) override the ones
    they implement.  Python 2 code: uses the ``next()`` iterator protocol.
    """

    def __new__(cls, obj):
        try:
            iterator = iter(obj)
        except TypeError:
            # not iterable: treat obj as a callable that writes to a stream
            return IterI(obj)
        return IterO(iterator)

    def __iter__(self):
        return self

    def tell(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        return self.pos

    def isatty(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        return False

    def seek(self, pos, mode=0):
        # errno 9 == EBADF: operation unsupported for this stream direction
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def truncate(self, size=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def write(self, s):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def writelines(self, list):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def read(self, n=-1):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def readlines(self, sizehint=0):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def readline(self, length=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def flush(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def next(self):
        # Python 2 iterator protocol: yield one line per iteration
        if self.closed:
            raise StopIteration()
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
class IterI(IterIO):
    """Convert an stream into an iterator.

    The writer function runs in a greenlet; each flush() switches control
    back to the consumer, which receives the buffered data as the next
    item of the iterator.
    """

    def __new__(cls, func):
        # NOTE: because of the ``yield`` below, __new__ is a generator
        # function -- IterI(func) therefore returns a generator of the
        # flushed chunks, not an IterI instance.
        if greenlet is None:
            raise RuntimeError('IterI requires greenlet support')
        stream = object.__new__(cls)
        # __init__ is invoked manually since Python won't call it when
        # __new__ doesn't return an instance of cls
        stream.__init__(greenlet.getcurrent())

        def run():
            func(stream)
            stream.flush()

        g = greenlet(run, stream._parent)
        while 1:
            rv = g.switch()
            if not rv:
                # writer greenlet finished without flushing more data
                return
            yield rv[0]

    def __init__(self, parent):
        self._parent = parent  # greenlet to switch back to on flush()
        self._buffer = []      # chunks written since the last flush
        self.closed = False
        self.pos = 0           # total number of characters written

    def close(self):
        if not self.closed:
            self.closed = True

    def write(self, s):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        self.pos += len(s)
        self._buffer.append(s)

    def writelines(self, list):
        self.write(''.join(list))

    def flush(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        data = ''.join(self._buffer)
        self._buffer = []
        # hand the buffered data back to the consuming greenlet as a
        # one-element tuple (so empty strings are still truthy signals)
        self._parent.switch((data,))
class IterO(IterIO):
"""Iter output. Wrap an iterator and give it a stream like interface."""
def __new__(cls, gen):
return object.__new__(cls)
def __init__(self, gen):
self._gen = gen
self._buf = ''
self.closed = False
self.pos = 0
def __iter__(self):
return self
def close(self):
if not self.closed:
self.closed = True
if hasattr(self._gen, 'close'):
self._gen.close()
def seek(self, pos, mode=0):
if self.closed:
raise ValueError('I/O operation on closed file')
if mode == 1:
pos += self.pos
elif mode == 2:
self.read()
self.pos = min(self.pos, self.pos + pos)
return
elif mode != 0:
raise IOError('Invalid argument')
buf = []
try:
tmp_end_pos = len(self._buf)
while pos > tmp_end_pos:
item = self._gen.next()
tmp_end_pos += len(item)
buf.append(item)
except StopIteration:
pass
if buf:
self._buf += ''.join(buf)
self.pos = max(0, pos)
def read(self, n=-1):
if self.closed:
raise ValueError('I/O operation on closed file')
if n < 0:
self._buf += ''.join(self._gen)
result = self._buf[self.pos:]
self.pos += len(result)
return result
new_pos = self.pos + n
buf = []
try:
tmp_end_pos = len(self._buf)
while new_pos > tmp_end_pos:
item = self._gen.next()
tmp_end_pos += len(item)
buf.append(item)
except StopIteration:
pass
if buf:
self._buf += ''.join(buf)
new_pos = max(0, new_pos)
try:
return self._buf[self.pos:new_pos]
finally:
self.pos = min(new_pos, len(self._buf))
def readline(self, length=None):
if self.closed:
raise ValueError('I/O operation on closed file')
nl_pos = self._buf.find('\n', self.pos)
buf = []
try:
pos = self.pos
while nl_pos < 0:
item = self._gen.next()
local_pos = item.find('\n')
buf.append(item)
if local_pos >= 0:
nl_pos = pos + local_pos
break
pos += len(item)
except StopIteration:
pass
if buf:
self._buf += ''.join(buf)
if nl_pos < 0:
new_pos = len(self._buf)
else:
new_pos = nl_pos + 1
if length is not None and self.pos + length < new_pos:
new_pos = self.pos + length
try:
return self._buf[self.pos:new_pos]
finally:
self.pos = min(new_pos, len(self._buf))
def readlines(self, sizehint=0):
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
| apache-2.0 |
rupran/ansible | contrib/inventory/vmware.py | 79 | 18441 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
VMware Inventory Script
=======================
Retrieve information about virtual machines from a vCenter server or
standalone ESX host. When `group_by=false` (in the INI file), host systems
are also returned in addition to VMs.
This script will attempt to read configuration from an INI file with the same
base filename if present, or `vmware.ini` if not. It is possible to create
symlinks to the inventory script to support multiple configurations, e.g.:
* `vmware.py` (this script)
* `vmware.ini` (default configuration, will be read by `vmware.py`)
* `vmware_test.py` (symlink to `vmware.py`)
* `vmware_test.ini` (test configuration, will be read by `vmware_test.py`)
* `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no
`vmware_other.ini` exists)
The path to an INI file may also be specified via the `VMWARE_INI` environment
variable, in which case the filename matching rules above will not apply.
Host and authentication parameters may be specified via the `VMWARE_HOST`,
`VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will
take precedence over options present in the INI file. An INI file is not
required if these options are specified using environment variables.
'''
from __future__ import print_function
import collections
import json
import logging
import optparse
import os
import ssl
import sys
import time
import ConfigParser
from six import text_type, string_types
# Disable logging message trigged by pSphere/suds.
try:
from logging import NullHandler
except ImportError:
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
logging.getLogger('psphere').addHandler(NullHandler())
logging.getLogger('suds').addHandler(NullHandler())
from psphere.client import Client
from psphere.errors import ObjectNotFoundError
from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network, ClusterComputeResource
from suds.sudsobject import Object as SudsObject
class VMwareInventory(object):
def __init__(self, guests_only=None):
self.config = ConfigParser.SafeConfigParser()
if os.environ.get('VMWARE_INI', ''):
config_files = [os.environ['VMWARE_INI']]
else:
config_files = [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini']
for config_file in config_files:
if os.path.exists(config_file):
self.config.read(config_file)
break
# Retrieve only guest VMs, or include host systems?
if guests_only is not None:
self.guests_only = guests_only
elif self.config.has_option('defaults', 'guests_only'):
self.guests_only = self.config.getboolean('defaults', 'guests_only')
else:
self.guests_only = True
# Read authentication information from VMware environment variables
# (if set), otherwise from INI file.
auth_host = os.environ.get('VMWARE_HOST')
if not auth_host and self.config.has_option('auth', 'host'):
auth_host = self.config.get('auth', 'host')
auth_user = os.environ.get('VMWARE_USER')
if not auth_user and self.config.has_option('auth', 'user'):
auth_user = self.config.get('auth', 'user')
auth_password = os.environ.get('VMWARE_PASSWORD')
if not auth_password and self.config.has_option('auth', 'password'):
auth_password = self.config.get('auth', 'password')
sslcheck = os.environ.get('VMWARE_SSLCHECK')
if not sslcheck and self.config.has_option('auth', 'sslcheck'):
sslcheck = self.config.get('auth', 'sslcheck')
if not sslcheck:
sslcheck = True
else:
if sslcheck.lower() in ['no', 'false']:
sslcheck = False
else:
sslcheck = True
# Limit the clusters being scanned
self.filter_clusters = os.environ.get('VMWARE_CLUSTERS')
if not self.filter_clusters and self.config.has_option('defaults', 'clusters'):
self.filter_clusters = self.config.get('defaults', 'clusters')
if self.filter_clusters:
self.filter_clusters = [x.strip() for x in self.filter_clusters.split(',') if x.strip()]
# Override certificate checks
if not sslcheck:
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
# Create the VMware client connection.
self.client = Client(auth_host, auth_user, auth_password)
def _put_cache(self, name, value):
'''
Saves the value to cache with the name given.
'''
if self.config.has_option('defaults', 'cache_dir'):
cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir'))
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_file = os.path.join(cache_dir, name)
with open(cache_file, 'w') as cache:
json.dump(value, cache)
def _get_cache(self, name, default=None):
'''
Retrieves the value from cache for the given name.
'''
if self.config.has_option('defaults', 'cache_dir'):
cache_dir = self.config.get('defaults', 'cache_dir')
cache_file = os.path.join(cache_dir, name)
if os.path.exists(cache_file):
if self.config.has_option('defaults', 'cache_max_age'):
cache_max_age = self.config.getint('defaults', 'cache_max_age')
else:
cache_max_age = 0
cache_stat = os.stat(cache_file)
if (cache_stat.st_mtime + cache_max_age) >= time.time():
with open(cache_file) as cache:
return json.load(cache)
return default
def _flatten_dict(self, d, parent_key='', sep='_'):
'''
Flatten nested dicts by combining keys with a separator. Lists with
only string items are included as is; any other lists are discarded.
'''
items = []
for k, v in d.items():
if k.startswith('_'):
continue
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(self._flatten_dict(v, new_key, sep).items())
elif isinstance(v, (list, tuple)):
if all([isinstance(x, string_types) for x in v]):
items.append((new_key, v))
else:
items.append((new_key, v))
return dict(items)
def _get_obj_info(self, obj, depth=99, seen=None):
'''
Recursively build a data structure for the given pSphere object (depth
only applies to ManagedObject instances).
'''
seen = seen or set()
if isinstance(obj, ManagedObject):
try:
obj_unicode = text_type(getattr(obj, 'name'))
except AttributeError:
obj_unicode = ()
if obj in seen:
return obj_unicode
seen.add(obj)
if depth <= 0:
return obj_unicode
d = {}
for attr in dir(obj):
if attr.startswith('_'):
continue
try:
val = getattr(obj, attr)
obj_info = self._get_obj_info(val, depth - 1, seen)
if obj_info != ():
d[attr] = obj_info
except Exception as e:
pass
return d
elif isinstance(obj, SudsObject):
d = {}
for key, val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
d[key] = obj_info
return d
elif isinstance(obj, (list, tuple)):
l = []
for val in iter(obj):
obj_info = self._get_obj_info(val, depth, seen)
if obj_info != ():
l.append(obj_info)
return l
elif isinstance(obj, (type(None), bool, int, long, float, string_types)):
return obj
else:
return ()
def _get_host_info(self, host, prefix='vmware'):
'''
Return a flattened dict with info about the given host system.
'''
host_info = {
'name': host.name,
}
for attr in ('datastore', 'network', 'vm'):
try:
value = getattr(host, attr)
host_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
host_info['%ss' % attr] = []
for k, v in self._get_obj_info(host.summary, depth=0).items():
if isinstance(v, collections.MutableMapping):
for k2, v2 in v.items():
host_info[k2] = v2
elif k != 'host':
host_info[k] = v
try:
host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress
except Exception as e:
print(e, file=sys.stderr)
host_info = self._flatten_dict(host_info, prefix)
if ('%s_ipAddress' % prefix) in host_info:
host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix]
return host_info
def _get_vm_info(self, vm, prefix='vmware'):
'''
Return a flattened dict with info about the given virtual machine.
'''
vm_info = {
'name': vm.name,
}
for attr in ('datastore', 'network'):
try:
value = getattr(vm, attr)
vm_info['%ss' % attr] = self._get_obj_info(value, depth=0)
except AttributeError:
vm_info['%ss' % attr] = []
try:
vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0)
except AttributeError:
vm_info['resourcePool'] = ''
try:
vm_info['guestState'] = vm.guest.guestState
except AttributeError:
vm_info['guestState'] = ''
for k, v in self._get_obj_info(vm.summary, depth=0).items():
if isinstance(v, collections.MutableMapping):
for k2, v2 in v.items():
if k2 == 'host':
k2 = 'hostSystem'
vm_info[k2] = v2
elif k != 'vm':
vm_info[k] = v
vm_info = self._flatten_dict(vm_info, prefix)
if ('%s_ipAddress' % prefix) in vm_info:
vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix]
return vm_info
def _add_host(self, inv, parent_group, host_name):
'''
Add the host to the parent group in the given inventory.
'''
p_group = inv.setdefault(parent_group, [])
if isinstance(p_group, dict):
group_hosts = p_group.setdefault('hosts', [])
else:
group_hosts = p_group
if host_name not in group_hosts:
group_hosts.append(host_name)
def _add_child(self, inv, parent_group, child_group):
'''
Add a child group to a parent group in the given inventory.
'''
if parent_group != 'all':
p_group = inv.setdefault(parent_group, {})
if not isinstance(p_group, dict):
inv[parent_group] = {'hosts': p_group}
p_group = inv[parent_group]
group_children = p_group.setdefault('children', [])
if child_group not in group_children:
group_children.append(child_group)
inv.setdefault(child_group, [])
def get_inventory(self, meta_hostvars=True):
'''
Reads the inventory from cache or VMware API via pSphere.
'''
# Use different cache names for guests only vs. all hosts.
if self.guests_only:
cache_name = '__inventory_guests__'
else:
cache_name = '__inventory_all__'
inv = self._get_cache(cache_name, None)
if inv is not None:
return inv
inv = {'all': {'hosts': []}}
if meta_hostvars:
inv['_meta'] = {'hostvars': {}}
default_group = os.path.basename(sys.argv[0]).rstrip('.py')
if not self.guests_only:
if self.config.has_option('defaults', 'hw_group'):
hw_group = self.config.get('defaults', 'hw_group')
else:
hw_group = default_group + '_hw'
if self.config.has_option('defaults', 'vm_group'):
vm_group = self.config.get('defaults', 'vm_group')
else:
vm_group = default_group + '_vm'
if self.config.has_option('defaults', 'prefix_filter'):
prefix_filter = self.config.get('defaults', 'prefix_filter')
else:
prefix_filter = None
if self.filter_clusters:
# Loop through clusters and find hosts:
hosts = []
for cluster in ClusterComputeResource.all(self.client):
if cluster.name in self.filter_clusters:
for host in cluster.host:
hosts.append(host)
else:
# Get list of all physical hosts
hosts = HostSystem.all(self.client)
# Loop through physical hosts:
for host in hosts:
if not self.guests_only:
self._add_host(inv, 'all', host.name)
self._add_host(inv, hw_group, host.name)
host_info = self._get_host_info(host)
if meta_hostvars:
inv['_meta']['hostvars'][host.name] = host_info
self._put_cache(host.name, host_info)
# Loop through all VMs on physical host.
for vm in host.vm:
if prefix_filter:
if vm.name.startswith( prefix_filter ):
continue
self._add_host(inv, 'all', vm.name)
self._add_host(inv, vm_group, vm.name)
vm_info = self._get_vm_info(vm)
if meta_hostvars:
inv['_meta']['hostvars'][vm.name] = vm_info
self._put_cache(vm.name, vm_info)
# Group by resource pool.
vm_resourcePool = vm_info.get('vmware_resourcePool', None)
if vm_resourcePool:
self._add_child(inv, vm_group, 'resource_pools')
self._add_child(inv, 'resource_pools', vm_resourcePool)
self._add_host(inv, vm_resourcePool, vm.name)
# Group by datastore.
for vm_datastore in vm_info.get('vmware_datastores', []):
self._add_child(inv, vm_group, 'datastores')
self._add_child(inv, 'datastores', vm_datastore)
self._add_host(inv, vm_datastore, vm.name)
# Group by network.
for vm_network in vm_info.get('vmware_networks', []):
self._add_child(inv, vm_group, 'networks')
self._add_child(inv, 'networks', vm_network)
self._add_host(inv, vm_network, vm.name)
# Group by guest OS.
vm_guestId = vm_info.get('vmware_guestId', None)
if vm_guestId:
self._add_child(inv, vm_group, 'guests')
self._add_child(inv, 'guests', vm_guestId)
self._add_host(inv, vm_guestId, vm.name)
# Group all VM templates.
vm_template = vm_info.get('vmware_template', False)
if vm_template:
self._add_child(inv, vm_group, 'templates')
self._add_host(inv, 'templates', vm.name)
self._put_cache(cache_name, inv)
return inv
def get_host(self, hostname):
'''
Read info about a specific host or VM from cache or VMware API.
'''
inv = self._get_cache(hostname, None)
if inv is not None:
return inv
if not self.guests_only:
try:
host = HostSystem.get(self.client, name=hostname)
inv = self._get_host_info(host)
except ObjectNotFoundError:
pass
if inv is None:
try:
vm = VirtualMachine.get(self.client, name=hostname)
inv = self._get_vm_info(vm)
except ObjectNotFoundError:
pass
if inv is not None:
self._put_cache(hostname, inv)
return inv or {}
def main():
parser = optparse.OptionParser()
parser.add_option('--list', action='store_true', dest='list',
default=False, help='Output inventory groups and hosts')
parser.add_option('--host', dest='host', default=None, metavar='HOST',
help='Output variables only for the given hostname')
# Additional options for use when running the script standalone, but never
# used by Ansible.
parser.add_option('--pretty', action='store_true', dest='pretty',
default=False, help='Output nicely-formatted JSON')
parser.add_option('--include-host-systems', action='store_true',
dest='include_host_systems', default=False,
help='Include host systems in addition to VMs')
parser.add_option('--no-meta-hostvars', action='store_false',
dest='meta_hostvars', default=True,
help='Exclude [\'_meta\'][\'hostvars\'] with --list')
options, args = parser.parse_args()
if options.include_host_systems:
vmware_inventory = VMwareInventory(guests_only=False)
else:
vmware_inventory = VMwareInventory()
if options.host is not None:
inventory = vmware_inventory.get_host(options.host)
else:
inventory = vmware_inventory.get_inventory(options.meta_hostvars)
json_kwargs = {}
if options.pretty:
json_kwargs.update({'indent': 4, 'sort_keys': True})
json.dump(inventory, sys.stdout, **json_kwargs)
if __name__ == '__main__':
main()
| gpl-3.0 |
pierotofy/OpenDroneMap | opendm/types.py | 1 | 15615 | import cv2
import re
import os
from opendm import get_image_size
from opendm import location
from opendm.gcp import GCPFile
from pyproj import CRS
import xmltodict as x2d
from six import string_types
from opendm import log
from opendm import io
from opendm import system
from opendm import context
from opendm.progress import progressbc
from opendm.photo import ODM_Photo
class ODM_Reconstruction(object):
def __init__(self, photos):
self.photos = photos
self.georef = None
self.gcp = None
self.geo_file = None
self.multi_camera = self.detect_multi_camera()
def detect_multi_camera(self):
"""
Looks at the reconstruction photos and determines if this
is a single or multi-camera setup.
"""
band_photos = {}
band_indexes = {}
for p in self.photos:
if not p.band_name in band_photos:
band_photos[p.band_name] = []
if not p.band_name in band_indexes:
band_indexes[p.band_name] = p.band_index
band_photos[p.band_name].append(p)
bands_count = len(band_photos)
if bands_count >= 2 and bands_count <= 8:
# Validate that all bands have the same number of images,
# otherwise this is not a multi-camera setup
img_per_band = len(band_photos[p.band_name])
for band in band_photos:
if len(band_photos[band]) != img_per_band:
log.ODM_ERROR("Multi-camera setup detected, but band \"%s\" (identified from \"%s\") has only %s images (instead of %s), perhaps images are missing or are corrupted. Please include all necessary files to process all bands and try again." % (band, band_photos[band][0].filename, len(band_photos[band]), img_per_band))
raise RuntimeError("Invalid multi-camera images")
mc = []
for band_name in band_indexes:
mc.append({'name': band_name, 'photos': band_photos[band_name]})
# Sort by band index
mc.sort(key=lambda x: band_indexes[x['name']])
return mc
return None
def is_georeferenced(self):
return self.georef is not None
def has_gcp(self):
return self.is_georeferenced() and self.gcp is not None
def georeference_with_gcp(self, gcp_file, output_coords_file, output_gcp_file, rerun=False):
if not io.file_exists(output_coords_file) or not io.file_exists(output_gcp_file) or rerun:
gcp = GCPFile(gcp_file)
if gcp.exists():
# Create coords file, we'll be using this later
# during georeferencing
with open(output_coords_file, 'w') as f:
coords_header = gcp.wgs84_utm_zone()
f.write(coords_header + "\n")
log.ODM_INFO("Generated coords file from GCP: %s" % coords_header)
# Convert GCP file to a UTM projection since the rest of the pipeline
# does not handle other SRS well.
rejected_entries = []
utm_gcp = GCPFile(gcp.create_utm_copy(output_gcp_file, filenames=[p.filename for p in self.photos], rejected_entries=rejected_entries, include_extras=False))
if not utm_gcp.exists():
raise RuntimeError("Could not project GCP file to UTM. Please double check your GCP file for mistakes.")
for re in rejected_entries:
log.ODM_WARNING("GCP line ignored (image not found): %s" % str(re))
if utm_gcp.entries_count() > 0:
log.ODM_INFO("%s GCP points will be used for georeferencing" % utm_gcp.entries_count())
else:
raise RuntimeError("A GCP file was provided, but no valid GCP entries could be used. Note that the GCP file is case sensitive (\".JPG\" is not the same as \".jpg\").")
self.gcp = utm_gcp
else:
log.ODM_WARNING("GCP file does not exist: %s" % gcp_file)
return
else:
log.ODM_INFO("Coordinates file already exist: %s" % output_coords_file)
log.ODM_INFO("GCP file already exist: %s" % output_gcp_file)
self.gcp = GCPFile(output_gcp_file)
self.georef = ODM_GeoRef.FromCoordsFile(output_coords_file)
return self.georef
def georeference_with_gps(self, images_path, output_coords_file, rerun=False):
try:
if not io.file_exists(output_coords_file) or rerun:
location.extract_utm_coords(self.photos, images_path, output_coords_file)
else:
log.ODM_INFO("Coordinates file already exist: %s" % output_coords_file)
self.georef = ODM_GeoRef.FromCoordsFile(output_coords_file)
except:
log.ODM_WARNING('Could not generate coordinates file. The orthophoto will not be georeferenced.')
self.gcp = GCPFile(None)
return self.georef
def save_proj_srs(self, file):
# Save proj to file for future use (unless this
# dataset is not georeferenced)
if self.is_georeferenced():
with open(file, 'w') as f:
f.write(self.get_proj_srs())
def get_proj_srs(self):
if self.is_georeferenced():
return self.georef.proj4()
def get_photo(self, filename):
for p in self.photos:
if p.filename == filename:
return p
class ODM_GeoRef(object):
@staticmethod
def FromProj(projstring):
return ODM_GeoRef(CRS.from_proj4(projstring))
@staticmethod
def FromCoordsFile(coords_file):
# check for coordinate file existence
if not io.file_exists(coords_file):
log.ODM_WARNING('Could not find file %s' % coords_file)
return
srs = None
with open(coords_file) as f:
# extract reference system and utm zone from first line.
# We will assume the following format:
# 'WGS84 UTM 17N' or 'WGS84 UTM 17N \n'
line = f.readline().rstrip()
srs = location.parse_srs_header(line)
return ODM_GeoRef(srs)
def __init__(self, srs):
self.srs = srs
self.utm_east_offset = 0
self.utm_north_offset = 0
self.transform = []
def proj4(self):
return self.srs.to_proj4()
def valid_utm_offsets(self):
return self.utm_east_offset and self.utm_north_offset
def extract_offsets(self, geo_sys_file):
if not io.file_exists(geo_sys_file):
log.ODM_ERROR('Could not find file %s' % geo_sys_file)
return
with open(geo_sys_file) as f:
offsets = f.readlines()[1].split(' ')
self.utm_east_offset = float(offsets[0])
self.utm_north_offset = float(offsets[1])
def parse_transformation_matrix(self, matrix_file):
if not io.file_exists(matrix_file):
log.ODM_ERROR('Could not find file %s' % matrix_file)
return
# Create a nested list for the transformation matrix
with open(matrix_file) as f:
for line in f:
# Handle matrix formats that either
# have leading or trailing brakets or just plain numbers.
line = re.sub(r"[\[\],]", "", line).strip()
self.transform += [[float(i) for i in line.split()]]
self.utm_east_offset = self.transform[0][3]
self.utm_north_offset = self.transform[1][3]
class ODM_Tree(object):
def __init__(self, root_path, gcp_file = None, geo_file = None):
# root path to the project
self.root_path = io.absolute_path_file(root_path)
self.input_images = io.join_paths(self.root_path, 'images')
# modules paths
# here are defined where all modules should be located in
# order to keep track all files al directories during the
# whole reconstruction process.
self.dataset_raw = io.join_paths(self.root_path, 'images')
self.opensfm = io.join_paths(self.root_path, 'opensfm')
self.mve = io.join_paths(self.root_path, 'mve')
self.odm_meshing = io.join_paths(self.root_path, 'odm_meshing')
self.odm_texturing = io.join_paths(self.root_path, 'odm_texturing')
self.odm_25dtexturing = io.join_paths(self.root_path, 'odm_texturing_25d')
self.odm_georeferencing = io.join_paths(self.root_path, 'odm_georeferencing')
self.odm_25dgeoreferencing = io.join_paths(self.root_path, 'odm_georeferencing_25d')
self.odm_filterpoints = io.join_paths(self.root_path, 'odm_filterpoints')
self.odm_orthophoto = io.join_paths(self.root_path, 'odm_orthophoto')
self.odm_report = io.join_paths(self.root_path, 'odm_report')
# important files paths
# benchmarking
self.benchmarking = io.join_paths(self.root_path, 'benchmark.txt')
self.dataset_list = io.join_paths(self.root_path, 'img_list.txt')
# opensfm
self.opensfm_tracks = io.join_paths(self.opensfm, 'tracks.csv')
self.opensfm_bundle = io.join_paths(self.opensfm, 'bundle_r000.out')
self.opensfm_bundle_list = io.join_paths(self.opensfm, 'list_r000.out')
self.opensfm_image_list = io.join_paths(self.opensfm, 'image_list.txt')
self.opensfm_reconstruction = io.join_paths(self.opensfm, 'reconstruction.json')
self.opensfm_reconstruction_nvm = io.join_paths(self.opensfm, 'undistorted/reconstruction.nvm')
self.opensfm_model = io.join_paths(self.opensfm, 'undistorted/depthmaps/merged.ply')
self.opensfm_transformation = io.join_paths(self.opensfm, 'geocoords_transformation.txt')
# mve
self.mve_model = io.join_paths(self.mve, 'mve_dense_point_cloud.ply')
self.mve_views = io.join_paths(self.mve, 'views')
# filter points
self.filtered_point_cloud = io.join_paths(self.odm_filterpoints, "point_cloud.ply")
# odm_meshing
self.odm_mesh = io.join_paths(self.odm_meshing, 'odm_mesh.ply')
self.odm_meshing_log = io.join_paths(self.odm_meshing, 'odm_meshing_log.txt')
self.odm_25dmesh = io.join_paths(self.odm_meshing, 'odm_25dmesh.ply')
self.odm_25dmeshing_log = io.join_paths(self.odm_meshing, 'odm_25dmeshing_log.txt')
# texturing
self.odm_texturing_undistorted_image_path = io.join_paths(
self.odm_texturing, 'undistorted')
self.odm_textured_model_obj = 'odm_textured_model.obj'
self.odm_textured_model_mtl = 'odm_textured_model.mtl'
# Log is only used by old odm_texturing
self.odm_texuring_log = 'odm_texturing_log.txt'
# odm_georeferencing
self.odm_georeferencing_coords = io.join_paths(
self.odm_georeferencing, 'coords.txt')
self.odm_georeferencing_gcp = gcp_file or io.find('gcp_list.txt', self.root_path)
self.odm_georeferencing_gcp_utm = io.join_paths(self.odm_georeferencing, 'gcp_list_utm.txt')
self.odm_geo_file = geo_file or io.find('geo.txt', self.root_path)
self.odm_georeferencing_utm_log = io.join_paths(
self.odm_georeferencing, 'odm_georeferencing_utm_log.txt')
self.odm_georeferencing_log = 'odm_georeferencing_log.txt'
self.odm_georeferencing_transform_file = 'odm_georeferencing_transform.txt'
self.odm_georeferencing_proj = 'proj.txt'
self.odm_georeferencing_model_txt_geo = 'odm_georeferencing_model_geo.txt'
self.odm_georeferencing_model_obj_geo = 'odm_textured_model_geo.obj'
self.odm_georeferencing_xyz_file = io.join_paths(
self.odm_georeferencing, 'odm_georeferenced_model.csv')
self.odm_georeferencing_las_json = io.join_paths(
self.odm_georeferencing, 'las.json')
self.odm_georeferencing_model_laz = io.join_paths(
self.odm_georeferencing, 'odm_georeferenced_model.laz')
self.odm_georeferencing_model_las = io.join_paths(
self.odm_georeferencing, 'odm_georeferenced_model.las')
self.odm_georeferencing_dem = io.join_paths(
self.odm_georeferencing, 'odm_georeferencing_model_dem.tif')
# odm_orthophoto
self.odm_orthophoto_render = io.join_paths(self.odm_orthophoto, 'odm_orthophoto_render.tif')
self.odm_orthophoto_tif = io.join_paths(self.odm_orthophoto, 'odm_orthophoto.tif')
self.odm_orthophoto_corners = io.join_paths(self.odm_orthophoto, 'odm_orthophoto_corners.txt')
self.odm_orthophoto_log = io.join_paths(self.odm_orthophoto, 'odm_orthophoto_log.txt')
self.odm_orthophoto_tif_log = io.join_paths(self.odm_orthophoto, 'gdal_translate_log.txt')
# tiles
self.orthophoto_tiles = io.join_paths(self.root_path, "orthophoto_tiles")
# Split-merge
self.submodels_path = io.join_paths(self.root_path, 'submodels')
# Tiles
self.entwine_pointcloud = self.path("entwine_pointcloud")
def path(self, *args):
return os.path.join(self.root_path, *args)
class ODM_Stage:
def __init__(self, name, args, progress=0.0, **params):
self.name = name
self.args = args
self.progress = progress
self.params = params
if self.params is None:
self.params = {}
self.next_stage = None
self.prev_stage = None
def connect(self, stage):
self.next_stage = stage
stage.prev_stage = self
return stage
def rerun(self):
"""
Does this stage need to be rerun?
"""
return (self.args.rerun is not None and self.args.rerun == self.name) or \
(self.args.rerun_all) or \
(self.args.rerun_from is not None and self.name in self.args.rerun_from)
def run(self, outputs = {}):
start_time = system.now_raw()
log.ODM_INFO('Running %s stage' % self.name)
self.process(self.args, outputs)
# The tree variable should always be populated at this point
if outputs.get('tree') is None:
raise Exception("Assert violation: tree variable is missing from outputs dictionary.")
if self.args.time:
system.benchmark(start_time, outputs['tree'].benchmarking, self.name)
log.ODM_INFO('Finished %s stage' % self.name)
self.update_progress_end()
# Last stage?
if self.args.end_with == self.name or self.args.rerun == self.name:
log.ODM_INFO("No more stages to run")
return
# Run next stage?
elif self.next_stage is not None:
self.next_stage.run(outputs)
def delta_progress(self):
if self.prev_stage:
return max(0.0, self.progress - self.prev_stage.progress)
else:
return max(0.0, self.progress)
def previous_stages_progress(self):
if self.prev_stage:
return max(0.0, self.prev_stage.progress)
else:
return 0.0
def update_progress_end(self):
self.update_progress(100.0)
def update_progress(self, progress):
progress = max(0.0, min(100.0, progress))
progressbc.send_update(self.previous_stages_progress() +
(self.delta_progress() / 100.0) * float(progress))
def process(self, args, outputs):
raise NotImplementedError
| gpl-3.0 |
conductor-tutti/test | lib/setuptools/command/rotate.py | 461 | 2038 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import os
from setuptools import Command
from setuptools.compat import basestring
class rotate(Command):
"""Delete older distributions"""
description = "delete older distributions, keeping N newest files"
user_options = [
('match=', 'm', "patterns to match (required)"),
('dist-dir=', 'd', "directory where the distributions are"),
('keep=', 'k', "number of matching distributions to keep"),
]
boolean_options = []
def initialize_options(self):
self.match = None
self.dist_dir = None
self.keep = None
def finalize_options(self):
if self.match is None:
raise DistutilsOptionError(
"Must specify one or more (comma-separated) match patterns "
"(e.g. '.zip' or '.egg')"
)
if self.keep is None:
raise DistutilsOptionError("Must specify number of files to keep")
try:
self.keep = int(self.keep)
except ValueError:
raise DistutilsOptionError("--keep must be an integer")
if isinstance(self.match, basestring):
self.match = [
convert_path(p.strip()) for p in self.match.split(',')
]
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
def run(self):
self.run_command("egg_info")
from glob import glob
for pattern in self.match:
pattern = self.distribution.get_name() + '*' + pattern
files = glob(os.path.join(self.dist_dir, pattern))
files = [(os.path.getmtime(f), f) for f in files]
files.sort()
files.reverse()
log.info("%d file(s) matching %s", len(files), pattern)
files = files[self.keep:]
for (t, f) in files:
log.info("Deleting %s", f)
if not self.dry_run:
os.unlink(f)
| apache-2.0 |
zhulin2609/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/cmake_unittest.py | 121 | 4134 | # Copyright (C) 2012 Intel Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for cmake.py."""
import unittest2 as unittest
from cmake import CMakeChecker
class CMakeCheckerTest(unittest.TestCase):

    """Tests CMakeChecker class."""

    def test_init(self):
        """Test __init__() method."""

        def _mock_handle_style_error(self):
            pass

        checker = CMakeChecker("foo.cmake", _mock_handle_style_error)
        # The handler passed to the constructor is stored verbatim.
        self.assertEqual(checker._handle_style_error, _mock_handle_style_error)

    def test_check(self):
        """Test check() method."""
        errors = []

        def _mock_handle_style_error(line_number, category, confidence,
                                     message):
            # Record each reported violation for comparison below.
            error = (line_number, category, confidence, message)
            errors.append(error)

        checker = CMakeChecker("foo.cmake", _mock_handle_style_error)
        # Fixture input: roughly one style violation per line. Misspellings
        # such as "endding"/"condtional" are intentional fixture text.
        lines = [
            '# This file is sample input for cmake_unittest.py and includes below problems:\n',
            'IF ()',
            '\tmessage("Error line with Tab")\n',
            ' message("Error line with endding spaces") \n',
            ' message( "Error line with space after (")\n',
            ' message("Error line with space before (" )\n',
            ' MESSAGE("Error line with upper case non-condtional command")\n',
            ' MESSage("Error line with upper case non-condtional command")\n',
            ' message("correct message line")\n',
            'ENDif ()\n',
            '\n',
            'if()\n',
            'endif ()\n',
            '\n',
            'macro ()\n',
            'ENDMacro()\n',
            '\n',
            'function ()\n',
            'endfunction()\n',
        ]
        checker.check(lines)
        self.maxDiff = None
        # Expected (line_number, category, confidence, message) tuples, in
        # the order the checker emits them (not strictly by line number).
        self.assertEqual(errors, [
            (3, 'whitespace/tab', 5, 'Line contains tab character.'),
            (2, 'command/lowercase', 5, 'Use lowercase command "if"'),
            (4, 'whitespace/trailing', 5, 'No trailing spaces'),
            (5, 'whitespace/parentheses', 5, 'No space after "("'),
            (6, 'whitespace/parentheses', 5, 'No space before ")"'),
            (7, 'command/lowercase', 5, 'Use lowercase command "message"'),
            (8, 'command/lowercase', 5, 'Use lowercase command "message"'),
            (10, 'command/lowercase', 5, 'Use lowercase command "endif"'),
            (12, 'whitespace/parentheses', 5, 'One space between command "if" and its parentheses, should be "if ("'),
            (15, 'whitespace/parentheses', 5, 'No space between command "macro" and its parentheses, should be "macro("'),
            (16, 'command/lowercase', 5, 'Use lowercase command "endmacro"'),
            (18, 'whitespace/parentheses', 5, 'No space between command "function" and its parentheses, should be "function("'),
        ])
| bsd-3-clause |
dcode/fsf_rpm | fsf/fsf-server/modules/EXTRACT_HEXASCII_PE.py | 4 | 1284 | #!/usr/bin/env python
#
# Author: Jason Batchelor
# Description: Binary convert on hexascii printables contained within a stream
# believed to represent an executable file.
# Date: 03/07/2016
'''
Copyright 2016 Carnegie Mellon University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import re
import binascii
def EXTRACT_HEXASCII_PE(s, buff):
    """Carve hexascii-encoded PE payloads out of a buffer.

    Scans ``buff`` for runs of hex digits beginning with the DOS 'MZ'
    header (4D 5A) and converts each run back to binary.

    Args:
        s: scanner/state object (unused here; part of the module API).
        buff: text buffer to scan.

    Returns:
        dict mapping 'Object_<n>' to {'Buffer': <decoded binary data>}.
    """
    # Function must return a dictionary
    SUB_OBJ = {}
    counter = 0
    # Match whole byte pairs only: the previous pattern ([0-9A-Fa-f]+)
    # could match an odd number of hex digits, making binascii.unhexlify()
    # raise on the trailing half-byte.
    for m in re.finditer(r"4[dD]5[aA](?:[0-9A-Fa-f]{2})+", buff):
        SUB_OBJ.update({'Object_%s' % counter: {'Buffer': binascii.unhexlify(m.group(0))}})
        counter += 1

    return SUB_OBJ
if __name__ == '__main__':
    # For testing, s object can be None type if unused in function.
    # print(...) works under both Python 2 (parenthesized expression) and
    # Python 3 (function call); the bare py2 print statement did not.
    print(EXTRACT_HEXASCII_PE(None, sys.stdin.read()))
| apache-2.0 |
lvgilmore/Luke | LukeClient/luke-client.py | 1 | 3179 | #! /usr/bin/python
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from LukeClient.Cpu import Cpu
from LukeClient.Disks import Disks
from LukeClient.Nics import Nics
from LukeClient.PollingStatus import PollingStatus
from LukeClient.Ram import Ram
from LukeClient.Server import Server
from requests import post
os.system("dhclient eth0")
server = Server(cpu=Cpu(), ram=Ram(), nics=Nics(), disks=Disks())
serverObject = {'Vendor': server.vendor,
'Model': server.model,
'Cpu': server.serverCpu.cpuObject,
'Ram': server.serverRam.ramObject,
'NICs': server.serverNics.nicsObject,
'Disks': server.serverDisks.disksObject}
port = 8000
# report = convert_to_json(serverObject)
report = "{\"Vendor\": \"vend\"," \
" \"Cpu\": {\"Sockets\": \"1\", \"Arch\": \"x86_64\", \
\"Speed\": \"2201.000\", \"Cores\": \"1\"}," \
" \"Ram\": {\"Size\": \"3062784\"}, " \
"\"NICs\": {\"ens33\": " \
"{\"Speed\": \"1000Mb/s\", \"Mac\": \"00:0c:29:3d:5e:ce\", \
\"Type\": \"Twisted Pair\", \"ip\": \"192.168.0.4\"}}," \
" \"Disks\": {\"sda\": {\"Vendor\": \"VMware\", \"Size\": \"2\"}, " \
"\"sr0\": {\"Vendor\": \"VMware\", \"Size\": \"5\"}}, " \
"\"ip\": \"192.168.0.5\", \"Model\": \"mod\"}"
req = "{\"requirements\": {\"Cpu\": {\"Sockets\": \"1\",\
\"Speed\": \"2201.000\", \"Cores\": \"1\"}," \
"\"Vendor\": \"vend\"}," \
"\"other_prop\": {\"Ram\": {\"Size\": \"3062784\"}, " \
"\"NICs\": {\"ens33\": " \
"{\"Speed\": \"1000Mb/s\", \"Mac\": \"00:0c:29:3d:5e:ce\", \
\"Type\": \"Twisted Pair\"}}," \
" \"Disks\": {\"sda\": {\"Vendor\": \"VMware\", \"Size\": \"2\"}, " \
"\"sr0\": {\"Vendor\": \"VMware\", \"Size\": \"5\"}}, " \
"\"Model\": \"mod\", \"profile\": \"common\"}}"
request = post("http://localhost:{}/request/".format(port), data={"request": req})
print(str(report))
# bare_metal_id = post("http://localhost:{}/baremetal/".format(port),
# data={"bare_metal": str(report)})
bare_metal_id = post("http://localhost:{}/baremetal/".format(port),
data={"bare_metal": report}).content
bare_metal_id = bare_metal_id[1:-1]
print(bare_metal_id)
polling_thread = PollingStatus(port, bare_metal_id)
polling_thread.start()
| gpl-3.0 |
kisoku/ansible | test/integration/gce_credentials.py | 275 | 1809 | import collections
import os
import sys

import yaml
try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    # Touch the GCE attribute to confirm this libcloud build supports GCE.
    _ = Provider.GCE
except ImportError:
    # Ansible-style machine-readable failure message on stdout.
    print("failed=True " + \
        "msg='libcloud with GCE support (0.13.3+) required for this module'")
    # NOTE(review): ensure `sys` is imported at the top of this file;
    # otherwise this line raises NameError instead of exiting cleanly.
    sys.exit(1)
def add_credentials_options(parser):
    """Register GCE credential options on an optparse parser.

    Defaults are loaded from a local ``credentials.yml`` file when present;
    otherwise each option defaults to None.

    Args:
        parser: optparse.OptionParser (or compatible) to extend in place.
    """
    default_service_account_email = None
    default_pem_file = None
    default_project_id = None

    # Load details from credentials.yml
    if os.path.isfile('credentials.yml'):
        # Use a context manager so the handle is closed, and safe_load so
        # arbitrary YAML tags cannot execute code; the file is plain data.
        with open('credentials.yml', 'r') as stream:
            credentials = yaml.safe_load(stream)
        default_service_account_email = credentials['gce_service_account_email']
        default_pem_file = credentials['gce_pem_file']
        default_project_id = credentials['gce_project_id']

    parser.add_option("--service_account_email",
                      action="store", dest="service_account_email",
                      default=default_service_account_email,
                      help="GCE service account email. Default is loaded from credentials.yml.")
    parser.add_option("--pem_file",
                      action="store", dest="pem_file",
                      default=default_pem_file,
                      help="GCE client key. Default is loaded from credentials.yml.")
    parser.add_option("--project_id",
                      action="store", dest="project_id",
                      default=default_project_id,
                      help="Google Cloud project ID. Default is loaded from credentials.yml.")
def check_required(opts, parser):
    """Abort via parser.error() when any required credential option is unset."""
    required_options = ('service_account_email', 'pem_file', 'project_id')
    for option_name in required_options:
        if getattr(opts, option_name) is None:
            parser.error("Missing required parameter: --%s" % option_name)
def get_gce_driver(opts):
    """Connect to GCE and return a configured libcloud driver instance."""
    driver_cls = get_driver(Provider.GCE)
    driver = driver_cls(opts.service_account_email, opts.pem_file,
                        project=opts.project_id)
    return driver
| gpl-3.0 |
zvolsky/vylet | models/plugin_config.py | 2 | 1689 | # -*- coding: utf-8 -*-
# Web2py plugin to manage/save/load config items.
# Items saved as single 'group' with 'item's can be loaded into single python dictionary with keys 'item's.
# A config "group" bundles related items; user_settings marks per-user groups.
db.define_table('plugin_config_grp',
    Field('grp', length=16),
    Field('user_settings', 'boolean'),
    Field('txt_label', length=92),
    Field('txt_comment', 'text'),
    format='%(grp)s'
    )

# Each key within a group becomes one dictionary key when loaded.
db.define_table('plugin_config_key',
    Field('plugin_config_grp_id', db.plugin_config_grp),
    Field('dict_key', length=16),
    Field('txt_label', length=92),
    Field('txt_comment', 'text'),
    format='%(dict_key)s'
    )

# Stored value for a key; auth_user_id is set for per-user overrides.
db.define_table('plugin_config_val',
    Field('plugin_config_key_id', db.plugin_config_key),
    Field('auth_user_id', db.auth_user),
    Field('dict_value', length=256),
    format='%(dict_value)s'
    )

# Following configuration values are defaults.
# You can change them in db.py or other model (alphabetically after db.py, but before this model).
# Example: to disable rights to do changes for the admin group members, set: plugins.config.admin_group=''
def _():
    # Local function keeps PluginManager setup out of the module namespace.
    from gluon.tools import PluginManager
    plugins = PluginManager('config',
        admin_group='admin',  # name of the admin group (all changes allowed for members). empty string to disable
        # in addition changes are enabled for members of following groups:
        #   plugin_config_structure - change configuration structure
        #   plugin_config_global - change global settings
        #   everybody can change his/her own setting
        )
# NOTE(review): _() has no return statement, so plugin_manage_groups is
# always None — confirm whether `return plugins` was intended.
plugin_manage_groups = _()
| agpl-3.0 |
La0/pelican-flickr | pelican-flickr/cached.py | 2 | 1437 | import json
import os
import tempfile
import main
class FlickrCached(object):
    '''
    Helper methods to cache objects as JSON files in a temp directory.
    '''
    id = None
    cache_path = ''
    data = {}
    cached = True

    # Template generation
    slug = ''
    url = ''
    generated_path = ''

    def __init__(self, cache_id):
        self.id = cache_id

        # Ensure the cache directory exists when caching is enabled.
        cache_dir = os.path.join(tempfile.gettempdir(), 'pelican_flickr')
        if main.FLICKR_CACHE and not os.path.isdir(cache_dir):
            os.mkdir(cache_dir)

        # Final cache path for this object.
        self.cache_path = os.path.join(cache_dir, '%s.json' % self.id)

    def fetch(self):
        '''
        Load this object's data set from the local cache.
        Returns False on a cache miss or when caching is disabled.
        '''
        if not main.FLICKR_CACHE:
            return False
        if not os.path.exists(self.cache_path):
            return False
        with open(self.cache_path, 'r') as handle:
            self.data = json.load(handle)

        # A cached file must never be empty.
        if not self.data:
            raise Exception('No data from %s' % self.cache_path)

        return True

    def save(self):
        '''
        Serialize this object's data set into the local cache.
        '''
        if not main.FLICKR_CACHE:
            return False
        with open(self.cache_path, 'w') as handle:
            json.dump(self.data, handle)
        return True

    def build_paths(self, parts):
        # Derive slug, generated file path and url for this cache object.
        self.slug = '/'.join(parts)
        self.generated_path = '%s/%s.html' % (main.FLICKR_OUTPUT_DIRNAME, self.slug)
        self.url = '/' + self.generated_path
| gpl-3.0 |
ashishnitinpatil/django_appengine_project_template | django/core/files/images.py | 117 | 2181 | """
Utility functions for handling images.
Requires Pillow (or PIL), as you might imagine.
"""
import zlib
from django.core.files import File
class ImageFile(File):
    """
    A mixin for use alongside django.core.files.base.File, which provides
    additional features for dealing with images.
    """
    def _get_width(self):
        # Dimensions tuple is (width, height); index 0 is the width.
        return self._get_image_dimensions()[0]
    width = property(_get_width)

    def _get_height(self):
        # Index 1 of the dimensions tuple is the height.
        return self._get_image_dimensions()[1]
    height = property(_get_height)

    def _get_image_dimensions(self):
        # Compute once and cache: obtaining dimensions requires reading
        # (part of) the underlying file.
        if not hasattr(self, '_dimensions_cache'):
            close = self.closed
            self.open()
            self._dimensions_cache = get_image_dimensions(self, close=close)
        return self._dimensions_cache
def get_image_dimensions(file_or_path, close=False):
    """
    Returns the (width, height) of an image, given an open file or a path. Set
    'close' to True to close the file at the end if it is initially in an open
    state.
    """
    # Imported lazily so the PIL/Pillow dependency is only needed when
    # dimensions are actually requested.
    from django.utils.image import ImageFile as PILImageFile
    p = PILImageFile.Parser()
    if hasattr(file_or_path, 'read'):
        # File-like object: remember the current position so it can be
        # restored afterwards, then rewind to parse from the start.
        file = file_or_path
        file_pos = file.tell()
        file.seek(0)
    else:
        # Path: we open the file ourselves, so we must also close it.
        file = open(file_or_path, 'rb')
        close = True
    try:
        # Most of the time PIL only needs a small chunk to parse the image and
        # get the dimensions, but with some TIFF files PIL needs to parse the
        # whole file.
        chunk_size = 1024
        while 1:
            data = file.read(chunk_size)
            if not data:
                break
            try:
                p.feed(data)
            except zlib.error as e:
                # ignore zlib complaining on truncated stream, just feed more
                # data to parser (ticket #19457).
                if e.args[0].startswith("Error -5"):
                    pass
                else:
                    raise
            if p.image:
                return p.image.size
            # Double the read size each pass so pathological files are
            # consumed in O(log n) reads rather than many small ones.
            chunk_size = chunk_size*2
        return None
    finally:
        # Either close the file (we opened it, or the caller asked us to)
        # or restore the caller's original read position.
        if close:
            file.close()
        else:
            file.seek(file_pos)
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/lib2to3/fixes/fix_execfile.py | 324 | 1998 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for execfile.
This converts usages of the execfile function into calls to the built-in
exec() function.
"""
from .. import fixer_base
from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
ArgList, String, syms)
class FixExecfile(fixer_base.BaseFix):
    # Participate in lib2to3's "bottom matcher" optimization.
    BM_compatible = True

    # Matches execfile(filename[, globals[, locals]]) calls.
    PATTERN = """
    power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
    |
    power< 'execfile' trailer< '(' filename=any ')' > >
    """

    def transform(self, node, results):
        """Rewrite execfile(f, ...) as exec(compile(open(f).read(), f, 'exec'), ...)."""
        assert results
        filename = results["filename"]
        globals = results.get("globals")
        locals = results.get("locals")

        # Copy over the prefix from the right parentheses end of the execfile
        # call.
        execfile_paren = node.children[-1].children[-1].clone()
        # Construct open().read().
        open_args = ArgList([filename.clone()], rparen=execfile_paren)
        open_call = Node(syms.power, [Name(u"open"), open_args])
        read = [Node(syms.trailer, [Dot(), Name(u'read')]),
                Node(syms.trailer, [LParen(), RParen()])]
        open_expr = [open_call] + read
        # Wrap the open call in a compile call. This is so the filename will be
        # preserved in the execed code.
        filename_arg = filename.clone()
        filename_arg.prefix = u" "
        exec_str = String(u"'exec'", u" ")
        compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
        compile_call = Call(Name(u"compile"), compile_args, u"")
        # Finally, replace the execfile call with an exec call.
        args = [compile_call]
        if globals is not None:
            args.extend([Comma(), globals.clone()])
        if locals is not None:
            args.extend([Comma(), locals.clone()])
        return Call(Name(u"exec"), args, prefix=node.prefix)
| gpl-3.0 |
jsherfey/infinitebrain | infinitebrain/settings.py | 1 | 6450 | """
Django settings for infinitebrain project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

PROJECT_DIR = os.path.dirname(__file__)

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before deploying.
SECRET_KEY = '#bzf+3ho^kr2rnnc_1iooav_7ven)@4k(ku94z&0!1o4ner-s6'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False

STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# Upload size reference (bytes):
# 2.5MB - 2621440
# 5MB - 5242880
# 10MB - 10485760
# 20MB - 20971520
# 50MB - 52428800
# 100MB 104857600
# 250MB - 214958080
# 500MB - 429916160
# NOTE(review): stored as a string, not an int — confirm consumers expect str.
MAX_UPLOAD_SIZE = "52428800"

# NOTE(review): TEMPLATE_DEBUG is True while DEBUG is False; Django expects
# these two to match — confirm this divergence is intentional.
TEMPLATE_DEBUG = True

TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    #'django_notify.context_processors.notifications',
    'sekizai.context_processors.sekizai',
    'django.core.context_processors.debug',
    'django.core.context_processors.media',
)
#TEMPLATE_CONTEXT_PROCESSORS = DEFAULT_SETTINGS.TEMPLATE_CONTEXT_PROCESSORS + (
#    'django.core.context_processors.request',
#    'django.core.context_processors.i18n',
#    'sekizai.context_processors.sekizai',
#    'django.core.context_processors.debug',
#)

ALLOWED_HOSTS = ['*']
# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.humanize',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.comments',
    'haystack',
    'modeldb',
    'registration',
    'twitter_feed',
    'tweepy',
    'south',
    'taggit',
    'taggit_templatetags',
    'qhonuskan_votes',
    # # # # # # # # # ################
    # removed 'django-nyt'
    # ###############################
    'django_nyt',
    'mptt',
    'sekizai',
    'sorl.thumbnail',
    # 'wiki',
    # 'wiki.plugins.attachments',
    # 'wiki.plugins.notifications',
    # 'wiki.plugins.images',
    # 'wiki.plugins.macros',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    #'django_notify.middleware.NotificationsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'infinitebrain.urls'

WSGI_APPLICATION = 'infinitebrain.wsgi.application'

# South migrations live in relocated modules for these apps.
SOUTH_MIGRATION_MODULES = {
    'taggit': 'taggit.south_migrations',
    'django_nyt': 'django_nyt.south_migrations',
    # 'wiki': 'wiki.south_migrations',
    # 'images': 'wiki.plugins.images.south_migrations',
    # 'notifications': 'wiki.plugins.notifications.south_migrations',
    # 'attachments': 'wiki.plugins.attachments.south_migrations',
}

# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# SQLite when DEBUG (development), MySQL otherwise.
# NOTE(review): production DB credentials are committed here; move them to
# environment variables or an untracked settings file.
if DEBUG:
    DATABASES = { 'default': {
            'ENGINE': 'django.db.backends.sqlite3', # this just added for development
            'NAME': 'devdb', # again, just added for development
            'USER': '',
            'PASSWORD': '',
        }
    }
else:
    DATABASES = { 'default': {
            'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
            'NAME': 'modulator', # Or path to database file if using sqlite3.
            'USER': 'dev', # Not used with sqlite3.
            'PASSWORD': 'l3rAInzRck', # Not used with sqlite3.
            'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
            'PORT': '', # Set to empty string for default. Not used with sqlite3.
        }
    }
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'EST'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")

MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media/")

LOGIN_REDIRECT_URL = '/'

# Registration settings
ACCOUNT_ACTIVATION_DAYS=7

# Outgoing mail: Gmail SMTP with STARTTLS.
# NOTE(review): the mailbox password is committed here; move credentials to
# environment variables and rotate them.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'theinfinitebrain@gmail.com'
EMAIL_HOST_PASSWORD = 'l3rAInzRck'
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_HOST='localhost'
#EMAIL_PORT=25#587 #"587" #25 # Port to use for the SMTP server

SITE_ID = 1
EMAIL_SUBJECT_PREFIX='[infinitebrain]'
#DEFAULT_FROM_EMAIL = 'jason@infinitebrain.org'
# dummy server: sudo python -m smtpd -n -c DebuggingServer localhost:1025

# Twitter settings
# NOTE(review): OAuth consumer secrets and access tokens are committed here;
# revoke them and load from the environment instead.
# settings for @infbrain (jason_sherfey, jssherfey@gmail.com):
TWITTER_FEED_CONSUMER_PUBLIC_KEY = 'eLsEjizrCeDgXjnECO2W7n3cW'
TWITTER_FEED_CONSUMER_SECRET = 'TiXOSjVg8SE95Q1k49G9EMI2wWQcEqAWK6H99xuMcBl1RuzykS'
TWITTER_FEED_OPEN_AUTH_TOKEN = '66367399-kOOIF8YgmQLtsS53fjTNVqPZB1wlHKcM5zUxhnIHB'
TWITTER_FEED_OPEN_AUTH_SECRET = 'HyEUBa9WfckoOJs6Tih7MTxwUepSpA6seXzpC0hh2mlZt'
# settings for @jasonsherfey (sherfey@bu.edu):
#TWITTER_FEED_CONSUMER_PUBLIC_KEY = 'tWllmoyOjOi3T9ujUk7lH0M32'
#TWITTER_FEED_CONSUMER_SECRET = '22T6k9UfXZ4op4Zw0JwHi2WHRHF2tgzlfYOHfjFPrIy4kmY0I4'
#TWITTER_FEED_OPEN_AUTH_TOKEN = '2270834493-IcR5CNhh9YDpcpUM8KUGqylI6FokHbASdd9QCoa'
#TWITTER_FEED_OPEN_AUTH_SECRET = 'lC9mi67oTzN0bIDmhhbx36JiqcZM4OPvuAPhjSMpG3mWh'
# how to get API keys/tokens: https://twittercommunity.com/t/how-to-get-my-api-key/7033

# HAYSTACK settings. Path is the location of the whoosh index.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
    },
}
| gpl-2.0 |
wenderen/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/headerparserhandler.py | 638 | 9836 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""PythonHeaderParserHandler for mod_pywebsocket.
Apache HTTP Server and mod_python must be configured such that this
function is called to handle WebSocket request.
"""
import logging
from mod_python import apache
from mod_pywebsocket import common
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
from mod_pywebsocket import util
# PythonOption to specify the handler root directory.
_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'

# PythonOption to specify the handler scan directory.
# This must be a directory under the root directory.
# The default is the root directory.
_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'

# PythonOption to allow handlers whose canonical path is
# not under the root directory. It's disallowed by default.
# Set this option with value of 'yes' to allow.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
    'mod_pywebsocket.allow_handlers_outside_root_dir')
# Map from option values to their boolean meanings. 'Yes' and 'No' are
# allowed just for compatibility.
_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
    'off': False, 'no': False, 'on': True, 'yes': True}

# (Obsolete option. Ignored.)
# PythonOption to specify to allow handshake defined in Hixie 75 version
# protocol. The default is None (Off)
_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
# Map from option values to their boolean meanings.
_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
class ApacheLogHandler(logging.Handler):
    """Wrapper logging.Handler to emit log message to apache's error.log."""

    # Map Python logging levels to their mod_python/Apache equivalents.
    _LEVELS = {
        logging.DEBUG: apache.APLOG_DEBUG,
        logging.INFO: apache.APLOG_INFO,
        logging.WARNING: apache.APLOG_WARNING,
        logging.ERROR: apache.APLOG_ERR,
        logging.CRITICAL: apache.APLOG_CRIT,
    }

    def __init__(self, request=None):
        # When a request is supplied, use its log_error so messages are
        # attributed to that request; otherwise log server-wide.
        logging.Handler.__init__(self)
        self._log_error = apache.log_error
        if request is not None:
            self._log_error = request.log_error

        # Time and level will be printed by Apache.
        self._formatter = logging.Formatter('%(name)s: %(message)s')

    def emit(self, record):
        # Unmapped levels default to DEBUG.
        apache_level = apache.APLOG_DEBUG
        if record.levelno in ApacheLogHandler._LEVELS:
            apache_level = ApacheLogHandler._LEVELS[record.levelno]

        msg = self._formatter.format(record)

        # "server" parameter must be passed to have "level" parameter work.
        # If only "level" parameter is passed, nothing shows up on Apache's
        # log. However, at this point, we cannot get the server object of the
        # virtual host which will process WebSocket requests. The only server
        # object we can get here is apache.main_server. But Wherever (server
        # configuration context or virtual host context) we put
        # PythonHeaderParserHandler directive, apache.main_server just points
        # the main server instance (not any of virtual server instance). Then,
        # Apache follows LogLevel directive in the server configuration context
        # to filter logs. So, we need to specify LogLevel in the server
        # configuration context. Even if we specify "LogLevel debug" in the
        # virtual host context which actually handles WebSocket connections,
        # DEBUG level logs never show up unless "LogLevel debug" is specified
        # in the server configuration context.
        #
        # TODO(tyoshino): Provide logging methods on request object. When
        # request is mp_request object (when used together with Apache), the
        # methods call request.log_error indirectly. When request is
        # _StandaloneRequest, the methods call Python's logging facility which
        # we create in standalone.py.
        self._log_error(msg, apache_level, apache.main_server)
def _configure_logging():
    """Route all Python logging through Apache's error log."""
    root_logger = logging.getLogger()
    # Logs are filtered by Apache based on the LogLevel directive in the
    # Apache configuration file, so pass every level through to the handler.
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(ApacheLogHandler())
# Install the Apache-backed log handler at import time so every module
# logger is routed into Apache's error log.
_configure_logging()

_LOGGER = logging.getLogger(__name__)
def _parse_option(name, value, definition):
if value is None:
return False
meaning = definition.get(value.lower())
if meaning is None:
raise Exception('Invalid value for PythonOption %s: %r' %
(name, value))
return meaning
def _create_dispatcher():
    """Create the websocket Dispatcher from Apache PythonOption settings.

    Returns:
        dispatch.Dispatcher configured with the handler root/scan dirs.

    Raises:
        Exception: if the mandatory handler-root PythonOption is missing.
    """
    _LOGGER.info('Initializing Dispatcher')

    options = apache.main_server.get_options()

    handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
    if not handler_root:
        # Bug fix: apache.APLOG_ERR used to be passed as a second argument
        # to Exception(), where it was meaningless — it belongs to the
        # log_error() signature, not the exception constructor.
        raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT)

    handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)

    allow_handlers_outside_root = _parse_option(
        _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
        options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
        _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)

    dispatcher = dispatch.Dispatcher(
        handler_root, handler_scan, allow_handlers_outside_root)

    # Surface any problems found while loading handler sources.
    for warning in dispatcher.source_warnings():
        apache.log_error(
            'mod_pywebsocket: Warning in source loading: %s' % warning,
            apache.APLOG_WARNING)

    return dispatcher
# Initialize the module-level dispatcher once at import time; handler
# sources are scanned (and any warnings logged) here.
_dispatcher = _create_dispatcher()
def headerparserhandler(request):
    """Handle request.

    Args:
        request: mod_python request.

    This function is named headerparserhandler because it is the default
    name for a PythonHeaderParserHandler.
    """
    handshake_is_done = False
    try:
        # Fallback to default http handler for request paths for which
        # we don't have request handlers.
        if not _dispatcher.get_handler_suite(request.uri):
            request.log_error(
                'mod_pywebsocket: No handler for resource: %r' % request.uri,
                apache.APLOG_INFO)
            request.log_error(
                'mod_pywebsocket: Fallback to Apache', apache.APLOG_INFO)
            return apache.DECLINED
    except dispatch.DispatchException, e:
        request.log_error(
            'mod_pywebsocket: Dispatch failed for error: %s' % e,
            apache.APLOG_INFO)
        # NOTE(review): handshake_is_done is always False at this point;
        # the guard mirrors the handler further below.
        if not handshake_is_done:
            return e.status

    try:
        allow_draft75 = _parse_option(
            _PYOPT_ALLOW_DRAFT75,
            apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
            _PYOPT_ALLOW_DRAFT75_DEFINITION)

        try:
            handshake.do_handshake(
                request, _dispatcher, allowDraft75=allow_draft75)
        except handshake.VersionException, e:
            request.log_error(
                'mod_pywebsocket: Handshake failed for version error: %s' % e,
                apache.APLOG_INFO)
            # Advertise the protocol versions we do support to the client.
            request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
                                        e.supported_versions)
            return apache.HTTP_BAD_REQUEST
        except handshake.HandshakeException, e:
            # Handshake for ws/wss failed.
            # Send http response with error status.
            request.log_error(
                'mod_pywebsocket: Handshake failed for error: %s' % e,
                apache.APLOG_INFO)
            return e.status

        handshake_is_done = True
        request._dispatcher = _dispatcher
        # Hand the established connection over for data transfer; this call
        # blocks for the lifetime of the websocket connection.
        _dispatcher.transfer_data(request)
    except handshake.AbortedByUserException, e:
        request.log_error('mod_pywebsocket: Aborted: %s' % e, apache.APLOG_INFO)
    except Exception, e:
        # DispatchException can also be thrown if something is wrong in
        # pywebsocket code. It's caught here, then.
        request.log_error('mod_pywebsocket: Exception occurred: %s\n%s' %
                          (e, util.get_stack_trace()),
                          apache.APLOG_ERR)
        # Unknown exceptions before handshake mean Apache must handle its
        # request with another handler.
        if not handshake_is_done:
            return apache.DECLINED

    # Set assbackwards to suppress response header generation by Apache.
    request.assbackwards = 1
    return apache.DONE  # Return DONE such that no other handlers are invoked.


# vi:sts=4 sw=4 et
| mpl-2.0 |
ruleant/weblate | weblate/trans/models/source.py | 1 | 2382 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db import models
from django.utils.translation import ugettext_lazy as _
from weblate.trans.models.subproject import SubProject
# (db value, localized label) pairs for Source.priority; lower numbers
# label higher priorities, with 100 ("Medium") as the field default.
PRIORITY_CHOICES = (
    (60, _('Very high')),
    (80, _('High')),
    (100, _('Medium')),
    (120, _('Low')),
    (140, _('Very low')),
)
class Source(models.Model):
    # Hash identifying the source string this metadata row belongs to.
    checksum = models.CharField(max_length=40)
    subproject = models.ForeignKey(SubProject)
    timestamp = models.DateTimeField(auto_now_add=True)
    priority = models.IntegerField(
        default=100,
        choices=PRIORITY_CHOICES,
    )

    class Meta(object):
        permissions = (
            ('edit_priority', "Can edit priority"),
        )
        app_label = 'trans'
        unique_together = ('checksum', 'subproject')

    def __init__(self, *args, **kwargs):
        super(Source, self).__init__(*args, **kwargs)
        # Set by save(): records whether the last save changed the priority.
        self.priority_modified = False

    def __unicode__(self):
        return 'src:{0}'.format(self.checksum)

    def save(self, force_insert=False, **kwargs):
        """
        Wrapper around save to indicate whether priority has been
        modified.
        """
        if force_insert:
            # New row: "modified" means anything other than the default 100.
            self.priority_modified = (self.priority != 100)
        else:
            # NOTE(review): assumes the row already exists in the database;
            # Source.DoesNotExist would propagate otherwise.
            old = Source.objects.get(pk=self.pk)
            self.priority_modified = (old.priority != self.priority)
        super(Source, self).save(force_insert, **kwargs)

    @models.permalink
    def get_absolute_url(self):
        # Reversed against the 'review_source' URL pattern.
        return ('review_source', (), {
            'project': self.subproject.project.slug,
            'subproject': self.subproject.slug,
        })
| gpl-3.0 |
CloudNcodeInc/django-datatable-view | datatableview/views.py | 1 | 25073 | # -*- encoding: utf-8 -*-
import json
from django.utils.encoding import force_text
import re
import operator
import logging
try:
from functools import reduce
except ImportError:
pass
from django.views.generic.list import ListView, MultipleObjectMixin
from django.http import HttpResponse, HttpResponseBadRequest
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Model, Manager, Q
from django.utils.text import smart_split
from django.views.decorators.csrf import ensure_csrf_cookie
from django.conf import settings
from django import get_version
import six
from .forms import XEditableUpdateForm
from .utils import (FIELD_TYPES, FIELD_HANDLERS, ObjectListResult, DatatableOptions,
DatatableStructure, split_real_fields, filter_real_fields, resolve_orm_path,
get_first_orm_bit, get_field_definition)
log = logging.getLogger(__name__)

def _parse_version(version_string):
    """
    Convert a dotted version string to a tuple of its leading integer
    components, stopping at the first non-numeric part (e.g. "1.7c1"
    becomes ``(1, 7)`` only if "7" is a standalone component; "c1" and
    anything after it is ignored).

    Kept separate so the comparison below is purely numeric.
    """
    parts = []
    for bit in version_string.split('.'):
        if not bit.isdigit():
            break
        parts.append(int(bit))
    return tuple(parts)

# Django 1.5+ supports Model.save(update_fields=[...]).
# Compare version components numerically: the previous string-list
# comparison (get_version().split('.') >= ['1', '5']) was wrong for
# Django 1.10+ because '10' < '5' lexicographically.
CAN_UPDATE_FIELDS = _parse_version(get_version()) >= (1, 5)
class DatatableMixin(MultipleObjectMixin):
    """
    Converts a view into an AJAX interface for obtaining records.
    The normal GET execution adds a ``DataTable`` object to the context which can be used to
    streamline the dumping of the HTML skeleton required for datatables.js to hook. A ``DataTable``
    object doesn't hold any data, just a structure superficially generated from the options on the
    view.
    The template is responsible for making the AJAX request back to this view to populate the table
    with data.
    """
    # Raw options (dict or None); normalized lazily into a
    # ``datatable_options_class`` instance by ``_get_datatable_options()``.
    datatable_options = None
    # Template context variable name for the table structure object.
    datatable_context_name = 'datatable'
    datatable_options_class = DatatableOptions
    datatable_structure_class = DatatableStructure
    def get(self, request, *args, **kwargs):
        """
        Detects AJAX access and returns appropriate serialized data. Normal access to the view is
        unmodified.
        """
        # ``?ajax=true`` is an explicit opt-in for clients that don't set
        # the X-Requested-With header.
        if request.is_ajax() or request.GET.get('ajax') == 'true':
            return self.get_ajax(request, *args, **kwargs)
        return super(DatatableMixin, self).get(request, *args, **kwargs)
    def get_model(self):
        """ Returns ``self.model``, deriving it from the queryset if unset. """
        if not self.model:
            self.model = self.get_queryset().model
        return self.model
    def get_object_list(self):
        """ Gets the core queryset, but applies the datatable options to it. """
        return self.apply_queryset_options(self.get_queryset())
    def get_ajax_url(self):
        """ URL the client-side table will poll for data (the current path). """
        return self.request.path
    def get_datatable_structure(self):
        """ Builds the no-data structure object used to render the table skeleton. """
        options = self._get_datatable_options()
        model = self.get_model()
        return self.datatable_structure_class(self.get_ajax_url(), options, model=model)
    def get_datatable_options(self):
        """
        Returns the DatatableOptions object for this view's configuration.
        This method is guaranteed to be called only once per request.
        """
        return self.datatable_options
    def _get_datatable_options(self):
        """
        Internal safe access. Guarantees that ``get_datatable_options()`` is called only once, so
        that subclasses can use that method to modify the class attribute ``datatable_options``.
        """
        if not hasattr(self, '_datatable_options'):
            model = self.get_model()
            options = self.get_datatable_options()
            if options:
                # Options are defined, but probably in a raw dict format
                options = self.datatable_options_class(model, self.request.GET, **dict(options))
            else:
                # No options defined on the view
                options = self.datatable_options_class(model, self.request.GET)
            self._datatable_options = options
        return self._datatable_options
    def apply_queryset_options(self, queryset):
        """
        Interprets the datatable options.
        Options requiring manual massaging of the queryset are handled here. The output of this
        method should be treated as a list, since complex options might convert it out of the
        original queryset form.
        """
        options = self._get_datatable_options()
        # These will hold residue queries that cannot be handled in at the database level. Anything
        # in these variables by the end will be handled manually (read: less efficiently)
        sort_fields = []
        searches = []
        # This count is for the benefit of the frontend datatables.js
        total_initial_record_count = queryset.count()
        if options['ordering']:
            db_fields, sort_fields = split_real_fields(self.get_model(), options['ordering'])
            queryset = queryset.order_by(*db_fields)
        if options['search']:
            db_fields, searches = filter_real_fields(self.get_model(), options['columns'],
                                                     key=get_first_orm_bit)
            db_fields.extend(options['search_fields'])
            queries = []  # Queries generated to search all fields for all terms
            # NOTE: on Python 3 this map() is a one-shot iterator; it is
            # consumed exactly once by the loop below.
            search_terms = map(lambda q: q.strip("'\" "), smart_split(options['search']))
            for term in search_terms:
                term_queries = []  # Queries generated to search all fields for this term
                # Every concrete database lookup string in 'columns' is followed to its trailing
                # field descriptor. For example, "subdivision__name" terminates in a CharField.
                # The field type determines how it is probed for search.
                for column in db_fields:
                    column = get_field_definition(column)
                    for component_name in column.fields:
                        field_queries = []  # Queries generated to search this database field for the search term
                        field = resolve_orm_path(self.get_model(), component_name)
                        if field.choices:
                            # Query the database for the database value rather than display value
                            choices = field.get_flatchoices()
                            length = len(choices)
                            database_values = []
                            display_values = []
                            for choice in choices:
                                database_values.append(choice[0])
                                display_values.append(choice[1].lower())
                            for i in range(length):
                                if term.lower() in display_values[i]:
                                    # NOTE(review): a later matching choice replaces
                                    # (not extends) field_queries - confirm intended.
                                    field_queries = [{component_name + '__iexact': database_values[i]}]
                        else:
                            for label, field_types in FIELD_TYPES.items():
                                if isinstance(field, tuple(field_types)):
                                    # Queries generated to search this database field for the search term
                                    handler = FIELD_HANDLERS.get(label)
                                    if not handler:
                                        raise ValueError("Unhandled field type %s. Please update "
                                                         "FIELD_HANDLERS." % label)
                                    field_queries = handler(field, component_name, term)
                                    break
                        # Append each field inspection for this term
                        if field_queries:
                            term_queries.extend(map(lambda q: Q(**q), field_queries))
                # Append the logical OR of all field inspections for this term
                if len(term_queries):
                    queries.append(reduce(operator.or_, term_queries))
            # Apply the logical AND of all term inspections
            if len(queries):
                queryset = queryset.filter(reduce(operator.and_, queries))
                # Append distinct() to eliminate duplicate rows
                queryset = queryset.distinct()
        # TODO: Remove "and not searches" from this conditional, since manual searches won't be done
        if not sort_fields and not searches:
            # We can shortcut and speed up the process if all operations are database-backed.
            object_list = queryset
            if options['search']:
                object_list._dtv_unpaged_total = queryset.count()
            else:
                object_list._dtv_unpaged_total = total_initial_record_count
        else:
            object_list = ObjectListResult(queryset)
            # # Manual searches
            # # This is broken until it searches all items in object_list previous to the database
            # # sort. That represents a runtime load that hits every row in code, rather than in the
            # # database. If enabled, this would cripple performance on large datasets.
            # if options['i_walk_the_dangerous_line_between_genius_and_insanity']:
            #     length = len(object_list)
            #     for i, obj in enumerate(reversed(object_list)):
            #         keep = False
            #         for column_info in searches:
            #             column_index = options['columns'].index(column_info)
            #             rich_data, plain_data = self.get_column_data(column_index, column_info, obj)
            #             for term in search_terms:
            #                 if term.lower() in plain_data.lower():
            #                     keep = True
            #                     break
            #             if keep:
            #                 break
            #
            #         if not keep:
            #             removed = object_list.pop(length - 1 - i)
            #             # print column_info
            #             # print data
            #             # print '===='
            # Sort the results manually for whatever remaining sort options are left over
            def data_getter_orm(field_name):
                # Key function that walks "a__b__c" attribute paths on the instance.
                def key(obj):
                    try:
                        return reduce(getattr, [obj] + field_name.split('__'))
                    except (AttributeError, ObjectDoesNotExist):
                        return None
                return key
            def data_getter_custom(i):
                # Key function that sorts on the plain-text value of a virtual column.
                def key(obj):
                    rich_value, plain_value = self.get_column_data(i, options['columns'][i], obj)
                    return plain_value
                return key
            # Sort the list using the manual sort fields, back-to-front. `sort` is a stable
            # operation, meaning that multiple passes can be made on the list using different
            # criteria. The only catch is that the passes must be made in reverse order so that
            # the "first" sort field with the most priority ends up getting applied last.
            for sort_field in sort_fields[::-1]:
                if sort_field.startswith('-'):
                    reverse = True
                    sort_field = sort_field[1:]
                else:
                    reverse = False
                if sort_field.startswith('!'):
                    # "!N" marks a virtual column, referenced by its 0-based index.
                    key_function = data_getter_custom
                    sort_field = int(sort_field[1:])
                else:
                    key_function = data_getter_orm
                try:
                    object_list.sort(key=key_function(sort_field), reverse=reverse)
                except TypeError as err:
                    # Mixed/unorderable types (e.g. None vs str on Python 3): log and skip.
                    log.error("Unable to sort on {0} - {1}".format(sort_field, err))
            object_list._dtv_unpaged_total = len(object_list)
        object_list._dtv_total_initial_record_count = total_initial_record_count
        return object_list
    def get_datatable_context_name(self):
        """ Name of the context variable holding the table structure. """
        return self.datatable_context_name
    def get_datatable(self):
        """
        Returns the helper object that can be used in the template to render the datatable skeleton.
        """
        return self.get_datatable_structure()
    def get_context_data(self, **kwargs):
        """ Adds the datatable structure object to the template context. """
        context = super(DatatableMixin, self).get_context_data(**kwargs)
        context[self.get_datatable_context_name()] = self.get_datatable()
        return context
    # Ajax execution methods
    def get_ajax(self, request, *args, **kwargs):
        """
        Called in place of normal ``get()`` when accessed via AJAX.
        """
        object_list = self.get_object_list()
        total = object_list._dtv_total_initial_record_count
        filtered_total = object_list._dtv_unpaged_total
        response_data = self.get_json_response_object(object_list, total, filtered_total)
        response = HttpResponse(self.serialize_to_json(response_data),
                                content_type="application/json")
        return response
    def get_json_response_object(self, object_list, total, filtered_total):
        """
        Returns the JSON-compatible dictionary that will be serialized for an AJAX response.
        The value names are in the form "s~" for strings, "i~" for integers, and "a~" for arrays,
        if you're unfamiliar with the old C-style jargon used in dataTables.js. "aa~" means
        "array of arrays". In some instances, the author uses "ao~" for "array of objects", an
        object being a javascript dictionary.
        """
        object_list_page = self.paginate_object_list(object_list)
        response_obj = {
            # Echoed back so the client can match responses to requests.
            'sEcho': self.request.GET.get('sEcho', None),
            'iTotalRecords': total,
            'iTotalDisplayRecords': filtered_total,
            'aaData': [self.get_record_data(obj) for obj in object_list_page],
        }
        return response_obj
    def paginate_object_list(self, object_list):
        """
        If page_length is specified in the options or AJAX request, the result list is shortened to
        the correct offset and length. Paged or not, the finalized object_list is then returned.
        """
        options = self._get_datatable_options()
        # Narrow the results to the appropriate page length for serialization
        if options['page_length'] != -1:
            i_begin = options['start_offset']
            i_end = options['start_offset'] + options['page_length']
            object_list = object_list[i_begin:i_end]
        return object_list
    def serialize_to_json(self, response_data):
        """ Returns the JSON string for the compiled data object. """
        indent = None
        if settings.DEBUG:
            # Pretty-print in development for easier inspection.
            indent = 4
        return json.dumps(response_data, indent=indent)
    def get_record_data(self, obj):
        """
        Returns a list of column data intended to be passed directly back to dataTables.js.
        Each column generates a 2-tuple of data. [0] is the data meant to be displayed to the client
        and [1] is the data in plain-text form, meant for manual searches. One wouldn't want to
        include HTML in [1], for example.
        """
        options = self._get_datatable_options()
        data = {
            'DT_RowId': obj.pk,
        }
        for i, name in enumerate(options['columns']):
            # [0] selects the rich (display) value of the 2-tuple.
            column_data = self.get_column_data(i, name, obj)[0]
            if six.PY2 and isinstance(column_data, str):  # not unicode
                column_data = column_data.decode('utf-8')
            data[str(i)] = six.text_type(column_data)
        return data
    def get_column_data(self, i, name, instance):
        """ Finds the backing method for column ``name`` and returns the generated data. """
        column = get_field_definition(name)
        is_custom, f = self._get_resolver_method(i, column)
        if is_custom:
            args, kwargs = self._get_preloaded_data(instance)
            try:
                # Give custom resolvers the default-rendered value for convenience.
                kwargs['default_value'] = self._get_column_data_default(instance, column)[1]
            except AttributeError:
                kwargs['default_value'] = None
            kwargs['field_data'] = name
            kwargs['view'] = self
            values = f(instance, *args, **kwargs)
        else:
            values = f(instance, column)
        if not isinstance(values, (tuple, list)):
            if six.PY2:
                if isinstance(values, str):  # not unicode
                    values = values.decode('utf-8')
                else:
                    values = unicode(values)
            # Derive the plain-text companion value by stripping tags.
            values = (values, re.sub(r'<[^>]+>', '', six.text_type(values)))
        return values
    def preload_record_data(self, instance):
        """
        An empty hook for letting the view do something with ``instance`` before column lookups are
        called against the object. The tuple of items returned will be passed as positional
        arguments to any of the ``get_column_FIELD_NAME_data()`` methods.
        """
        return ()
    def _get_preloaded_data(self, instance):
        """
        Fetches value from ``preload_record_data()``.
        If a single value is returned and it is not a dict, list or tuple, it is made into a tuple.
        The tuple will be supplied to the resolved method as ``*args``.
        If the returned value is already a list/tuple, it will also be sent as ``*args``.
        If the returned value is a dict, it will be sent as ``**kwargs``.
        The two types cannot be mixed.
        """
        preloaded_data = self.preload_record_data(instance)
        if isinstance(preloaded_data, dict):
            preloaded_args = ()
            preloaded_kwargs = preloaded_data
        elif isinstance(preloaded_data, (tuple, list)):
            preloaded_args = preloaded_data
            preloaded_kwargs = {}
        else:
            preloaded_args = (preloaded_data,)
            preloaded_kwargs = {}
        return preloaded_args, preloaded_kwargs
    def _get_resolver_method(self, i, column):
        """
        Using a slightly mangled version of the column's name (explained below) each column's value
        is derived.
        Each field can generate customized data by defining a method on the view called either
        "get_column_FIELD_NAME_data" or "get_column_INDEX_data".
        If the FIELD_NAME approach is used, the name is the raw field name (e.g., "street_name") or
        else the friendly representation defined in a 2-tuple such as
        ("Street name", "subdivision__home__street_name"), where the name has non-alphanumeric
        characters stripped to single underscores. For example, the friendly name
        "Region: Subdivision Type" would convert to "Region_Subdivision_Type", requiring the method
        name "get_column_Region_Subdivision_Type_data".
        Alternatively, if the INDEX approach is used, a method will be fetched called
        "get_column_0_data", or otherwise using the 0-based index of the column's position as
        defined in the view's ``datatable_options['columns']`` setting.
        Finally, if a third element is defined in the tuple, it will be treated as the function or
        name of a member attribute which will be used directly.

        Returns a 2-tuple ``(is_custom, callable)``.
        """
        callback = column.callback
        if callback:
            if callable(callback):
                return True, callback
            # Callback given by name: resolve it as a view attribute.
            return True, getattr(self, callback)
        # Treat the 'nice name' as the starting point for looking up a method
        name = force_text(column.pretty_name, errors="ignore")
        if not name:
            name = column.fields[0]
        mangled_name = re.sub(r'[\W_]+', '_', force_text(name))
        f = getattr(self, 'get_column_%s_data' % mangled_name, None)
        if f:
            return True, f
        f = getattr(self, 'get_column_%d_data' % i, None)
        if f:
            return True, f
        return False, self._get_column_data_default
    def _get_column_data_default(self, instance, column, *args, **kwargs):
        """ Default mechanism for resolving ``column`` through the model instance ``instance``. """
        def chain_lookup(obj, bit):
            # One step of a "a__b__c" traversal; missing attributes resolve to None.
            try:
                value = getattr(obj, bit)
            except (AttributeError, ObjectDoesNotExist):
                value = None
            else:
                if callable(value):
                    if isinstance(value, Manager):
                        pass
                    elif not hasattr(value, 'alters_data') or value.alters_data is not True:
                        # Only call methods that are safe (don't alter data).
                        value = value()
            return value
        values = []
        for field_name in column.fields:
            value = reduce(chain_lookup, [instance] + field_name.split('__'))
            if isinstance(value, Model):
                value = six.text_type(value)
            if value is not None:
                values.append(value)
        if len(values) == 1:
            value = values[0]
        else:
            # Multiple source fields for one column are space-joined.
            value = u' '.join(map(six.text_type, values))
        # Same value serves as both rich and plain representations.
        return value, value
class XEditableMixin(object):
    """
    Adds support for the x-editable in-place editing protocol: GET requests
    can ask for a field's choices list, and POST requests update a single
    field on a single object.
    """
    xeditable_form_class = XEditableUpdateForm
    xeditable_fieldname_param = 'xeditable_field'  # GET parameter name used for choices ajax
    def get(self, request, *args, **kwargs):
        """ Introduces the ``ensure_csrf_cookie`` decorator and handles xeditable choices ajax. """
        if request.GET.get(self.xeditable_fieldname_param):
            return self.get_ajax_xeditable_choices(request, *args, **kwargs)
        # Doing this in the method body at runtime instead of at declaration-time helps prevent
        # collisions of other subclasses also trying to decorate their own get() methods.
        method = super(XEditableMixin, self).get
        method = ensure_csrf_cookie(method)
        return method(request, *args, **kwargs)
    def get_ajax_xeditable_choices(self, request, *args, **kwargs):
        """ AJAX GET handler for xeditable queries asking for field choice lists. """
        field_name = request.GET[self.xeditable_fieldname_param]
        # Sanitize the requested field name by limiting valid names to the datatable_options columns
        columns = self._get_datatable_options()['columns']
        for name in columns:
            if isinstance(name, (list, tuple)):
                # 2-tuple columns store the ORM field name in position [1].
                name = name[1]
            if name == field_name:
                break
        else:
            # No column matched: refuse the request.
            return HttpResponseBadRequest()
        # NOTE(review): Meta.get_field_by_name() is the old meta API
        # (removed in newer Django) - confirm the supported Django range.
        field = self.get_model()._meta.get_field_by_name(field_name)[0]
        choices = self.get_field_choices(field, field_name)
        return HttpResponse(json.dumps(choices))
    def post(self, request, *args, **kwargs):
        """
        Validates an x-editable update form and applies the change, returning
        a JSON status payload (404 for a missing object, 400 for bad input).
        """
        self.object_list = None
        form = self.get_xeditable_form(self.get_xeditable_form_class())
        if form.is_valid():
            obj = self.get_update_object(form)
            if obj is None:
                data = json.dumps({
                    'status': 'error',
                    'message': "Object does not exist."
                })
                return HttpResponse(data, content_type="application/json", status=404)
            return self.update_object(form, obj)
        else:
            data = json.dumps({
                'status': 'error',
                'message': "Invalid request",
                'form_errors': form.errors,
            })
            return HttpResponse(data, content_type="application/json", status=400)
    def get_xeditable_form_class(self):
        """ Form class used for validating x-editable updates. """
        return self.xeditable_form_class
    def get_xeditable_form_kwargs(self):
        """ Keyword arguments for instantiating the x-editable form. """
        kwargs = {
            'model': self.get_queryset().model,
        }
        if self.request.method in ('POST', 'PUT'):
            kwargs.update({
                'data': self.request.POST,
            })
        return kwargs
    def get_xeditable_form(self, form_class):
        """ Instantiates the x-editable form with the computed kwargs. """
        return form_class(**self.get_xeditable_form_kwargs())
    def get_update_object(self, form):
        """
        Retrieves the target object based on the update form's ``pk`` and the table's queryset.
        """
        pk = form.cleaned_data['pk']
        queryset = self.get_queryset()
        try:
            obj = queryset.get(pk=pk)
        except queryset.model.DoesNotExist:
            obj = None
        return obj
    def update_object(self, form, obj):
        """ Saves the new value to the target object. """
        field_name = form.cleaned_data['name']
        value = form.cleaned_data['value']
        setattr(obj, field_name, value)
        save_kwargs = {}
        if CAN_UPDATE_FIELDS:
            # Limit the UPDATE to the single changed column where supported.
            save_kwargs['update_fields'] = [field_name]
        obj.save(**save_kwargs)
        data = json.dumps({
            'status': 'success',
        })
        return HttpResponse(data, content_type="application/json")
    def get_field_choices(self, field, field_name):
        """ Returns the valid choices for ``field``. ``field_name`` is given for convenience. """
        if self.request.GET.get('select2'):
            # select2 expects {id, text} pairs instead of {value, text}.
            names = ['id', 'text']
        else:
            names = ['value', 'text']
        return [dict(zip(names, choice)) for choice in field.choices]
class DatatableView(DatatableMixin, ListView):
    """ A ``ListView`` served through the datatable AJAX machinery. """
    pass
class XEditableDatatableView(XEditableMixin, DatatableView):
    """ ``DatatableView`` with x-editable in-place field updates enabled. """
    pass
| apache-2.0 |
joshcartme/mezzanine | mezzanine/core/templatetags/mezzanine_tags.py | 3 | 24686 | from __future__ import absolute_import, division, unicode_literals
from future.builtins import int, open, str
from hashlib import md5
import os
try:
from urllib.parse import quote, unquote
except ImportError:
from urllib import quote, unquote
from django.apps import apps
from django.contrib import admin
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse, resolve, NoReverseMatch
from django.db.models import Model
from django.template import Context, Node, Template, TemplateSyntaxError
from django.template.base import (TOKEN_BLOCK, TOKEN_COMMENT,
TOKEN_TEXT, TOKEN_VAR, TextNode)
from django.template.defaultfilters import escape
from django.template.loader import get_template
from django.utils import translation
from django.utils.html import strip_tags
from django.utils.text import capfirst
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.forms import get_edit_form
from mezzanine.utils.cache import nevercache_token, cache_installed
from mezzanine.utils.html import decode_entities
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.sites import current_site_id, has_site_permission
from mezzanine.utils.urls import admin_url
from mezzanine.utils.views import is_editable
from mezzanine import template
register = template.Library()
if "compressor" in settings.INSTALLED_APPS:
@register.tag
def compress(parser, token):
"""
Shadows django-compressor's compress tag so it can be
loaded from ``mezzanine_tags``, allowing us to provide
a dummy version when django-compressor isn't installed.
"""
from compressor.templatetags.compress import compress
return compress(parser, token)
else:
@register.to_end_tag
def compress(parsed, context, token):
"""
Dummy tag for fallback when django-compressor isn't installed.
"""
return parsed
# Register a real or pass-through ``nevercache`` tag depending on whether
# Mezzanine's cache middleware is configured.
if cache_installed():
    @register.tag
    def nevercache(parser, token):
        """
        Tag for two phased rendering. Converts enclosed template
        code and content into text, which gets rendered separately
        in ``mezzanine.core.middleware.UpdateCacheMiddleware``.
        This is to bypass caching for the enclosed code and content.
        """
        text = []
        end_tag = "endnevercache"
        # How to re-wrap each token type back into template source text.
        tag_mapping = {
            TOKEN_TEXT: ("", ""),
            TOKEN_VAR: ("{{", "}}"),
            TOKEN_BLOCK: ("{%", "%}"),
            TOKEN_COMMENT: ("{#", "#}"),
        }
        delimiter = nevercache_token()
        while parser.tokens:
            token = parser.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == end_tag:
                # Emit the captured source wrapped in delimiters for the
                # middleware's second rendering phase.
                return TextNode(delimiter + "".join(text) + delimiter)
            start, end = tag_mapping[token.token_type]
            text.append("%s%s%s" % (start, token.contents, end))
        # Ran out of tokens without finding the end tag.
        parser.unclosed_block_tag(end_tag)
else:
    @register.to_end_tag
    def nevercache(parsed, context, token):
        """
        Dummy fallback ``nevercache`` for when caching is not
        configured.
        """
        return parsed
@register.simple_tag(takes_context=True)
def fields_for(context, form, template="includes/form_fields.html"):
    """
    Renders fields for a form with an optional template choice.
    """
    context["form_for_fields"] = form
    fields_template = get_template(template)
    return fields_template.render(Context(context))
@register.inclusion_tag("includes/form_errors.html", takes_context=True)
def errors_for(context, form):
"""
Renders an alert if the form has any errors.
"""
context["form"] = form
return context
@register.filter
def sort_by(items, attr):
    """
    General sort filter - sorts by either attribute or key.

    Attribute access is attempted first; failing that, subscript (key)
    access is tried. If the item supports neither, the original
    ``AttributeError`` is raised for a clearer failure message.
    """
    def sort_key(entry):
        try:
            return getattr(entry, attr)
        except AttributeError:
            pass
        try:
            return entry[attr]
        except TypeError:
            # Not subscriptable either - re-trigger the attribute error.
            return getattr(entry, attr)
    return sorted(items, key=sort_key)
@register.filter
def is_installed(app_name):
    """
    Returns ``True`` if the given app name is in the
    ``INSTALLED_APPS`` setting.

    .. deprecated:: use the ``{% ifinstalled %}`` tag instead.
    """
    from warnings import warn
    warn("The is_installed filter is deprecated. Please use the tag "
         "{% ifinstalled appname %}{% endifinstalled %}")
    return app_name in settings.INSTALLED_APPS
@register.tag
def ifinstalled(parser, token):
    """
    Old-style ``if`` tag that renders contents if the given app is
    installed. The main use case is:
    {% ifinstalled app_name %}
    {% include "app_name/template.html" %}
    {% endifinstalled %}
    so we need to manually pull out all tokens if the app isn't
    installed, since if we used a normal ``if`` tag with a False arg,
    the include tag will still try and find the template to include.
    """
    try:
        tag, app = token.split_contents()
    except ValueError:
        raise TemplateSyntaxError("ifinstalled should be in the form: "
                                  "{% ifinstalled app_name %}"
                                  "{% endifinstalled %}")
    end_tag = "end" + tag
    # Track nesting so an inner {% ifinstalled %} doesn't end the outer one.
    unmatched_end_tag = 1
    if app.strip("\"'") not in settings.INSTALLED_APPS:
        # App not installed: consume everything up to our matching end tag
        # so enclosed tags (e.g. {% include %}) are never compiled.
        while unmatched_end_tag:
            token = parser.tokens.pop(0)
            if token.token_type == TOKEN_BLOCK:
                block_name = token.contents.split()[0]
                if block_name == tag:
                    unmatched_end_tag += 1
                if block_name == end_tag:
                    unmatched_end_tag -= 1
        # Push the end token back so the parse()/delete below still works.
        parser.tokens.insert(0, token)
    nodelist = parser.parse((end_tag,))
    parser.delete_first_token()
    class IfInstalledNode(Node):
        # Renders whatever survived the token filtering above.
        def render(self, context):
            return nodelist.render(context)
    return IfInstalledNode()
@register.render_tag
def set_short_url_for(context, token):
    """
    Sets the ``short_url`` attribute of the given model for share
    links in the template.
    """
    variable_name = token.split_contents()[1]
    instance = context[variable_name]
    instance.set_short_url()
    return ""
@register.simple_tag
def gravatar_url(email, size=32):
    """
    Return the full URL for a Gravatar given an email hash.
    """
    email_hash = md5(email.lower().encode("utf-8")).hexdigest()
    return "//www.gravatar.com/avatar/%s?s=%s&d=identicon&r=PG" % (
        email_hash, size)
@register.to_end_tag
def metablock(parsed):
    """
    Remove HTML tags, entities and superfluous characters from
    meta blocks.
    """
    collapsed = parsed.replace("\n", "")
    collapsed = " ".join(collapsed.split()).replace(" ,", ",")
    return escape(strip_tags(decode_entities(collapsed)))
@register.inclusion_tag("includes/pagination.html", takes_context=True)
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
"""
Include the pagination template and data for persisting querystring
in pagination links. Can also contain a comma separated string of
var names in the current querystring to exclude from the pagination
links, via the ``exclude_vars`` arg.
"""
querystring = context["request"].GET.copy()
exclude_vars = [v for v in exclude_vars.split(",") if v] + [page_var]
for exclude_var in exclude_vars:
if exclude_var in querystring:
del querystring[exclude_var]
querystring = querystring.urlencode()
return {
"current_page": current_page,
"querystring": querystring,
"page_var": page_var,
}
@register.inclusion_tag("includes/search_form.html", takes_context=True)
def search_form(context, search_model_names=None):
"""
Includes the search form with a list of models to use as choices
for filtering the search by. Models should be a string with models
in the format ``app_label.model_name`` separated by spaces. The
string ``all`` can also be used, in which case the models defined
by the ``SEARCH_MODEL_CHOICES`` setting will be used.
"""
if not search_model_names or not settings.SEARCH_MODEL_CHOICES:
search_model_names = []
elif search_model_names == "all":
search_model_names = list(settings.SEARCH_MODEL_CHOICES)
else:
search_model_names = search_model_names.split(" ")
search_model_choices = []
for model_name in search_model_names:
try:
model = apps.get_model(*model_name.split(".", 1))
except LookupError:
pass
else:
verbose_name = model._meta.verbose_name_plural.capitalize()
search_model_choices.append((verbose_name, model_name))
context["search_model_choices"] = sorted(search_model_choices)
return context
@register.simple_tag
def thumbnail(image_url, width, height, upscale=True, quality=95, left=.5,
              top=.5, padding=False, padding_color="#fff"):
    """
    Given the URL to an image, resizes the image using the given width
    and height on the first time it is requested, and returns the URL
    to the new resized image. If width or height are zero then original
    ratio is maintained. When ``upscale`` is False, images smaller than
    the given size will not be grown to fill that size. The given width
    and height thus act as maximum dimensions.
    """
    if not image_url:
        return ""
    try:
        from PIL import Image, ImageFile, ImageOps
    except ImportError:
        # Pillow/PIL unavailable: thumbnailing silently disabled.
        return ""
    # Strip any querystring and make the URL relative to MEDIA_URL.
    image_url = unquote(str(image_url)).split("?")[0]
    if image_url.startswith(settings.MEDIA_URL):
        image_url = image_url.replace(settings.MEDIA_URL, "", 1)
    image_dir, image_name = os.path.split(image_url)
    image_prefix, image_ext = os.path.splitext(image_name)
    filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
    # Encode every option that affects the output into the thumbnail name
    # so each distinct rendering gets its own cached file.
    thumb_name = "%s-%sx%s" % (image_prefix, width, height)
    if not upscale:
        thumb_name += "-no-upscale"
    if left != .5 or top != .5:
        left = min(1, max(0, left))
        top = min(1, max(0, top))
        thumb_name = "%s-%sx%s" % (thumb_name, left, top)
    thumb_name += "-padded-%s" % padding_color if padding else ""
    thumb_name = "%s%s" % (thumb_name, image_ext)
    # `image_name` is used here for the directory path, as each image
    # requires its own sub-directory using its own name - this is so
    # we can consistently delete all thumbnails for an individual
    # image, which is something we do in filebrowser when a new image
    # is written, allowing us to purge any previously generated
    # thumbnails that may match a new image name.
    thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
                             settings.THUMBNAILS_DIR_NAME, image_name)
    if not os.path.exists(thumb_dir):
        try:
            os.makedirs(thumb_dir)
        except OSError:
            # Raced with another request creating the same directory.
            pass
    thumb_path = os.path.join(thumb_dir, thumb_name)
    thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
                              quote(image_name.encode("utf-8")),
                              quote(thumb_name.encode("utf-8")))
    image_url_path = os.path.dirname(image_url)
    if image_url_path:
        thumb_url = "%s/%s" % (image_url_path, thumb_url)
    try:
        thumb_exists = os.path.exists(thumb_path)
    except UnicodeEncodeError:
        # The image that was saved to a filesystem with utf-8 support,
        # but somehow the locale has changed and the filesystem does not
        # support utf-8.
        from mezzanine.core.exceptions import FileSystemEncodingChanged
        raise FileSystemEncodingChanged()
    if thumb_exists:
        # Thumbnail exists, don't generate it.
        return thumb_url
    elif not default_storage.exists(image_url):
        # Requested image does not exist, just return its URL.
        return image_url
    f = default_storage.open(image_url)
    try:
        image = Image.open(f)
    except:
        # Invalid image format.
        # NOTE(review): bare except also swallows unexpected errors here.
        return image_url
    image_info = image.info
    to_width = int(width)
    to_height = int(height)
    from_width = image.size[0]
    from_height = image.size[1]
    if not upscale:
        # Requested dimensions act as maximums only.
        to_width = min(to_width, from_width)
        to_height = min(to_height, from_height)
    # Set dimensions.
    if to_width == 0:
        # Zero width: derive it from the height, keeping aspect ratio.
        to_width = from_width * to_height // from_height
    elif to_height == 0:
        # Zero height: derive it from the width, keeping aspect ratio.
        to_height = from_height * to_width // from_width
    if image.mode not in ("P", "L", "RGBA"):
        try:
            image = image.convert("RGBA")
        except:
            return image_url
    # Required for progressive jpgs.
    ImageFile.MAXBLOCK = 2 * (max(image.size) ** 2)
    # Padding.
    if padding and to_width and to_height:
        # Pad the source onto a canvas matching the target aspect ratio so
        # ImageOps.fit won't crop it.
        from_ratio = float(from_width) / from_height
        to_ratio = float(to_width) / to_height
        pad_size = None
        if to_ratio < from_ratio:
            pad_height = int(to_height * (float(from_width) / to_width))
            pad_size = (from_width, pad_height)
            pad_top = (pad_height - from_height) // 2
            pad_left = 0
        elif to_ratio > from_ratio:
            pad_width = int(to_width * (float(from_height) / to_height))
            pad_size = (pad_width, from_height)
            pad_top = 0
            pad_left = (pad_width - from_width) // 2
        if pad_size is not None:
            pad_container = Image.new("RGBA", pad_size, padding_color)
            pad_container.paste(image, (pad_left, pad_top))
            image = pad_container
    # Create the thumbnail.
    to_size = (to_width, to_height)
    to_pos = (left, top)
    try:
        image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
        image = image.save(thumb_path, filetype, quality=quality, **image_info)
        # Push a remote copy of the thumbnail if MEDIA_URL is
        # absolute.
        if "://" in settings.MEDIA_URL:
            with open(thumb_path, "rb") as f:
                default_storage.save(thumb_url, File(f))
    except Exception:
        # If an error occurred, a corrupted image may have been saved,
        # so remove it, otherwise the check for it existing will just
        # return the corrupted image next time it's requested.
        try:
            os.remove(thumb_path)
        except Exception:
            pass
        return image_url
    return thumb_url
@register.inclusion_tag("includes/editable_loader.html", takes_context=True)
def editable_loader(context):
"""
Set up the required JS/CSS for the in-line editing toolbar and controls.
"""
user = context["request"].user
context["has_site_permission"] = has_site_permission(user)
if settings.INLINE_EDITING_ENABLED and context["has_site_permission"]:
t = get_template("includes/editable_toolbar.html")
context["REDIRECT_FIELD_NAME"] = REDIRECT_FIELD_NAME
try:
context["editable_obj"]
except KeyError:
context["editable_obj"] = context.get("page", None)
context["toolbar"] = t.render(Context(context))
context["richtext_media"] = RichTextField().formfield().widget.media
return context
@register.filter
def richtext_filters(content):
    """
    Takes a value edited via the WYSIWYG editor, and passes it through
    each of the functions specified by the RICHTEXT_FILTERS setting.
    """
    filter_names = settings.RICHTEXT_FILTERS
    if not filter_names:
        # Fall back to the deprecated singular setting if it is defined
        # at all (even with a falsy value), warning about its use.
        _unset = object()
        legacy_name = getattr(settings, "RICHTEXT_FILTER", _unset)
        if legacy_name is not _unset:
            from warnings import warn
            warn("The `RICHTEXT_FILTER` setting is deprecated in favor of "
                 "the new plural setting `RICHTEXT_FILTERS`.")
            filter_names = [legacy_name]
    for filter_name in filter_names:
        filter_func = import_dotted_path(filter_name)
        content = filter_func(content)
    return content
@register.filter
def richtext_filter(content):
    """
    Backwards-compatible alias for ``richtext_filters``: emits a
    deprecation warning, then delegates to the plural version.
    """
    import warnings
    warnings.warn("The `richtext_filter` template tag is deprecated in favor of "
                  "the new plural tag `richtext_filters`.")
    return richtext_filters(content)
@register.to_end_tag
def editable(parsed, context, token):
    """
    Add the required HTML to the parsed content for in-line editing,
    such as the icon and edit form if the object is deemed to be
    editable - either it has an ``editable`` method which returns
    ``True``, or the logged in user has change permissions for the
    model.
    """
    def parse_field(field):
        # Resolve a dotted "var.attr1.attr2" token into (obj, last_attr):
        # the leading name comes from the template context, intermediate
        # attributes are walked (and called if callable).
        field = field.split(".")
        obj = context.get(field.pop(0), None)
        attr = field.pop()
        while field:
            obj = getattr(obj, field.pop(0))
            if callable(obj):
                # Allows {% editable page.get_content_model.content %}
                obj = obj()
        return obj, attr
    fields = [parse_field(f) for f in token.split_contents()[1:]]
    if fields:
        # A single edit form is built, so only keep fields that resolved
        # to attributes of the same object as the first field.
        fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]]
    if not parsed.strip():
        # Nothing between the tags: fall back to rendering the raw
        # attribute values themselves.
        try:
            parsed = "".join([str(getattr(*field)) for field in fields])
        except AttributeError:
            pass
    if settings.INLINE_EDITING_ENABLED and fields and "request" in context:
        obj = fields[0][0]
        if isinstance(obj, Model) and is_editable(obj, context["request"]):
            field_names = ",".join([f[1] for f in fields])
            context["editable_form"] = get_edit_form(obj, field_names)
            context["original"] = parsed
            t = get_template("includes/editable_form.html")
            return t.render(Context(context))
    # Not editable (or editing disabled): return the content unchanged.
    return parsed
@register.simple_tag
def try_url(url_name):
    """
    Deprecated. Behaves like Django's ``url`` template tag but returns
    an empty string instead of raising when the name can't be reversed
    (admin templates reference names that don't resolve in admin tests).
    """
    import warnings
    warnings.warn("try_url is deprecated, use the url tag with the 'as' arg instead.")
    try:
        return reverse(url_name)
    except NoReverseMatch:
        return ""
def admin_app_list(request):
    """
    Adopted from ``django.contrib.admin.sites.AdminSite.index``.
    Returns a list of lists of models grouped and ordered according to
    ``mezzanine.conf.ADMIN_MENU_ORDER``. Called from the
    ``admin_dropdown_menu`` template tag as well as the ``app_list``
    dashboard widget.
    """
    app_dict = {}

    # Model or view --> (group index, group title, item index, item title).
    menu_order = {}
    for (group_index, group) in enumerate(settings.ADMIN_MENU_ORDER):
        group_title, items = group
        group_title = group_title.title()
        for (item_index, item) in enumerate(items):
            # Items may be a plain name, or a (title, name) pair.
            if isinstance(item, (tuple, list)):
                item_title, item = item
            else:
                item_title = None
            menu_order[item] = (group_index, group_title,
                                item_index, item_title)

    # Add all registered models, using group and title from menu order.
    for (model, model_admin) in admin.site._registry.items():
        opts = model._meta
        in_menu = not hasattr(model_admin, "in_menu") or model_admin.in_menu()
        if in_menu and request.user.has_module_perms(opts.app_label):
            perms = model_admin.get_model_perms(request)
            admin_url_name = ""
            if perms["change"]:
                admin_url_name = "changelist"
                change_url = admin_url(model, admin_url_name)
            else:
                change_url = None
            if perms["add"]:
                admin_url_name = "add"
                add_url = admin_url(model, admin_url_name)
            else:
                add_url = None
            # admin_url_name is only non-empty when the user can change
            # or add, ie has some level of access to the model's admin.
            if admin_url_name:
                model_label = "%s.%s" % (opts.app_label, opts.object_name)
                try:
                    app_index, app_title, model_index, model_title = \
                        menu_order[model_label]
                except KeyError:
                    # Not listed in ADMIN_MENU_ORDER: group under the app
                    # label, with index None so it sorts to the end.
                    app_index = None
                    app_title = opts.app_label.title()
                    model_index = None
                    model_title = None
                else:
                    del menu_order[model_label]
                if not model_title:
                    model_title = capfirst(model._meta.verbose_name_plural)
                if app_title not in app_dict:
                    app_dict[app_title] = {
                        "index": app_index,
                        "name": app_title,
                        "models": [],
                    }
                app_dict[app_title]["models"].append({
                    "index": model_index,
                    "perms": model_admin.get_model_perms(request),
                    "name": model_title,
                    "admin_url": change_url,
                    "add_url": add_url
                })

    # Menu may also contain view or url pattern names given as (title, name).
    # Anything still in menu_order at this point was not a registered model.
    for (item_url, item) in menu_order.items():
        app_index, app_title, item_index, item_title = item
        try:
            item_url = reverse(item_url)
        except NoReverseMatch:
            continue
        if app_title not in app_dict:
            app_dict[app_title] = {
                "index": app_index,
                "name": app_title,
                "models": [],
            }
        app_dict[app_title]["models"].append({
            "index": item_index,
            "perms": {"custom": True},
            "name": item_title,
            "admin_url": item_url,
        })

    app_list = list(app_dict.values())
    # Ordered entries sort by their menu index; unordered entries
    # (index None) sort after them (999), alphabetically by name.
    sort = lambda x: (x["index"] if x["index"] is not None else 999, x["name"])
    for app in app_list:
        app["models"].sort(key=sort)
    app_list.sort(key=sort)
    return app_list
@register.inclusion_tag("admin/includes/dropdown_menu.html",
                        takes_context=True)
def admin_dropdown_menu(context):
    """
    Populate the context for the admin dropdown menu navigation with the
    grouped app list and the sites the current user may switch between.
    """
    request = context["request"]
    user = request.user
    if user.is_staff:
        context["dropdown_menu_app_list"] = admin_app_list(request)
        # Superusers can switch to any site; staff are limited to the
        # sites granted via their site permissions.
        site_qs = (Site.objects.all() if user.is_superuser
                   else user.sitepermissions.sites.all())
        context["dropdown_menu_sites"] = list(site_qs)
        context["dropdown_menu_selected_site_id"] = current_site_id()
    return context
@register.inclusion_tag("admin/includes/app_list.html", takes_context=True)
def app_list(context):
    """
    Inject the grouped admin app list into the context for the
    dashboard widget template.
    """
    request = context["request"]
    context["dashboard_app_list"] = admin_app_list(request)
    return context
@register.inclusion_tag("admin/includes/recent_actions.html",
                        takes_context=True)
def recent_actions(context):
    """
    Renders the recent actions list for the admin dashboard widget.
    """
    # The included template queries the admin log itself; only the
    # context (carrying request/user) needs passing through.
    return context
@register.render_tag
def dashboard_column(context, token):
    """
    Render one column of the admin dashboard. The tag's single argument
    indexes into ``mezzanine.conf.DASHBOARD_TAGS``; each "library.tag"
    entry in that column is loaded and rendered in turn.
    """
    index = int(token.split_contents()[1])
    rendered = []
    for entry in settings.DASHBOARD_TAGS[index]:
        # Each entry names a template tag library and a tag within it.
        parts = tuple(entry.split("."))
        column_template = Template("{%% load %s %%}{%% %s %%}" % parts)
        rendered.append(column_template.render(Context(context)))
    return "".join(rendered)
@register.simple_tag(takes_context=True)
def translate_url(context, language):
    """
    Translates the current URL for the given language code, eg:

        {% translate_url de %}

    Returns an empty string when no request is available. The active
    language is always restored before returning.
    """
    try:
        request = context["request"]
    except KeyError:
        return ""
    view = resolve(request.path)
    current_language = translation.get_language()
    translation.activate(language)
    try:
        try:
            url = reverse(view.func, args=view.args, kwargs=view.kwargs)
        except NoReverseMatch:
            try:
                url_name = (view.url_name if not view.namespace
                            else '%s:%s' % (view.namespace, view.url_name))
                url = reverse(url_name, args=view.args, kwargs=view.kwargs)
            except NoReverseMatch:
                # Last resort: try the admin namespace.
                url_name = "admin:" + view.url_name
                url = reverse(url_name, args=view.args, kwargs=view.kwargs)
    finally:
        # BUGFIX: restore the active language even when every reverse
        # attempt above raises - previously a NoReverseMatch escaping
        # here left the switched language active for the rest of the
        # request/thread.
        translation.activate(current_language)
    query_string = request.META["QUERY_STRING"]
    if query_string:
        url += "?" + query_string
    return url
| bsd-2-clause |
volatilityfoundation/volatility | volatility/renderers/html.py | 3 | 2358 | import StringIO
from volatility.renderers.basic import Renderer
try:
import ujson as json
except ImportError:
import json
__author__ = 'mike'
class HTMLRenderer(Renderer):
    """
    Renders a treegrid as a self-contained HTML page that displays the
    data with jQuery DataTables (loaded from public CDNs).
    """

    def __init__(self):
        pass

    def render(self, outfd, data):
        """Renders the treegrid to HTML"""
        column_titles = ", \n".join(["{ \"title\": \"" + column.name + "\"}" for column in data.columns])
        # BUGFIX (naming): the buffer was previously named ``json``,
        # shadowing the json module imported at the top of the file.
        json_buffer = StringIO.StringIO()
        JSONRenderer().render(json_buffer, data)
        outfd.write("""<html>
<head>
<link rel="stylesheet" type="text/css" href="http://cdn.datatables.net/1.10.2/css/jquery.dataTables.css">
<script type="text/javascript" language="javascript" src="http://code.jquery.com/jquery-1.11.1.min.js"></script>
<script type="text/javascript" language="javascript" src="http://cdn.datatables.net/1.10.2/js/jquery.dataTables.min.js"></script>
<script type="text/javascript" class="init">
var dataSet = """ + json_buffer.getvalue() + """;
$(document).ready(function() {
$('#page').html( '<table cellpadding="0" cellspacing="0" border="0" class="display" id="data"></table>' );
$('#data').dataTable( {
"data": dataSet['rows'],
"columns": [""" + column_titles + """]
} );
} );
</script>
</head>
<body><div id="page"></div></body></html>""" + "\n")
class JSONRenderer(Renderer):
    """Serialises a (flat) treegrid as a JSON object of columns and rows."""

    def render_row(self, node, accumulator):
        # Visitor callback: append this node's value tuple to the
        # accumulated row list.
        return accumulator + [node.values]

    def render(self, outfd, data):
        """Renderers a treegrid as columns/row items in JSON format"""
        # TODO: Implement tree structure in JSON
        if data.max_depth() > 1:
            raise NotImplementedError("JSON output for trees has not yet been implemented")
        # TODO: Output (basic) type information in JSON
        columns = [column.name for column in data.columns]
        rows = data.visit(None, self.render_row, [])
        payload = {"columns": columns, "rows": rows}
        return outfd.write(json.dumps(payload, ensure_ascii=False))
| gpl-2.0 |
ramsateesh/designate | tools/install_venv.py | 11 | 2341 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import install_venv_common as install_venv # noqa
def print_help(venv, root):
    """
    Print post-install instructions for activating the virtualenv.

    :param venv: path to the created virtualenv directory.
    :param root: project root containing the ``tools`` directory.
    """
    # BUGFIX (naming): the local was previously named ``help``,
    # shadowing the ``help`` builtin.
    help_text = """
Designate development environment setup is complete.
Designate development uses virtualenv to track and manage Python
dependencies while in development and testing.
To activate the Designate virtualenv for the extent of your current shell
session you can run:
$ source %s/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ %s/tools/with_venv.sh <your command>
"""
    print(help_text % (venv, root))
def main(argv):
    """
    Create the Designate development virtualenv and install its
    dependencies, honouring the ``tools_path`` and ``venv`` environment
    variable overrides.
    """
    default_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    root = os.environ.get('tools_path') or default_root
    venv = os.environ.get('venv') or os.path.join(root, '.venv')

    pip_requires = os.path.join(root, 'requirements.txt')
    test_requires = os.path.join(root, 'test-requirements.txt')
    py_version = "python%s.%s" % sys.version_info[:2]

    installer = install_venv.InstallVenv(root, venv, pip_requires,
                                         test_requires, py_version,
                                         'Designate')
    options = installer.parse_args(argv)
    installer.check_python_version()
    installer.check_dependencies()
    installer.create_virtualenv(no_site_packages=options.no_site_packages)
    installer.install_dependencies()
    print_help(venv, root)
# Script entry point: pass the full argv (including the program name,
# which InstallVenv's option parser expects) through to main().
if __name__ == '__main__':
    main(sys.argv)
| apache-2.0 |
llvm-mirror/lldb | packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-stl/libcxx/optional/TestDataFormatterLibcxxOptional.py | 5 | 2676 | """
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class LibcxxOptionalDataFormatterTestCase(TestBase):
    # Test that lldb's libc++ data formatter renders std::optional
    # values (engaged/unengaged, of int, vector and string) correctly.

    mydir = TestBase.compute_mydir(__file__)

    @add_test_categories(["libc++"])
    ## We are skipping clang version less that 5.0 since this test requires -std=c++17
    @skipIf(oslist=no_match(["macosx"]), compiler="clang", compiler_version=['<', '5.0'])
    ## We are skipping gcc version less that 5.1 since this test requires -std=c++17
    @skipIf(compiler="gcc", compiler_version=['<', '5.1'])
    def test_with_run_command(self):
        """Test that that file and class static variables display correctly."""
        self.build()
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)

        # Set a breakpoint on every source line marked "break here".
        bkpt = self.target().FindBreakpointByID(
            lldbutil.run_break_set_by_source_regexp(
                self, "break here"))

        self.runCmd("run", RUN_SUCCEEDED)

        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])

        self.runCmd( "frame variable has_optional" )

        output = self.res.GetOutput()

        ## The variable has_optional tells us if the test program
        ## detected we have a sufficient libc++ version to support optional
        ## false means we do not and therefore should skip the test
        if output.find("(bool) has_optional = false") != -1 :
            self.skipTest( "Optional not supported" )

        # Continue past the feature-detection stop to the values we
        # actually want to inspect.
        lldbutil.continue_to_breakpoint(self.process(), bkpt)

        self.expect("frame variable number_not_engaged",
                    substrs=['Has Value=false'])

        self.expect("frame variable number_engaged",
                    substrs=['Has Value=true',
                             'Value = 42',
                             '}'])

        self.expect("frame var numbers",
                    substrs=['(optional_int_vect) numbers = Has Value=true {',
                             'Value = size=4 {',
                             '[0] = 1',
                             '[1] = 2',
                             '[2] = 3',
                             '[3] = 4',
                             '}',
                             '}'])

        self.expect("frame var ostring",
                    substrs=['(optional_string) ostring = Has Value=true {',
                             'Value = "hello"',
                             '}'])
| apache-2.0 |
alanjw/GreenOpenERP-Win-X86 | python/Lib/site-packages/unidecode/x027.py | 87 | 3785 | data = (
'[?]', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'[?]', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| agpl-3.0 |
SerpentCS/odoo | addons/l10n_th/__init__.py | 893 | 1045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
giocastagno/I.W._Delpuppo_Kopech_Castagno | turismo/sitio/migrations/0021_auto_20170525_0117.py | 1 | 5067 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-25 01:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); only alters the choices
    # of the ``localidad`` CharField on perfil_usuario.

    dependencies = [
        ('sitio', '0020_perfil_usuario_localidad'),
    ]

    operations = [
        migrations.AlterField(
            model_name='perfil_usuario',
            name='localidad',
            # Closed list of Santa Fe (Argentina) localities; each pair
            # is (stored value, human-readable label).
            field=models.CharField(choices=[
                ('Acebal', 'Acebal'), ('Alcorta', 'Alcorta'), ('Alejandra', 'Alejandra'),
                ('Álvarez', 'Álvarez'), ('Alvear', 'Alvear'), ('Arequito', 'Arequito'),
                ('Armstrong', 'Armstrong'), ('Arroyo Seco', 'Arroyo Seco'), ('Arteaga', 'Arteaga'),
                ('Ataliva', 'Ataliva'), ('Avellaneda', 'Avellaneda'), ('Barrancas', 'Barrancas'),
                ('Barrio Arroyo del Medio', 'Barrio Arroyo del Medio'), ('Barrio Mitre', 'Barrio Mitre'),
                ('Barrios Acapulco y Veracruz', 'Barrios Acapulco y Veracruz'), ('Berabevú', 'Berabevú'),
                ('Bigand', 'Bigand'), ('Bombal', 'Bombal'), ('Calchaquí', 'Calchaquí'),
                ('Capitán Bermúdez', 'Capitán Bermúdez'), ('Carcarañá', 'Carcarañá'),
                ('Carlos Pellegrini', 'Carlos Pellegrini'), ('Casilda', 'Casilda'), ('Cayastá', 'Cayastá'),
                ('Cañada Rosquín', 'Cañada Rosquín'), ('Cañada de Gómez', 'Cañada de Gómez'),
                ('Centeno', 'Centeno'), ('Ceres', 'Ceres'), ('Chabás', 'Chabás'), ('Chovet', 'Chovet'),
                ('Coronda', 'Coronda'), ('Correa', 'Correa'), ('El Trébol', 'El Trébol'),
                ('Elortondo', 'Elortondo'), ('Empalme Villa Constitución', 'Empalme Villa Constitución'),
                ('Esperanza', 'Esperanza'), ('Estación Clucellas', 'Estación Clucellas'),
                ('Fighiera', 'Fighiera'), ('Firmat', 'Firmat'), ('Florencia', 'Florencia'),
                ('Franck', 'Franck'), ('Frontera', 'Frontera'), ('Fuentes', 'Fuentes'),
                ('General Lagos', 'General Lagos'), ('Gobernador Crespo', 'Gobernador Crespo'),
                ('Gálvez', 'Gálvez'), ('Colonia Hansen', 'Colonia Hansen'), ('Helvecia', 'Helvecia'),
                ('Hersilia', 'Hersilia'), ('Hughes', 'Hughes'), ('Humberto Primo', 'Humberto Primo'),
                ('Humboldt', 'Humboldt'), ('Ibarlucea', 'Ibarlucea'), ('Juan de Garay', 'Juan de Garay'),
                ('La Gallareta', 'La Gallareta'), ('Laguna Paiva', 'Laguna Paiva'),
                ('Las Parejas', 'Las Parejas'), ('Las Rosas', 'Las Rosas'), ('Las Toscas', 'Las Toscas'),
                ('Lehmann', 'Lehmann'), ('Llambi Campbell', 'Llambi Campbell'),
                ('Los Quirquinchos', 'Los Quirquinchos'), ('Maciel', 'Maciel'), ('Malabrigo', 'Malabrigo'),
                ('Margarita', 'Margarita'), ('María Juana', 'María Juana'), ('María Susana', 'María Susana'),
                ('María Teresa', 'María Teresa'), ('Melincué', 'Melincué'), ('Moisés Ville', 'Moisés Ville'),
                ('Monte Oscuridad', 'Monte Oscuridad'), ('Monte Vera', 'Monte Vera'),
                ('Montes de Oca', 'Montes de Oca'), ('Murphy', 'Murphy'), ('Máximo Paz', 'Máximo Paz'),
                ('Nelson', 'Nelson'), ('Oliveros', 'Oliveros'), ('Paraná', 'Paraná'),
                ('Peyrano', 'Peyrano'), ('Piamonte', 'Piamonte'), ('Pilar', 'Pilar'),
                ('Progreso', 'Progreso'), ('Pueblo Esther', 'Pueblo Esther'),
                ('Puerto Gaboto', 'Puerto Gaboto'), ('Pujato', 'Pujato'), ('Rafaela', 'Rafaela'),
                ('Reconquista', 'Reconquista'), ('Ricardone', 'Ricardone'), ('Rivadavia', 'Rivadavia'),
                ('Romang', 'Romang'), ('Rosario', 'Rosario'), ('Rufino', 'Rufino'),
                ('San Antonio de Obligado', 'San Antonio de Obligado'),
                ('San Carlos Centro', 'San Carlos Centro'), ('San Carlos Sud', 'San Carlos Sud'),
                ('San Cristóbal', 'San Cristóbal'), ('San Genaro', 'San Genaro'),
                ('San Genaro Norte', 'San Genaro Norte'), ('San Gregorio', 'San Gregorio'),
                ('San Guillermo', 'San Guillermo'), ('San Javier', 'San Javier'),
                ('San Jerónimo Norte', 'San Jerónimo Norte'), ('San Jerónimo Sud', 'San Jerónimo Sud'),
                ('San Jorge', 'San Jorge'), ('San José de la Esquina', 'San José de la Esquina'),
                ('San Justo', 'San Justo'), ('San Martín de las Escobas', 'San Martín de las Escobas'),
                ('San Vicente', 'San Vicente'), ('Sancti Spiritu', 'Sancti Spiritu'),
                ('Santa Clara de Buena Vista', 'Santa Clara de Buena Vista'),
                ('Santa Clara de Saguier', 'Santa Clara de Saguier'), ('Santa Emilia', 'Santa Emilia'),
                ('Santa Fe', 'Santa Fe'), ('Santa Isabel', 'Santa Isabel'),
                ('Santa María Norte', 'Santa María Norte'),
                ('Santa Rosa de Calchines', 'Santa Rosa de Calchines'), ('Santa Teresa', 'Santa Teresa'),
                ('Sastre y Ortiz', 'Sastre y Ortiz'), ('Serodino', 'Serodino'),
                ('Soutomayor', 'Soutomayor'), ('Suardi', 'Suardi'), ('Sunchales', 'Sunchales'),
                ('Tacuarendí', 'Tacuarendí'), ('Teodelina', 'Teodelina'), ('Timbúes', 'Timbúes'),
                ('Tortugas', 'Tortugas'), ('Tostado', 'Tostado'), ('Totoras', 'Totoras'),
                ('Venado Tuerto', 'Venado Tuerto'), ('Vera', 'Vera'),
                ('Vicente Echeverría', 'Vicente Echeverría'), ('Videla', 'Videla'),
                ('Villa Cañás', 'Villa Cañás'), ('Villa Constitución', 'Villa Constitución'),
                ('Villa Eloísa', 'Villa Eloísa'), ('Villa Guillermina', 'Villa Guillermina'),
                ('Villa Minetti', 'Villa Minetti'), ('Villa Mugueta', 'Villa Mugueta'),
                ('Villa Ocampo', 'Villa Ocampo'), ('Villa Trinidad', 'Villa Trinidad'),
                ('Wheelwright', 'Wheelwright'), ('Zavalla', 'Zavalla')], max_length=100),
        ),
    ]
| mit |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.4/django/contrib/formtools/preview.py | 315 | 5754 | """
Formtools Preview application.
"""
from django.http import Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.crypto import constant_time_compare
from django.contrib.formtools.utils import form_hmac
AUTO_ID = 'formtools_%s' # Each form here uses this as its auto_id parameter.
class FormPreview(object):
    """
    Two-stage form handler: stage one validates POSTed data and shows a
    preview page protected by an HMAC of the submitted data; stage two
    re-validates, verifies the data wasn't tampered with, and hands the
    cleaned data to ``done()``.
    """
    preview_template = 'formtools/preview.html'
    form_template = 'formtools/form.html'

    # METHODS SUBCLASSES SHOULDN'T OVERRIDE ###################################

    def __init__(self, form):
        # form should be a Form class, not an instance.
        self.form, self.state = form, {}

    def __call__(self, request, *args, **kwargs):
        # The hidden "stage" POST field selects the step; any other
        # value (including plain GETs) falls back to the preview stage.
        stage = {'1': 'preview', '2': 'post'}.get(request.POST.get(self.unused_name('stage')), 'preview')
        self.parse_params(*args, **kwargs)
        try:
            # Dispatch to e.g. preview_get / preview_post / post_post.
            method = getattr(self, stage + '_' + request.method.lower())
        except AttributeError:
            raise Http404
        return method(request)

    def unused_name(self, name):
        """
        Given a first-choice name, adds an underscore to the name until it
        reaches a name that isn't claimed by any field in the form.
        This is calculated rather than being hard-coded so that no field names
        are off-limits for use in the form.
        """
        while 1:
            try:
                f = self.form.base_fields[name]
            except KeyError:
                break # This field name isn't being used by the form.
            name += '_'
        return name

    def preview_get(self, request):
        "Displays the form"
        f = self.form(auto_id=self.get_auto_id(), initial=self.get_initial(request))
        return render_to_response(self.form_template,
                                  self.get_context(request, f),
                                  context_instance=RequestContext(request))

    def preview_post(self, request):
        "Validates the POST data. If valid, displays the preview page. Else, redisplays form."
        f = self.form(request.POST, auto_id=self.get_auto_id())
        context = self.get_context(request, f)
        if f.is_valid():
            self.process_preview(request, f, context)
            # The hash lets post_post() verify the previewed data is the
            # data that finally gets submitted.
            context['hash_field'] = self.unused_name('hash')
            context['hash_value'] = self.security_hash(request, f)
            return render_to_response(self.preview_template, context, context_instance=RequestContext(request))
        else:
            return render_to_response(self.form_template, context, context_instance=RequestContext(request))

    def _check_security_hash(self, token, request, form):
        # Constant-time comparison avoids leaking hash information via
        # timing differences.
        expected = self.security_hash(request, form)
        return constant_time_compare(token, expected)

    def post_post(self, request):
        "Validates the POST data. If valid, calls done(). Else, redisplays form."
        f = self.form(request.POST, auto_id=self.get_auto_id())
        if f.is_valid():
            if not self._check_security_hash(request.POST.get(self.unused_name('hash'), ''),
                                             request, f):
                return self.failed_hash(request) # Security hash failed.
            return self.done(request, f.cleaned_data)
        else:
            return render_to_response(self.form_template,
                                      self.get_context(request, f),
                                      context_instance=RequestContext(request))

    # METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ########################

    def get_auto_id(self):
        """
        Hook to override the ``auto_id`` kwarg for the form. Needed when
        rendering two form previews in the same template.
        """
        return AUTO_ID

    def get_initial(self, request):
        """
        Takes a request argument and returns a dictionary to pass to the form's
        ``initial`` kwarg when the form is being created from an HTTP get.
        """
        return {}

    def get_context(self, request, form):
        "Context for template rendering."
        return {'form': form, 'stage_field': self.unused_name('stage'), 'state': self.state}

    def parse_params(self, *args, **kwargs):
        """
        Given captured args and kwargs from the URLconf, saves something in
        self.state and/or raises Http404 if necessary.
        For example, this URLconf captures a user_id variable:
            (r'^contact/(?P<user_id>\d{1,6})/$', MyFormPreview(MyForm)),
        In this case, the kwargs variable in parse_params would be
        {'user_id': 32} for a request to '/contact/32/'. You can use that
        user_id to make sure it's a valid user and/or save it for later, for
        use in done().
        """
        pass

    def process_preview(self, request, form, context):
        """
        Given a validated form, performs any extra processing before displaying
        the preview page, and saves any extra data in context.
        """
        pass

    def security_hash(self, request, form):
        """
        Calculates the security hash for the given HttpRequest and Form instances.
        Subclasses may want to take into account request-specific information,
        such as the IP address.
        """
        return form_hmac(form)

    def failed_hash(self, request):
        "Returns an HttpResponse in the case of an invalid security hash."
        return self.preview_post(request)

    # METHODS SUBCLASSES MUST OVERRIDE ########################################

    def done(self, request, cleaned_data):
        """
        Does something with the cleaned_data and returns an
        HttpResponseRedirect.
        """
        raise NotImplementedError('You must define a done() method on your %s subclass.' % self.__class__.__name__)
| lgpl-3.0 |
Big-B702/python-for-android | python-modules/twisted/twisted/scripts/htmlizer.py | 81 | 1732 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""HTML pretty-printing for Python source code."""
__version__ = '$Revision: 1.8 $'[11:-2]
from twisted.python import htmlizer, usage
from twisted import copyright
import os, sys
header = '''<html><head>
<title>%(title)s</title>
<meta name=\"Generator\" content="%(generator)s" />
%(alternate)s
%(stylesheet)s
</head>
<body>
'''
footer = """</body>"""
styleLink = '<link rel="stylesheet" href="%s" type="text/css" />'
alternateLink = '<link rel="alternate" href="%(source)s" type="text/x-python" />'
class Options(usage.Options):
    # Usage line shown by --help; argv[0] supplies the invoked name.
    synopsis = """%s [options] source.py
""" % (
    os.path.basename(sys.argv[0]),)

    # (long name, short name, default, description)
    optParameters = [
        ('stylesheet', 's', None, "URL of stylesheet to link to."),
    ]

    # zsh tab-completion hint for the positional source-file argument.
    zsh_extras = ["1:source python file:_files -g '*.py'"]

    def parseArgs(self, filename):
        # Single positional argument: the Python source file to htmlize.
        self['filename'] = filename
def run():
options = Options()
try:
options.parseOptions()
except usage.UsageError, e:
print str(e)
sys.exit(1)
filename = options['filename']
if options.get('stylesheet') is not None:
stylesheet = styleLink % (options['stylesheet'],)
else:
stylesheet = ''
output = open(filename + '.html', 'w')
try:
output.write(header % {
'title': filename,
'generator': 'htmlizer/%s' % (copyright.longversion,),
'alternate': alternateLink % {'source': filename},
'stylesheet': stylesheet
})
htmlizer.filter(open(filename), output,
htmlizer.SmallerHTMLWriter)
output.write(footer)
finally:
output.close()
| apache-2.0 |
Zerknechterer/pyload | module/plugins/hoster/Keep2ShareCc.py | 1 | 4376 | # -*- coding: utf-8 -*-
import re
import urlparse
from module.plugins.internal.ReCaptcha import ReCaptcha
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class Keep2ShareCc(SimpleHoster):
__name__ = "Keep2ShareCc"
__type__ = "hoster"
__version__ = "0.23"
__pattern__ = r'https?://(?:www\.)?(keep2share|k2s|keep2s)\.cc/file/(?P<ID>\w+)'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
__description__ = """Keep2Share.cc hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("stickell", "l.stickell@yahoo.it"),
("Walter Purcaro", "vuolter@gmail.com")]
URL_REPLACEMENTS = [(__pattern__ + ".*", "http://keep2s.cc/file/\g<ID>")]
NAME_PATTERN = r'File: <span>(?P<N>.+?)</span>'
SIZE_PATTERN = r'Size: (?P<S>[^<]+)</div>'
OFFLINE_PATTERN = r'File not found or deleted|Sorry, this file is blocked or deleted|Error 404'
TEMP_OFFLINE_PATTERN = r'Downloading blocked due to'
LINK_FREE_PATTERN = r'"(.+?url.html\?file=.+?)"|window\.location\.href = \'(.+?)\';'
LINK_PREMIUM_PATTERN = r'window\.location\.href = \'(.+?)\';'
CAPTCHA_PATTERN = r'src="(/file/captcha\.html.+?)"'
WAIT_PATTERN = r'Please wait ([\d:]+) to download this file'
TEMP_ERROR_PATTERN = r'>\s*(Download count files exceed|Traffic limit exceed|Free account does not allow to download more than one file at the same time)'
ERROR_PATTERN = r'>\s*(Free user can\'t download large files|You no can access to this file|This download available only for premium users|This is private file)'
def checkErrors(self):
m = re.search(self.TEMP_ERROR_PATTERN, self.html)
if m:
self.info['error'] = m.group(1)
self.wantReconnect = True
self.retry(wait_time=30 * 60, reason=m.group(0))
m = re.search(self.ERROR_PATTERN, self.html)
if m:
errmsg = self.info['error'] = m.group(1)
self.error(errmsg)
m = re.search(self.WAIT_PATTERN, self.html)
if m:
self.logDebug("Hoster told us to wait for %s" % m.group(1))
# string to time convert courtesy of https://stackoverflow.com/questions/10663720
ftr = [3600, 60, 1]
wait_time = sum(a * b for a, b in zip(ftr, map(int, m.group(1).split(':'))))
self.wantReconnect = True
self.retry(wait_time=wait_time, reason="Please wait to download this file")
self.info.pop('error', None)
def handleFree(self, pyfile):
self.fid = re.search(r'<input type="hidden" name="slow_id" value="(.+?)">', self.html).group(1)
self.html = self.load(pyfile.url, post={'yt0': '', 'slow_id': self.fid})
# self.logDebug(self.fid)
# self.logDebug(pyfile.url)
self.checkErrors()
m = re.search(self.LINK_FREE_PATTERN, self.html)
if m is None:
self.handleCaptcha()
self.wait(31)
self.html = self.load(pyfile.url)
m = re.search(self.LINK_FREE_PATTERN, self.html)
if m is None:
self.error(_("Free download link not found"))
self.link = m.group(1)
def handleCaptcha(self):
        """Solve either the picture captcha or ReCaptcha and submit the form."""
        post_data = {'free'               : 1,
                     'freeDownloadRequest': 1,
                     'uniqueId'           : self.fid,
                     'yt0'                : ''}
        m = re.search(r'id="(captcha\-form)"', self.html)
        self.logDebug("captcha-form found %s" % m)
        m = re.search(self.CAPTCHA_PATTERN, self.html)
        self.logDebug("CAPTCHA_PATTERN found %s" % m)
        if m:
            # Picture captcha: fetch the image and let the captcha solver decode it.
            captcha_url = urlparse.urljoin("http://keep2s.cc/", m.group(1))
            post_data['CaptchaForm[code]'] = self.decryptCaptcha(captcha_url)
        else:
            # No picture captcha on the page: fall back to ReCaptcha.
            recaptcha = ReCaptcha(self)
            response, challenge = recaptcha.challenge()
            post_data.update({'recaptcha_challenge_field': challenge,
                              'recaptcha_response_field' : response})
        self.html = self.load(self.pyfile.url, post=post_data)
        if 'verification code is incorrect' not in self.html:
            self.correctCaptcha()
        else:
            self.invalidCaptcha()
# Module-level factory hook — presumably consumed by pyLoad's plugin loader
# to build file-info queries for this hoster (standard pyLoad convention).
getInfo = create_getInfo(Keep2ShareCc)
| gpl-3.0 |
omaciel/billreminder | src/lib/SettingsGConf.py | 2 | 4535 | import re
import os
try:
import gconf
except ImportError:
from gnome import gconf
class Settings(object):
    """No-op settings backend.

    Stores nothing and answers every query with an inert default; real
    backends (e.g. the GConf implementation) override these methods.
    """

    def __init__(self, defaults, changedCb):
        """Remember the default values, the change callback and start with no overrides."""
        self._defaults = defaults
        self._changedCb = changedCb
        self._overrides = {}

    def get(self, key, **kwargs):
        """Report every key as unset."""
        return None

    def set(self, key, val, **kwargs):
        """Refuse to store anything."""
        return False

    def set_overrides(self, **overrides):
        """Replace the session-only overrides mapping."""
        self._overrides = overrides

    def proxy_enabled(self):
        """No proxy is ever configured for the no-op backend."""
        return False

    def get_proxy(self):
        """Return an empty proxy tuple: (host, port, user, password)."""
        return ("", 0, "", "")

    def save(self):
        """Nothing to persist."""
        pass
import logging
log = logging.getLogger("Settings")
class SettingsImpl(Settings):
    """
    Settings implementation which stores settings in GConf.

    Reads resolve in order: session overrides, GConf, then the defaults
    supplied at construction time. Keys are implicitly prefixed with
    BILLREMINDER_GCONF_DIR and change notifications invoke the callback
    given to __init__.
    """
    # All application keys live below this GConf directory.
    BILLREMINDER_GCONF_DIR = "/apps/billreminder/"
    # Python types that can be mapped onto GConf value types.
    VALID_KEY_TYPES = (bool, str, int, list, tuple)

    def __init__(self, defaults, changedCb):
        Settings.__init__(self, defaults, changedCb)
        self._client = gconf.client_get_default()
        # Preload the whole application tree so subsequent reads are cached.
        self._client.add_dir(self.BILLREMINDER_GCONF_DIR[:-1], gconf.CLIENT_PRELOAD_RECURSIVE)
        # Keys for which a change notification has already been registered.
        self._notifications = []

    def _fix_key(self, key):
        """
        Appends the BILLREMINDER_GCONF_PREFIX to the key if needed

        @param key: The key to check
        @type key: C{string}
        @returns: The fixed key
        @rtype: C{string}
        """
        if not key.startswith(self.BILLREMINDER_GCONF_DIR):
            return self.BILLREMINDER_GCONF_DIR + key
        else:
            return key

    def _key_changed(self, client, cnxn_id, entry, data=None):
        """
        Callback when a gconf key changes
        """
        key = self._fix_key(entry.key)
        self._changedCb(key)

    def get(self, key, default=None):
        """
        Returns the value of the key or the default value if the key is
        not yet in gconf
        """
        # Check if the setting has been overridden for this session.
        if key in self._overrides:
            try:
                # Try to cast the override to the declared default's type.
                return type(self._defaults[key])(self._overrides[key])
            except Exception:
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit): an unknown key or a failed
                # cast simply yields the raw override value.
                return self._overrides[key]
        # Function arguments override defaults.
        if default is None:
            default = self._defaults.get(key, None)
        vtype = type(default)
        # We now have a valid key and type.
        if default is None:
            log.warn("Unknown key: %s, must specify default value" % key)
            return None
        if vtype not in self.VALID_KEY_TYPES:
            log.warn("Invalid key type: %s" % vtype)
            return None
        # For gconf refer to the full key path.
        key = self._fix_key(key)
        # Register the change notification for this key exactly once.
        if key not in self._notifications:
            self._client.notify_add(key, self._key_changed)
            self._notifications.append(key)
        value = self._client.get(key)
        if not value:
            # First access: persist the default so it shows up in gconf.
            self.set(key, default)
            return default
        if vtype is bool:
            return value.get_bool()
        elif vtype is str:
            return value.get_string()
        elif vtype is int:
            return value.get_int()
        elif vtype in (list, tuple):
            # Lists are stored as string lists (see set() below).
            l = []
            for i in value.get_list():
                l.append(i.get_string())
            return l
        log.warn("Unknown gconf key: %s" % key)
        return None

    def set(self, key, value):
        """
        Sets the key value in gconf and connects adds a signal
        which is fired if the key changes
        """
        # Overridden settings only apply for this session and are not saved.
        if key in self._overrides:
            return True
        log.debug("Settings %s -> %s" % (key, value))
        # Prefer the declared default's type so stored values stay consistent.
        if key in self._defaults:
            vtype = type(self._defaults[key])
        else:
            vtype = type(value)
        if vtype not in self.VALID_KEY_TYPES:
            log.warn("Invalid key type: %s" % vtype)
            return False
        # For gconf refer to the full key path.
        key = self._fix_key(key)
        if vtype is bool:
            self._client.set_bool(key, value)
        elif vtype is str:
            self._client.set_string(key, value)
        elif vtype is int:
            self._client.set_int(key, value)
        elif vtype in (list, tuple):
            # Save every value as a string.
            strvalues = [str(i) for i in value]
            self._client.set_list(key, gconf.VALUE_STRING, strvalues)
        return True
| gpl-3.0 |
qma/pants | src/python/pants/backend/jvm/tasks/jvm_tool_task_mixin.py | 6 | 1869 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.core.tasks.task import TaskBase
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
class JvmToolTaskMixin(JvmToolMixin, TaskBase):
  """A JvmToolMixin specialized for mixing in to Tasks."""

  @classmethod
  def prepare(cls, options, round_manager):
    super(JvmToolTaskMixin, cls).prepare(options, round_manager)
    # Additionally register product requirements for every configured JVM tool.
    cls.prepare_tools(round_manager)

  def tool_jar(self, key, scope=None):
    """Get the jar for the tool previously registered under key in the given scope.

    :param string key: The key the tool configuration was registered under.
    :param string scope: The scope the tool configuration was registered under; the task scope by
                         default.
    :returns: A single jar path.
    :rtype: string
    :raises: `JvmToolMixin.InvalidToolClasspath` when the tool classpath is not composed of exactly
             one jar.
    """
    resolved_scope = self._scope(scope)
    return self.tool_jar_from_products(self.context.products, key, scope=resolved_scope)

  def tool_classpath(self, key, scope=None):
    """Get a classpath for the tool previously registered under key in the given scope.

    :param string key: The key the tool configuration was registered under.
    :param string scope: The scope the tool configuration was registered under; the task scope by
                         default.
    :returns: A list of paths.
    :rtype: list
    """
    resolved_scope = self._scope(scope)
    return self.tool_classpath_from_products(self.context.products, key, scope=resolved_scope)

  def _scope(self, scope=None):
    """Return the given scope if set, otherwise fall back to this task's options scope."""
    if scope:
      return scope
    return self.options_scope
| apache-2.0 |
amaurywalbert/twitter | net_structure/multilayer/unweighted_directed/hashmap_calc_nodes_correlation_v1.py | 1 | 6326 | # -*- coding: latin1 -*-
################################################################################################
#
#
import snap, datetime, sys, time, json, os, os.path, shutil, time, random, math
import numpy as np
from math import*
from scipy.stats.stats import pearsonr
reload(sys)
sys.setdefaultencoding('utf-8')
######################################################################################################################################################################
## Status - Versão 1 - Script para calcular a correlação entre número de vértices entre as camadas
## - Considerar apenas redes-ego com a presença do ego.
##
## INPUT: Redes-ego
##
## Output: arquivo texto. Formato:
##
##ID_ego a:amigos s:seguidores r:retuítes l:likes m:menções
######################################################################################################################################################################
######################################################################################################################################################################
#
# Cria diretórios
#
######################################################################################################################################################################
def create_dirs(x):
	"""Create directory *x* (including parents) unless it already exists."""
	already_there = os.path.exists(x)
	if already_there:
		return
	os.makedirs(x)
######################################################################################################################################################################
#
# Salvar arquivo no formato JSON: ego_id:{as:data,ar:data,al:data,am:data,...,rm:data}
#
######################################################################################################################################################################
def save_json(dataset_json):
	"""Serialize *dataset_json* to nodes_correlation.json in the output dir."""
	# Relies on the module-level output_dir_json path; create_dirs() is
	# expected to have been called before this (see main()).
	with open(output_dir_json+"nodes_correlation.json","w") as f:
		f.write(json.dumps(dataset_json))
######################################################################################################################################################################
#
# Calcula Correlação de Pearson entre nodes dos pares de layers
#
######################################################################################################################################################################
def calc_correlation(dataset):
	"""Pearson-correlate the node-count vectors of every ordered layer pair.

	dataset maps layer tag -> list of per-ego node counts; the result maps
	the concatenated pair name (e.g. "as") to {"pearson": r, "p-value": p}.
	"""
	pairs = {}
	for k,v in dataset.iteritems():
		for j, x in dataset.iteritems():
#			if j >= k and j != k:
			# NOTE: the dedup guard above is disabled, so self-pairs and
			# both orderings (kj and jk) are all emitted.
			nodes1 = v
			nodes2 = x
			name = str(k)+str(j)
			result,p = pearsonr(nodes1,nodes2) # Correlate the node-count vectors of the two layers
			pairs[name]={"pearson":result,"p-value":p}
	print pairs
	return pairs
######################################################################################################################################################################
######################################################################################################################################################################
#
# Método principal do programa.
#
######################################################################################################################################################################
######################################################################################################################################################################
def main():
	"""Load per-layer node counts, correlate all layer pairs and save JSON."""
	os.system('clear')
	print "################################################################################"
	print" "
	print" Cálculo de nodes correlation em entre as camadas Layer "
	print" "
	print"#################################################################################"
	print
	i=0   # NOTE(review): unused — kept as in the original.
	if os.path.exists(output_dir_json+"nodes_correlation.json"):
		print ("Arquivo de destino já existe!"+str(output_dir_json+"nodes_correlation.json"))
	else:
		create_dirs(output_dir_json) # Create the directory where results are saved.
		# One vector per layer; each position holds one ego's node count.
		_a = []
		_s = []
		_r = []
		_l = []
		_m = []
		with open(source_file,'r') as f:
			source = json.load(f)
		for ego,v in dictionary.iteritems(): # Split each ego's per-layer counts into the vectors above.
			_a.append(source[ego]['a'])
			_s.append(source[ego]['s'])
			_r.append(source[ego]['r'])
			_l.append(source[ego]['l'])
			_m.append(source[ego]['m'])
		nodes = {"a":_a,"s":_s,"r":_r,"l":_l,"m":_m}
		pairs = calc_correlation(nodes)
		save_json(pairs) # Save the result as JSON.
	print("\n######################################################################\n")
	print("Script finalizado!")
	print("\n######################################################################\n")
######################################################################################################################################################################
#
# INÍCIO DO PROGRAMA
#
######################################################################################################################################################################
data_dir = "/home/amaury/graphs_hashmap/n1/graphs_with_ego/" # Directory listing the ego ids (one edge_list file per ego).
source_file = "/home/amaury/Dropbox/net_structure_hashmap/multilayer/graphs_with_ego/unweighted_directed/json/basics/nodes.json"
output_dir_json = "/home/amaury/Dropbox/net_structure_hashmap/multilayer/graphs_with_ego/unweighted_directed/json/" # Destination directory for the JSON result.
dictionary = {} #################################################### {key:value} hash table holding the list of egos
###### Build the dictionary (hash table) from the already-created files.
print("######################################################################")
print ("Criando tabela hash...")
n = 0 # Counts how many files exist in the directory.
for file in os.listdir(data_dir):
	user_id = file.split(".edge_list")
	user_id = str(user_id[0])
	dictionary[user_id] = user_id
	n+=1
print ("Tabela hash criada com sucesso...")
print("######################################################################\n")
# Sanity check: the dataset is expected to contain exactly 500 egos.
# (Python 2 only: `<>` is the legacy inequality operator.)
if n <> 500:
	print ("Diretório não contém lista com todos os egos...")
	sys.exit()
else:
	# Run the main routine.
if __name__ == "__main__": main() | gpl-3.0 |
YouCantCodeWithUs/KMCIslandGrowth | Periodic.py | 1 | 1390 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import numpy as np
import Constants as gv
def PutInBox(x):
	'''
	Implement periodic boundary conditions along the x-axis.
	Ri: np.array[x, y] - position of a single atom
	L: Float - global variable indicating the width of the periodic cell
	returns: np.array[x, y] - correct position using PBC
	'''
	# Wrap into [0, L): Python's % result takes the sign of the divisor gv.L.
	x = x % gv.L
	# Shift into the centred interval so |x| <= L/2.
	if abs(x) > gv.L/2:
		x += gv.L if gv.L < 0 else -gv.L
	return x
def PutAllInBox(R):
	"""Apply PBC to every x-coordinate of the flat [x0, y0, x1, y1, ...] array."""
	# Only even indices are x values; y values are left untouched.
	# NOTE(review): len(R)/2 relies on Python 2 integer division.
	for i in range(len(R)/2):
		R[2*i] = PutInBox(R[2*i])
	return R
def Displacement(Ri, Rj):
	'''
	Least-distance spacial displacement between two atoms.
	Ri: np.array[x, y] - position of first atom
	Rj: np.array[x, y] - position of second atom
	returns: np.array[x, y] - vector pointing from Ri to Rj
	'''
	# Elementwise difference, then wrapped back into the periodic cell.
	return PutAllInBox(Rj-Ri)
def Distance(Ri, Rj):
	'''
	Least-distance between two atoms.
	Ri: np.array[x, y] - position of first atom
	Rj: np.array[x, y] - position of second atom
	returns: Float - distance between Ri and Rj
	'''
	# Euclidean norm of the minimum-image displacement.
	d = Displacement(Ri, Rj)
	return np.sqrt(np.dot(d, d))
def Displacements(x, y, Rjs):
	"""Minimum-image displacements from point (x, y) to every atom in flat array Rjs."""
	# Tile the reference point to match the flat [x0, y0, x1, y1, ...] layout.
	# NOTE(review): len(Rjs)/2 relies on Python 2 integer division.
	d = np.array([x, y]*(len(Rjs)/2))
	return Displacement(d, Rjs)
def Distances(a, b):
Ds = []
for i in range(len(a)/2):
x = a[2*i]; y = a[2*i+1]
d = Displacements(x, y, b)
d *= d
D = [np.sqrt(d[2*j] + d[2*j+1]) for j in range(len(d)/2)]
Ds.append(np.array(D))
return np.array(Ds) | mit |
slank/ansible | lib/ansible/module_utils/ovirt.py | 7 | 21886 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import inspect
import os
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime
from distutils.version import LooseVersion
from enum import Enum
try:
    import ovirtsdk4 as sdk
    import ovirtsdk4.version as sdk_version

    # The helpers in this module require at least version 4 of the Python SDK.
    HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.0.0')
except ImportError:
    HAS_SDK = False


# Multipliers for the IEC binary suffixes accepted by convert_to_bytes().
BYTES_MAP = {
    'kib': 2**10,
    'mib': 2**20,
    'gib': 2**30,
    'tib': 2**40,
    'pib': 2**50,
}
def check_sdk(module):
    """Fail the Ansible module unless a new-enough ovirtsdk4 is importable."""
    if HAS_SDK:
        return
    module.fail_json(
        msg='ovirtsdk4 version 4.0.0 or higher is required for this module'
    )
def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None):
    """
    Convert SDK Struct type into dictionary.

    :param struct: SDK struct to serialize; ``None`` yields an empty dict.
    :param connection: SDK connection, used to follow links when ``fetch_nested``.
    :param fetch_nested: if True, nested entity lists referenced only by href
        are fetched and summarized via *attributes*.
    :param attributes: attribute names to copy for fetched nested entities.
    :return: plain dictionary representation of *struct*.
    """
    def remove_underscore(val):
        # Private SDK attributes carry leading underscores; strip them all.
        # (The original recursive version discarded its recursive result,
        # so only a single leading underscore was ever removed.)
        return val.lstrip('_')

    res = {}
    if struct is not None:
        for key, value in struct.__dict__.items():
            nested = False
            key = remove_underscore(key)
            if value is None:
                continue
            elif isinstance(value, sdk.Struct):
                res[key] = get_dict_of_struct(value)
            elif isinstance(value, Enum) or isinstance(value, datetime):
                res[key] = str(value)
            elif isinstance(value, list) or isinstance(value, sdk.List):
                if isinstance(value, sdk.List) and fetch_nested and value.href:
                    # Entities referenced only by href must be fetched first.
                    value = connection.follow_link(value)
                    nested = True
                res[key] = []
                for i in value:
                    if isinstance(i, sdk.Struct):
                        if not nested:
                            res[key].append(get_dict_of_struct(i))
                        else:
                            nested_obj = dict(
                                (attr, getattr(i, attr))
                                for attr in attributes if getattr(i, attr, None)
                            )
                            # Fixed: the original had a stray trailing comma
                            # here, which stored a 1-tuple instead of the id.
                            nested_obj['id'] = getattr(i, 'id', None)
                            res[key].append(nested_obj)
                    elif isinstance(i, Enum):
                        res[key].append(str(i))
                    else:
                        res[key].append(i)
            else:
                res[key] = value
    return res
def create_connection(auth):
    """
    Create a connection to Python SDK, from task `auth` parameter.

    If user doesn't have SSO token the `auth` dictionary has following parameters mandatory:
     url, username, password

    If user has SSO token the `auth` dictionary has following parameters mandatory:
     url, token

    The `ca_file` parameter is mandatory in case user want to use secure connection,
    in case user want to use insecure connection, it's mandatory to send insecure=True.

    :param auth: dictionary which contains needed values for connection creation
    :return: Python SDK connection
    """
    # Missing optional keys simply fall through to the SDK's defaults.
    return sdk.Connection(
        url=auth.get('url'),
        username=auth.get('username'),
        password=auth.get('password'),
        ca_file=auth.get('ca_file', None),
        insecure=auth.get('insecure', False),
        token=auth.get('token', None),
        kerberos=auth.get('kerberos', None),
    )
def convert_to_bytes(param):
    """
    This method convert units to bytes, which follow IEC standard.

    Plain digit strings are interpreted as KiB; values carrying an IEC
    suffix ('KiB', 'MiB', 'GiB', 'TiB', 'PiB') are scaled accordingly.

    :param param: value to be converted; ``None`` is passed through unchanged
    :raises ValueError: if the value is neither a digit string nor an IEC value
    """
    if param is None:
        return None

    # Get rid of whitespaces:
    param = ''.join(param.split())

    # Convert to bytes. The length guard fixes the original IndexError on
    # values shorter than three characters (e.g. "5"), which now fall
    # through to the plain-digits branch below.
    if len(param) > 3 and param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
        return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
    elif param.isdigit():
        # Bare numbers are treated as KiB.
        return int(param) * 2**10
    else:
        raise ValueError(
            "Unsupported value(IEC supported): '{value}'".format(value=param)
        )
def follow_link(connection, link):
    """
    Return the entity that *link* points to, or ``None`` for an empty link.

    :param connection: connection to the Python SDK
    :param link: link of the entity
    :return: entity which link points to, or None
    """
    if not link:
        return None
    return connection.follow_link(link)
def get_link_name(connection, link):
    """
    Return the name of the entity that *link* points to, or ``None`` for an empty link.

    :param connection: connection to the Python SDK
    :param link: link of the entity
    :return: name of the entity, which link points to, or None
    """
    if not link:
        return None
    entity = connection.follow_link(link)
    return entity.name
def equal(param1, param2):
    """
    Compare two parameters and return if they are equal.

    When the first parameter is ``None`` (i.e. the user did not specify it
    in their task) no comparison is performed and the values are treated
    as equal.

    :param param1: user inputted parameter
    :param param2: value of entity parameter
    :return: True if parameters are equal or first parameter is None, otherwise False
    """
    if param1 is None:
        return True
    return param1 == param2
def search_by_attributes(service, **kwargs):
    """
    Search for the entity by attributes. Nested entities don't support search
    via REST, so in case using search for nested entity we return all entities
    and filter them by specified attributes.
    """
    # Check if 'list' method support search(look for search parameter):
    # NOTE(review): inspect.getargspec was removed in Python 3.11 —
    # getfullargspec would be needed there; kept for Python 2 compatibility.
    if 'search' in inspect.getargspec(service.list)[0]:
        res = service.list(
            search=' and '.join('{}={}'.format(k, v) for k, v in kwargs.items())
        )
    else:
        # Client-side filtering: keep entities matching every given attribute.
        res = [
            e for e in service.list() if len([
                k for k, v in kwargs.items() if getattr(e, k, None) == v
            ]) == len(kwargs)
        ]
    # Return the first match, or None when nothing matched.
    res = res or [None]
    return res[0]
def search_by_name(service, name, **kwargs):
    """
    Search for the entity by its name. Nested entities don't support search
    via REST, so in case using search for nested entity we return all entities
    and filter them by name.

    :param service: service of the entity
    :param name: name of the entity
    :return: Entity object returned by Python SDK
    """
    # Check if 'list' method support search(look for search parameter):
    if 'search' in inspect.getargspec(service.list)[0]:
        res = service.list(
            search="name={name}".format(name=name)
        )
    else:
        res = [e for e in service.list() if e.name == name]
    if kwargs:
        # NOTE(review): this recomputes the candidates from the full list
        # and filters only on kwargs, discarding the name filter applied
        # above — confirm whether name should also be matched here.
        res = [
            e for e in service.list() if len([
                k for k, v in kwargs.items() if getattr(e, k, None) == v
            ]) == len(kwargs)
        ]
    # Return the first match, or None when nothing matched.
    res = res or [None]
    return res[0]
def wait(
    service,
    condition,
    fail_condition=lambda e: False,
    timeout=180,
    wait=True,
    poll_interval=3,
):
    """
    Wait until entity fulfill expected condition.

    Returns silently both when the condition is met and when the timeout
    elapses; raises only when *fail_condition* becomes true.

    :param service: service of the entity
    :param condition: condition to be fulfilled
    :param fail_condition: if this condition is true, raise Exception
    :param timeout: max time to wait in seconds
    :param wait: if True wait for condition, if False don't wait
    :param poll_interval: Number of seconds we should wait until next condition check
    """
    if not wait:
        return
    # Poll until the desired state of the entity or until the deadline.
    deadline = time.time() + timeout
    while time.time() < deadline:
        entity = service.get()
        if condition(entity):
            return
        elif fail_condition(entity):
            raise Exception("Error while waiting on result state of the entity.")
        # Sleep for `poll_interval` seconds if none of the conditions apply:
        time.sleep(float(poll_interval))
def __get_auth_dict():
    """Build the 'auth' argument spec, defaulting from OVIRT_* env variables."""
    OVIRT_URL = os.environ.get('OVIRT_URL')
    OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME')
    OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD')
    OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN')
    OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE')
    # The connection is considered insecure only when no CA file is given.
    OVIRT_INSECURE = OVIRT_CAFILE is None
    env_vars = None
    # Environment auth is usable only with a URL plus either
    # username/password or an SSO token.
    if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN):
        env_vars = {
            'url': OVIRT_URL,
            'username': OVIRT_USERNAME,
            'password': OVIRT_PASSWORD,
            'insecure': OVIRT_INSECURE,
            'token': OVIRT_TOKEN,
            'ca_file': OVIRT_CAFILE,
        }
    # With usable env credentials 'auth' becomes optional; otherwise the
    # task must supply it explicitly.
    if env_vars is not None:
        auth = dict(default=env_vars, type='dict')
    else:
        auth = dict(required=True, type='dict')
    return auth
def ovirt_facts_full_argument_spec(**kwargs):
    """
    Extend parameters of facts module with parameters which are common to all
    oVirt facts modules.

    :param kwargs: kwargs to be extended
    :return: extended dictionary with common parameters
    """
    common = {
        'auth': __get_auth_dict(),
        'fetch_nested': dict(default=False, type='bool'),
        'nested_attributes': dict(type='list'),
    }
    # Module-specific parameters override/extend the common ones.
    common.update(kwargs)
    return common
def ovirt_full_argument_spec(**kwargs):
    """
    Extend parameters of module with parameters which are common to all oVirt modules.

    :param kwargs: kwargs to be extended
    :return: extended dictionary with common parameters
    """
    common = {
        'auth': __get_auth_dict(),
        'timeout': dict(default=180, type='int'),
        'wait': dict(default=True, type='bool'),
        'poll_interval': dict(default=3, type='int'),
        'fetch_nested': dict(default=False, type='bool'),
        'nested_attributes': dict(type='list'),
    }
    # Module-specific parameters override/extend the common ones.
    common.update(kwargs)
    return common
def check_params(module):
    """
    Most modules must have either `name` or `id` specified.
    """
    params = module.params
    if params.get('name') is not None or params.get('id') is not None:
        return
    module.fail_json(msg='"name" or "id" is required')
class BaseModule(object):
    """
    This is base class for oVirt modules. oVirt modules should inherit this
    class and override method to customize specific needs of the module.

    The only abstract method of this class is `build_entity`, which must
    to be implemented in child class.
    """
    __metaclass__ = ABCMeta

    def __init__(self, connection, module, service, changed=False):
        # SDK connection, Ansible module and the SDK service for the entity type.
        self._connection = connection
        self._module = module
        self._service = service
        self._changed = changed

    @property
    def changed(self):
        return self._changed

    @changed.setter
    def changed(self, changed):
        # Latching setter: once True it can never be reset back to False.
        if not self._changed:
            self._changed = changed

    @abstractmethod
    def build_entity(self):
        """
        This method should return oVirt Python SDK type, which we want to
        create or update, initialized by values passed by Ansible module.

        For example if we want to create VM, we will return following:
          types.Vm(name=self._module.params['vm_name'])

        :return: Specific instance of sdk.Struct.
        """
        pass

    def param(self, name, default=None):
        """
        Return a module parameter specified by it's name.
        """
        return self._module.params.get(name, default)

    def update_check(self, entity):
        """
        This method handle checks whether the entity values are same as values
        passed to ansible module. By default we don't compare any values.

        :param entity: Entity we want to compare with Ansible module values.
        :return: True if values are same, so we don't need to update the entity.
        """
        return True

    def pre_create(self, entity):
        """
        This method is called right before entity is created.

        :param entity: Entity to be created or updated.
        """
        pass

    def post_create(self, entity):
        """
        This method is called right after entity is created.

        :param entity: Entity which was created.
        """
        pass

    def post_update(self, entity):
        """
        This method is called right after entity is updated.

        :param entity: Entity which was updated.
        """
        pass

    def create(self, entity=None, result_state=None, fail_condition=lambda e: False, search_params=None, **kwargs):
        """
        Method which is called when state of the entity is 'present'. If user
        don't provide `entity` parameter the entity is searched using
        `search_params` parameter. If entity is found it's updated, whether
        the entity should be updated is checked by `update_check` method.
        The corresponding updated entity is build by `build_entity` method.

        Function executed after entity is created can optionally be specified
        in `post_create` parameter. Function executed after entity is updated
        can optionally be specified in `post_update` parameter.

        :param entity: Entity we want to update, if exists.
        :param result_state: State which should entity has in order to finish task.
        :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
        :param search_params: Dictionary of parameters to be used for search.
        :param kwargs: Additional parameters passed when creating entity.
        :return: Dictionary with values returned by Ansible module.
        """
        if entity is None:
            entity = self.search_entity(search_params)

        self.pre_create(entity)
        if entity:
            # Entity exists, so update it:
            entity_service = self._service.service(entity.id)
            if not self.update_check(entity):
                if not self._module.check_mode:
                    entity_service.update(self.build_entity())
                    self.post_update(entity)
                self.changed = True
        else:
            # Entity don't exists, so create it:
            # NOTE(review): in check mode with no pre-existing entity,
            # `entity` stays None and `entity.id` below would fail —
            # confirm intended check-mode behaviour.
            if not self._module.check_mode:
                entity = self._service.add(
                    self.build_entity(),
                    **kwargs
                )
                self.post_create(entity)
            self.changed = True

        # Wait for the entity to be created and to be in the defined state:
        entity_service = self._service.service(entity.id)

        state_condition = lambda entity: entity
        if result_state:
            state_condition = lambda entity: entity and entity.status == result_state

        wait(
            service=entity_service,
            condition=state_condition,
            fail_condition=fail_condition,
            wait=self._module.params['wait'],
            timeout=self._module.params['timeout'],
            poll_interval=self._module.params['poll_interval'],
        )

        return {
            'changed': self.changed,
            'id': entity.id,
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
        }

    def pre_remove(self, entity):
        """
        This method is called right before entity is removed.

        :param entity: Entity which we want to remove.
        """
        pass

    def remove(self, entity=None, search_params=None, **kwargs):
        """
        Method which is called when state of the entity is 'absent'. If user
        don't provide `entity` parameter the entity is searched using
        `search_params` parameter. If entity is found it's removed.

        Function executed before remove is executed can optionally be specified
        in `pre_remove` parameter.

        :param entity: Entity we want to remove.
        :param search_params: Dictionary of parameters to be used for search.
        :param kwargs: Additional parameters passed when removing entity.
        :return: Dictionary with values returned by Ansible module.
        """
        if entity is None:
            entity = self.search_entity(search_params)

        # Nothing to remove: report unchanged instead of failing.
        if entity is None:
            return {
                'changed': self.changed,
                'msg': "Entity wasn't found."
            }

        self.pre_remove(entity)

        entity_service = self._service.service(entity.id)
        if not self._module.check_mode:
            entity_service.remove(**kwargs)
            # Wait until the entity disappears from the service.
            wait(
                service=entity_service,
                condition=lambda entity: not entity,
                wait=self._module.params['wait'],
                timeout=self._module.params['timeout'],
                poll_interval=self._module.params['poll_interval'],
            )
        self.changed = True

        return {
            'changed': self.changed,
            'id': entity.id,
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
        }

    def action(
        self,
        action,
        entity=None,
        action_condition=lambda e: e,
        wait_condition=lambda e: e,
        fail_condition=lambda e: False,
        pre_action=lambda e: e,
        post_action=lambda e: None,
        search_params=None,
        **kwargs
    ):
        """
        This method is executed when we want to change the state of some oVirt
        entity. The action to be executed on oVirt service is specified by
        `action` parameter. Whether the action should be executed can be
        specified by passing `action_condition` parameter. State which the
        entity should be in after execution of the action can be specified
        by `wait_condition` parameter.

        Function executed before an action on entity can optionally be specified
        in `pre_action` parameter. Function executed after an action on entity can
        optionally be specified in `post_action` parameter.

        :param action: Action which should be executed by service on entity.
        :param entity: Entity we want to run action on.
        :param action_condition: Function which is executed when checking if action should be executed.
        :param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
        :param wait_condition: Function which is executed when waiting on result state.
        :param pre_action: Function which is executed before running the action.
        :param post_action: Function which is executed after running the action.
        :param search_params: Dictionary of parameters to be used for search.
        :param kwargs: Additional parameters passed to action.
        :return: Dictionary with values returned by Ansible module.
        """
        if entity is None:
            entity = self.search_entity(search_params)

        entity = pre_action(entity)

        if entity is None:
            self._module.fail_json(
                msg="Entity not found, can't run action '{}'.".format(
                    action
                )
            )

        entity_service = self._service.service(entity.id)
        # Re-fetch to evaluate the condition against the current state.
        entity = entity_service.get()
        if action_condition(entity):
            if not self._module.check_mode:
                # Invoke the named action method on the entity's service.
                getattr(entity_service, action)(**kwargs)
            self.changed = True

        post_action(entity)

        wait(
            service=self._service.service(entity.id),
            condition=wait_condition,
            fail_condition=fail_condition,
            wait=self._module.params['wait'],
            timeout=self._module.params['timeout'],
            poll_interval=self._module.params['poll_interval'],
        )

        return {
            'changed': self.changed,
            'id': entity.id,
            type(entity).__name__.lower(): get_dict_of_struct(
                struct=entity,
                connection=self._connection,
                fetch_nested=self._module.params.get('fetch_nested'),
                attributes=self._module.params.get('nested_attributes'),
            ),
        }

    def search_entity(self, search_params=None):
        """
        Always first try to search by `ID`, if ID isn't specified,
        check if user constructed special search in `search_params`,
        if not search by `name`.
        """
        entity = None

        if 'id' in self._module.params and self._module.params['id'] is not None:
            entity = search_by_attributes(self._service, id=self._module.params['id'])
        elif search_params is not None:
            entity = search_by_attributes(self._service, **search_params)
        elif 'name' in self._module.params and self._module.params['name'] is not None:
            entity = search_by_attributes(self._service, name=self._module.params['name'])

        return entity
| gpl-3.0 |
thanhacun/odoo | addons/stock_account/wizard/__init__.py | 351 | 1105 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_change_standard_price
import stock_invoice_onshipping
import stock_valuation_history
import stock_return_picking
| agpl-3.0 |
lianliuwei/gyp | test/defines/gyptest-defines-env-regyp.py | 74 | 1374 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable with C++ define specified by a gyp define, and
the use of the environment during regeneration when the gyp file changes.
"""
import os
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make and Android generators.
test = TestGyp.TestGyp(formats=['make', 'android'])
try:
  os.environ['GYP_DEFINES'] = 'value=50'
  test.run_gyp('defines.gyp')
finally:
  # We clear the environ after calling gyp. When the auto-regeneration happens,
  # the same define should be reused anyway. Reset to empty string first in
  # case the platform doesn't support unsetenv.
  os.environ['GYP_DEFINES'] = ''
  del os.environ['GYP_DEFINES']
test.build('defines.gyp')
# Expected output of the first build.  NOTE(review): VALUE is 1 here rather
# than 50, so defines.gyp presumably does not consume the `value` gyp define
# at this stage -- confirm against defines.gyp.
expect = """\
FOO is defined
VALUE is 1
2*PAREN_VALUE is 12
HASH_VALUE is a#1
"""
test.run_built_executable('defines', stdout=expect)
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
# Replace defines.gyp with defines-env.gyp; rebuilding then triggers
# auto-regeneration, which should reuse the GYP_DEFINES (value=50) captured
# at the original generation even though the variable has since been unset.
test.write('defines.gyp', test.read('defines-env.gyp'))
test.build('defines.gyp', test.ALL)
expect = """\
VALUE is 50
"""
test.run_built_executable('defines', stdout=expect)
test.pass_test()
| bsd-3-clause |
andyzsf/django | tests/utils_tests/test_crypto.py | 447 | 4581 | from __future__ import unicode_literals
import binascii
import hashlib
import unittest
from django.utils.crypto import constant_time_compare, pbkdf2
class TestUtilsCryptoMisc(unittest.TestCase):
def test_constant_time_compare(self):
# It's hard to test for constant time, just test the result.
self.assertTrue(constant_time_compare(b'spam', b'spam'))
self.assertFalse(constant_time_compare(b'spam', b'eggs'))
self.assertTrue(constant_time_compare('spam', 'spam'))
self.assertFalse(constant_time_compare('spam', 'eggs'))
class TestUtilsCryptoPBKDF2(unittest.TestCase):
# http://tools.ietf.org/html/draft-josefsson-pbkdf2-test-vectors-06
rfc_vectors = [
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": "0c60c80f961f0e71f3a9b524af6012062fe037a6",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 2,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 4096,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": "4b007901b765489abead49d926f721d065a429c1",
},
# # this takes way too long :(
# {
# "args": {
# "password": "password",
# "salt": "salt",
# "iterations": 16777216,
# "dklen": 20,
# "digest": hashlib.sha1,
# },
# "result": "eefe3d61cd4da4e4e9945b3d6ba2158c2634e984",
# },
{
"args": {
"password": "passwordPASSWORDpassword",
"salt": "saltSALTsaltSALTsaltSALTsaltSALTsalt",
"iterations": 4096,
"dklen": 25,
"digest": hashlib.sha1,
},
"result": "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038",
},
{
"args": {
"password": "pass\0word",
"salt": "sa\0lt",
"iterations": 4096,
"dklen": 16,
"digest": hashlib.sha1,
},
"result": "56fa6aa75548099dcc37d7f03425e0c3",
},
]
regression_vectors = [
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha256,
},
"result": "120fb6cffcf8b32c43e7225256c4f837a86548c9",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha512,
},
"result": "867f70cf1ade02cff3752599a3a53dc4af34c7a6",
},
{
"args": {
"password": "password",
"salt": "salt",
"iterations": 1000,
"dklen": 0,
"digest": hashlib.sha512,
},
"result": ("afe6c5530785b6cc6b1c6453384731bd5ee432ee"
"549fd42fb6695779ad8a1c5bf59de69c48f774ef"
"c4007d5298f9033c0241d5ab69305e7b64eceeb8d"
"834cfec"),
},
# Check leading zeros are not stripped (#17481)
{
"args": {
"password": b'\xba',
"salt": "salt",
"iterations": 1,
"dklen": 20,
"digest": hashlib.sha1,
},
"result": '0053d3b91a7f1e54effebd6d68771e8a6e0b2c5b',
},
]
def test_public_vectors(self):
for vector in self.rfc_vectors:
result = pbkdf2(**vector['args'])
self.assertEqual(binascii.hexlify(result).decode('ascii'),
vector['result'])
def test_regression_vectors(self):
for vector in self.regression_vectors:
result = pbkdf2(**vector['args'])
self.assertEqual(binascii.hexlify(result).decode('ascii'),
vector['result'])
| bsd-3-clause |
hmronline/home-assistant | tests/components/test_history.py | 4 | 7481 | """The tests the History component."""
# pylint: disable=protected-access,too-many-public-methods
from datetime import timedelta
import unittest
from unittest.mock import patch, sentinel
import homeassistant.core as ha
import homeassistant.util.dt as dt_util
from homeassistant.components import history, recorder
from tests.common import (
mock_http_component, mock_state_change_event, get_test_home_assistant)
class TestComponentHistory(unittest.TestCase):
    """Test History component."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant(1)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    def init_recorder(self):
        """Initialize the recorder against an in-memory SQLite database."""
        db_uri = 'sqlite://'
        # Patch the config-path lookup so the recorder resolves its database
        # location to the in-memory URI rather than a real file path.
        with patch('homeassistant.core.Config.path', return_value=db_uri):
            recorder.setup(self.hass, config={
                "recorder": {
                    "db_url": db_uri}})
        self.hass.start()
        recorder._INSTANCE.block_till_db_ready()
        self.wait_recording_done()
    def wait_recording_done(self):
        """Block till recording is done."""
        self.hass.pool.block_till_done()
        recorder._INSTANCE.block_till_done()
    def test_setup(self):
        """Test setup method of history."""
        mock_http_component(self.hass)
        self.assertTrue(history.setup(self.hass, {}))
    def test_last_5_states(self):
        """Test retrieving the last 5 states."""
        self.init_recorder()
        states = []
        entity_id = 'test.last_5_states'
        # Write 7 states but only remember the final 5 (i > 1); those are
        # what last_5_states should return, newest first.
        for i in range(7):
            self.hass.states.set(entity_id, "State {}".format(i))
            self.wait_recording_done()
            if i > 1:
                states.append(self.hass.states.get(entity_id))
        self.assertEqual(
            list(reversed(states)), history.last_5_states(entity_id))
    def test_get_states(self):
        """Test getting states at a specific point in time."""
        self.init_recorder()
        states = []
        now = dt_util.utcnow()
        # First batch recorded at `now`: these are the expected results.
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=now):
            for i in range(5):
                state = ha.State(
                    'test.point_in_time_{}'.format(i % 5),
                    "State {}".format(i),
                    {'attribute_test': i})
                mock_state_change_event(self.hass, state)
                states.append(state)
            self.wait_recording_done()
        future = now + timedelta(seconds=1)
        # Second batch recorded at `future`: must NOT show up when querying
        # states "before" that same point.
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=future):
            for i in range(5):
                state = ha.State(
                    'test.point_in_time_{}'.format(i % 5),
                    "State {}".format(i),
                    {'attribute_test': i})
                mock_state_change_event(self.hass, state)
            self.wait_recording_done()
        # Get states returns everything before POINT
        self.assertEqual(states,
                         sorted(history.get_states(future),
                                key=lambda state: state.entity_id))
        # Test get_state here because we have a DB setup
        self.assertEqual(
            states[0], history.get_state(future, states[0].entity_id))
    def test_state_changes_during_period(self):
        """Test state change during period."""
        self.init_recorder()
        entity_id = 'media_player.test'
        def set_state(state):
            # Record a state change and return the stored State object.
            self.hass.states.set(entity_id, state)
            self.wait_recording_done()
            return self.hass.states.get(entity_id)
        start = dt_util.utcnow()
        point = start + timedelta(seconds=1)
        end = point + timedelta(seconds=1)
        # Changes at `start` and at `end` fall outside [start, end) and
        # must be excluded; only the changes at `point` are expected.
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=start):
            set_state('idle')
            set_state('YouTube')
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=point):
            states = [
                set_state('idle'),
                set_state('Netflix'),
                set_state('Plex'),
                set_state('YouTube'),
            ]
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=end):
            set_state('Netflix')
            set_state('Plex')
        hist = history.state_changes_during_period(start, end, entity_id)
        self.assertEqual(states, hist[entity_id])
    def test_get_significant_states(self):
        """Test that only significant states are returned.

        We inject a bunch of state updates from media player, zone and
        thermostat. We should get back every thermostat change that
        includes an attribute change, but only the state updates for
        media player (attribute changes are not significant and not returned).
        """
        self.init_recorder()
        mp = 'media_player.test'
        therm = 'thermostat.test'
        zone = 'zone.home'
        script_nc = 'script.cannot_cancel_this_one'
        script_c = 'script.can_cancel_this_one'
        def set_state(entity_id, state, **kwargs):
            # Record a state change and return the stored State object.
            self.hass.states.set(entity_id, state, **kwargs)
            self.wait_recording_done()
            return self.hass.states.get(entity_id)
        zero = dt_util.utcnow()
        one = zero + timedelta(seconds=1)
        two = one + timedelta(seconds=1)
        three = two + timedelta(seconds=1)
        four = three + timedelta(seconds=1)
        # `states` holds exactly the updates we expect back from
        # get_significant_states, grouped by entity_id.
        states = {therm: [], mp: [], script_c: []}
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=one):
            states[mp].append(
                set_state(mp, 'idle',
                          attributes={'media_title': str(sentinel.mt1)}))
            states[mp].append(
                set_state(mp, 'YouTube',
                          attributes={'media_title': str(sentinel.mt2)}))
            states[therm].append(
                set_state(therm, 20, attributes={'current_temperature': 19.5}))
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=two):
            # This state will be skipped only different in time
            set_state(mp, 'YouTube',
                      attributes={'media_title': str(sentinel.mt3)})
            # This state will be skipped because domain blacklisted
            set_state(zone, 'zoning')
            set_state(script_nc, 'off')
            states[script_c].append(
                set_state(script_c, 'off', attributes={'can_cancel': True}))
            states[therm].append(
                set_state(therm, 21, attributes={'current_temperature': 19.8}))
        with patch('homeassistant.components.recorder.dt_util.utcnow',
                   return_value=three):
            states[mp].append(
                set_state(mp, 'Netflix',
                          attributes={'media_title': str(sentinel.mt4)}))
            # Attributes changed even though state is the same
            states[therm].append(
                set_state(therm, 21, attributes={'current_temperature': 20}))
        hist = history.get_significant_states(zero, four)
        assert states == hist
| mit |
punchagan/zulip | zerver/lib/events.py | 1 | 55544 | # See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
import copy
from typing import Any, Callable, Collection, Dict, Iterable, Optional, Sequence, Set
from django.conf import settings
from django.utils.translation import gettext as _
from version import API_FEATURE_LEVEL, ZULIP_VERSION
from zerver.lib.actions import (
default_stream_groups_to_dicts_sorted,
do_get_streams,
gather_subscriptions_helper,
get_available_notification_sounds,
get_default_streams_for_realm,
get_owned_bot_dicts,
get_web_public_streams,
get_web_public_subs,
streams_to_dicts_sorted,
)
from zerver.lib.alert_words import user_alert_words
from zerver.lib.avatar import avatar_url
from zerver.lib.bot_config import load_bot_config_template
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.integrations import EMBEDDED_BOTS, WEBHOOK_INTEGRATIONS
from zerver.lib.message import (
aggregate_unread_data,
apply_unread_message_event,
extract_unread_data_from_um_rows,
get_raw_unread_data,
get_recent_conversations_recipient_id,
get_recent_private_conversations,
get_starred_message_ids,
remove_message_id_from_unread_mgs,
)
from zerver.lib.narrow import check_supported_events_narrow_filter, read_stop_words
from zerver.lib.presence import get_presence_for_user, get_presences_for_realm
from zerver.lib.push_notifications import push_notifications_enabled
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_source, get_realm_logo_url
from zerver.lib.request import JsonableError
from zerver.lib.soft_deactivation import reactivate_user_if_soft_deactivated
from zerver.lib.stream_subscription import handle_stream_notifications_compatibility
from zerver.lib.topic import TOPIC_NAME
from zerver.lib.topic_mutes import get_topic_mutes
from zerver.lib.user_groups import user_groups_in_realm_serialized
from zerver.lib.user_mutes import get_user_mutes
from zerver.lib.user_status import get_user_info_dict
from zerver.lib.users import get_cross_realm_dicts, get_raw_user_data, is_administrator_role
from zerver.models import (
MAX_MESSAGE_LENGTH,
MAX_TOPIC_NAME_LENGTH,
Client,
CustomProfileField,
Message,
Realm,
Stream,
UserMessage,
UserProfile,
custom_profile_fields_for_realm,
get_default_stream_groups,
get_realm_domains,
get_realm_playgrounds,
linkifiers_for_realm,
realm_filters_for_realm,
)
from zerver.tornado.django_api import get_user_events, request_event_queue
from zproject.backends import email_auth_enabled, password_auth_enabled
class RestartEventException(Exception):
    """
    Raised by apply_events when it encounters a server "restart" event,
    which cannot be applied incrementally to the client state.
    """
def add_realm_logo_fields(state: Dict[str, Any], realm: Realm) -> None:
    """Populate the realm logo URL/source fields (day and night variants)
    plus the logo upload size limit into `state`."""
    for night, prefix in ((False, "realm_logo"), (True, "realm_night_logo")):
        state[f"{prefix}_url"] = get_realm_logo_url(realm, night=night)
        state[f"{prefix}_source"] = get_realm_logo_source(realm, night=night)
    state["max_logo_file_size"] = settings.MAX_LOGO_FILE_SIZE
def always_want(msg_type: str) -> bool:
    """
    Stand-in predicate used by fetch_initial_state_data when the caller
    passes event_types=None, i.e. data for every event type is wanted.
    Defined at module level (rather than inline) to make mocking easier.
    """
    return True
def fetch_initial_state_data(
    user_profile: Optional[UserProfile],
    *,
    realm: Optional[Realm] = None,
    event_types: Optional[Iterable[str]] = None,
    queue_id: Optional[str] = "",
    client_gravatar: bool = False,
    user_avatar_url_field_optional: bool = False,
    slim_presence: bool = False,
    include_subscribers: bool = True,
    include_streams: bool = True,
) -> Dict[str, Any]:
    """When `event_types` is None, fetches the core data powering the
    webapp's `page_params` and `/api/v1/register` (for mobile/terminal
    apps). Can also fetch a subset as determined by `event_types`.
    The user_profile=None code path is used for logged-out public
    access to streams with is_web_public=True.
    Whenever you add new code to this function, you should also add
    corresponding events for changes in the data structures and new
    code to apply_events (and add a test in test_events.py).

    Arguments:
        user_profile: the requesting user, or None for the logged-out
            (web-public) code path.
        realm: defaults to user_profile.realm when None.
        event_types: when given, only state for these event types is
            computed (via the local `want` predicate).
        queue_id: echoed back to the caller in the returned state.
        client_gravatar, user_avatar_url_field_optional: forwarded to
            get_raw_user_data for the "realm_user" section.
        slim_presence: forwarded to get_presences_for_realm.
        include_subscribers: forwarded to gather_subscriptions_helper.
        include_streams: gates computing the "streams" section.
    """
    if realm is None:
        assert user_profile is not None
        realm = user_profile.realm
    state: Dict[str, Any] = {"queue_id": queue_id}
    if event_types is None:
        # always_want returns True for every type, so all sections run.
        want: Callable[[str], bool] = always_want
    else:
        want = set(event_types).__contains__
    # Show the version info unconditionally.
    state["zulip_version"] = ZULIP_VERSION
    state["zulip_feature_level"] = API_FEATURE_LEVEL
    if want("alert_words"):
        state["alert_words"] = [] if user_profile is None else user_alert_words(user_profile)
    if want("custom_profile_fields"):
        fields = custom_profile_fields_for_realm(realm.id)
        state["custom_profile_fields"] = [f.as_dict() for f in fields]
        state["custom_profile_field_types"] = {
            item[4]: {"id": item[0], "name": str(item[1])}
            for item in CustomProfileField.ALL_FIELD_TYPES
        }
    if want("hotspots"):
        # Even if we offered special hotspots for guests without an
        # account, we'd maybe need to store their state using cookies
        # or local storage, rather than in the database.
        state["hotspots"] = [] if user_profile is None else get_next_hotspots(user_profile)
    if want("message"):
        # Since the introduction of `anchor="latest"` in the API,
        # `max_message_id` is primarily used for generating `local_id`
        # values that are higher than this. We likely can eventually
        # remove this parameter from the API.
        user_messages = []
        if user_profile is not None:
            user_messages = (
                UserMessage.objects.filter(user_profile=user_profile)
                .order_by("-message_id")
                .values("message_id")[:1]
            )
        if user_messages:
            state["max_message_id"] = user_messages[0]["message_id"]
        else:
            state["max_message_id"] = -1
    if want("muted_topics"):
        state["muted_topics"] = [] if user_profile is None else get_topic_mutes(user_profile)
    if want("muted_users"):
        state["muted_users"] = [] if user_profile is None else get_user_mutes(user_profile)
    if want("presence"):
        state["presences"] = (
            {} if user_profile is None else get_presences_for_realm(realm, slim_presence)
        )
    if want("realm"):
        # Bulk-copy every realm setting declared in Realm.property_types
        # under a "realm_"-prefixed key.
        for property_name in Realm.property_types:
            state["realm_" + property_name] = getattr(realm, property_name)
        # Most state is handled via the property_types framework;
        # these manual entries are for those realm settings that don't
        # fit into that framework.
        state["realm_authentication_methods"] = realm.authentication_methods_dict()
        # We pretend these features are disabled because guests can't
        # access them. In the future, we may want to move this logic
        # to the frontends, so that we can correctly display what
        # these fields are in the settings.
        state["realm_allow_message_editing"] = (
            False if user_profile is None else realm.allow_message_editing
        )
        state["realm_allow_community_topic_editing"] = (
            False if user_profile is None else realm.allow_community_topic_editing
        )
        state["realm_allow_message_deleting"] = (
            False if user_profile is None else realm.allow_message_deleting
        )
        state["realm_message_content_edit_limit_seconds"] = realm.message_content_edit_limit_seconds
        state[
            "realm_message_content_delete_limit_seconds"
        ] = realm.message_content_delete_limit_seconds
        state[
            "realm_community_topic_editing_limit_seconds"
        ] = Realm.DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS
        # This setting determines whether to send presence and also
        # whether to display the users list in the right sidebar; we
        # want both behaviors for logged-out users. We may in the
        # future choose to move this logic to the frontend.
        state["realm_presence_disabled"] = True if user_profile is None else realm.presence_disabled
        state["realm_icon_url"] = realm_icon_url(realm)
        state["realm_icon_source"] = realm.icon_source
        state["max_icon_file_size"] = settings.MAX_ICON_FILE_SIZE
        add_realm_logo_fields(state, realm)
        state["realm_bot_domain"] = realm.get_bot_domain()
        state["realm_uri"] = realm.uri
        state["realm_available_video_chat_providers"] = realm.VIDEO_CHAT_PROVIDERS
        state["settings_send_digest_emails"] = settings.SEND_DIGEST_EMAILS
        state["realm_digest_emails_enabled"] = (
            realm.digest_emails_enabled and settings.SEND_DIGEST_EMAILS
        )
        state["realm_is_zephyr_mirror_realm"] = realm.is_zephyr_mirror_realm
        state["realm_email_auth_enabled"] = email_auth_enabled(realm)
        state["realm_password_auth_enabled"] = password_auth_enabled(realm)
        state["realm_push_notifications_enabled"] = push_notifications_enabled()
        state["realm_upload_quota"] = realm.upload_quota_bytes()
        state["realm_plan_type"] = realm.plan_type
        state["zulip_plan_is_not_limited"] = realm.plan_type != Realm.LIMITED
        state["upgrade_text_for_wide_organization_logo"] = str(Realm.UPGRADE_TEXT_STANDARD)
        state["realm_default_external_accounts"] = DEFAULT_EXTERNAL_ACCOUNTS
        state["jitsi_server_url"] = settings.JITSI_SERVER_URL.rstrip("/")
        state["development_environment"] = settings.DEVELOPMENT
        state["server_generation"] = settings.SERVER_GENERATION
        state["password_min_length"] = settings.PASSWORD_MIN_LENGTH
        state["password_min_guesses"] = settings.PASSWORD_MIN_GUESSES
        state["max_file_upload_size_mib"] = settings.MAX_FILE_UPLOAD_SIZE
        state["max_avatar_file_size_mib"] = settings.MAX_AVATAR_FILE_SIZE
        state["server_inline_image_preview"] = settings.INLINE_IMAGE_PREVIEW
        state["server_inline_url_embed_preview"] = settings.INLINE_URL_EMBED_PREVIEW
        state["server_avatar_changes_disabled"] = settings.AVATAR_CHANGES_DISABLED
        state["server_name_changes_disabled"] = settings.NAME_CHANGES_DISABLED
        state["giphy_rating_options"] = realm.GIPHY_RATING_OPTIONS
        # -1 signals "no stream configured" (or a deactivated one) to clients.
        if realm.notifications_stream and not realm.notifications_stream.deactivated:
            notifications_stream = realm.notifications_stream
            state["realm_notifications_stream_id"] = notifications_stream.id
        else:
            state["realm_notifications_stream_id"] = -1
        signup_notifications_stream = realm.get_signup_notifications_stream()
        if signup_notifications_stream:
            state["realm_signup_notifications_stream_id"] = signup_notifications_stream.id
        else:
            state["realm_signup_notifications_stream_id"] = -1
        state["max_stream_name_length"] = Stream.MAX_NAME_LENGTH
        state["max_stream_description_length"] = Stream.MAX_DESCRIPTION_LENGTH
        state["max_topic_length"] = MAX_TOPIC_NAME_LENGTH
        state["max_message_length"] = MAX_MESSAGE_LENGTH
    if want("realm_domains"):
        state["realm_domains"] = get_realm_domains(realm)
    if want("realm_emoji"):
        state["realm_emoji"] = realm.get_emoji()
    if want("realm_linkifiers"):
        state["realm_linkifiers"] = linkifiers_for_realm(realm.id)
    # Backwards compatibility code.
    if want("realm_filters"):
        state["realm_filters"] = realm_filters_for_realm(realm.id)
    if want("realm_playgrounds"):
        state["realm_playgrounds"] = get_realm_playgrounds(realm)
    if want("realm_user_groups"):
        state["realm_user_groups"] = user_groups_in_realm_serialized(realm)
    if user_profile is not None:
        settings_user = user_profile
    else:
        # When UserProfile=None, we want to serve the values for various
        # settings as the defaults. Instead of copying the default values
        # from models.py here, we access these default values from a
        # temporary UserProfile object that will not be saved to the database.
        #
        # We also can set various fields to avoid duplicating code
        # unnecessarily.
        settings_user = UserProfile(
            full_name="Anonymous User",
            email="username@example.com",
            delivery_email="username@example.com",
            realm=realm,
            # We tag logged-out users as guests because most guest
            # restrictions apply to these users as well, and it lets
            # us avoid unnecessary conditionals.
            role=UserProfile.ROLE_GUEST,
            avatar_source=UserProfile.AVATAR_FROM_GRAVATAR,
            # ID=0 is not used in real Zulip databases, ensuring this is unique.
            id=0,
        )
    if want("realm_user"):
        state["raw_users"] = get_raw_user_data(
            realm,
            user_profile,
            client_gravatar=client_gravatar,
            user_avatar_url_field_optional=user_avatar_url_field_optional,
        )
        state["cross_realm_bots"] = list(get_cross_realm_dicts())
        # For the user's own avatar URL, we force
        # client_gravatar=False, since that saves some unnecessary
        # client-side code for handling medium-size avatars. See #8253
        # for details.
        state["avatar_source"] = settings_user.avatar_source
        state["avatar_url_medium"] = avatar_url(
            settings_user,
            medium=True,
            client_gravatar=False,
        )
        state["avatar_url"] = avatar_url(
            settings_user,
            medium=False,
            client_gravatar=False,
        )
        state["can_create_streams"] = settings_user.can_create_streams()
        state["can_subscribe_other_users"] = settings_user.can_subscribe_other_users()
        state["can_invite_others_to_realm"] = settings_user.can_invite_others_to_realm()
        state["is_admin"] = settings_user.is_realm_admin
        state["is_owner"] = settings_user.is_realm_owner
        state["is_moderator"] = settings_user.is_moderator
        state["is_guest"] = settings_user.is_guest
        state["user_id"] = settings_user.id
        state["enter_sends"] = settings_user.enter_sends
        state["email"] = settings_user.email
        state["delivery_email"] = settings_user.delivery_email
        state["full_name"] = settings_user.full_name
    if want("realm_bot"):
        state["realm_bots"] = [] if user_profile is None else get_owned_bot_dicts(user_profile)
    # This does not yet have an apply_event counterpart, since currently,
    # new entries for EMBEDDED_BOTS can only be added directly in the codebase.
    if want("realm_embedded_bots"):
        realm_embedded_bots = []
        for bot in EMBEDDED_BOTS:
            realm_embedded_bots.append(
                {"name": bot.name, "config": load_bot_config_template(bot.name)}
            )
        state["realm_embedded_bots"] = realm_embedded_bots
    # This does not have an apply_events counterpart either since
    # this data is mostly static.
    if want("realm_incoming_webhook_bots"):
        realm_incoming_webhook_bots = []
        for integration in WEBHOOK_INTEGRATIONS:
            realm_incoming_webhook_bots.append(
                {
                    "name": integration.name,
                    "config": {c[1]: c[0] for c in integration.config_options},
                }
            )
        state["realm_incoming_webhook_bots"] = realm_incoming_webhook_bots
    if want("recent_private_conversations"):
        # A data structure containing records of this form:
        #
        # [{'max_message_id': 700175, 'user_ids': [801]}]
        #
        # for all recent private message conversations, ordered by the
        # highest message ID in the conversation. The user_ids list
        # is the list of users other than the current user in the
        # private message conversation (so it is [] for PMs to self).
        # Note that raw_recent_private_conversations is an
        # intermediate form as a dictionary keyed by recipient_id,
        # which is more efficient to update, and is rewritten to the
        # final format in post_process_state.
        state["raw_recent_private_conversations"] = (
            {} if user_profile is None else get_recent_private_conversations(user_profile)
        )
    if want("subscription"):
        if user_profile is not None:
            sub_info = gather_subscriptions_helper(
                user_profile,
                include_subscribers=include_subscribers,
            )
        else:
            sub_info = get_web_public_subs(realm)
        state["subscriptions"] = sub_info.subscriptions
        state["unsubscribed"] = sub_info.unsubscribed
        state["never_subscribed"] = sub_info.never_subscribed
    if want("update_message_flags") and want("message"):
        # Keeping unread_msgs updated requires both message flag updates and
        # message updates. This is due to the fact that new messages will not
        # generate a flag update so we need to use the flags field in the
        # message event.
        if user_profile is not None:
            state["raw_unread_msgs"] = get_raw_unread_data(user_profile)
        else:
            # For logged-out visitors, we treat all messages as read;
            # calling this helper lets us return empty objects in the
            # appropriate format.
            state["raw_unread_msgs"] = extract_unread_data_from_um_rows([], user_profile)
    if want("starred_messages"):
        state["starred_messages"] = (
            [] if user_profile is None else get_starred_message_ids(user_profile)
        )
    if want("stream"):
        if include_streams:
            # The webapp doesn't use the data from here; instead,
            # it uses data from state["subscriptions"] and other
            # places.
            if user_profile is not None:
                state["streams"] = do_get_streams(
                    user_profile, include_all_active=user_profile.is_realm_admin
                )
            else:
                # TODO: This line isn't used by the webapp because it
                # gets these data via the `subscriptions` key; it will
                # be used when the mobile apps support logged-out
                # access.
                state["streams"] = get_web_public_streams(realm)  # nocoverage
    if want("default_streams"):
        if settings_user.is_guest:
            # Guest users and logged-out users don't have access to
            # all default streams, so we pretend the organization
            # doesn't have any.
            state["realm_default_streams"] = []
        else:
            state["realm_default_streams"] = streams_to_dicts_sorted(
                get_default_streams_for_realm(realm.id)
            )
    if want("default_stream_groups"):
        if settings_user.is_guest:
            state["realm_default_stream_groups"] = []
        else:
            state["realm_default_stream_groups"] = default_stream_groups_to_dicts_sorted(
                get_default_stream_groups(realm)
            )
    if want("stop_words"):
        state["stop_words"] = read_stop_words()
    if want("update_display_settings"):
        for prop in UserProfile.property_types:
            state[prop] = getattr(settings_user, prop)
        state["emojiset_choices"] = UserProfile.emojiset_choices()
    if want("update_global_notifications"):
        for notification in UserProfile.notification_setting_types:
            state[notification] = getattr(settings_user, notification)
        state["available_notification_sounds"] = get_available_notification_sounds()
    if want("user_status"):
        # We require creating an account to access statuses.
        state["user_status"] = {} if user_profile is None else get_user_info_dict(realm_id=realm.id)
    if want("video_calls"):
        state["has_zoom_token"] = settings_user.zoom_token is not None
    if want("giphy"):
        # Normally, it would be a nasty security bug to send a
        # server's API key to end users. However, GIPHY's API key
        # security model is precisely to do that; every service
        # publishes its API key (and GIPHY's client-side JS libraries
        # require the API key to work). This security model makes
        # sense because GIPHY API keys are all essentially equivalent
        # in letting one search for GIFs; GIPHY only requires API keys
        # to exist at all so that they can deactivate them in cases of
        # abuse.
        state["giphy_api_key"] = settings.GIPHY_API_KEY if settings.GIPHY_API_KEY else ""
    return state
def apply_events(
    user_profile: UserProfile,
    *,
    state: Dict[str, Any],
    events: Iterable[Dict[str, Any]],
    fetch_event_types: Optional[Collection[str]],
    client_gravatar: bool,
    slim_presence: bool,
    include_subscribers: bool,
) -> None:
    """Apply each queued event to `state`, in order, via apply_event.

    Raises RestartEventException when a server "restart" event is seen,
    since that cannot be applied incrementally.
    """
    for event in events:
        event_type = event["type"]
        if event_type == "restart":
            raise RestartEventException()
        if fetch_event_types is not None and event_type not in fetch_event_types:
            # TODO: continuing here is not, most precisely, correct.
            # In theory, an event of one type, e.g. `realm_user`,
            # could modify state that doesn't come from that
            # `fetch_event_types` value, e.g. the `our_person` part of
            # that code path. But it should be extremely rare, and
            # fixing that will require a nontrivial refactor of
            # `apply_event`. For now, be careful in your choice of
            # `fetch_event_types`.
            continue
        apply_event(
            user_profile,
            state=state,
            event=event,
            client_gravatar=client_gravatar,
            slim_presence=slim_presence,
            include_subscribers=include_subscribers,
        )
def apply_event(
    user_profile: UserProfile,
    *,
    state: Dict[str, Any],
    event: Dict[str, Any],
    client_gravatar: bool,
    slim_presence: bool,
    include_subscribers: bool,
) -> None:
    """Apply a single real-time event to the ``/register`` response ``state``.

    This is the live-update mirror of the initial-state fetch: for every
    event type that can arrive while a client is registering, we mutate the
    initial-state payload in place so the client starts fully consistent.
    Keys are only updated when present in ``state`` (i.e. when the client
    actually fetched that part of the state).

    Raises AssertionError for unexpected event types/ops.
    """
    if event["type"] == "message":
        state["max_message_id"] = max(state["max_message_id"], event["message"]["id"])
        if "raw_unread_msgs" in state:
            apply_unread_message_event(
                user_profile,
                state["raw_unread_msgs"],
                event["message"],
                event["flags"],
            )
        if event["message"]["type"] != "stream":
            if "raw_recent_private_conversations" in state:
                # Handle maintaining the recent_private_conversations data structure.
                conversations = state["raw_recent_private_conversations"]
                recipient_id = get_recent_conversations_recipient_id(
                    user_profile, event["message"]["recipient_id"], event["message"]["sender_id"]
                )

                if recipient_id not in conversations:
                    conversations[recipient_id] = dict(
                        user_ids=sorted(
                            user_dict["id"]
                            for user_dict in event["message"]["display_recipient"]
                            if user_dict["id"] != user_profile.id
                        ),
                    )
                conversations[recipient_id]["max_message_id"] = event["message"]["id"]
            return

        # Below, we handle maintaining first_message_id.
        for sub_dict in state.get("subscriptions", []):
            if event["message"]["stream_id"] == sub_dict["stream_id"]:
                if sub_dict["first_message_id"] is None:
                    sub_dict["first_message_id"] = event["message"]["id"]
        for stream_dict in state.get("streams", []):
            if event["message"]["stream_id"] == stream_dict["stream_id"]:
                if stream_dict["first_message_id"] is None:
                    stream_dict["first_message_id"] = event["message"]["id"]

    elif event["type"] == "hotspots":
        state["hotspots"] = event["hotspots"]
    elif event["type"] == "custom_profile_fields":
        state["custom_profile_fields"] = event["fields"]
        custom_profile_field_ids = {field["id"] for field in state["custom_profile_fields"]}

        if "raw_users" in state:
            # Drop any cached per-user values for fields that no longer exist.
            for user_dict in state["raw_users"].values():
                if "profile_data" not in user_dict:
                    continue
                profile_data = user_dict["profile_data"]
                for (field_id, field_data) in list(profile_data.items()):
                    if int(field_id) not in custom_profile_field_ids:
                        del profile_data[field_id]
    elif event["type"] == "realm_user":
        person = event["person"]
        person_user_id = person["user_id"]

        if event["op"] == "add":
            # Deep-copy so our mutations below don't leak into the event
            # object shared with other queues.
            person = copy.deepcopy(person)
            if client_gravatar:
                # Guard against a null avatar_url; only gravatar URLs are
                # elided for client_gravatar clients.
                if person["avatar_url"] and person["avatar_url"].startswith(
                    "https://secure.gravatar.com"
                ):
                    person["avatar_url"] = None
            person["is_active"] = True
            if not person["is_bot"]:
                person["profile_data"] = {}
            state["raw_users"][person_user_id] = person
        elif event["op"] == "remove":
            state["raw_users"][person_user_id]["is_active"] = False
        elif event["op"] == "update":
            is_me = person_user_id == user_profile.id

            if is_me:
                if "avatar_url" in person and "avatar_url" in state:
                    state["avatar_source"] = person["avatar_source"]
                    state["avatar_url"] = person["avatar_url"]
                    state["avatar_url_medium"] = person["avatar_url_medium"]

                if "role" in person:
                    state["is_admin"] = is_administrator_role(person["role"])
                    state["is_owner"] = person["role"] == UserProfile.ROLE_REALM_OWNER
                    state["is_moderator"] = person["role"] == UserProfile.ROLE_MODERATOR
                    state["is_guest"] = person["role"] == UserProfile.ROLE_GUEST
                    # Recompute properties based on is_admin/is_guest
                    state["can_create_streams"] = user_profile.can_create_streams()
                    state["can_subscribe_other_users"] = user_profile.can_subscribe_other_users()
                    state["can_invite_others_to_realm"] = user_profile.can_invite_others_to_realm()

                    # TODO: Probably rather than writing the perfect
                    # live-update code for the case of racing with the
                    # current user changing roles, we should just do a
                    # full refetch.
                    if "never_subscribed" in state:
                        sub_info = gather_subscriptions_helper(
                            user_profile,
                            include_subscribers=include_subscribers,
                        )
                        state["subscriptions"] = sub_info.subscriptions
                        state["unsubscribed"] = sub_info.unsubscribed
                        state["never_subscribed"] = sub_info.never_subscribed

                    if "streams" in state:
                        state["streams"] = do_get_streams(
                            user_profile, include_all_active=user_profile.is_realm_admin
                        )

                for field in ["delivery_email", "email", "full_name"]:
                    if field in person and field in state:
                        state[field] = person[field]

                # In the unlikely event that the current user
                # just changed to/from being an admin, we need
                # to add/remove the data on all bots in the
                # realm. This is ugly and probably better
                # solved by removing the all-realm-bots data
                # given to admin users from this flow.
                if "role" in person and "realm_bots" in state:
                    prev_state = state["raw_users"][user_profile.id]
                    was_admin = prev_state["is_admin"]
                    now_admin = is_administrator_role(person["role"])

                    if was_admin and not now_admin:
                        state["realm_bots"] = []
                    if not was_admin and now_admin:
                        state["realm_bots"] = get_owned_bot_dicts(user_profile)

            if client_gravatar and "avatar_url" in person:
                # Respect the client_gravatar setting in the `users` data.
                # Guard against a null avatar_url before inspecting it.
                if person["avatar_url"] and person["avatar_url"].startswith(
                    "https://secure.gravatar.com"
                ):
                    person["avatar_url"] = None
                    person["avatar_url_medium"] = None

            if person_user_id in state["raw_users"]:
                p = state["raw_users"][person_user_id]
                for field in p:
                    if field in person:
                        p[field] = person[field]

                if "role" in person:
                    # Keep all derived role booleans consistent with the
                    # is_me branch above; previously is_moderator was
                    # missing here, leaving stale data for other users.
                    p["is_admin"] = is_administrator_role(person["role"])
                    p["is_owner"] = person["role"] == UserProfile.ROLE_REALM_OWNER
                    p["is_moderator"] = person["role"] == UserProfile.ROLE_MODERATOR
                    p["is_guest"] = person["role"] == UserProfile.ROLE_GUEST

                if "custom_profile_field" in person:
                    custom_field_id = person["custom_profile_field"]["id"]
                    custom_field_new_value = person["custom_profile_field"]["value"]
                    if "rendered_value" in person["custom_profile_field"]:
                        p["profile_data"][str(custom_field_id)] = {
                            "value": custom_field_new_value,
                            "rendered_value": person["custom_profile_field"]["rendered_value"],
                        }
                    else:
                        p["profile_data"][str(custom_field_id)] = {
                            "value": custom_field_new_value,
                        }
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "realm_bot":
        if event["op"] == "add":
            state["realm_bots"].append(event["bot"])
        elif event["op"] == "remove":
            user_id = event["bot"]["user_id"]
            for bot in state["realm_bots"]:
                if bot["user_id"] == user_id:
                    bot["is_active"] = False
        elif event["op"] == "delete":
            state["realm_bots"] = [
                item for item in state["realm_bots"] if item["user_id"] != event["bot"]["user_id"]
            ]
        elif event["op"] == "update":
            for bot in state["realm_bots"]:
                if bot["user_id"] == event["bot"]["user_id"]:
                    if "owner_id" in event["bot"]:
                        bot_owner_id = event["bot"]["owner_id"]
                        bot["owner_id"] = bot_owner_id
                    else:
                        bot.update(event["bot"])
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "stream":
        if event["op"] == "create":
            for stream in event["streams"]:
                if not stream["invite_only"]:
                    stream_data = copy.deepcopy(stream)
                    if include_subscribers:
                        stream_data["subscribers"] = []

                    # We know the stream has no traffic, and this
                    # field is not present in the event.
                    #
                    # TODO: Probably this should just be added to the event.
                    stream_data["stream_weekly_traffic"] = None

                    # Add stream to never_subscribed (if not invite_only)
                    state["never_subscribed"].append(stream_data)
                if "streams" in state:
                    state["streams"].append(stream)

            if "streams" in state:
                state["streams"].sort(key=lambda elt: elt["name"])

        if event["op"] == "delete":
            deleted_stream_ids = {stream["stream_id"] for stream in event["streams"]}
            if "streams" in state:
                state["streams"] = [
                    s for s in state["streams"] if s["stream_id"] not in deleted_stream_ids
                ]
            state["never_subscribed"] = [
                stream
                for stream in state["never_subscribed"]
                if stream["stream_id"] not in deleted_stream_ids
            ]

        if event["op"] == "update":
            # For legacy reasons, we call stream data 'subscriptions' in
            # the state var here, for the benefit of the JS code.
            for obj in state["subscriptions"]:
                if obj["name"].lower() == event["name"].lower():
                    obj[event["property"]] = event["value"]
                    if event["property"] == "description":
                        obj["rendered_description"] = event["rendered_description"]
            # Also update the pure streams data
            if "streams" in state:
                for stream in state["streams"]:
                    if stream["name"].lower() == event["name"].lower():
                        prop = event["property"]
                        if prop in stream:
                            stream[prop] = event["value"]
                            if prop == "description":
                                stream["rendered_description"] = event["rendered_description"]
    elif event["type"] == "default_streams":
        state["realm_default_streams"] = event["default_streams"]
    elif event["type"] == "default_stream_groups":
        state["realm_default_stream_groups"] = event["default_stream_groups"]
    elif event["type"] == "realm":
        if event["op"] == "update":
            field = "realm_" + event["property"]
            state[field] = event["value"]

            if event["property"] == "plan_type":
                # Then there are some extra fields that also need to be set.
                state["zulip_plan_is_not_limited"] = event["value"] != Realm.LIMITED
                state["realm_upload_quota"] = event["extra_data"]["upload_quota"]

            policy_permission_dict = {
                "create_stream_policy": "can_create_streams",
                "invite_to_stream_policy": "can_subscribe_other_users",
                "invite_to_realm_policy": "can_invite_others_to_realm",
            }

            # Tricky interaction: Whether we can create streams and can subscribe other users
            # can get changed here.

            if field == "realm_waiting_period_threshold":
                for policy, permission in policy_permission_dict.items():
                    if permission in state:
                        state[permission] = user_profile.has_permission(policy)

            if event["property"] in policy_permission_dict.keys():
                if policy_permission_dict[event["property"]] in state:
                    state[policy_permission_dict[event["property"]]] = user_profile.has_permission(
                        event["property"]
                    )

        elif event["op"] == "update_dict":
            for key, value in event["data"].items():
                state["realm_" + key] = value
                # It's a bit messy, but this is where we need to
                # update the state for whether password authentication
                # is enabled on this server.
                if key == "authentication_methods":
                    state["realm_password_auth_enabled"] = value["Email"] or value["LDAP"]
                    state["realm_email_auth_enabled"] = value["Email"]
        elif event["op"] == "deactivated":
            # The realm has just been deactivated.  If our request had
            # arrived a moment later, we'd have rendered the
            # deactivation UI; if it'd been a moment sooner, we've
            # have rendered the app and then immediately got this
            # event (or actually, more likely, an auth error on GET
            # /events) and immediately reloaded into the same
            # deactivation UI. Passing achieves the same result.
            pass
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "subscription":
        if event["op"] == "add":
            added_stream_ids = {sub["stream_id"] for sub in event["subscriptions"]}
            was_added = lambda s: s["stream_id"] in added_stream_ids

            existing_stream_ids = {sub["stream_id"] for sub in state["subscriptions"]}

            # add the new subscriptions
            for sub in event["subscriptions"]:
                if sub["stream_id"] not in existing_stream_ids:
                    if "subscribers" in sub and not include_subscribers:
                        sub = copy.deepcopy(sub)
                        del sub["subscribers"]
                    state["subscriptions"].append(sub)

            # remove them from unsubscribed if they had been there
            state["unsubscribed"] = [s for s in state["unsubscribed"] if not was_added(s)]

            # remove them from never_subscribed if they had been there
            state["never_subscribed"] = [s for s in state["never_subscribed"] if not was_added(s)]

        elif event["op"] == "remove":
            removed_stream_ids = {sub["stream_id"] for sub in event["subscriptions"]}
            was_removed = lambda s: s["stream_id"] in removed_stream_ids

            # Find the subs we are affecting.
            removed_subs = list(filter(was_removed, state["subscriptions"]))

            # Remove our user from the subscribers of the removed subscriptions.
            if include_subscribers:
                for sub in removed_subs:
                    sub["subscribers"].remove(user_profile.id)

            state["unsubscribed"] += removed_subs

            # Now filter out the removed subscriptions from subscriptions.
            state["subscriptions"] = [s for s in state["subscriptions"] if not was_removed(s)]

        elif event["op"] == "update":
            for sub in state["subscriptions"]:
                if sub["stream_id"] == event["stream_id"]:
                    sub[event["property"]] = event["value"]
        elif event["op"] == "peer_add":
            if include_subscribers:
                stream_ids = set(event["stream_ids"])
                user_ids = set(event["user_ids"])

                for sub_dict in [
                    state["subscriptions"],
                    state["unsubscribed"],
                    state["never_subscribed"],
                ]:
                    for sub in sub_dict:
                        if sub["stream_id"] in stream_ids:
                            subscribers = set(sub["subscribers"]) | user_ids
                            sub["subscribers"] = sorted(list(subscribers))
        elif event["op"] == "peer_remove":
            if include_subscribers:
                stream_ids = set(event["stream_ids"])
                user_ids = set(event["user_ids"])

                for sub_dict in [
                    state["subscriptions"],
                    state["unsubscribed"],
                    state["never_subscribed"],
                ]:
                    for sub in sub_dict:
                        if sub["stream_id"] in stream_ids:
                            subscribers = set(sub["subscribers"]) - user_ids
                            sub["subscribers"] = sorted(list(subscribers))
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "presence":
        if slim_presence:
            user_key = str(event["user_id"])
        else:
            user_key = event["email"]
        state["presences"][user_key] = get_presence_for_user(event["user_id"], slim_presence)[
            user_key
        ]
    elif event["type"] == "update_message":
        # We don't return messages in /register, so we don't need to
        # do anything for content updates, but we may need to update
        # the unread_msgs data if the topic of an unread message changed.
        if "new_stream_id" in event:
            stream_dict = state["raw_unread_msgs"]["stream_dict"]
            stream_id = event["new_stream_id"]
            for message_id in event["message_ids"]:
                if message_id in stream_dict:
                    stream_dict[message_id]["stream_id"] = stream_id

        if TOPIC_NAME in event:
            stream_dict = state["raw_unread_msgs"]["stream_dict"]
            topic = event[TOPIC_NAME]
            for message_id in event["message_ids"]:
                if message_id in stream_dict:
                    stream_dict[message_id]["topic"] = topic
    elif event["type"] == "delete_message":
        if "message_id" in event:
            message_ids = [event["message_id"]]
        else:
            message_ids = event["message_ids"]  # nocoverage
        max_message = (
            Message.objects.filter(usermessage__user_profile=user_profile).order_by("-id").first()
        )
        if max_message:
            state["max_message_id"] = max_message.id
        else:
            state["max_message_id"] = -1

        if "raw_unread_msgs" in state:
            for remove_id in message_ids:
                remove_message_id_from_unread_mgs(state["raw_unread_msgs"], remove_id)

        # The remainder of this block is about maintaining recent_private_conversations
        if "raw_recent_private_conversations" not in state or event["message_type"] != "private":
            return

        recipient_id = get_recent_conversations_recipient_id(
            user_profile, event["recipient_id"], event["sender_id"]
        )

        # Ideally, we'd have test coverage for these two blocks.  To
        # do that, we'll need a test where we delete not-the-latest
        # messages or delete a private message not in
        # recent_private_conversations.
        if recipient_id not in state["raw_recent_private_conversations"]:  # nocoverage
            return

        old_max_message_id = state["raw_recent_private_conversations"][recipient_id][
            "max_message_id"
        ]
        if old_max_message_id not in message_ids:  # nocoverage
            return

        # OK, we just deleted what had been the max_message_id for
        # this recent conversation; we need to recompute that value
        # from scratch.  Definitely don't need to re-query everything,
        # but this case is likely rare enough that it's reasonable to do so.
        state["raw_recent_private_conversations"] = get_recent_private_conversations(user_profile)
    elif event["type"] == "reaction":
        # The client will get the message with the reactions directly
        pass
    elif event["type"] == "submessage":
        # The client will get submessages with their messages
        pass
    elif event["type"] == "typing":
        # Typing notification events are transient and thus ignored
        pass
    elif event["type"] == "attachment":
        # Attachment events are just for updating the "uploads" UI;
        # they are not sent directly.
        pass
    elif event["type"] == "update_message_flags":
        # We don't return messages in `/register`, so most flags we
        # can ignore, but we do need to update the unread_msgs data if
        # unread state is changed.
        if "raw_unread_msgs" in state and event["flag"] == "read" and event["op"] == "add":
            for remove_id in event["messages"]:
                remove_message_id_from_unread_mgs(state["raw_unread_msgs"], remove_id)
        if event["flag"] == "starred" and "starred_messages" in state:
            if event["op"] == "add":
                state["starred_messages"] += event["messages"]
            if event["op"] == "remove":
                state["starred_messages"] = [
                    message
                    for message in state["starred_messages"]
                    if not (message in event["messages"])
                ]
    elif event["type"] == "realm_domains":
        if event["op"] == "add":
            state["realm_domains"].append(event["realm_domain"])
        elif event["op"] == "change":
            for realm_domain in state["realm_domains"]:
                if realm_domain["domain"] == event["realm_domain"]["domain"]:
                    realm_domain["allow_subdomains"] = event["realm_domain"]["allow_subdomains"]
        elif event["op"] == "remove":
            state["realm_domains"] = [
                realm_domain
                for realm_domain in state["realm_domains"]
                if realm_domain["domain"] != event["domain"]
            ]
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "realm_emoji":
        state["realm_emoji"] = event["realm_emoji"]
    elif event["type"] == "realm_export":
        # These realm export events are only available to
        # administrators, and aren't included in page_params.
        pass
    elif event["type"] == "alert_words":
        state["alert_words"] = event["alert_words"]
    elif event["type"] == "muted_topics":
        state["muted_topics"] = event["muted_topics"]
    elif event["type"] == "muted_users":
        state["muted_users"] = event["muted_users"]
    elif event["type"] == "realm_filters":
        state["realm_filters"] = event["realm_filters"]
    elif event["type"] == "realm_linkifiers":
        state["realm_linkifiers"] = event["realm_linkifiers"]
    elif event["type"] == "realm_playgrounds":
        state["realm_playgrounds"] = event["realm_playgrounds"]
    elif event["type"] == "update_display_settings":
        assert event["setting_name"] in UserProfile.property_types
        state[event["setting_name"]] = event["setting"]
    elif event["type"] == "update_global_notifications":
        assert event["notification_name"] in UserProfile.notification_setting_types
        state[event["notification_name"]] = event["setting"]
    elif event["type"] == "invites_changed":
        pass
    elif event["type"] == "user_group":
        if event["op"] == "add":
            state["realm_user_groups"].append(event["group"])
            state["realm_user_groups"].sort(key=lambda group: group["id"])
        elif event["op"] == "update":
            for user_group in state["realm_user_groups"]:
                if user_group["id"] == event["group_id"]:
                    user_group.update(event["data"])
        elif event["op"] == "add_members":
            for user_group in state["realm_user_groups"]:
                if user_group["id"] == event["group_id"]:
                    user_group["members"].extend(event["user_ids"])
                    user_group["members"].sort()
        elif event["op"] == "remove_members":
            for user_group in state["realm_user_groups"]:
                if user_group["id"] == event["group_id"]:
                    members = set(user_group["members"])
                    user_group["members"] = list(members - set(event["user_ids"]))
                    user_group["members"].sort()
        elif event["op"] == "remove":
            state["realm_user_groups"] = [
                ug for ug in state["realm_user_groups"] if ug["id"] != event["group_id"]
            ]
        else:
            raise AssertionError("Unexpected event type {type}/{op}".format(**event))
    elif event["type"] == "user_status":
        user_id_str = str(event["user_id"])
        user_status = state["user_status"]
        away = event.get("away")
        status_text = event.get("status_text")

        if user_id_str not in user_status:
            user_status[user_id_str] = {}

        if away is not None:
            if away:
                user_status[user_id_str]["away"] = True
            else:
                user_status[user_id_str].pop("away", None)

        if status_text is not None:
            if status_text == "":
                user_status[user_id_str].pop("status_text", None)
            else:
                user_status[user_id_str]["status_text"] = status_text

        # Drop empty per-user entries so the payload stays compact.
        if not user_status[user_id_str]:
            user_status.pop(user_id_str, None)

        state["user_status"] = user_status
    elif event["type"] == "has_zoom_token":
        state["has_zoom_token"] = event["value"]
    else:
        raise AssertionError("Unexpected event type {}".format(event["type"]))
def do_events_register(
    user_profile: UserProfile,
    user_client: Client,
    apply_markdown: bool = True,
    client_gravatar: bool = False,
    slim_presence: bool = False,
    event_types: Optional[Sequence[str]] = None,
    queue_lifespan_secs: int = 0,
    all_public_streams: bool = False,
    include_subscribers: bool = True,
    include_streams: bool = True,
    client_capabilities: Optional[Dict[str, bool]] = None,
    narrow: Collection[Sequence[str]] = (),
    fetch_event_types: Optional[Collection[str]] = None,
) -> Dict[str, Any]:
    """Register a Tornado event queue and return a consistent initial state.

    Allocates the queue, fetches the initial state, and then applies any
    events that arrived while the initial fetch was running, so the
    returned payload plus the queue's future events form a consistent
    stream.  Retries from scratch if Tornado restarted mid-fetch.
    """
    # Technically we don't need to check this here because
    # build_narrow_filter will check it, but it's nicer from an error
    # handling perspective to do it before contacting Tornado
    check_supported_events_narrow_filter(narrow)

    # Avoid a mutable default argument; normalize the None sentinel here.
    # (narrow's default is an immutable tuple for the same reason.)
    if client_capabilities is None:
        client_capabilities = {}

    notification_settings_null = client_capabilities.get("notification_settings_null", False)
    bulk_message_deletion = client_capabilities.get("bulk_message_deletion", False)
    user_avatar_url_field_optional = client_capabilities.get(
        "user_avatar_url_field_optional", False
    )
    stream_typing_notifications = client_capabilities.get("stream_typing_notifications", False)

    if user_profile.realm.email_address_visibility != Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE:
        # If real email addresses are not available to the user, their
        # clients cannot compute gravatars, so we force-set it to false.
        client_gravatar = False

    if fetch_event_types is not None:
        event_types_set: Optional[Set[str]] = set(fetch_event_types)
    elif event_types is not None:
        event_types_set = set(event_types)
    else:
        event_types_set = None

    # Fill up the UserMessage rows if a soft-deactivated user has returned
    reactivate_user_if_soft_deactivated(user_profile)

    while True:
        # Note that we pass event_types, not fetch_event_types here, since
        # that's what controls which future events are sent.
        queue_id = request_event_queue(
            user_profile,
            user_client,
            apply_markdown,
            client_gravatar,
            slim_presence,
            queue_lifespan_secs,
            event_types,
            all_public_streams,
            narrow=narrow,
            bulk_message_deletion=bulk_message_deletion,
            stream_typing_notifications=stream_typing_notifications,
        )

        if queue_id is None:
            raise JsonableError(_("Could not allocate event queue"))

        ret = fetch_initial_state_data(
            user_profile,
            event_types=event_types_set,
            queue_id=queue_id,
            client_gravatar=client_gravatar,
            user_avatar_url_field_optional=user_avatar_url_field_optional,
            slim_presence=slim_presence,
            include_subscribers=include_subscribers,
            include_streams=include_streams,
        )

        # Apply events that came in while we were fetching initial data
        events = get_user_events(user_profile, queue_id, -1)
        try:
            apply_events(
                user_profile,
                state=ret,
                events=events,
                fetch_event_types=fetch_event_types,
                client_gravatar=client_gravatar,
                slim_presence=slim_presence,
                include_subscribers=include_subscribers,
            )
        except RestartEventException:
            # This represents a rare race condition, where Tornado
            # restarted (and sent `restart` events) while we were waiting
            # for fetch_initial_state_data to return. To avoid the client
            # needing to reload shortly after loading, we recursively call
            # do_events_register here.
            continue
        else:
            break

    post_process_state(user_profile, ret, notification_settings_null)

    if len(events) > 0:
        ret["last_event_id"] = events[-1]["id"]
    else:
        ret["last_event_id"] = -1

    return ret
def post_process_state(
    user_profile: Optional[UserProfile], ret: Dict[str, Any], notification_settings_null: bool
) -> None:
    """
    Convert the intermediate "raw_*" data structures built while applying
    events into the final wire format for the /register response.

    NOTE:
    Below is an example of post-processing initial state data AFTER we
    apply events.  For large payloads like `unread_msgs`, it's helpful
    to have an intermediate data structure that is easy to manipulate
    with O(1)-type operations as we apply events.

    Then, only at the end, we put it in the form that's more appropriate
    for client.
    """
    if "raw_unread_msgs" in ret:
        ret["unread_msgs"] = aggregate_unread_data(ret["raw_unread_msgs"])
        del ret["raw_unread_msgs"]

    # See the note above; the same technique applies below.  (Previously
    # these explanations were bare string literals, which are evaluated
    # and discarded at runtime; real comments are the right tool.)
    if "raw_users" in ret:
        user_dicts = list(ret["raw_users"].values())
        user_dicts = sorted(user_dicts, key=lambda x: x["user_id"])

        ret["realm_users"] = [d for d in user_dicts if d["is_active"]]
        ret["realm_non_active_users"] = [d for d in user_dicts if not d["is_active"]]

        # Be aware that we do intentional aliasing here: the dicts in the
        # two lists above are the same objects as in user_dicts, so the
        # pops below are reflected in both lists.
        #
        # We remove the `is_active` field because it's already implied by
        # which list a user appears in; sending it to clients makes
        # clients prone to bugs where they "trust" the field but don't
        # actually update it in live updates.  It also wastes bandwidth.
        for d in user_dicts:
            d.pop("is_active")

        del ret["raw_users"]

    if "raw_recent_private_conversations" in ret:
        # Reformat recent_private_conversations to be a list of dictionaries, rather than a dict.
        ret["recent_private_conversations"] = sorted(
            (
                dict(
                    **value,
                )
                for (recipient_id, value) in ret["raw_recent_private_conversations"].items()
            ),
            key=lambda x: -x["max_message_id"],
        )
        del ret["raw_recent_private_conversations"]

    if not notification_settings_null and "subscriptions" in ret:
        for stream_dict in ret["subscriptions"] + ret["unsubscribed"]:
            handle_stream_notifications_compatibility(
                user_profile, stream_dict, notification_settings_null
            )
| apache-2.0 |
zhoulingjun/django | django/core/serializers/base.py | 273 | 7678 | """
Module for abstract serializer/unserializer base classes.
"""
from django.db import models
from django.utils import six
class SerializerDoesNotExist(KeyError):
    """Raised when a serializer with the requested name is not registered."""
class SerializationError(Exception):
    """Raised when something goes wrong during serialization."""
class DeserializationError(Exception):
    """Raised when something goes wrong during deserialization."""

    @classmethod
    def WithData(cls, original_exc, model, fk, field_value):
        """
        Factory method creating a deserialization error with a more
        explanatory message.
        """
        message = "%s: (%s:pk=%s) field_value was '%s'" % (
            original_exc, model, fk, field_value)
        return cls(message)
class ProgressBar(object):
    """
    Text progress bar written to ``output``; redrawn in place via a
    carriage return as ``update()`` is called with growing counts.
    """
    progress_width = 75

    def __init__(self, output, total_count):
        # A falsy output disables the bar entirely.
        self.output = output
        self.total_count = total_count
        self.prev_done = 0

    def update(self, count):
        """Redraw the bar for ``count`` items done, if it would change."""
        if not self.output:
            return
        percent = count * 100 // self.total_count
        filled = percent * self.progress_width // 100
        # Only redraw when at least one more cell would be filled.
        if self.prev_done >= filled:
            return
        self.prev_done = filled
        # With a single item there is nothing to overwrite, so skip the
        # carriage return.
        prefix = '' if self.total_count == 1 else '\r'
        bar = '[' + '.' * filled + ' ' * (self.progress_width - filled) + ']'
        self.output.write(prefix + bar)
        if filled == self.progress_width:
            self.output.write('\n')
        self.output.flush()
class Serializer(object):
    """
    Abstract serializer base class.

    Subclasses implement the start/end/handle_* hooks below; serialize()
    drives them in a fixed order for each object in the queryset.
    """

    # Indicates if the implemented serializer is only available for
    # internal Django use.
    internal_use_only = False
    progress_class = ProgressBar

    def serialize(self, queryset, **options):
        """
        Serialize a queryset.

        Recognized options (all popped from ``options``): ``stream`` (an
        output stream; defaults to an in-memory StringIO), ``fields`` (an
        iterable restricting which model fields are serialized),
        ``use_natural_foreign_keys``, ``use_natural_primary_keys``,
        ``progress_output`` and ``object_count`` (progress reporting).
        Remaining options stay available to subclasses via self.options.
        Returns getvalue() — the serialized text for in-memory streams,
        or None for non-seekable streams.
        """
        self.options = options

        self.stream = options.pop("stream", six.StringIO())
        self.selected_fields = options.pop("fields", None)
        self.use_natural_foreign_keys = options.pop('use_natural_foreign_keys', False)
        self.use_natural_primary_keys = options.pop('use_natural_primary_keys', False)
        progress_bar = self.progress_class(
            options.pop('progress_output', None), options.pop('object_count', 0)
        )

        self.start_serialization()
        # self.first tracks whether we are still on the first object;
        # subclasses may consult it, e.g. to emit separators between objects.
        self.first = True
        for count, obj in enumerate(queryset, start=1):
            self.start_object(obj)
            # Use the concrete parent class' _meta instead of the object's _meta
            # This is to avoid local_fields problems for proxy models. Refs #17717.
            concrete_model = obj._meta.concrete_model
            for field in concrete_model._meta.local_fields:
                if field.serialize:
                    if field.remote_field is None:
                        # Plain (non-relational) field.
                        if self.selected_fields is None or field.attname in self.selected_fields:
                            self.handle_field(obj, field)
                    else:
                        # FK field: attname is "<name>_id"; [:-3] strips the
                        # "_id" suffix so it matches the user-facing field
                        # name in selected_fields.
                        if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
                            self.handle_fk_field(obj, field)
            for field in concrete_model._meta.many_to_many:
                if field.serialize:
                    if self.selected_fields is None or field.attname in self.selected_fields:
                        self.handle_m2m_field(obj, field)
            self.end_object(obj)
            progress_bar.update(count)
            if self.first:
                self.first = False
        self.end_serialization()
        return self.getvalue()

    def start_serialization(self):
        """
        Called when serializing of the queryset starts.
        """
        raise NotImplementedError('subclasses of Serializer must provide a start_serialization() method')

    def end_serialization(self):
        """
        Called when serializing of the queryset ends.
        """
        pass

    def start_object(self, obj):
        """
        Called when serializing of an object starts.
        """
        raise NotImplementedError('subclasses of Serializer must provide a start_object() method')

    def end_object(self, obj):
        """
        Called when serializing of an object ends.
        """
        pass

    def handle_field(self, obj, field):
        """
        Called to handle each individual (non-relational) field on an object.
        """
        raise NotImplementedError('subclasses of Serializer must provide an handle_field() method')

    def handle_fk_field(self, obj, field):
        """
        Called to handle a ForeignKey field.
        """
        raise NotImplementedError('subclasses of Serializer must provide an handle_fk_field() method')

    def handle_m2m_field(self, obj, field):
        """
        Called to handle a ManyToManyField.
        """
        raise NotImplementedError('subclasses of Serializer must provide an handle_m2m_field() method')

    def getvalue(self):
        """
        Return the fully serialized queryset (or None if the output stream is
        not seekable).
        """
        if callable(getattr(self.stream, 'getvalue', None)):
            return self.stream.getvalue()
class Deserializer(six.Iterator):
    """
    Abstract base deserializer class.

    Instances are iterators yielding deserialized objects; subclasses
    implement __next__().
    """

    def __init__(self, stream_or_string, **options):
        """
        Init this serializer given a stream or a string.

        A string argument is wrapped in an in-memory stream so subclasses
        can always read from self.stream uniformly.
        """
        self.options = options
        if isinstance(stream_or_string, six.string_types):
            self.stream = six.StringIO(stream_or_string)
        else:
            self.stream = stream_or_string

    def __iter__(self):
        return self

    def __next__(self):
        """Iteration interface -- return the next item in the stream"""
        raise NotImplementedError('subclasses of Deserializer must provide a __next__() method')
class DeserializedObject(object):
    """
    A deserialized model.

    Basically a container for holding the pre-saved deserialized data along
    with the many-to-many data saved with the object.

    Call ``save()`` to save the object (with the many-to-many data) to the
    database; call ``save(save_m2m=False)`` to save just the object fields
    (and not touch the many-to-many stuff.)
    """

    def __init__(self, obj, m2m_data=None):
        self.object = obj
        self.m2m_data = m2m_data

    def __repr__(self):
        return "<DeserializedObject: %s(pk=%s)>" % (self.object._meta.label, self.object.pk)

    def save(self, save_m2m=True, using=None, **kwargs):
        # Save via the Model base class directly, which bypasses any
        # model-defined save().  The save is also forced to be raw;
        # raw=True is passed to any pre/post_save signals.
        models.Model.save_base(self.object, using=using, raw=True, **kwargs)
        if save_m2m and self.m2m_data:
            for accessor_name, object_list in self.m2m_data.items():
                setattr(self.object, accessor_name, object_list)

        # Prevent a second (possibly accidental) call to save() from
        # saving the m2m data twice.
        self.m2m_data = None
def build_instance(Model, data, db):
    """
    Build a model instance.

    If the model instance doesn't have a primary key and the model supports
    natural keys, try to retrieve it from the database.
    """
    obj = Model(**data)
    # Guard clauses preserve the original short-circuit order: we must not
    # touch Model._default_manager unless natural_key support is plausible.
    if obj.pk is not None:
        return obj
    if not hasattr(Model, 'natural_key'):
        return obj
    if not hasattr(Model._default_manager, 'get_by_natural_key'):
        return obj
    natural_key = obj.natural_key()
    try:
        manager = Model._default_manager.db_manager(db)
        obj.pk = manager.get_by_natural_key(*natural_key).pk
    except Model.DoesNotExist:
        # No existing row matches the natural key; leave pk unset.
        pass
    return obj
| bsd-3-clause |
swjtuacmer/Ranker | Ranker/venv/lib/python2.7/site-packages/pip/locations.py | 340 | 5626 | """Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import os
import os.path
import site
import sys
from distutils import sysconfig
from distutils.command.install import install, SCHEME_KEYS # noqa
from pip.compat import WINDOWS, expanduser
from pip.utils import appdirs
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
    """
    Write the pip delete marker file into this directory.

    The marker flags the directory as pip-owned so it may be removed once
    the package installs successfully.
    """
    marker_path = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
    with open(marker_path, 'w') as fp:
        fp.write(DELETE_MARKER_MESSAGE)
def running_under_virtualenv():
    """
    Return True if we're running inside a virtualenv, False otherwise.

    Covers classic virtualenv (which sets sys.real_prefix) and PEP 405
    venvs (where sys.base_prefix differs from sys.prefix).
    """
    if hasattr(sys, 'real_prefix'):
        return True
    return sys.prefix != getattr(sys, "base_prefix", sys.prefix)
def virtualenv_no_global():
    """
    Return True if in a venv and no system site packages.
    """
    # Mirrors virtualenv.py: a no-global-site-packages.txt marker is placed
    # next to site.py when global site-packages are disabled.
    site_module_dir = os.path.dirname(os.path.abspath(site.__file__))
    marker_path = os.path.join(site_module_dir, 'no-global-site-packages.txt')
    # NOTE: falls through (implicitly returning None, which is falsy) when
    # the marker is absent — matching the original behavior exactly.
    if running_under_virtualenv() and os.path.isfile(marker_path):
        return True
# Module-level path setup: where unpacked sources ('src'), scripts and
# config files live. Evaluated once at import time.
if running_under_virtualenv():
    src_prefix = os.path.join(sys.prefix, 'src')
else:
    # FIXME: keep src in cwd for now (it is not a temporary folder)
    try:
        src_prefix = os.path.join(os.getcwd(), 'src')
    except OSError:
        # In case the current working directory has been renamed or deleted
        sys.exit(
            "The folder you are executing pip from can no longer be found."
        )
# under macOS + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
src_prefix = os.path.abspath(src_prefix)
# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_python_lib()
user_site = site.USER_SITE
user_dir = expanduser('~')
if WINDOWS:
    bin_py = os.path.join(sys.prefix, 'Scripts')
    bin_user = os.path.join(user_site, 'Scripts')
    # buildout uses 'bin' on Windows too?
    if not os.path.exists(bin_py):
        bin_py = os.path.join(sys.prefix, 'bin')
        bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.ini'
    legacy_storage_dir = os.path.join(user_dir, 'pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )
else:
    # POSIX layout: scripts in <prefix>/bin, config named pip.conf
    bin_py = os.path.join(sys.prefix, 'bin')
    bin_user = os.path.join(user_site, 'bin')
    config_basename = 'pip.conf'
    legacy_storage_dir = os.path.join(user_dir, '.pip')
    legacy_config_file = os.path.join(
        legacy_storage_dir,
        config_basename,
    )
# Forcing to use /usr/local/bin for standard macOS framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
    bin_py = '/usr/local/bin'
# System-wide (as opposed to per-user) config file candidates.
site_config_files = [
    os.path.join(path, config_basename)
    for path in appdirs.site_config_dirs('pip')
]
def distutils_scheme(dist_name, user=False, home=None, root=None,
                     isolated=False, prefix=None):
    """
    Return a distutils install scheme

    Keyword arguments:
        dist_name -- Name of the distribution the scheme is computed for.
        user -- Use the per-user scheme (mutually exclusive with prefix).
        home -- Value for distutils' --home.
        root -- Value for distutils' --root; prepended to scheme paths.
        isolated -- Skip the user's distutils config files (--no-user-cfg).
        prefix -- Value for distutils' --prefix.

    Returns:
        dict -- Maps each distutils SCHEME_KEY to its install path.
    """
    from distutils.dist import Distribution

    scheme = {}

    if isolated:
        extra_dist_args = {"script_args": ["--no-user-cfg"]}
    else:
        extra_dist_args = {}
    dist_args = {'name': dist_name}
    dist_args.update(extra_dist_args)

    d = Distribution(dist_args)
    d.parse_config_files()
    i = d.get_command_obj('install', create=True)
    # NOTE: setting user or home has the side-effect of creating the home dir
    # or user base for installations during finalize_options()
    # ideally, we'd prefer a scheme class that has no side-effects.
    assert not (user and prefix), "user={0} prefix={1}".format(user, prefix)
    i.user = user or i.user
    if user:
        i.prefix = ""
    i.prefix = prefix or i.prefix
    i.home = home or i.home
    i.root = root or i.root
    i.finalize_options()
    for key in SCHEME_KEYS:
        scheme[key] = getattr(i, 'install_' + key)

    # install_lib specified in setup.cfg should install *everything*
    # into there (i.e. it takes precedence over both purelib and
    # platlib). Note, i.install_lib is *always* set after
    # finalize_options(); we only want to override here if the user
    # has explicitly requested it hence going back to the config
    if 'install_lib' in d.get_option_dict('install'):
        scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))

    if running_under_virtualenv():
        # BUGFIX: 'python' + sys.version[:3] truncates "3.10" to "3.1";
        # derive the directory name from sys.version_info instead.
        scheme['headers'] = os.path.join(
            sys.prefix,
            'include',
            'site',
            'python{0}.{1}'.format(*sys.version_info[:2]),
            dist_name,
        )

        if root is not None:
            path_no_drive = os.path.splitdrive(
                os.path.abspath(scheme["headers"]))[1]
            scheme["headers"] = os.path.join(
                root,
                path_no_drive[1:],
            )

    return scheme
import os
import re
import string
import logging
import importlib
import inspect
import linecache
import random
import json
from collections import defaultdict
from copy import copy
from imp import reload
from cardinal.exceptions import (
CommandNotFoundError,
ConfigNotFoundError,
EventAlreadyExistsError,
EventCallbackError,
EventDoesNotExistError,
EventRejectedMessage,
PluginError,
)
from twisted.internet import defer
class PluginManager:
    """Keeps track of, loads, and unloads plugins."""
    COMMAND_REGEX = re.compile(r'\.([A-Za-z0-9_-]+)\s?.*$')
    """Regex for matching standard commands.
    This will check for anything beginning with a period (.) followed by any
    alphanumeric character, then whitespace, then any character(s). This means
    registered commands will be found so long as the registered command is
    alphanumeric (or either _ or -), and any additional arguments will be up to
    the plugin for handling.
    """
    def __init__(self,
                 cardinal,
                 plugins,
                 blacklist,
                 _plugin_module_import_prefix='plugins',
                 _plugin_module_directory=None):
        """Creates a new instance, optionally with a list of plugins to load

        Keyword arguments:
            cardinal -- An instance of `CardinalBot` to pass to plugins.
            plugins -- A list of plugins to be loaded when instanced.
            blacklist -- Mapping of plugin name -> channels it is barred from.
            _plugin_module_import_prefix -- Import prefix (test hook).
            _plugin_module_directory -- On-disk plugin directory (test hook).

        Raises:
            TypeError -- When the `plugins` argument is not a list.
        """
        self.logger = logging.getLogger(__name__)
        self.cardinal = cardinal
        self._blacklist = blacklist
        # Module name from which plugins are imported. This exists to assist
        # in unit testing.
        self._plugin_module_import_prefix = _plugin_module_import_prefix
        if _plugin_module_directory is None:
            # Default: the 'plugins' directory next to the package directory
            self.plugins_directory = os.path.abspath(os.path.join(
                os.path.dirname(os.path.realpath(os.path.abspath(__file__))),
                '..',
                'plugins',
            ))
        else:
            self.plugins_directory = _plugin_module_directory
        # Used for iterating PluginManager plugins
        self.iteration_counter = 0
        # plugin name -> dict of instance/commands/callbacks/config/blacklist
        self.plugins = {}
        # plugin name -> previously imported module, so reloads work
        self._module_cache = {}
        self.load(plugins)
def __iter__(self):
"""Part of the iterator protocol, returns iterator object.
In this case, this will return itself as it keeps track of the iterator
internally. Before returning itself, the iteration counter will be
reset to 0.
Returns:
PluginManager -- Returns the current instance.
"""
# Reset the iteration counter
self.iteration_counter = 0
return self
def __next__(self):
"""Part of the iterator protocol, returns the next plugin.
Returns:
dict -- Dictionary containing a plugin's data.
Raises:
StopIteration -- Raised when there are no plugins left.
"""
# Make sure we have the dictionary sorted so we return the proper
# element
keys = sorted(self.plugins.keys())
# Increment the counter
self.iteration_counter += 1
if self.iteration_counter > len(keys):
raise StopIteration
return self.plugins[keys[self.iteration_counter - 1]]
    def _import_module(self, plugin, suffix='plugin'):
        """Given a plugin name, will import it from its directory or reload it.

        Keyword arguments:
            plugin -- Name of the plugin package to (re)import.
            suffix -- Module name inside the package (default: 'plugin').

        Returns:
            The module that was loaded.
        """
        # Sort of a hack... this helps with debugging, as uncaught exceptions
        # can show the wrong data (line numbers / relevant code) if linecache
        # doesn't get cleared when a module is reloaded. This is Python's
        # internal cache of code files and line numbers.
        linecache.clearcache()
        if plugin in self._module_cache:
            # Previously imported: reload so on-disk edits take effect
            module = reload(self._module_cache[plugin])
        else:
            # First import: e.g. 'plugins.<plugin>.plugin'
            module = importlib.import_module(
                '%s.%s.%s' %
                (self._plugin_module_import_prefix,
                 plugin,
                 suffix))
            self._module_cache[plugin] = module
        return module
    def _instantiate_plugin(self, module, config=None):
        """Creates an instance of the plugin module.

        The plugin module must expose either an `entrypoint` attribute or an
        old-style `setup()` function. The entrypoint is called with only the
        keyword arguments its signature names ('cardinal' and/or 'config').

        Keyword arguments:
            module -- The module to instantiate.
            config -- A config, if any, belonging to the plugin.

        Returns:
            object -- The instance of the plugin.

        Raises:
            PluginError -- When no entrypoint exists, it isn't callable, or
                its signature names an unknown parameter.
        """
        # Prefer the new-style entrypoint attribute when present
        if hasattr(module, 'entrypoint'):
            entrypoint = module.entrypoint
        # Old-style - will be deprecated in a future release of Cardinal
        elif hasattr(module, 'setup') and inspect.isfunction(module.setup):
            entrypoint = module.setup
        else:
            raise PluginError(
                "Plugin must define an entrypoint attribute pointing to "
                "the plugin's class definition or factory method/function."
            )
        try:
            signature = inspect.signature(entrypoint)
        except TypeError:
            # inspect.signature raises TypeError for non-callables
            raise PluginError(
                "Plugin's entrypoint must be a callable returning a new "
                "instance of the plugin."
            )
        # Check whether the setup method on the module accepts an argument. If
        # it does, they are expecting our instance of CardinalBot to be passed
        # in. If not, just call setup. If there is more than one argument
        # accepted, the method is invalid.
        kwargs = {}
        for param in signature.parameters:
            if param == 'cardinal':
                kwargs['cardinal'] = self.cardinal
            elif param == 'config':
                kwargs['config'] = config
            else:
                raise PluginError(
                    "Unknown parameter {} in entrypoint signature"
                    .format(param)
                )
        return entrypoint(**kwargs)
    def _register_plugin_callbacks(self, callbacks):
        """Registers callbacks found in a plugin

        Registers all event callbacks provided by _get_plugin_callbacks with
        EventManager. Callback IDs will be stored in callback_ids so we can
        remove them on unload. It is possible to have multiple methods as
        callbacks for a single event, and to use the same method as a callback
        for multiple events. Registration is all-or-nothing: if any callback
        fails to register, every callback registered so far is rolled back.

        Keyword arguments:
            callbacks - List of callbacks to register.

        Returns:
            dict -- Maps event names to a list of EventManager callback IDs.
        """
        # Initialize variable to hold events callback IDs
        callback_ids = defaultdict(list)
        def rollback():
            # Undo every registration recorded so far in callback_ids
            for event_name, ids in list(callback_ids.items()):
                for id_ in ids:
                    self.cardinal.event_manager.remove_callback(
                        event_name, id_)
        # Loop through list of dictionaries
        try:
            for callback in callbacks:
                # Loop through all events the callback should be registered to
                for event_name in callback['event_names']:
                    # Get callback ID from register_callback method
                    try:
                        id_ = self.cardinal.event_manager.register_callback(
                            event_name, callback['method'])
                    except Exception:
                        self.logger.exception(
                            "Error registering callback for event: {}"
                            .format(event_name))
                        raise
                    # Append to list of callbacks for given event_name
                    callback_ids[event_name].append(id_)
        except Exception:
            rollback()
            raise
        return callback_ids
def _unregister_plugin_callbacks(self, plugin):
"""Unregisters all events found in a plugin.
Will remove all callbacks stored in callback_ids from EventManager.
Keyword arguments:
plugin - The name of plugin to unregister events for.
"""
# Reference to plugin
plugin = self.plugins[plugin]
# Loop though each event name
for event_name in list(plugin['callback_ids'].keys()):
# Loop tough callbacks
for callback_id in plugin['callback_ids'][event_name]:
self.cardinal.event_manager.remove_callback(
event_name, callback_id)
# Remove callback ID from registered_events
plugin['callback_ids'][event_name].remove(callback_id)
def _close_plugin_instance(self, plugin):
"""Calls the close method on an instance of a plugin.
If the plugin's module has a close() function, we will check whether
it expects an instance of CardinalBot or not by checking whether it
accepts an argument or not. If it does, we will pass in the instance of
CardinalBot. This method is called just prior to removing the internal
reference to the plugin's instance.
Keyword arguments:
plugin -- The name of the plugin to remove the instance of.
Raises:
PluginError -- When a plugin's close function has more than one
argument.
"""
instance = self.plugins[plugin]['instance']
if hasattr(instance, 'close') and inspect.ismethod(instance.close):
# The plugin has a close method, so we now need to check how
# many arguments the method has. If it only has one, then the
# argument must be 'self' and therefore they aren't expecting
# us to pass in an instance of CardinalBot. If there are two
# arguments, they expect CardinalBot. Anything else is invalid.
argspec = inspect.getfullargspec(
instance.close
)
if len(argspec.args) == 1:
instance.close()
elif len(argspec.args) == 2:
instance.close(self.cardinal)
else:
raise PluginError("Unknown arguments for close function")
def _load_plugin_config(self, plugin):
"""Loads a JSON config for a given plugin
Keyword arguments:
plugin -- Name of plugin to load config for.
Raises:
ConfigNotFoundError -- Raised when expected config isn't found.
"""
# Initialize variable to hold plugin config
config = None
# Attempt to load and parse JSON config file
file_ = os.path.join(
self.plugins_directory,
plugin,
'config.json'
)
try:
f = open(file_, 'r')
config = json.load(f)
f.close()
# File did not exist or we can't open it for another reason
except IOError:
self.logger.debug(
"Can't open %s - maybe it doesn't exist?" % file_
)
# Thrown by json.load() when the content isn't valid JSON
except ValueError:
self.logger.warning(
"Invalid JSON in %s, skipping it" % file_
)
# If neither config was found, raise an exception
if not config:
raise ConfigNotFoundError(
"No config found for plugin: %s" % plugin
)
# Return config
return config
def _get_plugin_commands(self, instance):
"""Find the commands in a plugin and return them as callables.
Keyword arguments:
instance -- An instance of a plugin.
Returns:
list -- A list of callable commands.
"""
commands = []
# Loop through each method on the instance, checking whether it's a
# method meant to be interpreted as a command or not.
for method in dir(instance):
method = getattr(instance, method)
if callable(method) and (hasattr(method, 'regex') or
hasattr(method, 'commands')):
# Since this method has either the 'regex' or the 'commands'
# attribute assigned, it's registered as a command for
# Cardinal.
commands.append(method)
return commands
def _get_plugin_callbacks(self, instance):
"""Finds the event callbacks in a plugin and returns them as a list.
Keyword arguments:
instane -- An instance of plugin
Returns:
list -- A list of dictionaries holding event names and callable
methods.
"""
callbacks = []
for method in dir(instance):
method = getattr(instance, method)
if callable(method) and (hasattr(method, 'events')):
# Since this method has the 'events' attribute assigned,
# it is registered as a event for Cardinal
callbacks.append({
'event_names': method.events,
'method': method
})
return callbacks
def itercommands(self, channel=None):
"""Simple generator to iterate through all commands of loaded plugins.
Returns:
iterator -- Iterator for looping through commands
"""
# Loop through each plugin we have loaded
for name, plugin in list(self.plugins.items()):
if channel is not None and channel in plugin['blacklist']:
continue
# Loop through each of the plugins' commands (these are actually
# class methods with attributes assigned to them, so they are all
# callable) and yield the command
for command in plugin['commands']:
yield command
    def load(self, plugins):
        """Takes either a plugin name or a list of plugins and loads them.

        This involves attempting to import the plugin's module, import the
        plugin's config module, instance the plugin's object, and finding its
        commands and events.

        Keyword arguments:
            plugins -- This can be either a single or list of plugin names.

        Returns:
            list -- A list of failed plugins, or an empty list.

        Raises:
            TypeError -- When the `plugins` argument is not a string or list.
        """
        # NOTE(review): stale statement -- _module_cache is an instance
        # attribute, not a module-level global, so this has no effect.
        global _module_cache
        if isinstance(plugins, str):
            plugins = [plugins]
        if not isinstance(plugins, list):
            raise TypeError(
                "Plugins argument must be a string or list of plugins"
            )
        # List of plugins which failed to load
        failed_plugins = []
        for plugin in plugins:
            # Reload flag so we can update the reload counter if necessary
            self.logger.info("Attempting to load plugin: %s" % plugin)
            # Import each plugin's module with our own hacky function to reload
            # modules that have already been imported previously
            try:
                if plugin in list(self.plugins.keys()):
                    self.logger.info("Already loaded, unloading first: %s" %
                                     plugin)
                    self.unload(plugin)
                module = self._import_module(plugin)
            except Exception:
                # Probably a syntax error in the plugin, log the exception
                self.logger.exception(
                    "Could not load plugin module: %s" % plugin
                )
                failed_plugins.append(plugin)
                continue
            # Attempt to load the config file for the given plugin.
            config = None
            try:
                config = self._load_plugin_config(plugin)
            except ConfigNotFoundError:
                # Configs are optional; carry on with config=None
                self.logger.debug(
                    "No config found for plugin: %s" % plugin
                )
            # Instanstiate the plugin
            try:
                instance = self._instantiate_plugin(module, config)
            except Exception:
                self.logger.exception(
                    "Could not instantiate plugin: %s" % plugin
                )
                failed_plugins.append(plugin)
                continue
            commands = self._get_plugin_commands(instance)
            callbacks = self._get_plugin_callbacks(instance)
            try:
                # do this last to ensure the rollback functionality works
                # correctly to remove callbacks if loading fails
                callback_ids = self._register_plugin_callbacks(callbacks)
            except Exception:
                self.logger.exception(
                    "Could not register events for plugin: %s" % plugin
                )
                failed_plugins.append(plugin)
                continue
            self.plugins[plugin] = {
                'name': plugin,
                'instance': instance,
                'commands': commands,
                'callbacks': callbacks,
                'callback_ids': callback_ids,
                'config': config,
                # Copy the configured blacklist so runtime mutations don't
                # leak back into the shared config mapping
                'blacklist': (
                    copy(self._blacklist[plugin])
                    if plugin in self._blacklist else
                    []
                ),
            }
            self.logger.info("Plugin %s successfully loaded" % plugin)
        return failed_plugins
    def unload(self, plugins):
        """Takes either a plugin name or a list of plugins and unloads them.

        Simply validates whether we have loaded a plugin by a given name, and
        if so, clears all the data associated with it.

        Keyword arguments:
            plugins -- This can be either a single or list of plugin names.

        Returns:
            list -- A list of failed plugins, or an empty list.

        Raises:
            TypeError -- When the `plugins` argument is not a string or list.
        """
        # If they passed in a string, convert it to a list
        if isinstance(plugins, str):
            plugins = [plugins]
        if not isinstance(plugins, list):
            raise TypeError("Plugins must be a string or list of plugins")
        # We'll keep track of any plugins we failed to unload (either because
        # we have no record of them being loaded or because the method was
        # invalid.)
        failed_plugins = []
        for plugin in plugins:
            self.logger.info("Attempting to unload plugin: %s" % plugin)
            if plugin not in self.plugins:
                self.logger.warning("Plugin was never loaded: %s" % plugin)
                failed_plugins.append(plugin)
                continue
            self._unregister_plugin_callbacks(plugin)
            try:
                self._close_plugin_instance(plugin)
            except Exception:
                # Log the exception that came from trying to unload the
                # plugin, but don't skip over the plugin. We'll still
                # unload it.
                self.logger.exception(
                    "Didn't close plugin cleanly: %s" % plugin
                )
                failed_plugins.append(plugin)
            # Once all references of the plugin have been removed, Python will
            # eventually do garbage collection. We only opened it in one
            # location, so we'll get rid of that now.
            del self.plugins[plugin]
        return failed_plugins
def unload_all(self):
"""Unloads all loaded plugins.
This should theoretically only be called when quitting Cardinal (or
perhaps during a full reload) and therefore we don't need to really
pay attention to any failed plugins.
"""
self.logger.info("Unloading all plugins")
self.unload([plugin for plugin, data in list(self.plugins.items())])
def blacklist(self, plugin, channels):
"""Blacklists a plugin from given channels.
Keyword arguments:
plugin -- Name of plugin whose blacklist to operate on
channels -- A list of channels to add to the blacklist
Returns:
bool -- False if plugin doesn't exist.
"""
# If they passed in a string, convert it to a list
if isinstance(channels, str):
channels = [channels]
if not isinstance(channels, list):
raise TypeError("Plugins must be a string or list of plugins")
if plugin not in self.plugins:
return False
self.plugins[plugin]['blacklist'].extend(channels)
return True
def unblacklist(self, plugin, channels):
"""Removes channels from a plugin's blacklist.
Keyword arguments:
plugin -- Name of plugin whose blacklist to operate on
channels -- A list of channels to remove from the blacklist
Returns:
list/bool -- False if plugin doesn't exist, list of channels that
weren't blacklisted in the first place if it does.
"""
# If they passed in a string, convert it to a list
if isinstance(channels, str):
channels = [channels]
if not isinstance(channels, list):
raise TypeError("Plugins must be a string or list of plugins")
if plugin not in self.plugins:
return False
not_blacklisted = []
for channel in channels:
if channel not in self.plugins[plugin]['blacklist']:
not_blacklisted.append(channel)
continue
self.plugins[plugin]['blacklist'].remove(channel)
return not_blacklisted
def get_config(self, plugin):
"""Returns a loaded config for given plugin.
When a plugin is loaded, if a config is found, it will be stored in
PluginManager. This method returns a given plugin's config, so it can
be accessed elsewhere.
Keyword arguments:
plugin -- A string containing the name of a plugin.
Returns:
dict -- A dictionary containing the config.
Raises:
ConfigNotFoundError -- When no config exists for a given plugin name.
"""
if plugin not in self.plugins:
raise ConfigNotFoundError("Couldn't find requested plugin config")
if self.plugins[plugin]['config'] is None:
raise ConfigNotFoundError("Couldn't find requested plugin config")
return self.plugins[plugin]['config']
    def call_command(self, user, channel, message):
        """Checks a message to see if it appears to be a command and calls it.

        This is done by checking `COMMAND_REGEX` on a message. If the pattern
        matches, we then check whether any plugins have a matching command.
        Then we will check whether any plugins have registered a custom regex
        expression matching the message.

        Keyword arguments:
            user -- A tuple containing a user's nick, ident, and hostname.
            channel -- A string representing where replies should be sent.
            message -- A string containing a message received by CardinalBot.

        Returns:
            Deferred -- DeferredList of command calls, or a Deferred firing
                None when the message wasn't a command at all.

        Raises:
            CommandNotFoundError -- If the message appeared to be a command but
              no matching plugins are loaded.
        """
        # Keep track of whether we called a command for logging purposes
        called_command = False
        # Perform a regex match of the message to our command regexes, since
        # only one of these can match, and the matching groups are in the same
        # order, we only need to check the second one if the first fails, and
        # we only need to use one variable to track this.
        command_match = re.match(self.COMMAND_REGEX, message)
        dl = []
        for command in self.itercommands(channel):
            # Check whether the current command has a regex to match by, and if
            # it does, and the message given to us matches the regex, then call
            # the command.
            if hasattr(command, 'regex') and re.search(command.regex, message):
                dl.append(self._call_command(command, user, channel, message))
                called_command = True
                continue
            if not command_match:
                continue
            # Check if the plugin defined any commands and whether they match
            # the message.
            if (hasattr(command, 'commands') and
                    command_match.group(1) in command.commands):
                dl.append(self._call_command(command, user, channel, message))
                called_command = True
                continue
        # Since standard command regex wasn't found, there's no need to raise
        # an exception - we weren't exactly expecting to find a command anyway.
        # Alternatively, if we called a command, no need to raise an exception.
        if called_command:
            return defer.DeferredList(dl)
        elif not command_match:
            return defer.succeed(None)
        # Since we found something that matched a command regex, yet no plugins
        # that were loaded had a command matching, we can raise an exception.
        raise CommandNotFoundError(
            "Command syntax detected, but no matching command found: %s" %
            message
        )
def _call_command(self, command, user, channel, message):
"""Calls a command method and treats it as a Deferred.
Keyword arguments:
command -- A callable for the command that may return a Deferred.
user -- A tuple containing a user's nick, ident, and hostname.
channel -- A string representing where replies should be sent.
message -- A string containing a message received by CardinalBot.
"""
args = (self.cardinal, user, channel, message)
d = defer.maybeDeferred(
command, *args)
def errback(failure):
self.logger.error('Unhandled error: {}'.format(failure))
d.addErrback(errback)
return d
class EventManager:
    """Registry of named events and the callbacks subscribed to them."""
    def __init__(self, cardinal):
        """Initializes the logger"""
        self.cardinal = cardinal
        self.logger = logging.getLogger(__name__)
        # event name -> required callback parameter count (set by register())
        self.registered_events = defaultdict(dict)
        # event name -> {callback_id: callable}
        self.registered_callbacks = defaultdict(dict)
def register(self, name, required_params):
"""Registers a plugin's event so other events can set callbacks.
Keyword arguments:
name -- Name of the event.
required_params -- Number of parameters a callback must take.
Raises:
EventAlreadyExistsError -- If register is attempted for an event name
already in use.
TypeError -- If required_params is not a number.
"""
self.logger.debug("Attempting to register event: %s" % name)
if name in self.registered_events:
self.logger.debug("Event already exists: %s" % name)
raise EventAlreadyExistsError("Event already exists: %s" % name)
if not isinstance(required_params, (int, int)):
self.logger.debug("Invalid required params: %s" % name)
raise TypeError("Required params must be an integer")
self.registered_events[name] = required_params
if name not in self.registered_callbacks:
self.registered_callbacks[name] = {}
self.logger.info("Registered event: %s" % name)
def remove(self, name):
"""Removes a registered event."""
self.logger.debug("Attempting to unregister event: %s" % name)
if name not in self.registered_events:
self.logger.debug("Event does not exist: %s" % name)
raise EventDoesNotExistError(
"Can't remove nonexistent event: %s" % name
)
del self.registered_events[name]
del self.registered_callbacks[name]
self.logger.info("Removed event: %s" % name)
    def register_callback(self, event_name, callback):
        """Registers a callback to be called when an event fires.

        The callback's signature is inspected to ensure it can accept the
        number of positional arguments the event fires with (plus one for
        the CardinalBot instance). Callbacks may be registered before the
        event itself exists; in that case only a minimal sanity check runs.

        Keyword arguments:
            event_name -- Event name to bind callback to.
            callback -- Callable to bind.

        Returns:
            string -- Callback ID usable with remove_callback().

        Raises:
            EventCallbackError -- If an invalid callback is passed in.
        """
        self.logger.debug(
            "Attempting to register callback for event: %s" % event_name
        )
        try:
            parameters = inspect.signature(callback).parameters
        except TypeError:
            raise EventCallbackError(
                "Can't register callback that isn't callable"
            )
        num_func_args = 0
        num_required_args = 0
        accepts_vargs = False
        for param in parameters.values():
            if param.kind in (param.POSITIONAL_ONLY,
                              param.POSITIONAL_OR_KEYWORD):
                num_func_args += 1
                if param.default == param.empty:
                    num_required_args += 1
            # As long as num_func_args doesn't exceed the number of arguments
            # required, this ensures that the signature works
            elif param.kind == param.VAR_POSITIONAL:
                accepts_vargs = True
            # We will never pass keyword arguments, so if the param must be a
            # keyword, and there's no default set, we can error early
            elif param.kind == param.KEYWORD_ONLY \
                    and param.default == param.empty:
                raise EventCallbackError(
                    "Callbacks must not take required keyword arguments"
                )
            # These are irrelevant - we'll never pass any but they are optional
            elif param.kind == param.VAR_KEYWORD:
                pass
        # If no event is registered, we will still register the callback but
        # we can't sanity check it since the event hasn't been registered yet
        if event_name not in self.registered_events:
            if num_func_args < 1 and not accepts_vargs:
                raise EventCallbackError(
                    "Callback must take at least one argument (cardinal)")
            return self._add_callback(event_name, callback)
        # Add one to needed args to account for CardinalBot being passed in
        num_needed_args = self.registered_events[event_name] + 1
        if (not accepts_vargs and num_func_args < num_needed_args):
            raise EventCallbackError(
                "Can't register callback with wrong number of arguments "
                "(event passes %d, %d accepted)" %
                (num_needed_args, num_func_args)
            )
        elif num_required_args > num_needed_args:
            raise EventCallbackError(
                "Can't register callback with wrong number of arguments "
                "(event passes %d, %d required)" %
                (num_needed_args, num_required_args)
            )
        return self._add_callback(event_name, callback)
def remove_callback(self, event_name, callback_id):
"""Removes a callback with a given ID from an event's callback list.
Keyword arguments:
event_name -- Event name to remove the callback from.
callback_id -- The ID generated when the callback was added.
"""
self.logger.debug(
"Removing callback %s from callback list for event: %s" %
(callback_id, event_name)
)
if event_name not in self.registered_callbacks:
self.logger.debug(
"Callback %s: Event has no callback list" % callback_id
)
return
if callback_id not in self.registered_callbacks[event_name]:
self.logger.debug(
"Callback %s: Callback does not exist in callback list" %
callback_id
)
return
del self.registered_callbacks[event_name][callback_id]
self.logger.info("Removed callback %s for event: %s",
callback_id, event_name)
    def fire(self, name, *params):
        """Calls all callbacks with given event name.

        Keyword arguments:
            name -- Event name to fire.
            params -- Params to pass to callbacks.

        Raises:
            EventDoesNotExistError -- If fire is called a nonexistent event.

        Returns:
            Deferred -- Fires True when at least one callback successfully
                accepted the event, False otherwise.
        """
        self.logger.debug("Attempting to fire event: %s" % name)
        if name not in self.registered_events:
            self.logger.debug("Event does not exist: %s" % name)
            raise EventDoesNotExistError(
                "Can't call an event that does not exist: %s" % name
            )
        callbacks = self.registered_callbacks[name]
        self.logger.debug(
            "Calling %d callbacks for event: %s" %
            (len(callbacks), name)
        )
        cb_deferreds = []
        for callback_id, callback in callbacks.items():
            d = defer.maybeDeferred(
                callback, self.cardinal, *params)
            # It is necessary to pass callback_id in to this function in order
            # to make sure it doesn't change when the loop iterates
            def success(_result, callback_id=callback_id):
                self.logger.debug(
                    "Callback {} accepted event '{}'"
                    .format(callback_id, name)
                )
                return True
            d.addCallback(success)
            # It is necessary to pass callback_id in to this function in order
            # to make sure it doesn't change when the loop iterates
            def eventRejectedErrback(failure, callback_id=callback_id):
                # If this exception is received, the plugin told us not to set
                # the called flag true, so we can just log it and continue on.
                # This might happen if a plugin realizes the event does not
                # apply to it and wants the original caller to handle it
                # normally.
                failure.trap(EventRejectedMessage)
                self.logger.debug(
                    "Callback {} rejected event '{}'"
                    .format(callback_id, name)
                )
                return False
            d.addErrback(eventRejectedErrback)
            # It is necessary to pass callback_id in to this function in order
            # to make sure it doesn't change when the loop iterates
            def errback(failure, callback_id=callback_id):
                self.logger.error(
                    "Unhandled error during callback {} for event '{}': {}"
                    .format(callback_id, name, failure)
                )
                return False
            d.addErrback(errback)
            cb_deferreds.append(d)
        dl = defer.DeferredList(cb_deferreds)
        dl.addCallback(self._reduce_callback_accepted_statuses)
        return dl
@staticmethod
def _reduce_callback_accepted_statuses(results):
"""Returns True if an event callback accepted the event.
This is a callback added to a DeferredList representing each of the
event callback Deferreds. If any one of them accepted the event, return
True back to the caller that fired the event.
"""
for res in results:
success, result = res
if success and result is True:
return True
return False
def _add_callback(self, event_name, callback):
"""Adds a callback to the event's callback list and returns an ID.
Keyword arguments:
event_name -- Event name to add the callback to.
callback -- The callback to add.
Returns:
string -- A callback ID to reference the callback with for removal.
"""
callback_id = self._generate_id()
while (event_name in self.registered_callbacks and
callback_id in self.registered_callbacks[event_name]):
callback_id = self._generate_id()
self.registered_callbacks[event_name][callback_id] = callback
self.logger.info(
"Registered callback %s for event: %s" %
(callback_id, event_name)
)
return callback_id
def _generate_id(size=6, chars=string.ascii_uppercase + string.digits):
"""
Thank you StackOverflow: http://stackoverflow.com/a/2257449/242129
Generates a random, 6 character string of letters and numbers (by
default.)
"""
return ''.join(random.choice(chars) for _ in range(6))
| mit |
chriskuehl/pre-commit-1 | pre_commit/languages/node.py | 4 | 1344 | from __future__ import unicode_literals
import contextlib
import sys
from pre_commit.languages import helpers
from pre_commit.util import clean_path_on_failure
ENVIRONMENT_DIR = 'node_env'
class NodeEnv(helpers.Environment):
    """Environment wrapper that activates a nodeenv before running commands."""

    @property
    def env_prefix(self):
        # Shell fragment that sources the nodeenv activate script;
        # '{prefix}' is substituted later by the command runner.
        env_dir = helpers.environment_dir(ENVIRONMENT_DIR, self.language_version)
        return ". '{{prefix}}{0}/bin/activate' &&".format(env_dir)
@contextlib.contextmanager
def in_env(repo_cmd_runner, language_version):
    """Yield a NodeEnv bound to the given runner and node version."""
    node_env = NodeEnv(repo_cmd_runner, language_version)
    yield node_env
def install_environment(repo_cmd_runner, version='default'):
    """Build a nodeenv for the hook repository and install its npm package."""
    assert repo_cmd_runner.exists('package.json')
    directory = helpers.environment_dir(ENVIRONMENT_DIR, version)

    # Remove the half-built environment if anything below fails.
    with clean_path_on_failure(repo_cmd_runner.path(directory)):
        nodeenv_cmd = [
            sys.executable, '-m', 'nodeenv', '--prebuilt',
            '{{prefix}}{0}'.format(directory),
        ]
        if version != 'default':
            nodeenv_cmd.extend(['-n', version])
        repo_cmd_runner.run(nodeenv_cmd)

        with in_env(repo_cmd_runner, version) as node_env:
            node_env.run("cd '{prefix}' && npm install -g")
with in_env(repo_cmd_runner, version) as node_env:
node_env.run("cd '{prefix}' && npm install -g")
def run_hook(repo_cmd_runner, hook, file_args):
    """Execute ``hook`` over ``file_args`` inside the hook's node env."""
    with in_env(repo_cmd_runner, hook['language_version']) as node_env:
        return helpers.run_hook(node_env, hook, file_args)
| mit |
geodrinx/gearthview | ext-libs/twisted/python/test/modules_helpers.py | 33 | 1758 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Facilities for helping test code which interacts with Python's module system
to load code.
"""
from __future__ import division, absolute_import
import sys
from twisted.python.filepath import FilePath
class TwistedModulesMixin:
    """
    A mixin for C{twisted.trial.unittest.SynchronousTestCase} providing useful
    methods for manipulating Python's module system.
    """
    def replaceSysPath(self, sysPath):
        """
        Replace sys.path, for the duration of the test, with the given value.
        """
        originalSysPath = sys.path[:]
        def cleanUpSysPath():
            sys.path[:] = originalSysPath
        self.addCleanup(cleanUpSysPath)
        sys.path[:] = sysPath

    def replaceSysModules(self, sysModules):
        """
        Replace sys.modules, for the duration of the test, with the given value.
        """
        originalSysModules = sys.modules.copy()
        def cleanUpSysModules():
            sys.modules.clear()
            sys.modules.update(originalSysModules)
        self.addCleanup(cleanUpSysModules)
        sys.modules.clear()
        sys.modules.update(sysModules)

    def pathEntryWithOnePackage(self, pkgname=b"test_package"):
        """
        Generate a L{FilePath} with one package, named C{pkgname}, on it, and
        return the L{FilePath} of the path entry.
        """
        # Remove utf-8 encode and bytes for path segments when Filepath
        # supports Unicode paths on Python 3 (#2366, #4736, #5203).
        entry = FilePath(self.mktemp().encode("utf-8"))
        # Fix: honour the pkgname argument instead of the hard-coded
        # b"test_package"; previously a caller passing a custom name still
        # got the default package created.
        pkg = entry.child(pkgname)
        pkg.makedirs()
        pkg.child(b"__init__.py").setContent(b"")
        return entry
| gpl-3.0 |
mindriot101/bokeh | sphinx/source/docs/user_guide/examples/js_events.py | 3 | 2754 | """ Demonstration of how to register event callbacks using an adaptation
of the color_scatter example from the bokeh gallery
"""
import numpy as np
from bokeh.io import show, output_file
from bokeh.plotting import figure
from bokeh import events
from bokeh.models import CustomJS, Div, Button
from bokeh.layouts import column, row
def display_event(div, attributes=None, style='float:left;clear:left;font_size=10pt'):
    """Build a suitable CustomJS to display the current event in the div model.

    Arguments:
        div        -- Div model whose text the generated JS appends to.
        attributes -- event attribute names to report; defaults to none.
        style      -- inline style applied to each reported line.
                      NOTE(review): 'font_size=10pt' looks like it was meant
                      to be 'font-size:10pt'; kept as-is since it is emitted
                      verbatim into the page.
    """
    # Avoid the shared mutable-default-argument pitfall; an empty list keeps
    # the generated JS identical to the old `attributes=[]` default.
    if attributes is None:
        attributes = []
    return CustomJS(args=dict(div=div), code="""
    var attrs = %s; var args = [];
    for (var i = 0; i<attrs.length; i++) {
        args.push(attrs[i] + '=' + Number(cb_obj[attrs[i]]).toFixed(2));
    }
    var line = "<span style=%r><b>" + cb_obj.event_name + "</b>(" + args.join(", ") + ")</span>\\n";
    var text = div.text.concat(line);
    var lines = text.split("\\n")
    if (lines.length > 35)
        lines.shift();
    div.text = lines.join("\\n");
    """ % (attributes, style))
# Random scatter data: 4000 points in [0, 100) x [0, 100) with colors derived
# from position.
x = np.random.random(size=4000) * 100
y = np.random.random(size=4000) * 100
# NOTE(review): `radii` is computed but never used -- p.scatter below draws a
# fresh random radius array instead. Confirm whether it should be passed in.
radii = np.random.random(size=4000) * 1.5
colors = ["#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)]
p = figure(tools="pan,wheel_zoom,zoom_in,zoom_out,reset")
p.scatter(x, y, radius=np.random.random(size=4000) * 1.5,
          fill_color=colors, fill_alpha=0.6, line_color=None)
# The Div receives a running log of fired events; the button demonstrates a
# widget-level event.
div = Div(width=1000)
button = Button(label="Button", button_type="success")
layout = column(button, row(p, div))
## Events with no attributes
button.js_on_event(events.ButtonClick, display_event(div)) # Button click
p.js_on_event(events.LODStart, display_event(div))         # Start of LOD display
p.js_on_event(events.LODEnd, display_event(div))           # End of LOD display
## Events with attributes
point_attributes = ['x','y','sx','sy'] # Point events
wheel_attributes = point_attributes+['delta'] # Mouse wheel event
pan_attributes = point_attributes + ['delta_x', 'delta_y'] # Pan event
pinch_attributes = point_attributes + ['scale'] # Pinch event
point_events = [events.Tap, events.DoubleTap, events.Press,
                events.MouseMove, events.MouseEnter, events.MouseLeave,
                events.PanStart, events.PanEnd, events.PinchStart, events.PinchEnd]
# All plain point events share the same attribute set; wheel/pan/pinch add
# their gesture-specific deltas.
for event in point_events:
    p.js_on_event(event,display_event(div, attributes=point_attributes))
p.js_on_event(events.MouseWheel, display_event(div,attributes=wheel_attributes))
p.js_on_event(events.Pan, display_event(div, attributes=pan_attributes))
p.js_on_event(events.Pinch, display_event(div, attributes=pinch_attributes))
output_file("js_events.html", title="JS Events Example")
show(layout)
| bsd-3-clause |
guettli/django | tests/delete/tests.py | 8 | 19085 | from __future__ import unicode_literals
from math import ceil
from django.db import IntegrityError, connection, models
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils.six.moves import range
from .models import (
MR, A, Avatar, Base, Child, HiddenUser, HiddenUserProfile, M, M2MFrom,
M2MTo, MRNull, Parent, R, RChild, S, T, User, create_a, get_default_r,
)
class OnDeleteTests(TestCase):
    """Tests for each on_delete behavior (CASCADE, SET_NULL, SET_DEFAULT,
    SET(...), PROTECT, DO_NOTHING) on the A model's foreign keys, including
    the multi-table-inheritance variants."""

    def setUp(self):
        # Default R instance used by the SET(...)/SET_DEFAULT fields.
        self.DEFAULT = get_default_r()

    def test_auto(self):
        a = create_a('auto')
        a.auto.delete()
        self.assertFalse(A.objects.filter(name='auto').exists())

    def test_auto_nullable(self):
        a = create_a('auto_nullable')
        a.auto_nullable.delete()
        self.assertFalse(A.objects.filter(name='auto_nullable').exists())

    def test_setvalue(self):
        a = create_a('setvalue')
        a.setvalue.delete()
        # Reload: the FK should now point at the SET(...) target.
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setvalue.pk)

    def test_setnull(self):
        a = create_a('setnull')
        a.setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setnull)

    def test_setdefault(self):
        a = create_a('setdefault')
        a.setdefault.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setdefault.pk)

    def test_setdefault_none(self):
        a = create_a('setdefault_none')
        a.setdefault_none.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.setdefault_none)

    def test_cascade(self):
        a = create_a('cascade')
        a.cascade.delete()
        self.assertFalse(A.objects.filter(name='cascade').exists())

    def test_cascade_nullable(self):
        a = create_a('cascade_nullable')
        a.cascade_nullable.delete()
        self.assertFalse(A.objects.filter(name='cascade_nullable').exists())

    def test_protect(self):
        a = create_a('protect')
        with self.assertRaises(IntegrityError):
            a.protect.delete()

    def test_do_nothing(self):
        # Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
        # so we connect to pre_delete and set the fk to a known value.
        replacement_r = R.objects.create()
        def check_do_nothing(sender, **kwargs):
            obj = kwargs['instance']
            obj.donothing_set.update(donothing=replacement_r)
        models.signals.pre_delete.connect(check_do_nothing)
        a = create_a('do_nothing')
        a.donothing.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(replacement_r, a.donothing)
        models.signals.pre_delete.disconnect(check_do_nothing)

    def test_do_nothing_qscount(self):
        """
        Test that a models.DO_NOTHING relation doesn't trigger a query.
        """
        b = Base.objects.create()
        with self.assertNumQueries(1):
            # RelToBase should not be queried.
            b.delete()
        self.assertEqual(Base.objects.count(), 0)

    def test_inheritance_cascade_up(self):
        child = RChild.objects.create()
        child.delete()
        self.assertFalse(R.objects.filter(pk=child.pk).exists())

    def test_inheritance_cascade_down(self):
        child = RChild.objects.create()
        parent = child.r_ptr
        parent.delete()
        self.assertFalse(RChild.objects.filter(pk=child.pk).exists())

    def test_cascade_from_child(self):
        a = create_a('child')
        a.child.delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(R.objects.filter(pk=a.child_id).exists())

    def test_cascade_from_parent(self):
        a = create_a('child')
        R.objects.get(pk=a.child_id).delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())

    def test_setnull_from_child(self):
        a = create_a('child_setnull')
        a.child_setnull.delete()
        self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)

    def test_setnull_from_parent(self):
        a = create_a('child_setnull')
        R.objects.get(pk=a.child_setnull_id).delete()
        self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.child_setnull)

    def test_o2o_setnull(self):
        a = create_a('o2o_setnull')
        a.o2o_setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertIsNone(a.o2o_setnull)
class DeletionTests(TestCase):
    """Tests for the deletion collector: m2m cleanup, batching, signal
    ordering, constraint-check deferral, and the (count, per-model-counts)
    return value of delete()."""

    def test_m2m(self):
        m = M.objects.create()
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        r.delete()
        self.assertFalse(MR.objects.exists())
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        m.delete()
        self.assertFalse(MR.objects.exists())
        m = M.objects.create()
        r = R.objects.create()
        m.m2m.add(r)
        r.delete()
        through = M._meta.get_field('m2m').remote_field.through
        self.assertFalse(through.objects.exists())
        r = R.objects.create()
        m.m2m.add(r)
        m.delete()
        self.assertFalse(through.objects.exists())
        m = M.objects.create()
        r = R.objects.create()
        MRNull.objects.create(m=m, r=r)
        r.delete()
        # The nullable through row survives the delete (its FK is nulled).
        # Was the double negative `assertFalse(not ...)` -- identical check,
        # stated directly.
        self.assertTrue(MRNull.objects.exists())
        self.assertFalse(m.m2m_through_null.exists())

    def test_bulk(self):
        s = S.objects.create(r=R.objects.create())
        for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
            T.objects.create(s=s)
        # 1 (select related `T` instances)
        # + 1 (select related `U` instances)
        # + 2 (delete `T` instances in batches)
        # + 1 (delete `s`)
        self.assertNumQueries(5, s.delete)
        self.assertFalse(S.objects.exists())

    def test_instance_update(self):
        deleted = []
        related_setnull_sets = []
        def pre_delete(sender, **kwargs):
            obj = kwargs['instance']
            deleted.append(obj)
            if isinstance(obj, R):
                related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))
        models.signals.pre_delete.connect(pre_delete)
        a = create_a('update_setnull')
        a.setnull.delete()
        a = create_a('update_cascade')
        a.cascade.delete()
        # Deleted instances must have their pk cleared in memory...
        for obj in deleted:
            self.assertIsNone(obj.pk)
        # ...and SET_NULL targets must be nulled in the database.
        for pk_list in related_setnull_sets:
            for a in A.objects.filter(id__in=pk_list):
                self.assertIsNone(a.setnull)
        models.signals.pre_delete.disconnect(pre_delete)

    def test_deletion_order(self):
        pre_delete_order = []
        post_delete_order = []
        # NOTE(review): the handler names and lists appear crossed -- the
        # handler connected to post_delete records into pre_delete_order and
        # vice versa. The assertions below match the current wiring, so it is
        # left untouched; confirm the intent upstream before renaming.
        def log_post_delete(sender, **kwargs):
            pre_delete_order.append((sender, kwargs['instance'].pk))
        def log_pre_delete(sender, **kwargs):
            post_delete_order.append((sender, kwargs['instance'].pk))
        models.signals.post_delete.connect(log_post_delete)
        models.signals.pre_delete.connect(log_pre_delete)
        r = R.objects.create(pk=1)
        s1 = S.objects.create(pk=1, r=r)
        s2 = S.objects.create(pk=2, r=r)
        T.objects.create(pk=1, s=s1)
        T.objects.create(pk=2, s=s2)
        RChild.objects.create(r_ptr=r)
        r.delete()
        self.assertEqual(
            pre_delete_order, [(T, 2), (T, 1), (RChild, 1), (S, 2), (S, 1), (R, 1)]
        )
        self.assertEqual(
            post_delete_order, [(T, 1), (T, 2), (RChild, 1), (S, 1), (S, 2), (R, 1)]
        )
        models.signals.post_delete.disconnect(log_post_delete)
        models.signals.pre_delete.disconnect(log_pre_delete)

    def test_relational_post_delete_signals_happen_before_parent_object(self):
        deletions = []
        def log_post_delete(instance, **kwargs):
            # The parent R must still exist when the child's signal fires.
            self.assertTrue(R.objects.filter(pk=instance.r_id))
            self.assertIs(type(instance), S)
            deletions.append(instance.id)
        r = R.objects.create(pk=1)
        S.objects.create(pk=1, r=r)
        models.signals.post_delete.connect(log_post_delete, sender=S)
        try:
            r.delete()
        finally:
            models.signals.post_delete.disconnect(log_post_delete)
        self.assertEqual(len(deletions), 1)
        self.assertEqual(deletions[0], 1)

    @skipUnlessDBFeature("can_defer_constraint_checks")
    def test_can_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to delete the avatar
        # The important thing is that when we can defer constraint checks there
        # is no need to do an UPDATE on User.avatar to null it out.
        # Attach a signal to make sure we will not do fast_deletes.
        calls = []
        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)
        self.assertNumQueries(3, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)

    @skipIfDBFeature("can_defer_constraint_checks")
    def test_cannot_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        # Attach a signal to make sure we will not do fast_deletes.
        calls = []
        def noop(*args, **kwargs):
            calls.append('')
        models.signals.post_delete.connect(noop, sender=User)
        a = Avatar.objects.get(pk=u.avatar_id)
        # The below doesn't make sense... Why do we need to null out
        # user.avatar if we are going to delete the user immediately after it,
        # and there are no more cascades.
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to null out user.avatar, because we can't defer the constraint
        # 1 query to delete the avatar
        self.assertNumQueries(4, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())
        self.assertEqual(len(calls), 1)
        models.signals.post_delete.disconnect(noop, sender=User)

    def test_hidden_related(self):
        r = R.objects.create()
        h = HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h)
        r.delete()
        self.assertEqual(HiddenUserProfile.objects.count(), 0)

    def test_large_delete(self):
        TEST_SIZE = 2000
        objs = [Avatar() for i in range(0, TEST_SIZE)]
        Avatar.objects.bulk_create(objs)
        # Calculate the number of queries needed.
        batch_size = connection.ops.bulk_batch_size(['pk'], objs)
        # The related fetches are done in batches.
        batches = int(ceil(float(len(objs)) / batch_size))
        # One query for Avatar.objects.all() and then one related fast delete for
        # each batch.
        fetches_to_mem = 1 + batches
        # The Avatar objects are going to be deleted in batches of GET_ITERATOR_CHUNK_SIZE
        queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE
        self.assertNumQueries(queries, Avatar.objects.all().delete)
        self.assertFalse(Avatar.objects.exists())

    def test_large_delete_related(self):
        TEST_SIZE = 2000
        s = S.objects.create(r=R.objects.create())
        for i in range(TEST_SIZE):
            T.objects.create(s=s)
        batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1)
        # TEST_SIZE // batch_size (select related `T` instances)
        # + 1 (select related `U` instances)
        # + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches)
        # + 1 (delete `s`)
        # NOTE(review): `ceil(x // y)` is a no-op on an already-floored integer
        # division; if ceiling division was intended this should be
        # `ceil(TEST_SIZE / batch_size)`. Left unchanged because the current
        # expression matches the asserted count -- confirm upstream.
        expected_num_queries = (ceil(TEST_SIZE // batch_size) +
                                ceil(TEST_SIZE // GET_ITERATOR_CHUNK_SIZE) + 2)
        self.assertNumQueries(expected_num_queries, s.delete)
        self.assertFalse(S.objects.exists())
        self.assertFalse(T.objects.exists())

    def test_delete_with_keeping_parents(self):
        child = RChild.objects.create()
        parent_id = child.r_ptr_id
        child.delete(keep_parents=True)
        self.assertFalse(RChild.objects.filter(id=child.id).exists())
        self.assertTrue(R.objects.filter(id=parent_id).exists())

    def test_queryset_delete_returns_num_rows(self):
        """
        QuerySet.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        Avatar.objects.bulk_create([Avatar(desc='a'), Avatar(desc='b'), Avatar(desc='c')])
        avatars_count = Avatar.objects.count()
        deleted, rows_count = Avatar.objects.all().delete()
        self.assertEqual(deleted, avatars_count)
        # more complex example with multiple object types
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
        }
        deleted, deleted_objs = R.objects.all().delete()
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)

    def test_model_delete_returns_num_rows(self):
        """
        Model.delete() should return the number of deleted rows and a
        dictionary with the number of deletions for each object type.
        """
        r = R.objects.create()
        h1 = HiddenUser.objects.create(r=r)
        h2 = HiddenUser.objects.create(r=r)
        HiddenUser.objects.create(r=r)
        HiddenUserProfile.objects.create(user=h1)
        HiddenUserProfile.objects.create(user=h2)
        m1 = M.objects.create()
        m2 = M.objects.create()
        MR.objects.create(r=r, m=m1)
        r.m_set.add(m1)
        r.m_set.add(m2)
        r.save()
        existed_objs = {
            R._meta.label: R.objects.count(),
            HiddenUser._meta.label: HiddenUser.objects.count(),
            A._meta.label: A.objects.count(),
            MR._meta.label: MR.objects.count(),
            HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
            M.m2m.through._meta.label: M.m2m.through.objects.count(),
        }
        deleted, deleted_objs = r.delete()
        self.assertEqual(deleted, sum(existed_objs.values()))
        for k, v in existed_objs.items():
            self.assertEqual(deleted_objs[k], v)

    def test_proxied_model_duplicate_queries(self):
        """
        #25685 - Deleting instances of a model with existing proxy
        classes should not issue multiple queries during cascade
        deletion of referring models.
        """
        avatar = Avatar.objects.create()
        # One query for the Avatar table and a second for the User one.
        with self.assertNumQueries(2):
            avatar.delete()
class FastDeleteTests(TestCase):
    """Tests for the fast-delete optimization: deletions that can be issued
    as a single DELETE without fetching the objects into memory first."""

    def test_fast_delete_fk(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to fast-delete the user
        # 1 query to delete the avatar
        self.assertNumQueries(2, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())

    def test_fast_delete_m2m(self):
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete f, 1 to fast-delete m2m for f
        self.assertNumQueries(2, f.delete)

    def test_fast_delete_revm2m(self):
        t = M2MTo.objects.create()
        f = M2MFrom.objects.create()
        f.m2m.add(t)
        # 1 to delete t, 1 to fast-delete t's m_set
        # NOTE(review): the comment above describes deleting `t`, but the
        # assertion exercises `f.delete` -- making this identical to
        # test_fast_delete_m2m. Possibly `t.delete` was intended; confirm
        # before changing.
        self.assertNumQueries(2, f.delete)

    def test_fast_delete_qs(self):
        u1 = User.objects.create()
        u2 = User.objects.create()
        self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())

    def test_fast_delete_joined_qs(self):
        a = Avatar.objects.create(desc='a')
        User.objects.create(avatar=a)
        u2 = User.objects.create()
        # Backends that can't self-select in an UPDATE/DELETE need an extra
        # query to materialize the pks first.
        expected_queries = 1 if connection.features.update_can_self_select else 2
        self.assertNumQueries(expected_queries,
                              User.objects.filter(avatar__desc='a').delete)
        self.assertEqual(User.objects.count(), 1)
        self.assertTrue(User.objects.filter(pk=u2.pk).exists())

    def test_fast_delete_inheritance(self):
        c = Child.objects.create()
        p = Parent.objects.create()
        # 1 for self, 1 for parent
        self.assertNumQueries(2, c.delete)
        self.assertFalse(Child.objects.exists())
        self.assertEqual(Parent.objects.count(), 1)
        self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        # 1 for self delete, 1 for fast delete of empty "child" qs.
        c = Child.objects.create()
        p = c.parent_ptr
        self.assertNumQueries(2, p.delete)
        self.assertFalse(Parent.objects.exists())
        self.assertFalse(Child.objects.exists())

    def test_fast_delete_large_batch(self):
        User.objects.bulk_create(User() for i in range(0, 2000))
        # No problems here - we aren't going to cascade, so we will fast
        # delete the objects in a single query.
        self.assertNumQueries(1, User.objects.all().delete)
        a = Avatar.objects.create(desc='a')
        User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
        # We don't hit parameter amount limits for a, so just one query for
        # that + fast delete of the related objs.
        self.assertNumQueries(2, a.delete)
        self.assertEqual(User.objects.count(), 0)

    def test_fast_delete_empty_no_update_can_self_select(self):
        """
        #25932 - Fast deleting on backends that don't have the
        `no_update_can_self_select` feature should work even if the specified
        filter doesn't match any row.
        """
        with self.assertNumQueries(1):
            self.assertEqual(
                User.objects.filter(avatar__desc='missing').delete(),
                (0, {'delete.User': 0})
            )
| bsd-3-clause |
kdazzle/MySQL-safe-django-openid-auth | django_openid_auth/tests/__init__.py | 44 | 1749 | # django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
from test_views import *
from test_store import *
from test_auth import *
from test_admin import *
def suite():
    """Assemble this package's test suite from each submodule's suite()."""
    combined = unittest.TestSuite()
    for module_name in ('test_auth', 'test_store', 'test_views', 'test_admin'):
        # Import e.g. '<package>.test_auth' and delegate to its suite().
        mod = __import__('%s.%s' % (__name__, module_name), {}, {}, ['suite'])
        combined.addTest(mod.suite())
    return combined
| bsd-2-clause |
shajrawi/swift | utils/build_swift/argparse/__init__.py | 23 | 1658 | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
"""
Wrapper module around the standard argparse that extends the default
functionality with support for multi-destination actions, an expressive DSL for
constructing parsers and more argument types. This module exposes a strict
super-set of the argparse API and is meant to be used as a drop-in replacement.
"""
from argparse import (ArgumentDefaultsHelpFormatter, ArgumentError,
ArgumentTypeError, FileType, HelpFormatter,
Namespace, RawDescriptionHelpFormatter,
RawTextHelpFormatter)
from argparse import ONE_OR_MORE, OPTIONAL, SUPPRESS, ZERO_OR_MORE
from .actions import Action, Nargs
from .parser import ArgumentParser
from .types import (BoolType, ClangVersionType, CompilerVersion, PathType,
RegexType, ShellSplitType, SwiftVersionType)
# Names re-exported as the public surface of this argparse wrapper -- a
# strict super-set of the standard-library argparse API.
__all__ = [
    'Action',
    'ArgumentDefaultsHelpFormatter',
    'ArgumentError',
    'ArgumentParser',
    'ArgumentTypeError',
    'HelpFormatter',
    'Namespace',
    'Nargs',
    'RawDescriptionHelpFormatter',
    'RawTextHelpFormatter',
    'CompilerVersion',
    'BoolType',
    'FileType',
    'PathType',
    'RegexType',
    'ClangVersionType',
    'SwiftVersionType',
    'ShellSplitType',
    'SUPPRESS',
    'OPTIONAL',
    'ZERO_OR_MORE',
    'ONE_OR_MORE',
]
| apache-2.0 |
tkerola/chainer | docs/source/conf.py | 4 | 14897 | # -*- coding: utf-8 -*-
#
# Chainer documentation build configuration file, created by
# sphinx-quickstart on Sun May 10 12:22:10 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import inspect
import os
import pkg_resources
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
import _docstring_check
import _autosummary_check
# Docs are built against the installed chainer package, not the source tree.
__version__ = pkg_resources.get_distribution('chainer').version
# Detect a Read the Docs build from the environment variables RTD injects.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
rtd_version = os.environ.get('READTHEDOCS_VERSION')
# Pick the git ref that :blob:/:tree: links point at: 'master' for the RTD
# "latest" build, the release tag otherwise.
if rtd_version == 'latest':
    tag = 'master'
else:
    tag = 'v{}'.format(__version__)
extlinks = {
    'blob': ('https://github.com/chainer/chainer/blob/{}/%s'.format(tag), ''),
    'tree': ('https://github.com/chainer/chainer/tree/{}/%s'.format(tag), ''),
}
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.extlinks',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.linkcode',
    '_napoleon_patch',
]
# The spelling checker is an optional dependency: enable it only when
# sphinxcontrib-spelling is installed (deliberate best-effort).
try:
    import sphinxcontrib.spelling # noqa
    extensions.append('sphinxcontrib.spelling')
except ImportError:
    pass
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Chainer'
copyright = u'2015, Preferred Networks, inc. and Preferred Infrastructure, inc.'
author = u'Preferred Networks, inc. and Preferred Infrastructure, inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Napoleon settings
napoleon_use_ivar = True
napoleon_include_special_with_doc = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read the Docs injects its own theme; only set it explicitly for local builds.
if not on_rtd:
    html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'css/modified_theme.css'
# On RTD, list the stylesheet chain explicitly so the custom CSS is loaded
# after the RTD theme styles it overrides.
if on_rtd:
    html_context = {
        'css_files': [
            'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
            'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
            '_static/css/modified_theme.css',
        ],
    }
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Chainerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Chainer.tex', u'Chainer Documentation',
u'Preferred Networks, inc. and Preferred Infrastructure, inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'chainer', u'Chainer Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Chainer', u'Chainer Documentation',
author, 'Chainer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autosummary_generate = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'cupy': ('https://docs-cupy.chainer.org/en/latest/', None),
'chainercv': ('https://chainercv.readthedocs.io/en/latest/', None),
}
doctest_global_setup = '''
import os
import numpy as np
import chainer
from chainer.backends import cuda
from chainer.backends.cuda import cupy
from chainer import Function, gradient_check, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer.testing import doctest_helper
from chainer.training import extensions
import chainerx
np.random.seed(0)
'''
spelling_lang = 'en_US'
spelling_word_list_filename = 'spelling_wordlist.txt'
def setup(app):
    """Sphinx extension entry point: register this conf.py's event handlers."""
    handlers = (
        ('autodoc-process-docstring', _autodoc_process_docstring),
        ('build-finished', _build_finished),
    )
    for event, handler in handlers:
        app.connect(event, handler)
def _autodoc_process_docstring(app, what, name, obj, options, lines):
    """'autodoc-process-docstring' handler: run the project's docstring
    style checks on every documented object."""
    _docstring_check.check(app, what, name, obj, options, lines)
def _build_finished(app, exception):
    """'build-finished' handler: run autosummary checks, but only after a
    clean build."""
    if exception is not None:
        # The build already failed; skip the extra diagnostics.
        return
    _autosummary_check.check(app, exception)
def _import_object_from_name(module_name, fullname):
    """Resolve ``fullname`` (a dotted attribute path) inside the already
    imported module ``module_name``.

    Returns the resolved object, or ``None`` when the module has not been
    imported or the attribute chain cannot be followed.
    """
    obj = sys.modules.get(module_name)
    if obj is None:
        return None
    for comp in fullname.split('.'):
        try:
            obj = getattr(obj, comp)
        except AttributeError:
            # A documented name can be absent at build time (e.g. an API
            # defined conditionally); treat it as unresolvable instead of
            # letting the AttributeError abort the whole docs build --
            # callers (linkcode_resolve) already handle a None result.
            return None
    return obj
def _is_egg_directory(path):
    """Return True if *path* looks like an installed ``.egg`` directory."""
    if not path.endswith('.egg'):
        return False
    egg_info = os.path.join(path, 'EGG-INFO')
    return os.path.isdir(egg_info)
def _is_git_root(path):
    """Return True if *path* is the top of a git checkout (contains .git)."""
    git_dir = os.path.join(path, '.git')
    return os.path.isdir(git_dir)
# Cached root of the source tree; computed lazily on the first lookup.
_source_root = None
def _find_source_root(source_abs_path):
    """Return the root directory (git checkout or .egg install) that
    contains *source_abs_path*, caching the result for later calls.

    Note that READTHEDOCS* environment variable cannot be used, because they
    are not set under docker environment.
    """
    global _source_root
    if _source_root is not None:
        return _source_root
    current = os.path.dirname(source_abs_path)
    while not (_is_egg_directory(current) or _is_git_root(current)):
        parent = os.path.dirname(current)
        if len(parent) == len(current):
            # Walked off the top of the filesystem without finding a root.
            raise RuntimeError('Couldn\'t parse root directory from '
                               'source file: {}'.format(source_abs_path))
        current = parent
    _source_root = current
    return _source_root
def _get_source_relative_path(source_abs_path):
    """Return *source_abs_path* relative to the repository/egg root."""
    root = _find_source_root(source_abs_path)
    return os.path.relpath(source_abs_path, root)
def _get_sourcefile_and_linenumber(obj):
    """Return ``(filename, lineno)`` where *obj* is defined, or
    ``(None, None)`` when the source cannot be located (builtins, cython
    objects, non-code objects, ...)."""
    target = obj
    if callable(target):
        cells = getattr(target, '__closure__', None)
        if cells is not None:
            # contextlib.contextmanager hides the original function inside
            # the wrapper's closure; unwrap so we link to the real source.
            target = cells[0].cell_contents
    try:
        filename = inspect.getsourcefile(target)
    except TypeError:
        # target is not a module, class, function, ..etc.
        filename = None
    if filename is None:
        # inspect can also return None for cython objects.
        return None, None
    linenum = inspect.getsourcelines(target)[1]
    return filename, linenum
def linkcode_resolve(domain, info):
    """Sphinx ``linkcode`` hook: map a documented Python object to the URL
    of its source on GitHub, or return None to omit the link."""
    if domain != 'py' or not info['module']:
        return None
    # Escape hatch for builds where resolving source links is unwanted.
    if int(os.environ.get('CHAINER_DOCS_SKIP_LINKCODE', 0)) == 1:
        return None
    # Import the object from module path.
    obj = _import_object_from_name(info['module'], info['fullname'])
    # Only link objects defined inside the chainer package itself.
    mod = inspect.getmodule(obj)
    if mod is None:
        return None
    modname = mod.__name__
    if modname != 'chainer' and not modname.startswith('chainer.'):
        return None
    # Retrieve source file name and line number.
    filename, linenum = _get_sourcefile_and_linenumber(obj)
    if filename is None or linenum is None:
        return None
    relpath = _get_source_relative_path(os.path.realpath(filename))
    # ``tag`` is presumably the release tag/branch set earlier in this
    # conf.py -- confirm against the file header.
    return 'https://github.com/chainer/chainer/blob/{}/{}#L{}'.format(
        tag, relpath, linenum)
| mit |
sestrella/ansible | lib/ansible/plugins/terminal/bigip.py | 89 | 2499 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
    """Terminal plugin for F5 BIG-IP devices: prompt/error detection and
    shell setup for CLI connections."""
    # Patterns that mark the end of a command's stdout (tmsh/bash prompts
    # and interactive password prompts).
    terminal_stdout_re = [
        re.compile(br"[\r\n]?(?:\([^\)]+\)){,5}(?:>|#) ?$"),
        re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
        re.compile(br"(?:new|confirm) password:")
    ]
    # Patterns in device output that indicate a failed command.
    terminal_stderr_re = [
        re.compile(br"% ?Error"),
        re.compile(br"Syntax Error", re.I),
        re.compile(br"% User not present"),
        re.compile(br"% ?Bad secret"),
        re.compile(br"invalid input", re.I),
        re.compile(br"(?:incomplete|ambiguous) command", re.I),
        re.compile(br"connection timed out", re.I),
        re.compile(br"the new password was not confirmed", re.I),
        re.compile(br"[^\r\n]+ not found", re.I),
        re.compile(br"'[^']' +returned error code: ?\d+"),
        re.compile(br"[^\r\n]\/bin\/(?:ba)?sh")
    ]
    def on_open_shell(self):
        """Disable paging and widen the terminal so command output is not
        paginated or truncated.

        Tries the tmsh built-ins first; if ``modify`` is unavailable (the
        session landed in a bash shell), retries via explicit ``tmsh`` and
        ``stty``.
        """
        try:
            self._exec_cli_command(b'modify cli preference display-threshold 0 pager disabled')
            self._exec_cli_command(b'run /util bash -c "stty cols 1000000" 2> /dev/null')
        except AnsibleConnectionFailure as ex:
            output = str(ex)
            # NOTE(review): failures other than 'modify: command not found'
            # are silently swallowed here -- presumably best-effort setup,
            # but worth confirming.
            if 'modify: command not found' in output:
                try:
                    self._exec_cli_command(b'tmsh modify cli preference display-threshold 0 pager disabled')
                    self._exec_cli_command(b'stty cols 1000000 2> /dev/null')
                except AnsibleConnectionFailure as ex:
                    raise AnsibleConnectionFailure('unable to set terminal parameters')
| gpl-3.0 |
ClearCorp-dev/odoo | addons/survey/__init__.py | 385 | 1037 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import survey
import controllers
import wizard
| agpl-3.0 |
mallegonian/bottom | bottom/connection.py | 1 | 2156 | import asyncio
from . import unpack
from .log import logger
class Connection(object):
    """A single client connection: owns the asyncio stream pair, tracks
    connected state, and dispatches parsed messages through the ``events``
    handler.

    Written in the pre-3.5 asyncio style (@asyncio.coroutine / yield from).
    """
    def __init__(self, host, port, events, encoding, ssl):
        # ``events`` must provide a ``trigger(name, **kwargs)`` coroutine.
        self.events = events
        self._connected = False
        self.host, self.port = host, port
        self.reader, self.writer = None, None
        self.encoding = encoding
        self.ssl = ssl
    @asyncio.coroutine
    def connect(self, loop=None):
        """Open the stream pair and fire CLIENT_CONNECT; no-op when already
        connected."""
        if self.connected:
            return
        self.reader, self.writer = yield from asyncio.open_connection(
            self.host, self.port, ssl=self.ssl, loop=loop)
        self._connected = True
        yield from self.events.trigger(
            "CLIENT_CONNECT", host=self.host, port=self.port)
    @asyncio.coroutine
    def disconnect(self):
        """Close the writer, drop the streams and fire CLIENT_DISCONNECT;
        no-op when not connected."""
        if not self.connected:
            return
        self.writer.close()
        self.writer = None
        self.reader = None
        self._connected = False
        yield from self.events.trigger(
            "CLIENT_DISCONNECT", host=self.host, port=self.port)
    @property
    def connected(self):
        # True between a successful connect() and the next disconnect().
        return self._connected
    @asyncio.coroutine
    def run(self, loop=None):
        """Main loop: connect, then read/unpack/dispatch messages until the
        connection is lost."""
        yield from self.connect(loop=loop)
        while self.connected:
            msg = yield from self.read()
            if msg:
                try:
                    event, kwargs = unpack.unpack_command(msg)
                except ValueError:
                    # Unparseable line: log it and keep reading.
                    logger.info("PARSE ERROR {}".format(msg))
                else:
                    yield from self.events.trigger(event, **kwargs)
            else:
                # Lost connection
                yield from self.disconnect()
    def send(self, msg):
        """Write ``msg`` (stripped, newline-terminated, encoded) to the
        peer; silently dropped when there is no writer."""
        if self.writer:
            logger.debug("Sent %s", repr(msg))
            self.writer.write((msg.strip() + '\n').encode(self.encoding))
    @asyncio.coroutine
    def read(self):
        """Read one decoded, stripped line; return '' when disconnected or
        at end of stream."""
        if not self.reader:
            return ''
        try:
            msg = yield from self.reader.readline()
            # NOTE(review): "Recieved" is a typo in this log message; left
            # unchanged here because it is a runtime string.
            logger.debug("Recieved %s", repr(msg))
            return msg.decode(self.encoding, 'ignore').strip()
        except EOFError:
            # NOTE(review): StreamReader.readline normally signals EOF by
            # returning b'' rather than raising EOFError -- presumably a
            # defensive guard; confirm before removing.
            return ''
| mit |
Stavitsky/nova | nova/tests/unit/test_nova_manage.py | 5 | 25360 | # Copyright 2011 OpenStack Foundation
# Copyright 2011 Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import StringIO
import sys
import fixtures
import mock
from nova.cmd import manage
from nova import context
from nova import db
from nova.db import migration
from nova.db.sqlalchemy import migration as sqla_migration
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.db import fakes as db_fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_network
from nova.tests.unit import test_flavors
class FixedIpCommandsTestCase(test.TestCase):
    """Tests for the ``nova-manage fixed`` commands (reserve/unreserve/list)
    against the stubbed-out network DB API."""
    def setUp(self):
        super(FixedIpCommandsTestCase, self).setUp()
        db_fakes.stub_out_db_network_api(self.stubs)
        self.commands = manage.FixedIpCommands()
    def test_reserve(self):
        self.commands.reserve('192.168.0.100')
        address = db.fixed_ip_get_by_address(context.get_admin_context(),
                                             '192.168.0.100')
        self.assertEqual(address['reserved'], True)
    def test_reserve_nonexistent_address(self):
        # Exit code 2 signals failure to the nova-manage CLI.
        self.assertEqual(2, self.commands.reserve('55.55.55.55'))
    def test_unreserve(self):
        self.commands.unreserve('192.168.0.100')
        address = db.fixed_ip_get_by_address(context.get_admin_context(),
                                             '192.168.0.100')
        self.assertEqual(address['reserved'], False)
    def test_unreserve_nonexistent_address(self):
        self.assertEqual(2, self.commands.unreserve('55.55.55.55'))
    def test_list(self):
        # Capture stdout; list() prints a table of fixed IPs.
        self.useFixture(fixtures.MonkeyPatch('sys.stdout',
                                             StringIO.StringIO()))
        self.commands.list()
        self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
    def test_list_just_one_host(self):
        def fake_fixed_ip_get_by_host(*args, **kwargs):
            return [db_fakes.fixed_ip_fields]
        self.useFixture(fixtures.MonkeyPatch(
            'nova.db.fixed_ip_get_by_host',
            fake_fixed_ip_get_by_host))
        self.useFixture(fixtures.MonkeyPatch('sys.stdout',
                                             StringIO.StringIO()))
        self.commands.list('banana')
        self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
class FloatingIpCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage floating``, mainly CIDR-to-host expansion."""
    def setUp(self):
        super(FloatingIpCommandsTestCase, self).setUp()
        db_fakes.stub_out_db_network_api(self.stubs)
        self.commands = manage.FloatingIpCommands()
    def test_address_to_hosts(self):
        def assert_loop(result, expected):
            for ip in result:
                self.assertIn(str(ip), expected)
        address_to_hosts = self.commands.address_to_hosts
        # /32 and /31 have no usable host addresses and must be rejected.
        self.assertRaises(exception.InvalidInput, address_to_hosts,
                          '192.168.100.1/32')
        self.assertRaises(exception.InvalidInput, address_to_hosts,
                          '192.168.100.1/31')
        # /30
        expected = ["192.168.100.%s" % i for i in range(1, 3)]
        result = address_to_hosts('192.168.100.0/30')
        self.assertEqual(2, len(list(result)))
        assert_loop(result, expected)
        # /29
        expected = ["192.168.100.%s" % i for i in range(1, 7)]
        result = address_to_hosts('192.168.100.0/29')
        self.assertEqual(6, len(list(result)))
        assert_loop(result, expected)
        # /28
        expected = ["192.168.100.%s" % i for i in range(1, 15)]
        result = address_to_hosts('192.168.100.0/28')
        self.assertEqual(14, len(list(result)))
        assert_loop(result, expected)
        # /16
        result = address_to_hosts('192.168.100.0/16')
        self.assertEqual(65534, len(list(result)))
        # NOTE(dripton): I don't test /13 because it makes the test take 3s.
        # /12 gives over a million IPs, which is ridiculous.
        self.assertRaises(exception.InvalidInput, address_to_hosts,
                          '192.168.100.1/12')
class NetworkCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage network`` with the nova-network backend."""
    def setUp(self):
        super(NetworkCommandsTestCase, self).setUp()
        self.commands = manage.NetworkCommands()
        # Canned network record shared by the fakes and list/delete tests.
        self.net = {'id': 0,
                    'label': 'fake',
                    'injected': False,
                    'cidr': '192.168.0.0/24',
                    'cidr_v6': 'dead:beef::/64',
                    'multi_host': False,
                    'gateway_v6': 'dead:beef::1',
                    'netmask_v6': '64',
                    'netmask': '255.255.255.0',
                    'bridge': 'fa0',
                    'bridge_interface': 'fake_fa0',
                    'gateway': '192.168.0.1',
                    'broadcast': '192.168.0.255',
                    'dns1': '8.8.8.8',
                    'dns2': '8.8.4.4',
                    'vlan': 200,
                    'vlan_start': 201,
                    'vpn_public_address': '10.0.0.2',
                    'vpn_public_port': '2222',
                    'vpn_private_address': '192.168.0.2',
                    'dhcp_start': '192.168.0.3',
                    'project_id': 'fake_project',
                    'host': 'fake_host',
                    'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
        # The fakes below assert they are called with an admin context and
        # the expected lookup key, and operate on self.fake_net, which each
        # test sets up before stubbing them in.
        def fake_network_get_by_cidr(context, cidr):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(cidr, self.fake_net['cidr'])
            return db_fakes.FakeModel(dict(test_network.fake_network,
                                           **self.fake_net))
        def fake_network_get_by_uuid(context, uuid):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(uuid, self.fake_net['uuid'])
            return db_fakes.FakeModel(dict(test_network.fake_network,
                                           **self.fake_net))
        def fake_network_update(context, network_id, values):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(network_id, self.fake_net['id'])
            self.assertEqual(values, self.fake_update_value)
        self.fake_network_get_by_cidr = fake_network_get_by_cidr
        self.fake_network_get_by_uuid = fake_network_get_by_uuid
        self.fake_network_update = fake_network_update
    def test_create(self):
        # Verify every CLI argument is forwarded to create_networks().
        def fake_create_networks(obj, context, **kwargs):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(kwargs['label'], 'Test')
            self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
            self.assertEqual(kwargs['multi_host'], False)
            self.assertEqual(kwargs['num_networks'], 1)
            self.assertEqual(kwargs['network_size'], 256)
            self.assertEqual(kwargs['vlan'], 200)
            self.assertEqual(kwargs['vlan_start'], 201)
            self.assertEqual(kwargs['vpn_start'], 2000)
            self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
            self.assertEqual(kwargs['gateway'], '10.2.0.1')
            self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
            self.assertEqual(kwargs['bridge'], 'br200')
            self.assertEqual(kwargs['bridge_interface'], 'eth0')
            self.assertEqual(kwargs['dns1'], '8.8.8.8')
            self.assertEqual(kwargs['dns2'], '8.8.4.4')
        self.flags(network_manager='nova.network.manager.VlanManager')
        from nova.network import manager as net_manager
        self.stubs.Set(net_manager.VlanManager, 'create_networks',
                       fake_create_networks)
        self.commands.create(
            label='Test',
            cidr='10.2.0.0/24',
            num_networks=1,
            network_size=256,
            multi_host='F',
            vlan=200,
            vlan_start=201,
            vpn_start=2000,
            cidr_v6='fd00:2::/120',
            gateway='10.2.0.1',
            gateway_v6='fd00:2::22',
            bridge='br200',
            bridge_interface='eth0',
            dns1='8.8.8.8',
            dns2='8.8.4.4',
            uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
    def test_list(self):
        # Compare list()'s printed table against an expected header + row.
        def fake_network_get_all(context):
            return [db_fakes.FakeModel(self.net)]
        self.stubs.Set(db, 'network_get_all', fake_network_get_all)
        output = StringIO.StringIO()
        sys.stdout = output
        self.commands.list()
        sys.stdout = sys.__stdout__
        result = output.getvalue()
        _fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
                          "%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
                          "%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
        head = _fmt % {'id': 'id',
                       'cidr': 'IPv4',
                       'cidr_v6': 'IPv6',
                       'dhcp_start': 'start address',
                       'dns1': 'DNS1',
                       'dns2': 'DNS2',
                       'vlan': 'VlanID',
                       'project_id': 'project',
                       'uuid': "uuid"}
        body = _fmt % {'id': self.net['id'],
                       'cidr': self.net['cidr'],
                       'cidr_v6': self.net['cidr_v6'],
                       'dhcp_start': self.net['dhcp_start'],
                       'dns1': self.net['dns1'],
                       'dns2': self.net['dns2'],
                       'vlan': self.net['vlan'],
                       'project_id': self.net['project_id'],
                       'uuid': self.net['uuid']}
        answer = '%s\n%s\n' % (head, body)
        self.assertEqual(result, answer)
    def test_delete(self):
        # Deleting requires the network to be unassociated (no project/host).
        self.fake_net = self.net
        self.fake_net['project_id'] = None
        self.fake_net['host'] = None
        self.stubs.Set(db, 'network_get_by_uuid',
                       self.fake_network_get_by_uuid)
        def fake_network_delete_safe(context, network_id):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(network_id, self.fake_net['id'])
        self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
        self.commands.delete(uuid=self.fake_net['uuid'])
    def test_delete_by_cidr(self):
        self.fake_net = self.net
        self.fake_net['project_id'] = None
        self.fake_net['host'] = None
        self.stubs.Set(db, 'network_get_by_cidr',
                       self.fake_network_get_by_cidr)
        def fake_network_delete_safe(context, network_id):
            self.assertTrue(context.to_dict()['is_admin'])
            self.assertEqual(network_id, self.fake_net['id'])
        self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
        self.commands.delete(fixed_range=self.fake_net['cidr'])
    def _test_modify_base(self, update_value, project, host, dis_project=None,
                          dis_host=None):
        # Shared driver: fake_network_update asserts the values written to
        # the DB equal ``update_value``.
        self.fake_net = self.net
        self.fake_update_value = update_value
        self.stubs.Set(db, 'network_get_by_cidr',
                       self.fake_network_get_by_cidr)
        self.stubs.Set(db, 'network_update', self.fake_network_update)
        self.commands.modify(self.fake_net['cidr'], project=project, host=host,
                             dis_project=dis_project, dis_host=dis_host)
    def test_modify_associate(self):
        self._test_modify_base(update_value={'project_id': 'test_project',
                                             'host': 'test_host'},
                               project='test_project', host='test_host')
    def test_modify_unchanged(self):
        self._test_modify_base(update_value={}, project=None, host=None)
    def test_modify_disassociate(self):
        self._test_modify_base(update_value={'project_id': None, 'host': None},
                               project=None, host=None, dis_project=True,
                               dis_host=True)
class NeutronV2NetworkCommandsTestCase(test.TestCase):
    """With the Neutron network API the nova-manage network commands are
    unsupported: every command must return exit code 2."""
    def setUp(self):
        super(NeutronV2NetworkCommandsTestCase, self).setUp()
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        self.commands = manage.NetworkCommands()
    def test_create(self):
        self.assertEqual(2, self.commands.create())
    def test_list(self):
        self.assertEqual(2, self.commands.list())
    def test_delete(self):
        self.assertEqual(2, self.commands.delete())
    def test_modify(self):
        self.assertEqual(2, self.commands.modify('192.168.0.1'))
class ProjectCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage project`` quota updates."""
    def setUp(self):
        super(ProjectCommandsTestCase, self).setUp()
        self.commands = manage.ProjectCommands()
    def test_quota(self):
        # Capture the quota table printed to stdout.
        output = StringIO.StringIO()
        sys.stdout = output
        self.commands.quota(project_id='admin',
                            key='instances',
                            value='unlimited',
                            )
        sys.stdout = sys.__stdout__
        result = output.getvalue()
        print_format = "%-36s %-10s" % ('instances', 'unlimited')
        self.assertIn(print_format, result)
    def test_quota_update_invalid_key(self):
        # Unknown quota keys make the command fail with exit code 2.
        self.assertEqual(2, self.commands.quota('admin', 'volumes1', '10'))
class VmCommandsTestCase(test.TestCase):
    """Tests for the ``nova-manage vm list`` output, with and without a
    host filter."""
    def setUp(self):
        super(VmCommandsTestCase, self).setUp()
        self.commands = manage.VmCommands()
        self.fake_flavor = objects.Flavor(**test_flavors.DEFAULT_FLAVORS[0])
    def test_list_without_host(self):
        output = StringIO.StringIO()
        sys.stdout = output
        with mock.patch.object(objects.InstanceList, 'get_by_filters') as get:
            get.return_value = objects.InstanceList(
                objects=[fake_instance.fake_instance_obj(
                    context.get_admin_context(), host='foo-host',
                    flavor=self.fake_flavor,
                    system_metadata={})])
            self.commands.list()
        sys.stdout = sys.__stdout__
        result = output.getvalue()
        self.assertIn('node', result)  # check the header line
        self.assertIn('m1.tiny', result)  # flavor.name
        self.assertIn('foo-host', result)
    def test_list_with_host(self):
        output = StringIO.StringIO()
        sys.stdout = output
        with mock.patch.object(objects.InstanceList, 'get_by_host') as get:
            get.return_value = objects.InstanceList(
                objects=[fake_instance.fake_instance_obj(
                    context.get_admin_context(),
                    flavor=self.fake_flavor,
                    system_metadata={})])
            self.commands.list(host='fake-host')
        sys.stdout = sys.__stdout__
        result = output.getvalue()
        self.assertIn('node', result)  # check the header line
        self.assertIn('m1.tiny', result)  # flavor.name
        self.assertIn('fake-host', result)
class DBCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage db`` maintenance commands."""
    def setUp(self):
        super(DBCommandsTestCase, self).setUp()
        self.commands = manage.DbCommands()
    def test_archive_deleted_rows_negative(self):
        # A negative max_rows is invalid and returns exit code 1.
        self.assertEqual(1, self.commands.archive_deleted_rows(-1))
    @mock.patch.object(migration, 'db_null_instance_uuid_scan',
                       return_value={'foo': 0})
    def test_null_instance_uuid_scan_no_records_found(self, mock_scan):
        self.useFixture(fixtures.MonkeyPatch('sys.stdout',
                                             StringIO.StringIO()))
        self.commands.null_instance_uuid_scan()
        self.assertIn("There were no records found", sys.stdout.getvalue())
    # Helper shared by the readonly/delete variants below; mock.patch
    # supplies ``mock_scan``, the caller supplies ``delete``.
    @mock.patch.object(migration, 'db_null_instance_uuid_scan',
                       return_value={'foo': 1, 'bar': 0})
    def _test_null_instance_uuid_scan(self, mock_scan, delete):
        self.useFixture(fixtures.MonkeyPatch('sys.stdout',
                                             StringIO.StringIO()))
        self.commands.null_instance_uuid_scan(delete)
        output = sys.stdout.getvalue()
        if delete:
            self.assertIn("Deleted 1 records from table 'foo'.", output)
            self.assertNotIn("Deleted 0 records from table 'bar'.", output)
        else:
            self.assertIn("1 records in the 'foo' table", output)
            self.assertNotIn("0 records in the 'bar' table", output)
        self.assertNotIn("There were no records found", output)
    def test_null_instance_uuid_scan_readonly(self):
        self._test_null_instance_uuid_scan(delete=False)
    def test_null_instance_uuid_scan_delete(self):
        self._test_null_instance_uuid_scan(delete=True)
    def test_migrate_flavor_data_negative(self):
        self.assertEqual(1, self.commands.migrate_flavor_data(-1))
    @mock.patch.object(sqla_migration, 'db_version', return_value=2)
    def test_version(self, sqla_migrate):
        self.commands.version()
        sqla_migrate.assert_called_once_with(database='main')
    @mock.patch.object(sqla_migration, 'db_sync')
    def test_sync(self, sqla_sync):
        self.commands.sync(version=4)
        sqla_sync.assert_called_once_with(version=4, database='main')
class ApiDbCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage api_db`` version/sync (the 'api' database)."""
    def setUp(self):
        super(ApiDbCommandsTestCase, self).setUp()
        self.commands = manage.ApiDbCommands()
    @mock.patch.object(sqla_migration, 'db_version', return_value=2)
    def test_version(self, sqla_migrate):
        self.commands.version()
        sqla_migrate.assert_called_once_with(database='api')
    @mock.patch.object(sqla_migration, 'db_sync')
    def test_sync(self, sqla_sync):
        self.commands.sync(version=4)
        sqla_sync.assert_called_once_with(version=4, database='api')
class ServiceCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage service`` enable/disable input validation."""
    def setUp(self):
        super(ServiceCommandsTestCase, self).setUp()
        self.commands = manage.ServiceCommands()
    def test_service_enable_invalid_params(self):
        # Unknown host/service combinations fail with exit code 2.
        self.assertEqual(2, self.commands.enable('nohost', 'noservice'))
    def test_service_disable_invalid_params(self):
        self.assertEqual(2, self.commands.disable('nohost', 'noservice'))
class CellCommandsTestCase(test.TestCase):
    """Tests for ``nova-manage cell``: transport-URL host parsing and cell
    record creation."""
    def setUp(self):
        super(CellCommandsTestCase, self).setUp()
        self.commands = manage.CellCommands()
    def test_create_transport_hosts_multiple(self):
        """Test the _create_transport_hosts method
        when broker_hosts is set.
        """
        brokers = "127.0.0.1:5672,127.0.0.2:5671"
        thosts = self.commands._create_transport_hosts(
            'guest', 'devstack',
            broker_hosts=brokers)
        self.assertEqual(2, len(thosts))
        self.assertEqual('127.0.0.1', thosts[0].hostname)
        self.assertEqual(5672, thosts[0].port)
        self.assertEqual('127.0.0.2', thosts[1].hostname)
        self.assertEqual(5671, thosts[1].port)
    def test_create_transport_hosts_single(self):
        """Test the _create_transport_hosts method when hostname is passed."""
        thosts = self.commands._create_transport_hosts('guest', 'devstack',
                                                       hostname='127.0.0.1',
                                                       port=80)
        self.assertEqual(1, len(thosts))
        self.assertEqual('127.0.0.1', thosts[0].hostname)
        self.assertEqual(80, thosts[0].port)
    def test_create_transport_hosts_single_broker(self):
        """Test the _create_transport_hosts method for single broker_hosts."""
        thosts = self.commands._create_transport_hosts(
            'guest', 'devstack',
            broker_hosts='127.0.0.1:5672')
        self.assertEqual(1, len(thosts))
        self.assertEqual('127.0.0.1', thosts[0].hostname)
        self.assertEqual(5672, thosts[0].port)
    def test_create_transport_hosts_both(self):
        """Test the _create_transport_hosts method when both broker_hosts
        and hostname/port are passed; broker_hosts takes precedence.
        """
        thosts = self.commands._create_transport_hosts(
            'guest', 'devstack',
            broker_hosts='127.0.0.1:5672',
            hostname='127.0.0.2', port=80)
        self.assertEqual(1, len(thosts))
        self.assertEqual('127.0.0.1', thosts[0].hostname)
        self.assertEqual(5672, thosts[0].port)
    def test_create_transport_hosts_wrong_val(self):
        """Test the _create_transport_hosts method when broker_hosts
        is wrongly specified
        """
        self.assertRaises(ValueError,
                          self.commands._create_transport_hosts,
                          'guest', 'devstack',
                          broker_hosts='127.0.0.1:5672,127.0.0.1')
    def test_create_transport_hosts_wrong_port_val(self):
        """Test the _create_transport_hosts method when port in
        broker_hosts is wrongly specified
        """
        self.assertRaises(ValueError,
                          self.commands._create_transport_hosts,
                          'guest', 'devstack',
                          broker_hosts='127.0.0.1:')
    def test_create_transport_hosts_wrong_port_arg(self):
        """Test the _create_transport_hosts method when port
        argument is wrongly specified
        """
        self.assertRaises(ValueError,
                          self.commands._create_transport_hosts,
                          'guest', 'devstack',
                          hostname='127.0.0.1', port='ab')
    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(db, 'cell_create')
    def test_create_broker_hosts(self, mock_db_cell_create, mock_ctxt):
        """Test the create function when broker_hosts is
        passed
        """
        cell_tp_url = "fake://guest:devstack@127.0.0.1:5432"
        cell_tp_url += ",guest:devstack@127.0.0.2:9999/"
        ctxt = mock.sentinel
        mock_ctxt.return_value = mock.sentinel
        self.commands.create("test",
                             broker_hosts='127.0.0.1:5432,127.0.0.2:9999',
                             woffset=0, wscale=0,
                             username="guest", password="devstack")
        exp_values = {'name': "test",
                      'is_parent': False,
                      'transport_url': cell_tp_url,
                      'weight_offset': 0.0,
                      'weight_scale': 0.0}
        mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(db, 'cell_create')
    def test_create_broker_hosts_with_url_decoding_fix(self,
                                                       mock_db_cell_create,
                                                       mock_ctxt):
        """Test the create function when broker_hosts is
        passed with credentials containing characters that need URL encoding
        """
        cell_tp_url = "fake://the=user:the=password@127.0.0.1:5432/"
        ctxt = mock.sentinel
        mock_ctxt.return_value = mock.sentinel
        self.commands.create("test",
                             broker_hosts='127.0.0.1:5432',
                             woffset=0, wscale=0,
                             username="the=user",
                             password="the=password")
        exp_values = {'name': "test",
                      'is_parent': False,
                      'transport_url': cell_tp_url,
                      'weight_offset': 0.0,
                      'weight_scale': 0.0}
        mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
    @mock.patch.object(context, 'get_admin_context')
    @mock.patch.object(db, 'cell_create')
    def test_create_hostname(self, mock_db_cell_create, mock_ctxt):
        """Test the create function when hostname and port is
        passed
        """
        cell_tp_url = "fake://guest:devstack@127.0.0.1:9999/"
        ctxt = mock.sentinel
        mock_ctxt.return_value = mock.sentinel
        self.commands.create("test",
                             hostname='127.0.0.1', port="9999",
                             woffset=0, wscale=0,
                             username="guest", password="devstack")
        exp_values = {'name': "test",
                      'is_parent': False,
                      'transport_url': cell_tp_url,
                      'weight_offset': 0.0,
                      'weight_scale': 0.0}
        mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
| apache-2.0 |
jmwright/cadquery-projects | sewing-machine-thread-guides/thread_guide_1.py | 1 | 1243 | import cadquery as cq
# Builds a sewing-machine thread guide: a central disc with a fan-shaped
# plate carrying six thread holes, a shaft bore and two set-screw holes.
# Dimensions are presumably millimeters (cadquery's conventional unit) --
# TODO confirm.
core_rad = 45.0 / 2.0
# Objects to make the basic shape of the guide
guide_core = (cq.Workplane("XY")
              .circle(core_rad)
              .extrude(3.0))
# Fan plate: closed wire (horizontal line, arc, back to start) extruded to
# the same 3.0 thickness as the core.
guide_fan = (cq.Workplane("XY")
             .moveTo(0.0, -10.0)
             .hLine(core_rad + 15)
             .threePointArc((0.0, core_rad + 15), (-core_rad - 15, -10.0))
             .close()
             .extrude(3.0))
# Fuse both objects so they act as one
guide = guide_core.union(guide_fan)
# Put guide holes in fan: six 2.5-radius holes on a polar array 7.5 outside
# the core radius.
guide = guide.faces(">Z").workplane(centerOption="ProjectedOrigin").polarArray(core_rad + 7.5, -10, 90, 6).circle(2.5).cutThruAll()
# Center shaft boss
#guide = guide.faces(">Z").workplane(centerOption="ProjectedOrigin").circle(10.0).extrude(7.0)
# Put the center hole in (9.75 diameter shaft bore)
guide = guide.faces("<Z").workplane(centerOption="ProjectedOrigin").circle(9.75 / 2.0).cutThruAll() if False else guide.faces(">Z").workplane(centerOption="ProjectedOrigin").circle(9.75 / 2.0).cutThruAll()
# Put the set screw holes in
guide = guide.faces("<Z").workplane(centerOption="ProjectedOrigin", invert=True).transformed(offset = (0, 0, 7)).transformed(rotate=(0, 90, 0)).circle(2.5 / 2.0).cutThruAll()
# Export to STL for printing
# NOTE(review): hard-coded user-specific output path.
guide.val().exportStl("/home/jwright/Downloads/guide_1.stl", precision=0.0001)
show_object(guide)
youprofit/shogun | examples/undocumented/python_modular/kernel_ssk_string_modular.py | 24 | 1237 | #
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (W) 2014 Soumyajit De
#
#!/usr/bin/env python
from tools.load import LoadMatrix

lm = LoadMatrix()
traindat = lm.load_dna('../data/fm_train_dna.dat')
testdat = lm.load_dna('../data/fm_test_dna.dat')

# Each entry: [train data, test data, maxlen, decay].
parameter_list = [[traindat, testdat, 2, 0.75], [traindat, testdat, 3, 0.75]]


def kernel_ssk_string_modular(fm_train_dna=traindat, fm_test_dna=testdat, maxlen=1, decay=1):
    """Compute train and test kernel matrices for the subsequence string
    kernel over DNA strings and return them with the kernel object."""
    from modshogun import SubsequenceStringKernel
    from modshogun import StringCharFeatures, DNA

    train_feats = StringCharFeatures(fm_train_dna, DNA)
    test_feats = StringCharFeatures(fm_test_dna, DNA)

    ssk = SubsequenceStringKernel(train_feats, train_feats, maxlen, decay)
    km_train = ssk.get_kernel_matrix()

    ssk.init(train_feats, test_feats)
    km_test = ssk.get_kernel_matrix()
    return km_train, km_test, ssk


if __name__ == '__main__':
    print('SubsequenceStringKernel DNA')
    kernel_ssk_string_modular(*parameter_list[0])
    kernel_ssk_string_modular(*parameter_list[1])
| gpl-3.0 |
willzhang05/cslbot | cslbot/commands/issue.py | 2 | 4083 | # Copyright (C) 2013-2015 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Fox Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from requests import get
from random import choice
from ..helpers import arguments
from ..helpers.orm import Issues
from ..helpers.command import Command
from ..helpers.web import create_issue
@Command(['issue', 'bug'], ['source', 'db', 'config', 'type', 'is_admin', 'nick'])
def cmd(send, msg, args):
    """Files a github issue or gets an open one.
    Syntax: {command} <title [--desc description]|--get <number>>
    """
    # Behavior overview:
    # - "--get"/"--show" (or a bare issue number): look up an existing issue.
    # - no arguments: pick a random open issue from the repository.
    # - a title: admins file it directly on GitHub; everyone else queues it
    #   in the local database for approval.
    repo = args['config']['api']['githubrepo']
    apikey = args['config']['api']['githubapikey']
    if not repo:
        send("GitHub repository undefined in config.cfg!")
        return
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('title', nargs='*', default='')
    parser.add_argument('--get', '--show', action='store_true')
    parser.add_argument('--description', nargs='+', default="No description given.")
    cmdargs, remainder = parser.parse_known_args(msg)
    # argparse hands back lists for nargs='*'/'+'; normalize both to strings.
    if isinstance(cmdargs.title, list):
        cmdargs.title = ' '.join(cmdargs.title)
    if isinstance(cmdargs.description, list):
        cmdargs.description = ' '.join(cmdargs.description)
    if remainder:
        cmdargs.title = "%s %s" % (cmdargs.title, ' '.join(remainder))
    if args['type'] == 'privmsg':
        send('You want to let everybody know about your problems, right?')
    elif cmdargs.get or cmdargs.title.isdigit():
        # Bug fix: "--get" with a missing or non-numeric issue number used to
        # crash on int(); reject it explicitly instead.
        if not cmdargs.title.isdigit():
            send("Invalid Issue Number")
            return
        issue = get('https://api.github.com/repos/%s/issues/%d' % (repo, int(cmdargs.title))).json()
        # The GitHub API returns a dict with a 'message' key on errors.
        if 'message' in issue:
            send("Invalid Issue Number")
        else:
            send("%s (%s) -- %s" % (issue['title'], issue['state'], issue['html_url']))
    elif not cmdargs.title:
        # No arguments: page through all open issues (100 per page) and
        # serve a random one.
        issues = []
        n = 1
        headers = {'Authorization': 'token %s' % apikey}  # loop-invariant, hoisted
        while True:
            page = get('https://api.github.com/repos/%s/issues' % repo, params={'per_page': '100', 'page': n}, headers=headers).json()
            n += 1
            if page:
                issues += page
            else:
                break
        if not issues:
            send("No open issues to choose from!")
        else:
            issue = choice(issues)
            # Pull requests also show up in the issues API; exclude them
            # from the reported count.
            num_issues = len([x for x in issues if 'pull_request' not in x])
            send("There are %d open issues, here's one." % num_issues)
            send("#%d -- %s -- %s" % (issue['number'], issue['title'], issue['html_url']))
    elif cmdargs.title and args['is_admin'](args['nick']):
        url, success = create_issue(cmdargs.title, cmdargs.description, args['source'], repo, apikey)
        if success:
            send("Issue created -- %s -- %s -- %s" % (url, cmdargs.title, cmdargs.description))
        else:
            send("Error creating issue: %s" % url)
    elif cmdargs.title:
        row = Issues(title=cmdargs.title, description=cmdargs.description, source=str(args['source']))  # str needed to make mysqlconnector happy
        args['db'].add(row)
        args['db'].flush()
        send("New Issue: #%d -- %s -- %s, Submitted by %s" % (row.id, cmdargs.title, cmdargs.description, args['nick']), target=args['config']['core']['ctrlchan'])
        send("Issue submitted for approval.", target=args['nick'])
    else:
        send("Invalid arguments.")
| gpl-2.0 |
gsehub/edx-platform | openedx/core/djangoapps/course_groups/tests/test_partition_scheme.py | 22 | 15469 | """
Test the partitions and partitions service
"""
import django.test
from mock import patch
from nose.plugins.attrib import attr
from courseware.tests.test_masquerade import StaffMasqueradeTestCase
from student.tests.factories import UserFactory
from xmodule.partitions.partitions import Group, UserPartition, UserPartitionError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.factories import ToyCourseFactory
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from openedx.core.djangolib.testing.utils import skip_unless_lms
from ..partition_scheme import CohortPartitionScheme, get_cohorted_user_partition
from ..models import CourseUserGroupPartitionGroup
from ..views import link_cohort_to_partition_group, unlink_cohort_partition_group
from ..cohorts import add_user_to_cohort, remove_user_from_cohort, get_course_cohorts
from .helpers import CohortFactory, config_course_cohorts
@attr(shard=2)
class TestCohortPartitionScheme(ModuleStoreTestCase):
    """
    Test the logic for linking a user to a partition group based on their cohort.
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE
    def setUp(self):
        """
        Regenerate a course with cohort configuration, partition and groups,
        and a student for each test.
        """
        super(TestCohortPartitionScheme, self).setUp()
        self.course_key = ToyCourseFactory.create().id
        self.course = modulestore().get_course(self.course_key)
        config_course_cohorts(self.course, is_cohorted=True)
        # Two groups inside a single cohort-scheme partition (id 0).
        self.groups = [Group(10, 'Group 10'), Group(20, 'Group 20')]
        self.user_partition = UserPartition(
            0,
            'Test Partition',
            'for testing purposes',
            self.groups,
            scheme=CohortPartitionScheme
        )
        self.student = UserFactory.create()
    def assert_student_in_group(self, group, partition=None):
        """
        Utility for checking that our test student comes up as assigned to the
        specified partition (or, if None, no partition at all)
        """
        self.assertEqual(
            CohortPartitionScheme.get_group_for_user(
                self.course_key,
                self.student,
                partition or self.user_partition,
                # use_cached=False: presumably forces a fresh lookup instead
                # of a per-request cache -- TODO confirm scheme semantics.
                use_cached=False
            ),
            group
        )
    def test_student_cohort_assignment(self):
        """
        Test that the CohortPartitionScheme continues to return the correct
        group for a student as the student is moved in and out of different
        cohorts.
        """
        first_cohort, second_cohort = [
            CohortFactory(course_id=self.course_key) for _ in range(2)
        ]
        # place student 0 into first cohort
        add_user_to_cohort(first_cohort, self.student.username)
        # no cohort -> group link exists yet, so no group is resolved
        self.assert_student_in_group(None)
        # link first cohort to group 0 in the partition
        link_cohort_to_partition_group(
            first_cohort,
            self.user_partition.id,
            self.groups[0].id,
        )
        # link second cohort to group 1 in the partition
        link_cohort_to_partition_group(
            second_cohort,
            self.user_partition.id,
            self.groups[1].id,
        )
        self.assert_student_in_group(self.groups[0])
        # move student from first cohort to second cohort
        add_user_to_cohort(second_cohort, self.student.username)
        self.assert_student_in_group(self.groups[1])
        # move the student out of the cohort
        remove_user_from_cohort(second_cohort, self.student.username)
        self.assert_student_in_group(None)
    def test_cohort_partition_group_assignment(self):
        """
        Test that the CohortPartitionScheme returns the correct group for a
        student in a cohort when the cohort link is created / moved / deleted.
        """
        test_cohort = CohortFactory(course_id=self.course_key)
        # assign user to cohort (but cohort isn't linked to a partition group yet)
        add_user_to_cohort(test_cohort, self.student.username)
        # scheme should not yet find any link
        self.assert_student_in_group(None)
        # link cohort to group 0
        link_cohort_to_partition_group(
            test_cohort,
            self.user_partition.id,
            self.groups[0].id,
        )
        # now the scheme should find a link
        self.assert_student_in_group(self.groups[0])
        # link cohort to group 1 (first unlink it from group 0)
        unlink_cohort_partition_group(test_cohort)
        link_cohort_to_partition_group(
            test_cohort,
            self.user_partition.id,
            self.groups[1].id,
        )
        # scheme should pick up the link
        self.assert_student_in_group(self.groups[1])
        # unlink cohort from anywhere
        unlink_cohort_partition_group(
            test_cohort,
        )
        # scheme should now return nothing
        self.assert_student_in_group(None)
    def test_student_lazily_assigned(self):
        """
        Test that the lazy assignment of students to cohorts works
        properly when accessed via the CohortPartitionScheme.
        """
        # don't assign the student to any cohort initially
        self.assert_student_in_group(None)
        # get the default cohort, which is automatically created
        # during the `get_course_cohorts` API call if it doesn't yet exist
        cohort = get_course_cohorts(self.course)[0]
        # map that cohort to a group in our partition
        link_cohort_to_partition_group(
            cohort,
            self.user_partition.id,
            self.groups[0].id,
        )
        # The student will be lazily assigned to the default cohort
        # when CohortPartitionScheme.get_group_for_user makes its internal
        # call to cohorts.get_cohort.
        self.assert_student_in_group(self.groups[0])
    def setup_student_in_group_0(self):
        """
        Utility to set up a cohort, add our student to the cohort, and link
        the cohort to self.groups[0]
        """
        test_cohort = CohortFactory(course_id=self.course_key)
        # link cohort to group 0
        link_cohort_to_partition_group(
            test_cohort,
            self.user_partition.id,
            self.groups[0].id,
        )
        # place student into cohort
        add_user_to_cohort(test_cohort, self.student.username)
        # check link is correct
        self.assert_student_in_group(self.groups[0])
    def test_partition_changes_nondestructive(self):
        """
        If the name of a user partition is changed, or a group is added to the
        partition, links from cohorts do not break.
        If the name of a group is changed, links from cohorts do not break.
        """
        self.setup_student_in_group_0()
        # to simulate a non-destructive configuration change on the course, create
        # a new partition with the same id and scheme but with groups renamed and
        # a group added
        new_groups = [Group(10, 'New Group 10'), Group(20, 'New Group 20'), Group(30, 'New Group 30')]
        new_user_partition = UserPartition(
            0,  # same id
            'Different Partition',
            'dummy',
            new_groups,
            scheme=CohortPartitionScheme,
        )
        # the link should still work
        self.assert_student_in_group(new_groups[0], new_user_partition)
    def test_missing_group(self):
        """
        If the group is deleted (or its id is changed), there's no referential
        integrity enforced, so any references from cohorts to that group will be
        lost. A warning should be logged when links are found from cohorts to
        groups that no longer exist.
        """
        self.setup_student_in_group_0()
        # to simulate a destructive change on the course, create a new partition
        # with the same id, but different group ids.
        new_user_partition = UserPartition(
            0,  # same id
            'Another Partition',
            'dummy',
            [Group(11, 'Not Group 10'), Group(21, 'Not Group 20')],  # different ids
            scheme=CohortPartitionScheme,
        )
        # the partition will be found since it has the same id, but the group
        # ids aren't present anymore, so the scheme returns None (and logs a
        # warning)
        with patch('openedx.core.djangoapps.course_groups.partition_scheme.log') as mock_log:
            self.assert_student_in_group(None, new_user_partition)
            self.assertTrue(mock_log.warn.called)
            self.assertRegexpMatches(mock_log.warn.call_args[0][0], 'group not found')
    def test_missing_partition(self):
        """
        If the user partition is deleted (or its id is changed), there's no
        referential integrity enforced, so any references from cohorts to that
        partition's groups will be lost. A warning should be logged when links
        are found from cohorts to partitions that do not exist.
        """
        self.setup_student_in_group_0()
        # to simulate another destructive change on the course, create a new
        # partition with a different id, but using the same groups.
        new_user_partition = UserPartition(
            1,  # different id
            'Moved Partition',
            'dummy',
            [Group(10, 'Group 10'), Group(20, 'Group 20')],  # same ids
            scheme=CohortPartitionScheme,
        )
        # the partition will not be found even though the group ids match, so the
        # scheme returns None (and logs a warning).
        with patch('openedx.core.djangoapps.course_groups.partition_scheme.log') as mock_log:
            self.assert_student_in_group(None, new_user_partition)
            self.assertTrue(mock_log.warn.called)
            self.assertRegexpMatches(mock_log.warn.call_args[0][0], 'partition mismatch')
@attr(shard=2)
class TestExtension(django.test.TestCase):
    """
    Ensure the 'cohort' partition scheme is correctly plugged in via the
    entry point declared in setup.py, and that unknown schemes are rejected.
    """
    def test_get_scheme(self):
        scheme = UserPartition.get_scheme('cohort')
        self.assertEqual(scheme, CohortPartitionScheme)
        with self.assertRaisesRegexp(UserPartitionError, 'Unrecognized scheme'):
            UserPartition.get_scheme('other')
@attr(shard=2)
class TestGetCohortedUserPartition(ModuleStoreTestCase):
    """
    Test that `get_cohorted_user_partition` returns the first user_partition with scheme `CohortPartitionScheme`.
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE

    def setUp(self):
        """
        Build a course plus one random-scheme partition and two
        cohort-scheme partitions for each test.
        """
        super(TestGetCohortedUserPartition, self).setUp()
        self.course_key = ToyCourseFactory.create().id
        self.course = modulestore().get_course(self.course_key)
        self.student = UserFactory.create()
        self.random_user_partition = UserPartition(
            1,
            'Random Partition',
            'Should not be returned',
            [Group(0, 'Group 0'), Group(1, 'Group 1')],
            scheme=RandomUserPartitionScheme
        )
        self.cohort_user_partition = UserPartition(
            0,
            'Cohort Partition 1',
            'Should be returned',
            [Group(10, 'Group 10'), Group(20, 'Group 20')],
            scheme=CohortPartitionScheme
        )
        self.second_cohort_user_partition = UserPartition(
            2,
            'Cohort Partition 2',
            'Should not be returned',
            [Group(10, 'Group 10'), Group(1, 'Group 1')],
            scheme=CohortPartitionScheme
        )

    def test_returns_first_cohort_user_partition(self):
        """
        The first cohort-scheme partition in the list wins, regardless of
        its neighbors.
        """
        self.course.user_partitions.extend([
            self.random_user_partition,
            self.cohort_user_partition,
            self.second_cohort_user_partition,
        ])
        self.assertEqual(self.cohort_user_partition, get_cohorted_user_partition(self.course))

    def test_no_cohort_user_partitions(self):
        """
        None is returned when no cohort-scheme partition exists.
        """
        self.course.user_partitions.append(self.random_user_partition)
        self.assertIsNone(get_cohorted_user_partition(self.course))
@attr(shard=2)
class TestMasqueradedGroup(StaffMasqueradeTestCase):
    """
    Check for staff being able to masquerade as belonging to a group.
    """
    def setUp(self):
        """
        Add a cohort-scheme user partition (with two groups) to the course
        used by the masquerade test base class.
        """
        super(TestMasqueradedGroup, self).setUp()
        self.user_partition = UserPartition(
            0, 'Test User Partition', '',
            [Group(0, 'Group 1'), Group(1, 'Group 2')],
            scheme_id='cohort'
        )
        self.course.user_partitions.append(self.user_partition)
        # Persist the partition change to the modulestore.
        modulestore().update_item(self.course, self.test_user.id)
    def _verify_masquerade_for_group(self, group):
        """
        Verify that the masquerade works for the specified group id.
        """
        self.ensure_masquerade_as_group_member(  # pylint: disable=no-member
            self.user_partition.id,
            group.id if group is not None else None
        )
        scheme = self.user_partition.scheme
        # The scheme should report the masqueraded group, not the real one.
        self.assertEqual(
            scheme.get_group_for_user(self.course.id, self.test_user, self.user_partition),
            group
        )
    def _verify_masquerade_for_all_groups(self):
        """
        Verify that the staff user can masquerade as being in all groups
        as well as no group.
        """
        self._verify_masquerade_for_group(self.user_partition.groups[0])
        self._verify_masquerade_for_group(self.user_partition.groups[1])
        self._verify_masquerade_for_group(None)
    @skip_unless_lms
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_group_masquerade(self):
        """
        Tests that a staff member can masquerade as being in a particular group.
        """
        self._verify_masquerade_for_all_groups()
    @skip_unless_lms
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_group_masquerade_with_cohort(self):
        """
        Tests that a staff member can masquerade as being in a particular group
        when that staff member also belongs to a cohort with a corresponding
        group.
        """
        self.course.cohort_config = {'cohorted': True}
        modulestore().update_item(self.course, self.test_user.id)  # pylint: disable=no-member
        cohort = CohortFactory.create(course_id=self.course.id, users=[self.test_user])
        # Link the staff user's cohort to the partition's first group.
        CourseUserGroupPartitionGroup(
            course_user_group=cohort,
            partition_id=self.user_partition.id,
            group_id=self.user_partition.groups[0].id
        ).save()
        # When the staff user is masquerading as being in a None group
        # (within an existent UserPartition), we should treat that as
        # an explicit None, not defaulting to the user's cohort's
        # partition group.
        self._verify_masquerade_for_all_groups()
| agpl-3.0 |
wangjiaxi/django-dynamic-forms | example/example/settings.py | 2 | 2233 | """
Django settings for example project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root directory (two levels up from this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'im=zrjlt*dhw$3_ibo7mxo6p+vbzr))$op+dt#=l^7e%)2vyc&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # The app this example project demonstrates.
    'dynamic_forms',
    # Uncomment the two lines below to enable the optional captcha support.
    # 'captcha',
    # 'dynamic_forms.contrib.simple_captcha',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # dynamic-forms middleware; placed last so it runs after the standard
    # stack -- presumably it serves form models for unmatched URLs; confirm
    # against the dynamic_forms documentation.
    'dynamic_forms.middlewares.FormModelMiddleware',
)
ROOT_URLCONF = 'example.urls'
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
| bsd-3-clause |
bzbarsky/servo | tests/wpt/web-platform-tests/tools/py/py/_code/code.py | 180 | 27436 | import py
import sys
from inspect import CO_VARARGS, CO_VARKEYWORDS
# Keep a reference to the builtin repr() before anything below shadows it.
builtin_repr = repr
# Python 2 calls this module 'repr'; Python 3 renamed it to 'reprlib'.
reprlib = py.builtin._tryimport('repr', 'reprlib')
# Pick the right format_exception_only implementation for this interpreter:
# the stdlib one on Python 3, a bundled backport on Python 2.
if sys.version_info[0] >= 3:
    from traceback import format_exception_only
else:
    from py._code._py2traceback import format_exception_only
class Code(object):
    """ wrapper around Python code objects """
    def __init__(self, rawcode):
        # Accept plain functions/methods too: unwrap to the raw code object.
        if not hasattr(rawcode, "co_filename"):
            rawcode = py.code.getrawcode(rawcode)
        try:
            self.filename = rawcode.co_filename
            # stored 0-based; co_firstlineno is 1-based
            self.firstlineno = rawcode.co_firstlineno - 1
            self.name = rawcode.co_name
        except AttributeError:
            raise TypeError("not a code object: %r" %(rawcode,))
        self.raw = rawcode

    def __eq__(self, other):
        return self.raw == other.raw

    def __ne__(self, other):
        return not self == other

    @property
    def path(self):
        """ return a path object pointing to source code (note that it
        might not point to an actually existing file). """
        p = py.path.local(self.raw.co_filename)
        # co_filename may name something that never existed on disk
        # (e.g. "<stdin>"); fall back to the raw string in that case.
        if not p.check():
            # XXX maybe try harder like the weird logic
            # in the standard lib [linecache.updatecache] does?
            p = self.raw.co_filename
        return p

    @property
    def fullsource(self):
        """ return a py.code.Source object for the full source file of the code
        """
        from py._code import source
        full, _ = source.findsource(self.raw)
        return full

    def source(self):
        """ return a py.code.Source object for the code object's source only
        """
        return py.code.Source(self.raw)

    def getargs(self, var=False):
        """ return a tuple with the argument names for the code object

        if 'var' is set True also return the names of the variable and
        keyword arguments when present
        """
        raw = self.raw
        argcount = raw.co_argcount
        if var:
            # Bug fix: CO_VARARGS/CO_VARKEYWORDS are bit flags (values 4 and
            # 8).  Adding the raw masked value used to over-extend the slice
            # below, leaking ordinary local variable names into the result.
            # Count each present *args / **kwargs as exactly one name.
            if raw.co_flags & CO_VARARGS:
                argcount += 1
            if raw.co_flags & CO_VARKEYWORDS:
                argcount += 1
            # NOTE: keyword-only arguments (py3) are not accounted for here.
        return raw.co_varnames[:argcount]
class Frame(object):
    """Wrapper around a Python frame holding f_locals and f_globals
    in which expressions can be evaluated."""

    def __init__(self, frame):
        self.raw = frame
        self.lineno = frame.f_lineno - 1  # stored 0-based
        self.f_globals = frame.f_globals
        self.f_locals = frame.f_locals
        self.code = py.code.Code(frame.f_code)

    @property
    def statement(self):
        """The source statement this frame is currently executing."""
        fullsource = self.code.fullsource
        if fullsource is None:
            return py.code.Source("")
        return fullsource.getstatement(self.lineno)

    def eval(self, code, **vars):
        """Evaluate 'code' in the frame.

        'vars' are optional additional local variables.
        Returns the result of the evaluation.
        """
        scope = self.f_locals.copy()
        scope.update(vars)
        return eval(code, self.f_globals, scope)

    def exec_(self, code, **vars):
        """Execute 'code' in the frame.

        'vars' are optional additional local variables.
        """
        scope = self.f_locals.copy()
        scope.update(vars)
        py.builtin.exec_(code, self.f_globals, scope)

    def repr(self, object):
        """Return a 'safe' (non-recursive, one-line) string repr for 'object'."""
        return py.io.saferepr(object)

    def is_true(self, object):
        return object

    def getargs(self, var=False):
        """Return a list of (name, value) tuples for all arguments.

        With 'var' set True, also include the variable and keyword
        arguments when present.
        """
        pairs = []
        for argname in self.code.getargs(var):
            try:
                pairs.append((argname, self.f_locals[argname]))
            except KeyError:
                pass  # this can occur when using Psyco
        return pairs
class TracebackEntry(object):
    """ a single entry in a traceback """
    # repr style override ("short"/"long"); None means use the default
    _repr_style = None
    # cached result of reinterpret()
    exprinfo = None
    def __init__(self, rawentry):
        self._rawentry = rawentry
        # stored 0-based; tb_lineno is 1-based
        self.lineno = rawentry.tb_lineno - 1
    def set_repr_style(self, mode):
        assert mode in ("short", "long")
        self._repr_style = mode
    @property
    def frame(self):
        # wrap lazily: the raw frame stays authoritative
        return py.code.Frame(self._rawentry.tb_frame)
    @property
    def relline(self):
        # line number relative to the start of the enclosing code object
        return self.lineno - self.frame.code.firstlineno
    def __repr__(self):
        return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
    @property
    def statement(self):
        """ py.code.Source object for the current statement """
        source = self.frame.code.fullsource
        return source.getstatement(self.lineno)
    @property
    def path(self):
        """ path to the source code """
        return self.frame.code.path
    def getlocals(self):
        return self.frame.f_locals
    locals = property(getlocals, None, None, "locals of underlaying frame")
    def reinterpret(self):
        """Reinterpret the failing statement and returns a detailed information
        about what operations are performed."""
        if self.exprinfo is None:
            source = str(self.statement).strip()
            x = py.code._reinterpret(source, self.frame, should_fail=True)
            if not isinstance(x, str):
                raise TypeError("interpret returned non-string %r" % (x,))
            self.exprinfo = x
        return self.exprinfo
    def getfirstlinesource(self):
        # on Jython this firstlineno can be -1 apparently
        return max(self.frame.code.firstlineno, 0)
    def getsource(self, astcache=None):
        """ return failing source code. """
        # we use the passed in astcache to not reparse asttrees
        # within exception info printing
        from py._code.source import getstatementrange_ast
        source = self.frame.code.fullsource
        if source is None:
            return None
        key = astnode = None
        if astcache is not None:
            key = self.frame.code.path
            if key is not None:
                astnode = astcache.get(key, None)
        start = self.getfirstlinesource()
        try:
            astnode, _, end = getstatementrange_ast(self.lineno, source,
                                                    astnode=astnode)
        except SyntaxError:
            # fall back to a single-line range when the AST walk fails
            end = self.lineno + 1
        else:
            if key is not None:
                astcache[key] = astnode
        return source[start:end]
    source = property(getsource)
    def ishidden(self):
        """ return True if the current frame has a var __tracebackhide__
        resolving to True

        mostly for internal use
        """
        # check frame locals first, then its globals
        try:
            return self.frame.f_locals['__tracebackhide__']
        except KeyError:
            try:
                return self.frame.f_globals['__tracebackhide__']
            except KeyError:
                return False
    def __str__(self):
        try:
            fn = str(self.path)
        except py.error.Error:
            fn = '???'
        name = self.frame.code.name
        try:
            line = str(self.statement).lstrip()
        except KeyboardInterrupt:
            raise
        except:
            # source may be unavailable; never let __str__ itself raise
            line = "???"
        return "  File %r:%d in %s\n  %s\n" %(fn, self.lineno+1, name, line)
    def name(self):
        return self.frame.code.raw.co_name
    name = property(name, None, None, "co_name of underlaying code")
class Traceback(list):
    """ Traceback objects encapsulate and offer higher level
    access to Traceback entries.
    """
    # entry class; subclasses may override to wrap entries differently
    Entry = TracebackEntry
    def __init__(self, tb):
        """ initialize from given python traceback object. """
        # accept either a raw traceback object or an iterable of entries
        if hasattr(tb, 'tb_next'):
            def f(cur):
                while cur is not None:
                    yield self.Entry(cur)
                    cur = cur.tb_next
            list.__init__(self, f(tb))
        else:
            list.__init__(self, tb)
    def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
        """ return a Traceback instance wrapping part of this Traceback

            by provding any combination of path, lineno and firstlineno, the
            first frame to start the to-be-returned traceback is determined

            this allows cutting the first part of a Traceback instance e.g.
            for formatting reasons (removing some uninteresting bits that deal
            with handling of the exception/traceback)
        """
        for x in self:
            code = x.frame.code
            codepath = code.path
            # all provided criteria must match; omitted ones are ignored
            if ((path is None or codepath == path) and
                (excludepath is None or not hasattr(codepath, 'relto') or
                 not codepath.relto(excludepath)) and
                (lineno is None or x.lineno == lineno) and
                (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
                return Traceback(x._rawentry)
        return self
    def __getitem__(self, key):
        val = super(Traceback, self).__getitem__(key)
        # slicing a Traceback yields a Traceback, not a plain list
        if isinstance(key, type(slice(0))):
            val = self.__class__(val)
        return val
    def filter(self, fn=lambda x: not x.ishidden()):
        """ return a Traceback instance with certain items removed

            fn is a function that gets a single argument, a TracebackItem
            instance, and should return True when the item should be added
            to the Traceback, False when not

            by default this removes all the TracebackItems which are hidden
            (see ishidden() above)
        """
        return Traceback(filter(fn, self))
    def getcrashentry(self):
        """ return last non-hidden traceback entry that lead
        to the exception of a traceback.
        """
        # walk from the end backwards to find the innermost visible entry
        for i in range(-1, -len(self)-1, -1):
            entry = self[i]
            if not entry.ishidden():
                return entry
        return self[-1]
    def recursionindex(self):
        """ return the index of the frame/TracebackItem where recursion
            originates if appropriate, None if no recursion occurred
        """
        cache = {}
        for i, entry in enumerate(self):
            # id for the code.raw is needed to work around
            # the strange metaprogramming in the decorator lib from pypi
            # which generates code objects that have hash/value equality
            #XXX needs a test
            key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
            #print "checking for recursion at", key
            l = cache.setdefault(key, [])
            if l:
                f = entry.frame
                loc = f.f_locals
                # recursion detected when the same code location recurs with
                # equal local variables (compared inside the frame itself)
                for otherloc in l:
                    if f.is_true(f.eval(co_equal,
                        __recursioncache_locals_1=loc,
                        __recursioncache_locals_2=otherloc)):
                        return i
            l.append(entry.frame.f_locals)
        return None
# Pre-compiled comparison used by Traceback.recursionindex() to compare two
# previously captured f_locals dicts inside the frame under inspection.
co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
                   '?', 'eval')
class ExceptionInfo(object):
    """ wraps sys.exc_info() objects and offers
        help for navigating the traceback.
    """
    # prefix stripped from exconly(tryshort=True) for assertion errors
    _striptext = ''
    def __init__(self, tup=None, exprinfo=None):
        # default to the exception currently being handled
        if tup is None:
            tup = sys.exc_info()
            if exprinfo is None and isinstance(tup[1], AssertionError):
                exprinfo = getattr(tup[1], 'msg', None)
                if exprinfo is None:
                    exprinfo = str(tup[1])
                if exprinfo and exprinfo.startswith('assert '):
                    self._striptext = 'AssertionError: '
        self._excinfo = tup
        #: the exception class
        self.type = tup[0]
        #: the exception instance
        self.value = tup[1]
        #: the exception raw traceback
        self.tb = tup[2]
        #: the exception type name
        self.typename = self.type.__name__
        #: the exception traceback (py.code.Traceback instance)
        self.traceback = py.code.Traceback(self.tb)
    def __repr__(self):
        return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
    def exconly(self, tryshort=False):
        """ return the exception as a string

            when 'tryshort' resolves to True, and the exception is a
            py.code._AssertionError, only the actual exception part of
            the exception representation is returned (so 'AssertionError: ' is
            removed from the beginning)
        """
        lines = format_exception_only(self.type, self.value)
        text = ''.join(lines)
        text = text.rstrip()
        if tryshort:
            if text.startswith(self._striptext):
                text = text[len(self._striptext):]
        return text
    def errisinstance(self, exc):
        """ return True if the exception is an instance of exc """
        return isinstance(self.value, exc)
    def _getreprcrash(self):
        # locate the innermost visible entry and describe where it crashed
        exconly = self.exconly(tryshort=True)
        entry = self.traceback.getcrashentry()
        path, lineno = entry.frame.code.raw.co_filename, entry.lineno
        return ReprFileLocation(path, lineno+1, exconly)
    def getrepr(self, showlocals=False, style="long",
            abspath=False, tbfilter=True, funcargs=False):
        """ return str()able representation of this exception info.
            showlocals: show locals per traceback entry
            style: long|short|no|native traceback style
            tbfilter: hide entries (where __tracebackhide__ is true)

            in case of style==native, tbfilter and showlocals is ignored.
        """
        if style == 'native':
            # delegate to the stdlib traceback formatting
            return ReprExceptionInfo(ReprTracebackNative(
                py.std.traceback.format_exception(
                    self.type,
                    self.value,
                    self.traceback[0]._rawentry,
                )), self._getreprcrash())
        fmt = FormattedExcinfo(showlocals=showlocals, style=style,
            abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
        return fmt.repr_excinfo(self)
    def __str__(self):
        entry = self.traceback[-1]
        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
        return str(loc)
    def __unicode__(self):
        # Python 2 only; py3 never calls __unicode__
        entry = self.traceback[-1]
        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
        return unicode(loc)
class FormattedExcinfo(object):
    """ presenting information about failing Functions and Generators. """
    # markers prepended to traceback source lines:
    # flow_marker flags the statement that was executing,
    # fail_marker flags the exception text lines
    flow_marker = ">"
    fail_marker = "E"
    def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
        # showlocals: render a locals section per traceback entry
        # style: "long" | "short" | "no" traceback style
        # abspath: keep absolute paths instead of relativizing them
        # tbfilter: honour __tracebackhide__ when building the traceback
        # funcargs: render the function call arguments per entry
        self.showlocals = showlocals
        self.style = style
        self.tbfilter = tbfilter
        self.funcargs = funcargs
        self.abspath = abspath
        # cache of parsed ASTs, shared across entries of one traceback
        self.astcache = {}
    def _getindent(self, source):
        # figure out indent for given source
        try:
            s = str(source.getstatement(len(source)-1))
        except KeyboardInterrupt:
            raise
        except:
            # broad except: source may be unparsable; fall back to the
            # raw last line, then to no indent at all
            try:
                s = str(source[-1])
            except KeyboardInterrupt:
                raise
            except:
                return 0
        return 4 + (len(s) - len(s.lstrip()))
    def _getentrysource(self, entry):
        # source of the traceback entry, deindented for display;
        # may be None when the source file is unavailable
        source = entry.getsource(self.astcache)
        if source is not None:
            source = source.deindent()
        return source
    def _saferepr(self, obj):
        # repr() that never raises, even for broken __repr__ implementations
        return py.io.saferepr(obj)
    def repr_args(self, entry):
        # returns a ReprFuncArgs when funcargs rendering is enabled,
        # implicitly None otherwise
        if self.funcargs:
            args = []
            for argname, argvalue in entry.frame.getargs(var=True):
                args.append((argname, self._saferepr(argvalue)))
            return ReprFuncArgs(args)
    def get_source(self, source, line_index=-1, excinfo=None, short=False):
        """ return formatted and marked up source lines. """
        lines = []
        if source is None or line_index >= len(source.lines):
            # no usable source: show a placeholder instead
            source = py.code.Source("???")
            line_index = 0
        if line_index < 0:
            # negative index counts from the end, like list indexing
            line_index += len(source)
        space_prefix = " "
        if short:
            # short style: only the failing line, stripped
            lines.append(space_prefix + source.lines[line_index].strip())
        else:
            # long style: all lines, with the failing one flow-marked
            for line in source.lines[:line_index]:
                lines.append(space_prefix + line)
            lines.append(self.flow_marker + " " + source.lines[line_index])
            for line in source.lines[line_index+1:]:
                lines.append(space_prefix + line)
        if excinfo is not None:
            # append the exception text, indented to match the source
            indent = 4 if short else self._getindent(source)
            lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
        return lines
    def get_exconly(self, excinfo, indent=4, markall=False):
        # return the exception-only text as fail-marked, indented lines;
        # markall=True marks every line, otherwise only the first
        lines = []
        indent = " " * indent
        # get the real exception information out
        exlines = excinfo.exconly(tryshort=True).split('\n')
        failindent = self.fail_marker + indent[1:]
        for line in exlines:
            lines.append(failindent + line)
            if not markall:
                failindent = indent
        return lines
    def repr_locals(self, locals):
        # build a ReprLocals of "name = repr" lines (skipping @-internal
        # names); implicitly None when showlocals is off
        if self.showlocals:
            lines = []
            keys = [loc for loc in locals if loc[0] != "@"]
            keys.sort()
            for name in keys:
                value = locals[name]
                if name == '__builtins__':
                    # avoid dumping the whole builtins namespace
                    lines.append("__builtins__ = <builtins>")
                else:
                    # This formatting could all be handled by the
                    # _repr() function, which is only reprlib.Repr in
                    # disguise, so is very configurable.
                    str_repr = self._saferepr(value)
                    #if len(str_repr) < 70 or not isinstance(value,
                    #                        (list, tuple, dict)):
                    lines.append("%-10s = %s" %(name, str_repr))
                    #else:
                    #    self._line("%-10s =\\" % (name,))
                    #    # XXX
                    #    py.std.pprint.pprint(value, stream=self.excinfowriter)
            return ReprLocals(lines)
    def repr_traceback_entry(self, entry, excinfo=None):
        # render one traceback entry; excinfo is only passed for the
        # entry where the exception text should be attached
        source = self._getentrysource(entry)
        if source is None:
            source = py.code.Source("???")
            line_index = 0
        else:
            # entry.getfirstlinesource() can be -1, should be 0 on jython
            line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
        lines = []
        # an entry may carry its own style override (e.g. from a fixture)
        style = entry._repr_style
        if style is None:
            style = self.style
        if style in ("short", "long"):
            short = style == "short"
            reprargs = self.repr_args(entry) if not short else None
            s = self.get_source(source, line_index, excinfo, short=short)
            lines.extend(s)
            if short:
                message = "in %s" %(entry.name)
            else:
                message = excinfo and excinfo.typename or ""
            path = self._makepath(entry.path)
            filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
            localsrepr = None
            if not short:
                localsrepr = self.repr_locals(entry.locals)
            return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
        # style == "no": suppress the source, keep only exception text
        if excinfo:
            lines.extend(self.get_exconly(excinfo, indent=4))
        return ReprEntry(lines, None, None, None, style)
    def _makepath(self, path):
        # relativize the path when abspath is off and the relative
        # form is actually shorter
        if not self.abspath:
            try:
                np = py.path.local().bestrelpath(path)
            except OSError:
                return path
            if len(np) < len(str(path)):
                path = np
        return path
    def repr_traceback(self, excinfo):
        traceback = excinfo.traceback
        if self.tbfilter:
            traceback = traceback.filter()
        recursionindex = None
        # detect infinite recursion so the rendering can be truncated
        if excinfo.errisinstance(RuntimeError):
            if "maximum recursion depth exceeded" in str(excinfo.value):
                recursionindex = traceback.recursionindex()
        last = traceback[-1]
        entries = []
        extraline = None
        for index, entry in enumerate(traceback):
            # only the last entry carries the exception text
            einfo = (last == entry) and excinfo or None
            reprentry = self.repr_traceback_entry(entry, einfo)
            entries.append(reprentry)
            if index == recursionindex:
                extraline = "!!! Recursion detected (same locals & position)"
                break
        return ReprTraceback(entries, extraline, style=self.style)
    def repr_excinfo(self, excinfo):
        # top-level entry point: combine traceback and crash location
        reprtraceback = self.repr_traceback(excinfo)
        reprcrash = excinfo._getreprcrash()
        return ReprExceptionInfo(reprtraceback, reprcrash)
class TerminalRepr:
    """Base class for objects that render themselves onto a TerminalWriter.

    Subclasses implement toterminal(tw); str()/unicode() reuse it by
    rendering into an in-memory writer.
    """
    def __str__(self):
        text = self.__unicode__()
        if sys.version_info[0] < 3:
            # Python 2: str must be bytes
            text = text.encode('utf-8')
        return text
    def __unicode__(self):
        # FYI this is called from pytest-xdist's serialization of exception
        # information.
        buf = py.io.TextIO()
        writer = py.io.TerminalWriter(file=buf)
        self.toterminal(writer)
        return buf.getvalue().strip()
    def __repr__(self):
        return "<%s instance at %0x>" %(self.__class__, id(self))
class ReprExceptionInfo(TerminalRepr):
    """Renderable exception info: a traceback plus the crash location,
    with optional extra named sections appended after the traceback."""
    def __init__(self, reprtraceback, reprcrash):
        self.reprtraceback = reprtraceback
        self.reprcrash = reprcrash
        self.sections = []
    def addsection(self, name, content, sep="-"):
        # queue an extra section (e.g. captured output) for rendering
        self.sections.append((name, content, sep))
    def toterminal(self, tw):
        self.reprtraceback.toterminal(tw)
        for secname, seccontent, secsep in self.sections:
            tw.sep(secsep, secname)
            tw.line(seccontent)
class ReprTraceback(TerminalRepr):
    """Renderable traceback: a list of ReprEntry objects plus an optional
    trailing extra line (used e.g. for the recursion-detected notice)."""
    # separator drawn between consecutive long-style entries
    entrysep = "_ "

    def __init__(self, reprentries, extraline, style):
        self.reprentries = reprentries
        self.extraline = extraline
        self.style = style

    def toterminal(self, tw):
        # the entries might have different styles: draw a separator after
        # a long entry, or between a short entry and a following long one
        # (removed an unused `last_style` local that was never read)
        for i, entry in enumerate(self.reprentries):
            if entry.style == "long":
                tw.line("")
            entry.toterminal(tw)
            if i < len(self.reprentries) - 1:
                next_entry = self.reprentries[i+1]
                if entry.style == "long" or \
                   entry.style == "short" and next_entry.style == "long":
                    tw.sep(self.entrysep)
        if self.extraline:
            tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
    """A traceback pre-formatted by the stdlib traceback module,
    wrapped as a single native entry."""
    def __init__(self, tblines):
        self.style = "native"
        self.extraline = None
        self.reprentries = [ReprEntryNative(tblines)]
class ReprEntryNative(TerminalRepr):
    """Single traceback entry holding pre-formatted stdlib traceback lines."""
    style = "native"
    def __init__(self, tblines):
        self.lines = tblines
    def toterminal(self, tw):
        # lines already contain their newlines; emit them verbatim
        joined = "".join(self.lines)
        tw.write(joined)
class ReprEntry(TerminalRepr):
    """One rendered traceback entry: source lines, optional function args,
    optional locals and an optional file location line."""
    localssep = "_ "
    def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
        # lines: marked-up source/exception lines for this entry
        # reprfuncargs: ReprFuncArgs or None
        # reprlocals: ReprLocals or None
        # filelocrepr: ReprFileLocation or None
        # style: "long" | "short" | "no"
        self.lines = lines
        self.reprfuncargs = reprfuncargs
        self.reprlocals = reprlocals
        self.reprfileloc = filelocrepr
        self.style = style
    def toterminal(self, tw):
        if self.style == "short":
            # short style: location first, then the (single) source line;
            # exception lines (fail-marked) are emphasized in red
            self.reprfileloc.toterminal(tw)
            for line in self.lines:
                red = line.startswith("E ")
                tw.line(line, bold=True, red=red)
            #tw.line("")
            return
        # long style: args, source, locals, then the location line
        if self.reprfuncargs:
            self.reprfuncargs.toterminal(tw)
        for line in self.lines:
            red = line.startswith("E ")
            tw.line(line, bold=True, red=red)
        if self.reprlocals:
            #tw.sep(self.localssep, "Locals")
            tw.line("")
            self.reprlocals.toterminal(tw)
        if self.reprfileloc:
            if self.lines:
                tw.line("")
            self.reprfileloc.toterminal(tw)
    def __str__(self):
        # plain-text fallback, bypassing the TerminalRepr writer machinery
        return "%s\n%s\n%s" % ("\n".join(self.lines),
                               self.reprlocals,
                               self.reprfileloc)
class ReprFileLocation(TerminalRepr):
    """A "path:lineno: message" location line, in the format most
    editors understand for jump-to-error."""
    def __init__(self, path, lineno, message):
        self.path = str(path)
        self.lineno = lineno
        self.message = message
    def toterminal(self, tw):
        # keep only the first line of a multi-line message
        msg = self.message
        newline = msg.find("\n")
        if newline != -1:
            msg = msg[:newline]
        tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
class ReprLocals(TerminalRepr):
    """Pre-formatted "name = repr" lines describing a frame's locals."""
    def __init__(self, lines):
        self.lines = lines
    def toterminal(self, tw):
        for localline in self.lines:
            tw.line(localline)
class ReprFuncArgs(TerminalRepr):
    """Renderable list of (name, value-repr) function arguments,
    wrapped to the terminal width."""
    def __init__(self, args):
        # args: sequence of (argname, saferepr-of-value) pairs
        self.args = args
    def toterminal(self, tw):
        if self.args:
            # greedily pack "name = value" items onto lines, flushing a
            # line whenever the next item would exceed the terminal width
            linesofar = ""
            for name, value in self.args:
                ns = "%s = %s" %(name, value)
                # +2 accounts for the ", " separator that would be added
                if len(ns) + len(linesofar) + 2 > tw.fullwidth:
                    if linesofar:
                        tw.line(linesofar)
                    linesofar = ns
                else:
                    if linesofar:
                        linesofar += ", " + ns
                    else:
                        linesofar = ns
            if linesofar:
                tw.line(linesofar)
            # blank line separating args from the source that follows
            tw.line("")
# Maps builtin name -> stack of previously-installed values so that
# unpatch_builtins() can restore them in LIFO order.
oldbuiltins = {}
def patch_builtins(assertion=True, compile=True):
    """Install py's AssertionError and/or compile into Python's builtins.

    The replaced originals are pushed onto ``oldbuiltins`` so
    ``unpatch_builtins`` can restore them.
    """
    if assertion:
        # NOTE: this import shadows the `assertion` parameter from here on
        from py._code import assertion
        l = oldbuiltins.setdefault('AssertionError', [])
        l.append(py.builtin.builtins.AssertionError)
        py.builtin.builtins.AssertionError = assertion.AssertionError
    if compile:
        l = oldbuiltins.setdefault('compile', [])
        l.append(py.builtin.builtins.compile)
        py.builtin.builtins.compile = py.code.compile
def unpatch_builtins(assertion=True, compile=True):
    """Undo patch_builtins(): restore the previously saved builtins."""
    builtins_mod = py.builtin.builtins
    if assertion:
        builtins_mod.AssertionError = oldbuiltins['AssertionError'].pop()
    if compile:
        builtins_mod.compile = oldbuiltins['compile'].pop()
def getrawcode(obj, trycall=True):
    """Return the code object underlying *obj* (function, method, frame...).

    Falls back through the usual wrapper attributes; when ``trycall`` is
    true and no code object was found, a callable non-class object's
    ``__call__`` is tried as a last resort.  Returns *obj* unchanged when
    nothing better is found.
    """
    try:
        return obj.__code__
    except AttributeError:
        pass
    # unwrap bound methods (py2), legacy function attributes, frames
    for attr in ('im_func', 'func_code', 'f_code', '__code__'):
        obj = getattr(obj, attr, obj)
    if trycall and not hasattr(obj, 'co_firstlineno'):
        if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
            candidate = getrawcode(obj.__call__, trycall=False)
            if hasattr(candidate, 'co_firstlineno'):
                return candidate
    return obj
| mpl-2.0 |
ericholscher/django | django/contrib/gis/gdal/feature.py | 1 | 4267 | # The GDAL C library, OGR exception, and the Field object
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
# ctypes function prototypes
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
from django.utils.encoding import force_bytes, force_text
from django.utils import six
from django.utils.six.moves import xrange
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
    """
    This class wraps an OGR Feature; it needs to be instantiated
    from a Layer object.
    """

    #### Python 'magic' routines ####
    def __init__(self, feat, layer):
        """
        Initializes Feature from a pointer and its Layer object.

        Raises OGRException when given a null/invalid feature pointer.
        """
        if not feat:
            raise OGRException('Cannot create OGR Feature, invalid pointer given.')
        self.ptr = feat
        self._layer = layer

    def __del__(self):
        "Releases a reference to this object."
        if self._ptr:
            capi.destroy_feature(self._ptr)

    def __getitem__(self, index):
        """
        Gets the Field object at the specified index, which may be either
        an integer or the Field's string label.  Note that the Field object
        is not the field's _value_ -- use the `get` method instead to
        retrieve the value (e.g. an integer) instead of a Field instance.
        """
        if isinstance(index, six.string_types):
            i = self.index(index)
        else:
            # Valid integer indices are 0 .. num_fields - 1.  The previous
            # check used `index > self.num_fields`, which let the invalid
            # index == num_fields slip through (off-by-one).
            if index < 0 or index >= self.num_fields:
                raise OGRIndexError('index out of range')
            i = index
        return Field(self, i)

    def __iter__(self):
        "Iterates over each field in the Feature."
        for i in xrange(self.num_fields):
            yield self[i]

    def __len__(self):
        "Returns the count of fields in this feature."
        return self.num_fields

    def __str__(self):
        "The string name of the feature."
        return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)

    def __eq__(self, other):
        "Does equivalence testing on the features."
        return bool(capi.feature_equal(self.ptr, other._ptr))

    #### Feature Properties ####
    @property
    def encoding(self):
        # character encoding inherited from the owning data source
        return self._layer._ds.encoding

    @property
    def fid(self):
        "Returns the feature identifier."
        return capi.get_fid(self.ptr)

    @property
    def layer_name(self):
        "Returns the name of the layer for the feature."
        name = capi.get_feat_name(self._layer._ldefn)
        return force_text(name, self.encoding, strings_only=True)

    @property
    def num_fields(self):
        "Returns the number of fields in the Feature."
        return capi.get_feat_field_count(self.ptr)

    @property
    def fields(self):
        "Returns a list of fields in the Feature."
        return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i))
                for i in xrange(self.num_fields)]

    @property
    def geom(self):
        "Returns the OGR Geometry for this Feature."
        # Retrieving the geometry pointer for the feature; cloned so the
        # returned OGRGeometry owns its own memory.
        geom_ptr = capi.get_feat_geom_ref(self.ptr)
        return OGRGeometry(geom_api.clone_geom(geom_ptr))

    @property
    def geom_type(self):
        "Returns the OGR Geometry Type for this Feature."
        return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))

    #### Feature Methods ####
    def get(self, field):
        """
        Returns the value of the field, instead of an instance of the Field
        object.  May take a string of the field name or a Field object as
        parameters.
        """
        field_name = getattr(field, 'name', field)
        return self[field_name].value

    def index(self, field_name):
        "Returns the index of the given field name."
        i = capi.get_field_index(self.ptr, force_bytes(field_name))
        if i < 0:
            raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
        return i
| bsd-3-clause |
psci2195/espresso-ffans | testsuite/python/integrate.py | 5 | 2637 | #
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
import numpy as np
import unittest as ut
class Integrate(ut.TestCase):

    """
    Tests integration of Newton's equations for a particle with and without
    external forces and with time step changes on the way.
    """

    def test(self):
        """Check free motion, time bookkeeping and forced motion."""
        system = espressomd.System(box_l=[10.0, 10.0, 10.0])
        system.cell_system.skin = 0
        # Newton's 1st law with time step change on the way
        p = system.part.add(pos=(0, 0, 0), v=(1, 2, 3))
        system.time_step = 0.01
        system.time = 12.
        np.testing.assert_allclose(np.copy(p.v), (1, 2, 3))
        for i in range(10):
            # position after i steps of uniform motion: i * dt * v
            np.testing.assert_allclose(np.copy(p.pos), np.copy(
                i * system.time_step * p.v), atol=1E-12)
            system.integrator.run(1)
        # Check that the time has passed
        np.testing.assert_allclose(system.time, 12. + 10 * system.time_step)
        v = p.v
        pos1 = p.pos
        system.time_step = 0.02
        # changing the time step must not alter velocity or position
        np.testing.assert_allclose(np.copy(v), np.copy(p.v), atol=1E-12)
        np.testing.assert_allclose(np.copy(pos1), np.copy(p.pos), atol=1E-12)
        for i in range(10):
            np.testing.assert_allclose(np.copy(p.pos), np.copy(
                pos1 + i * system.time_step * p.v), atol=1E-12)
            system.integrator.run(1)
        # Newton's 2nd law
        if espressomd.has_features("EXTERNAL_FORCES"):
            if espressomd.has_features("MASS"):
                p.mass = 2.3
            p.pos = (0, 0, 0)
            ext_force = np.array((-2, 1.3, 1))
            p.ext_force = ext_force
            system.time_step = 0.03
            for i in range(10):
                # x(t) = 0.5 * F/m * t^2 + v0 * t  with t = i * dt
                np.testing.assert_allclose(np.copy(p.pos), np.copy(
                    0.5 * ext_force / p.mass * (i * system.time_step)**2 + v * i * system.time_step), atol=1E-12)
                system.integrator.run(1)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    ut.main()
| gpl-3.0 |
dysya92/monkeys | flask/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/mxodbc.py | 32 | 3856 | # mssql/mxodbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+mxodbc
:name: mxODBC
:dbapi: mxodbc
:connectstring: mssql+mxodbc://<username>:<password>@<dsnname>
:url: http://www.egenix.com/
Execution Modes
---------------
mxODBC features two styles of statement execution, using the
``cursor.execute()`` and ``cursor.executedirect()`` methods (the second being
an extension to the DBAPI specification). The former makes use of a particular
API call specific to the SQL Server Native Client ODBC driver known
SQLDescribeParam, while the latter does not.
mxODBC apparently only makes repeated use of a single prepared statement
when SQLDescribeParam is used. The advantage to prepared statement reuse is
one of performance. The disadvantage is that SQLDescribeParam has a limited
set of scenarios in which bind parameters are understood, including that they
cannot be placed within the argument lists of function calls, anywhere outside
the FROM, or even within subqueries within the FROM clause - making the usage
of bind parameters within SELECT statements impossible for all but the most
simplistic statements.
For this reason, the mxODBC dialect uses the "native" mode by default only for
INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for
all other statements.
This behavior can be controlled via
:meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the
``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a
value of ``True`` will unconditionally use native bind parameters and a value
of ``False`` will unconditionally use string-escaped parameters.
"""
from ... import types as sqltypes
from ...connectors.mxodbc import MxODBCConnector
from .pyodbc import MSExecutionContext_pyodbc, _MSNumeric_pyodbc
from .base import (MSDialect,
MSSQLStrictCompiler,
_MSDateTime, _MSDate, _MSTime)
class _MSNumeric_mxodbc(_MSNumeric_pyodbc):
    """Numeric type for the mxODBC dialect.

    Include pyodbc's numeric processor unchanged (subclass adds no
    behavior of its own).
    """
class _MSDate_mxodbc(_MSDate):
    """Date type that binds values as 'year-month-day' strings for mxODBC."""
    def bind_processor(self, dialect):
        def process(value):
            if value is None:
                return None
            return "%s-%s-%s" % (value.year, value.month, value.day)
        return process
class _MSTime_mxodbc(_MSTime):
    """Time type that binds values as 'hour:minute:second' strings for mxODBC."""
    def bind_processor(self, dialect):
        def process(value):
            if value is None:
                return None
            return "%s:%s:%s" % (value.hour, value.minute, value.second)
        return process
class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc):
    """
    The pyodbc execution context is useful for enabling
    SELECT SCOPE_IDENTITY in cases where OUTPUT clause
    does not work (tables with insert triggers).

    This subclass adds no behavior of its own; it exists so the mxODBC
    dialect can reference a context of its own name.
    """
    # todo - investigate whether the pyodbc execution context
    #       is really only being used in cases where OUTPUT
    #       won't work.
class MSDialect_mxodbc(MxODBCConnector, MSDialect):
    """SQL Server dialect for the mxODBC DBAPI."""

    # this is only needed if "native ODBC" mode is used,
    # which is now disabled by default.
    # statement_compiler = MSSQLStrictCompiler

    execution_ctx_cls = MSExecutionContext_mxodbc

    # flag used by _MSNumeric_mxodbc
    _need_decimal_fix = True

    # override the generic SQLAlchemy types with mxODBC-specific variants
    colspecs = {
        sqltypes.Numeric: _MSNumeric_mxodbc,
        sqltypes.DateTime: _MSDateTime,
        sqltypes.Date: _MSDate_mxodbc,
        sqltypes.Time: _MSTime_mxodbc,
    }

    def __init__(self, description_encoding=None, **params):
        # description_encoding: optional encoding applied to cursor
        # description names; None leaves them untouched
        super(MSDialect_mxodbc, self).__init__(**params)
        self.description_encoding = description_encoding

# entry point SQLAlchemy looks up for "mssql+mxodbc://" URLs
dialect = MSDialect_mxodbc
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.