repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
cjddny/cocos2d_guardCarrot | cocos2d/plugin/tools/pluginx-bindings-generator/genbindings-lua.py | 130 | 7752 | #!/usr/bin/python
# This script is used to generate luabinding glue codes.
# Android ndk version must be ndk-r9b.
import sys
import os, os.path
import shutil
import ConfigParser
import subprocess
import re
from contextlib import contextmanager
import shutil
import yaml
import tempfile
def _check_ndk_root_env():
    ''' Checking the environment NDK_ROOT, which will be used for building
    '''
    try:
        NDK_ROOT = os.environ['NDK_ROOT']
    except Exception:
        # Hard requirement: the bindings generator needs the Android NDK
        # toolchain, so abort with a hint instead of failing later.
        print "NDK_ROOT not defined. Please define NDK_ROOT in your environment."
        sys.exit(1)

    return NDK_ROOT
def _check_python_bin_env():
    ''' Checking the environment PYTHON_BIN, which will be used for building
    '''
    try:
        PYTHON_BIN = os.environ['PYTHON_BIN']
    except Exception:
        # Optional override: fall back to the interpreter running this script.
        print "PYTHON_BIN not defined, use current python."
        PYTHON_BIN = sys.executable

    return PYTHON_BIN
class CmdError(Exception):
    # Raised by _run_cmd when a shell command exits with a non-zero status.
    pass
@contextmanager
def _pushd(newDir):
previousDir = os.getcwd()
os.chdir(newDir)
yield
os.chdir(previousDir)
def _run_cmd(command):
ret = subprocess.call(command, shell=True)
if ret != 0:
message = "Error running command"
raise CmdError(message)
def _edit_yaml(filePath):
    """Patch the generator's lua conversions.yaml in place for pluginx.

    Adds the cocos2d::plugin namespace mapping plus the to_native
    conversion snippets for the pluginx developer-info structs.  Files are
    opened with ``with`` so handles are closed even if parsing fails
    (the original leaked the handle on exception).

    NOTE(review): ``yaml.load`` without an explicit Loader can construct
    arbitrary objects; the file is project-controlled here, but
    ``yaml.safe_load`` would be the safer choice — confirm before changing.
    """
    with open(filePath, 'r') as f:
        data = yaml.load(f)

    data['conversions']['ns_map']['cocos2d::plugin::'] = 'plugin.'
    # One conversion stub per pluginx struct; the pattern only differs in
    # the type name, so generate the six entries from a single template.
    to_native = data['conversions']['to_native']
    for type_name in ('TIAPDeveloperInfo', 'TAdsDeveloperInfo', 'TAdsInfo',
                      'TShareDeveloperInfo', 'TSocialDeveloperInfo',
                      'TUserDeveloperInfo'):
        to_native[type_name] = (
            'ok &= pluginx::luaval_to_%s(tolua_S, ${arg_idx}, &${out_value})'
            % type_name)

    with open(filePath, 'w') as f:
        f.write(yaml.dump(data))
def main():
    """Drive lua binding generation: locate the NDK/LLVM toolchain, write
    userconf.ini, patch conversions.yaml, run generator.py for each
    configured binding, then post-process the generated .cpp file.
    """
    cur_platform = '??'
    llvm_path = '??'
    ndk_root = _check_ndk_root_env()
    # del the " in the path
    ndk_root = re.sub(r"\"", "", ndk_root)
    python_bin = _check_python_bin_env()

    platform = sys.platform
    if platform == 'win32':
        cur_platform = 'windows'
    elif platform == 'darwin':
        cur_platform = platform
    elif 'linux' in platform:
        cur_platform = 'linux'
    else:
        print 'Your platform is not supported!'
        sys.exit(1)

    # Locate the NDK's prebuilt clang: prefer llvm-3.3, fall back to
    # llvm-3.4; on win32 the prebuilt dir has no -x86 suffix.
    if platform == 'win32':
        x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s' % cur_platform))
        if not os.path.exists(x86_llvm_path):
            x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s' % cur_platform))
    else:
        x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86')))
        if not os.path.exists(x86_llvm_path):
            x86_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s-%s' % (cur_platform, 'x86')))

    x64_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.3/prebuilt', '%s-%s' % (cur_platform, 'x86_64')))
    if not os.path.exists(x64_llvm_path):
        x64_llvm_path = os.path.abspath(os.path.join(ndk_root, 'toolchains/llvm-3.4/prebuilt', '%s-%s' % (cur_platform, 'x86_64')))

    # Prefer the x86 host toolchain, then x86_64; abort if neither exists.
    if os.path.isdir(x86_llvm_path):
        llvm_path = x86_llvm_path
    elif os.path.isdir(x64_llvm_path):
        llvm_path = x64_llvm_path
    else:
        print 'llvm toolchain not found!'
        print 'path: %s or path: %s are not valid! ' % (x86_llvm_path, x64_llvm_path)
        sys.exit(1)

    project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
    cocos_root = os.path.abspath(os.path.join(project_root, ''))
    cxx_generator_root = os.path.abspath(os.path.join(project_root, 'tools/bindings-generator'))
    pluginx_root = os.path.abspath(os.path.join(project_root, 'plugin'))

    # save config to file
    config = ConfigParser.ConfigParser()
    config.set('DEFAULT', 'androidndkdir', ndk_root)
    config.set('DEFAULT', 'clangllvmdir', llvm_path)
    config.set('DEFAULT', 'cocosdir', cocos_root)
    config.set('DEFAULT', 'cxxgeneratordir', cxx_generator_root)
    config.set('DEFAULT', 'extra_flags', '')
    config.set('DEFAULT', 'pluginxdir', pluginx_root)

    # To fix parse error on windows, we must difine __WCHAR_MAX__ and undefine __MINGW32__ .
    if platform == 'win32':
        config.set('DEFAULT', 'extra_flags', '-D__WCHAR_MAX__=0x7fffffff -U__MINGW32__')

    conf_ini_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'userconf.ini'))

    print 'generating userconf.ini...'
    with open(conf_ini_file, 'w') as configfile:
        config.write(configfile)

    # set proper environment variables
    if 'linux' in platform or platform == 'darwin':
        os.putenv('LD_LIBRARY_PATH', '%s/libclang' % cxx_generator_root)
    if platform == 'win32':
        path_env = os.environ['PATH']
        os.putenv('PATH', r'%s;%s\libclang;%s\tools\win32;' % (path_env, cxx_generator_root, cxx_generator_root))

    # edit conversions config for pluginx
    conversions_yaml = '%s/targets/lua/conversions.yaml' % cxx_generator_root
    conversions_backup = '%s.backup' % conversions_yaml
    # Back up the pristine yaml so it can be restored in the finally below.
    shutil.copy(conversions_yaml, conversions_backup)
    _edit_yaml(conversions_yaml)

    try:
        tolua_root = '%s/plugin/tools/pluginx-bindings-generator/tolua' % project_root
        output_dir = '%s/plugin/luabindings/auto' % project_root

        # Maps generator .ini filename -> (section name, output basename).
        cmd_args = {'cocos2dx_pluginx.ini' : ('cocos2dx_pluginx', 'lua_cocos2dx_pluginx_auto')}
        target = 'lua'
        generator_py = '%s/generator.py' % cxx_generator_root
        for key in cmd_args.keys():
            args = cmd_args[key]
            cfg = '%s/%s' % (tolua_root, key)
            print 'Generating bindings for %s...' % (key[:-4])
            command = '%s %s %s -s %s -t %s -o %s -n %s' % (python_bin, generator_py, cfg, args[0], target, output_dir, args[1])
            _run_cmd(command)

            # Normalize line endings of generated files on Windows.
            if platform == 'win32':
                with _pushd(output_dir):
                    _run_cmd('dos2unix *')

            # replace header file
            # Rewrite the generated .cpp through a temp file, inserting the
            # pluginx conversions header right after LuaBasicConversions.h.
            tmpfd,tmpname = tempfile.mkstemp(dir='.')
            input_file_name = '%s/%s.cpp' % (output_dir, args[1])
            try:
                output_file = os.fdopen(tmpfd, 'w')
                input_file = open(input_file_name)
                for line in input_file:
                    output_file.write(line.replace('#include "LuaBasicConversions.h"', '#include "LuaBasicConversions.h"\n#include "lua_pluginx_basic_conversions.h"'))
            finally:
                output_file.close()
                input_file.close()
            shutil.move(tmpname, input_file_name)

        print '---------------------------------'
        print 'Generating lua bindings succeeds.'
        print '---------------------------------'

    except Exception as e:
        # Command failures are reported and exit non-zero; anything else is
        # unexpected and re-raised with its traceback.
        if e.__class__.__name__ == 'CmdError':
            print '---------------------------------'
            print 'Generating lua bindings fails.'
            print '---------------------------------'
            sys.exit(1)
        else:
            raise
    finally:
        # Always restore the pristine conversions.yaml, success or failure.
        shutil.move(conversions_backup, conversions_yaml)

# -------------- main --------------
if __name__ == '__main__':
    main()
| mit |
yongshengwang/hue | apps/spark/src/spark/monkey_patches.py | 28 | 1221 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import spark.conf
def _start_livy_server():
    # Spawn the Livy server as a child process by re-running this program
    # with the 'livy_server' argument.
    import atexit
    import subprocess
    import sys
    import time

    p = subprocess.Popen([sys.executable, sys.argv[0], 'livy_server'])

    def cleanup():
        # On interpreter exit: ask the server to terminate, poll for up to
        # ~5 seconds, then force-kill it if it is still running.
        p.terminate()
        for _ in xrange(5):
            if p.poll() == None:
                time.sleep(1)
            else:
                break
        else:
            p.kill()

    atexit.register(cleanup)


# Import-time side effect: launch Livy when the Hue config enables it.
if spark.conf.START_LIVY_SERVER.get():
    _start_livy_server()
| apache-2.0 |
trac-ja/trac-ja | trac/ticket/tests/batch.py | 3 | 11155 | from trac.perm import PermissionCache
from trac.test import Mock, EnvironmentStub
from trac.ticket import default_workflow, web_ui
from trac.ticket.batch import BatchModifyModule
from trac.ticket.model import Ticket
from trac.util.datefmt import utc
import unittest
class BatchModifyTestCase(unittest.TestCase):
    """Tests for BatchModifyModule: request parsing, list-field editing
    modes ('=', '+', '-', '+-') and batch saving of ticket changes."""

    def setUp(self):
        self.env = EnvironmentStub(default_data=True,
            enable=[default_workflow.ConfigurableTicketWorkflow,
                    web_ui.TicketModule])
        self.req = Mock(href=self.env.href, authname='anonymous', tz=utc)
        self.req.session = {}
        self.req.perm = PermissionCache(self.env)

    def assertCommentAdded(self, ticket_id, comment):
        """Assert the ticket's comment change carries the expected text."""
        ticket = Ticket(self.env, int(ticket_id))
        changes = ticket.get_changelog()
        comment_change = [c for c in changes if c[2] == 'comment'][0]
        # Changelog entries are (time, author, field, oldvalue, newvalue,
        # permanent) tuples: the comment text is the new value at index 4.
        # The original compared index 2 (the field name, always 'comment'),
        # which only passed because the tests use the literal text 'comment'.
        self.assertEqual(comment_change[4], comment)

    def assertFieldChanged(self, ticket_id, field, new_value):
        """Assert the ticket's changelog records *field* -> *new_value*."""
        ticket = Ticket(self.env, int(ticket_id))
        changes = ticket.get_changelog()
        field_change = [c for c in changes if c[2] == field][0]
        self.assertEqual(field_change[4], new_value)

    def _change_list_test_helper(self, original, new, new2, mode):
        # Exercise BatchModifyModule._change_list directly.
        batch = BatchModifyModule(self.env)
        return batch._change_list(original, new, new2, mode)

    def _add_list_test_helper(self, original, to_add):
        return self._change_list_test_helper(original, to_add, '', '+')

    def _remove_list_test_helper(self, original, to_remove):
        return self._change_list_test_helper(original, to_remove, '', '-')

    def _add_remove_list_test_helper(self, original, to_add, to_remove):
        return self._change_list_test_helper(original, to_add, to_remove,
                                             '+-')

    def _assign_list_test_helper(self, original, new):
        return self._change_list_test_helper(original, new, '', '=')

    def _insert_ticket(self, summary, **kw):
        """Helper for inserting a ticket into the database"""
        ticket = Ticket(self.env)
        for k, v in kw.items():
            ticket[k] = v
        return ticket.insert()

    def test_ignore_summary_reporter_and_description(self):
        """These cannot be added through the UI, but if somebody tries
        to build their own POST data they will be ignored."""
        batch = BatchModifyModule(self.env)
        self.req.args = {}
        self.req.args['batchmod_value_summary'] = 'test ticket'
        self.req.args['batchmod_value_reporter'] = 'anonymous'
        self.req.args['batchmod_value_description'] = 'synergize the widgets'
        values = batch._get_new_ticket_values(self.req)
        self.assertEqual(len(values), 0)

    def test_add_batchmod_value_data_from_request(self):
        batch = BatchModifyModule(self.env)
        self.req.args = {}
        self.req.args['batchmod_value_milestone'] = 'milestone1'
        values = batch._get_new_ticket_values(self.req)
        self.assertEqual(values['milestone'], 'milestone1')

    def test_selected_tickets(self):
        self.req.args = { 'selected_tickets' : '1,2,3' }
        batch = BatchModifyModule(self.env)
        selected_tickets = batch._get_selected_tickets(self.req)
        self.assertEqual(selected_tickets, ['1', '2', '3'])

    def test_no_selected_tickets(self):
        """If nothing is selected, the return value is the empty list."""
        self.req.args = { 'selected_tickets' : '' }
        batch = BatchModifyModule(self.env)
        selected_tickets = batch._get_selected_tickets(self.req)
        self.assertEqual(selected_tickets, [])

    # Assign list items

    def test_change_list_replace_empty_with_single(self):
        """Replace empty field with single item."""
        changed = self._assign_list_test_helper('', 'alice')
        self.assertEqual(changed, 'alice')

    def test_change_list_replace_empty_with_items(self):
        """Replace empty field with items."""
        changed = self._assign_list_test_helper('', 'alice, bob')
        self.assertEqual(changed, 'alice, bob')

    def test_change_list_replace_item(self):
        """Replace item with a different item."""
        changed = self._assign_list_test_helper('alice', 'bob')
        self.assertEqual(changed, 'bob')

    def test_change_list_replace_item_with_items(self):
        """Replace item with different items."""
        changed = self._assign_list_test_helper('alice', 'bob, carol')
        self.assertEqual(changed, 'bob, carol')

    def test_change_list_replace_items_with_item(self):
        """Replace items with a different item."""
        changed = self._assign_list_test_helper('alice, bob', 'carol')
        self.assertEqual(changed, 'carol')

    def test_change_list_replace_items(self):
        """Replace items with different items."""
        changed = self._assign_list_test_helper('alice, bob', 'carol, dave')
        self.assertEqual(changed, 'carol, dave')

    def test_change_list_replace_items_partial(self):
        """Replace items with different (or not) items."""
        changed = self._assign_list_test_helper('alice, bob', 'bob, dave')
        self.assertEqual(changed, 'bob, dave')

    def test_change_list_clear(self):
        """Clear field."""
        changed = self._assign_list_test_helper('alice bob', '')
        self.assertEqual(changed, '')

    # Add / remove list items

    def test_change_list_add_item(self):
        """Append additional item."""
        changed = self._add_list_test_helper('alice', 'bob')
        self.assertEqual(changed, 'alice, bob')

    def test_change_list_add_items(self):
        """Append additional items."""
        changed = self._add_list_test_helper('alice, bob', 'carol, dave')
        self.assertEqual(changed, 'alice, bob, carol, dave')

    def test_change_list_remove_item(self):
        """Remove existing item."""
        changed = self._remove_list_test_helper('alice, bob', 'bob')
        self.assertEqual(changed, 'alice')

    def test_change_list_remove_items(self):
        """Remove existing items."""
        changed = self._remove_list_test_helper('alice, bob, carol',
                                                'alice, carol')
        self.assertEqual(changed, 'bob')

    def test_change_list_remove_idempotent(self):
        """Ignore missing item to be removed."""
        changed = self._remove_list_test_helper('alice', 'bob')
        self.assertEqual(changed, 'alice')

    def test_change_list_remove_mixed(self):
        """Ignore only missing item to be removed."""
        changed = self._remove_list_test_helper('alice, bob', 'bob, carol')
        self.assertEqual(changed, 'alice')

    def test_change_list_add_remove(self):
        """Remove existing item and append additional item."""
        changed = self._add_remove_list_test_helper('alice, bob', 'carol',
                                                    'alice')
        self.assertEqual(changed, 'bob, carol')

    def test_change_list_add_no_duplicates(self):
        """Existing items are not duplicated."""
        changed = self._add_list_test_helper('alice, bob', 'bob, carol')
        self.assertEqual(changed, 'alice, bob, carol')

    def test_change_list_remove_all_duplicates(self):
        """Remove all duplicates."""
        changed = self._remove_list_test_helper('alice, bob, alice', 'alice')
        self.assertEqual(changed, 'bob')

    # Save

    def test_save_comment(self):
        """Comments are saved to all selected tickets."""
        first_ticket_id = self._insert_ticket('Test 1', reporter='joe')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]

        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, {}, 'comment',
                                   'leave')

        self.assertCommentAdded(first_ticket_id, 'comment')
        self.assertCommentAdded(second_ticket_id, 'comment')

    def test_save_values(self):
        """Changed values are saved to all tickets."""
        first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
                                              component='foo')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]
        new_values = { 'component' : 'bar' }

        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, new_values, '',
                                   'leave')

        self.assertFieldChanged(first_ticket_id, 'component', 'bar')
        self.assertFieldChanged(second_ticket_id, 'component', 'bar')

    def test_action_with_state_change(self):
        """Actions can have change status."""
        self.env.config.set('ticket-workflow', 'embiggen', '* -> big')

        first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
                                              status='small')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]

        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, {}, '',
                                   'embiggen')

        # (dead locals `ticket`/`changes` removed; the assertions below
        # re-read the changelog themselves)
        self.assertFieldChanged(first_ticket_id, 'status', 'big')
        self.assertFieldChanged(second_ticket_id, 'status', 'big')

    def test_action_with_side_effects(self):
        """Actions can have operations with side effects."""
        self.env.config.set('ticket-workflow', 'buckify', '* -> *')
        self.env.config.set('ticket-workflow', 'buckify.operations',
                            'set_owner')
        self.req.args = {}
        self.req.args['action_buckify_reassign_owner'] = 'buck'

        first_ticket_id = self._insert_ticket('Test 1', reporter='joe',
                                              owner='foo')
        second_ticket_id = self._insert_ticket('Test 2', reporter='joe')
        selected_tickets = [first_ticket_id, second_ticket_id]

        batch = BatchModifyModule(self.env)
        batch._save_ticket_changes(self.req, selected_tickets, {}, '',
                                   'buckify')

        self.assertFieldChanged(first_ticket_id, 'owner', 'buck')
        self.assertFieldChanged(second_ticket_id, 'owner', 'buck')
def suite():
    """Build the test suite for all BatchModifyModule test cases."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(BatchModifyTestCase, 'test'))
    return tests


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| bsd-3-clause |
huntxu/neutron | neutron/tests/unit/agent/windows/test_ip_lib.py | 2 | 4839 | # Copyright 2016 Cloudbase Solutions.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netifaces
from neutron.agent.windows import ip_lib
from neutron.tests import base
class TestIpWrapper(base.BaseTestCase):
    """Unit tests for the Windows ip_lib.IPWrapper helper."""

    def test_get_device_by_ip_no_ip(self):
        # A null IP can never match any device.
        self.assertIsNone(ip_lib.IPWrapper().get_device_by_ip(None))

    @mock.patch.object(ip_lib.IPWrapper, 'get_devices')
    def test_get_device_by_ip(self, mock_get_devices):
        dev_without_ip = mock.MagicMock()
        dev_with_ip = mock.MagicMock()
        dev_without_ip.device_has_ip.return_value = False
        dev_with_ip.device_has_ip.return_value = True
        mock_get_devices.return_value = [dev_without_ip, dev_with_ip]

        found = ip_lib.IPWrapper().get_device_by_ip('fake_ip')

        # The first device whose device_has_ip() is true must be returned.
        self.assertEqual(dev_with_ip, found)

    @mock.patch('netifaces.interfaces')
    def test_get_devices(self, mock_interfaces):
        mock_interfaces.return_value = [mock.sentinel.dev1,
                                        mock.sentinel.dev2]

        devices = ip_lib.IPWrapper().get_devices()

        self.assertEqual(mock.sentinel.dev1, devices[0].name)
        self.assertEqual(mock.sentinel.dev2, devices[1].name)

    @mock.patch('netifaces.interfaces')
    def test_get_devices_error(self, mock_interfaces):
        # An OS failure while listing interfaces yields an empty list.
        mock_interfaces.side_effect = OSError

        self.assertEqual([], ip_lib.IPWrapper().get_devices())
class TestIpDevice(base.BaseTestCase):
    # Tests for ip_lib.IPDevice; all netifaces calls are mocked, so no real
    # network interfaces are queried.

    @mock.patch('netifaces.ifaddresses')
    def test_read_ifaddresses(self, mock_netifaces):
        # An AF_INET entry present -> truthy result.
        mock_address = {'addr': mock.sentinel.fake_addr}
        mock_netifaces.return_value = {netifaces.AF_INET: [mock_address]}

        ret = ip_lib.IPDevice("fake_dev").read_ifaddresses()

        self.assertTrue(ret)

    @mock.patch('netifaces.ifaddresses')
    def test_read_ifaddresses_no_ip(self, mock_netifaces):
        # No address families at all -> falsy result.
        mock_netifaces.return_value = {}

        ret = ip_lib.IPDevice("fake_dev").read_ifaddresses()

        self.assertFalse(ret)

    @mock.patch('netifaces.ifaddresses')
    def test_read_ifaddresses_ip_error(self, mock_netifaces):
        # OS-level failure while querying the device -> falsy result.
        mock_netifaces.side_effect = OSError

        ret = ip_lib.IPDevice("fake_dev").read_ifaddresses()

        self.assertFalse(ret)

    @mock.patch('netifaces.ifaddresses')
    def test_read_faddresses_not_found(self, mock_netifaces):
        # netifaces raises ValueError for an unknown device name -> falsy.
        mock_netifaces.side_effect = ValueError

        ret = ip_lib.IPDevice("fake_dev").read_ifaddresses()

        self.assertFalse(ret)

    def test_device_has_ip(self):
        mock_address = {'addr': mock.sentinel.fake_addr}
        ip_device = ip_lib.IPDevice("fake_dev")

        with mock.patch.object(ip_device, "read_ifaddresses", return_value=(
                {netifaces.AF_INET: [mock_address]})):
            ret = ip_device.device_has_ip(mock.sentinel.fake_addr)

            self.assertTrue(ret)

    def test_device_has_ip_false(self):
        ip_device = ip_lib.IPDevice("fake_dev")

        with mock.patch.object(ip_device, "read_ifaddresses", return_value={}):
            ret = ip_device.device_has_ip(mock.sentinel.fake_addr)

            self.assertFalse(ret)

    def test_device_has_ip_error(self):
        # read_ifaddresses returning None (lookup failed) must be treated
        # as "does not have the IP", not as an error.
        ip_device = ip_lib.IPDevice("fake_dev")

        with mock.patch.object(ip_device, "read_ifaddresses",
                               return_value=None):
            ret = ip_device.device_has_ip(mock.sentinel.fake_addr)

            self.assertFalse(ret)
class TestIPLink(base.BaseTestCase):
    """Unit tests for the ip_lib.IPLink address property."""

    def setUp(self):
        super(TestIPLink, self).setUp()
        device = ip_lib.IPDevice("fake_dev")
        self.ip_link = ip_lib.IPLink(device)
        self.ip_link._parent.read_ifaddresses = mock.Mock()

    def _stub_link_entries(self, entries):
        # Helper: stub the parent's AF_LINK ifaddresses entries.
        self.ip_link._parent.read_ifaddresses.return_value = {
            netifaces.AF_LINK: entries}

    def test_address(self):
        entry = {'addr': mock.sentinel.fake_addr}
        self._stub_link_entries([entry])
        self.assertEqual([entry['addr']], self.ip_link.address)

    def test_address_no_address(self):
        self._stub_link_entries([])
        self.assertEqual([], self.ip_link.address)

    def test_address_error(self):
        # A failed ifaddresses read yields a falsy address value.
        self.ip_link._parent.read_ifaddresses.return_value = None
        self.assertFalse(self.ip_link.address)
| apache-2.0 |
KamillaKhabibrakhmanova/fish | node_modules/cordova/node_modules/cordova-lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 217 | 5286 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
    """Escape *word* for use as a ninja path: pre-escape any already
    escaped-space sequence, then escape spaces and colons."""
    escaped = word.replace('$ ', '$$ ')
    escaped = escaped.replace(' ', '$ ')
    return escaped.replace(':', '$:')
class Writer(object):
    """Emits a well-formed .ninja file to *output*, wrapping long lines at
    *width* columns using ninja's '$' line-continuation syntax."""

    def __init__(self, output, width=78):
        self.output = output
        self.width = width

    def newline(self):
        self.output.write('\n')

    def comment(self, text):
        # Word-wrap, leaving room for the '# ' prefix.
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')

    def variable(self, key, value, indent=0):
        # None means "omit this variable entirely".
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)

    def rule(self, name, command, description=None, depfile=None,
             generator=False, restat=False, rspfile=None, rspfile_content=None):
        # Emit a 'rule' block; each optional attribute becomes an indented
        # variable line under the rule header.
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)

    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        # In ninja syntax '|' introduces implicit deps and '||' order-only
        # deps; both are appended to the flattened input list.
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_path, outputs))
        all_inputs = list(map(escape_path, all_inputs))

        if implicit:
            implicit = map(escape_path, self._as_list(implicit))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            order_only = map(escape_path, self._as_list(order_only))
            all_inputs.append('||')
            all_inputs.extend(order_only)

        self._line('build %s: %s %s' % (' '.join(out_outputs),
                                        rule,
                                        ' '.join(all_inputs)))

        if variables:
            # NOTE(review): dict.iteritems() is Python 2 only; passing a dict
            # here would break under Python 3 — confirm intended runtime.
            if isinstance(variables, dict):
                iterator = variables.iteritems()
            else:
                iterator = iter(variables)

            # Per-build variables are emitted indented under the build line.
            for key, val in iterator:
                self.variable(key, val, indent=1)

        return outputs

    def include(self, path):
        self._line('include %s' % path)

    def subninja(self, path):
        self._line('subninja %s' % path)

    def default(self, paths):
        self._line('default %s' % ' '.join(self._as_list(paths)))

    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        while dollar_index > 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count

    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = ' ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.

            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space (an even number of '$' precedes it).
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break

            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
            if space < 0:
                # Give up on breaking.
                break

            # Emit the wrapped part with ninja's ' $' continuation marker.
            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]

            # Subsequent lines are continuations, so indent them.
            leading_space = ' ' * (indent+2)

        self.output.write(leading_space + text + '\n')

    def _as_list(self, input):
        # Normalize None/scalar/list into a list.
        if input is None:
            return []
        if isinstance(input, list):
            return input
        return [input]
def escape(string):
    """Escape a string such that it can be embedded into a Ninja file
    without further interpretation."""
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # '$' is Ninja's only metacharacter; doubling it makes it literal.
    doubled = string.replace('$', '$$')
    return doubled
| mit |
sumihai-tekindo/account_sicepat | invoice_supplier_validate/wizard/account_invoice_state.py | 1 | 5216 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 STI (<https://github.com/sumihai-tekindo>).
# @author Pambudi Satria <pambudi.satria@yahoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_invoice_confirm(osv.osv_memory):
    """Wizard that confirms all the selected approved invoices."""
    _inherit = "account.invoice.confirm"

    def invoice_confirm(self, cr, uid, ids, context=None):
        """Send the 'invoice_open' workflow signal to every selected
        invoice; only draft/pro-forma/approved invoices may be confirmed."""
        if context is None:
            context = {}
        invoice_obj = self.pool['account.invoice']
        invoice_ids = context.get('active_ids', []) or []
        allowed_states = ('draft', 'proforma', 'proforma2', 'approved')
        for invoice in invoice_obj.browse(cr, uid, invoice_ids,
                                          context=context):
            if invoice.state not in allowed_states:
                raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be confirmed as they are not in 'Draft', 'Pro-Forma' or 'Approve' state."))
            invoice.signal_workflow('invoice_open')
        return {'type': 'ir.actions.act_window_close'}
class account_invoice_submit(osv.osv_memory):
    """Wizard that submits all the selected draft invoices."""
    _name = "account.invoice.submit"
    _description = "Submit the selected invoices"

    def invoice_submit(self, cr, uid, ids, context=None):
        """Send the 'invoice_submit' workflow signal to every selected
        invoice after checking the submit access group."""
        if context is None:
            context = {}
        invoice_ids = context.get('active_ids', []) or []
        # Only members of the dedicated group may submit invoices.
        if not self.pool['res.users'].has_group(cr, uid, 'invoice_supplier_validate.group_submit_invoices'):
            raise osv.except_osv(_('Warning!'), _("You don't have access to submit the selected invoice(s)."))
        invoice_obj = self.pool['account.invoice']
        allowed_states = ('draft', 'proforma', 'proforma2')
        for invoice in invoice_obj.browse(cr, uid, invoice_ids,
                                          context=context):
            if invoice.state not in allowed_states:
                raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be submitted as they are not in 'Draft' or 'Pro-Forma' state."))
            invoice.signal_workflow('invoice_submit')
        return {'type': 'ir.actions.act_window_close'}
class account_invoice_acknowledge(osv.osv_memory):
    """Wizard that acknowledges all the selected submitted invoices."""
    _name = "account.invoice.acknowledge"
    _description = "Acknowledge the Selected Invoices"

    def invoice_acknowledge(self, cr, uid, ids, context=None):
        """Send the 'invoice_acknowledge' workflow signal to every selected
        invoice after checking the acknowledge access group."""
        if context is None:
            context = {}
        invoice_ids = context.get('active_ids', []) or []
        # Only members of the dedicated group may acknowledge invoices.
        if not self.pool['res.users'].has_group(cr, uid, 'invoice_supplier_validate.group_acknowledge_invoices'):
            raise osv.except_osv(_('Warning!'), _("You don't have access to acknowledge the selected invoice(s)."))
        invoice_obj = self.pool['account.invoice']
        for invoice in invoice_obj.browse(cr, uid, invoice_ids,
                                          context=context):
            if invoice.state not in ('submit'):
                raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be acknowledge as they are not in 'Submit' state."))
            invoice.signal_workflow('invoice_acknowledge')
        return {'type': 'ir.actions.act_window_close'}
class account_invoice_approve(osv.osv_memory):
    """Wizard that approves all the selected submitted or acknowledged
    invoices."""
    _name = "account.invoice.approve"
    _description = "Approve the Selected Invoices"

    def invoice_approve(self, cr, uid, ids, context=None):
        """Send the 'invoice_approve' workflow signal to every selected
        invoice after checking the approve access group."""
        if context is None:
            context = {}
        invoice_ids = context.get('active_ids', []) or []
        # Only members of the dedicated group may approve invoices.
        if not self.pool['res.users'].has_group(cr, uid, 'invoice_supplier_validate.group_approve_invoices'):
            raise osv.except_osv(_('Warning!'), _("You don't have access to approve the selected invoice(s)."))
        invoice_obj = self.pool['account.invoice']
        allowed_states = ('submit', 'acknowledge')
        for invoice in invoice_obj.browse(cr, uid, invoice_ids,
                                          context=context):
            if invoice.state not in allowed_states:
                raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be approve as they are not in 'Submit' or 'Acknowledge' state."))
            invoice.signal_workflow('invoice_approve')
        return {'type': 'ir.actions.act_window_close'}
| gpl-3.0 |
razvanphp/arangodb | 3rdParty/V8-3.31.74.1/third_party/python_26/Lib/site-packages/pythonwin/pywin/scintilla/formatter.py | 17 | 20298 | # Does Python source formatting for Scintilla controls.
import win32ui
import win32api
import win32con
import winerror
import string
import array
import scintillacon
WM_KICKIDLE = 0x036A
debugging = 0
if debugging:
# Output must go to another process else the result of
# the printing itself will trigger again trigger a trace.
import sys, win32traceutil, win32trace
def trace(*args):
win32trace.write(string.join(map(str, args), " ") + "\n")
else:
trace = lambda *args: None
class Style:
	"""A single named display format (eg "String", "Class").

	``format`` is either a tuple describing the character format, or the
	name of another style this one aliases.  A 5-element tuple means the
	style borrows font details from the default format; an 8-element
	tuple carries its own font information.
	"""
	def __init__(self, name, format, background = None):
		# Name the format represents, eg "String", "Class".
		self.name = name
		self.background = background
		if type(format) is str:
			# A string means this style aliases another style by name.
			self.aliased = format
			self.format = None
		else:
			self.format = format
			self.aliased = None
		# Allocated when the style is registered with a formatter.
		self.stylenum = None
	def IsBasedOnDefault(self):
		"""True when this style inherits font details from the default."""
		return len(self.format) == 5
	def NormalizeAgainstDefault(self, defaultFormat):
		"""If the extended font definition matches *defaultFormat*,
		restore the format to the "simple" 5-tuple form.

		Returns a true value when the format matched the default.
		"""
		if self.IsBasedOnDefault():
			return 0 # Nothing to do, and nothing changed.
		# Indexes 7 and 2 are the face name and size (see GetCompleteFormat).
		sameAsDefault = (self.format[7] == defaultFormat[7]
		                 and self.format[2] == defaultFormat[2])
		if sameAsDefault:
			self.ForceAgainstDefault()
		return sameAsDefault
	def ForceAgainstDefault(self):
		"""Drop the extended font details, keeping only the first 5 items."""
		self.format = self.format[:5]
	def GetCompleteFormat(self, defaultFormat):
		"""Return the complete (flags,)+format tuple, applying defaults."""
		fmt = self.format
		if len(fmt) == 5: # A default-based style - borrow the font details.
			fmt = fmt + defaultFormat[5:]
		flags = (win32con.CFM_BOLD | win32con.CFM_CHARSET | win32con.CFM_COLOR
		         | win32con.CFM_FACE | win32con.CFM_ITALIC | win32con.CFM_SIZE)
		return (flags,) + fmt[1:]
# The Formatter interface
# used primarily when the actual formatting is done by Scintilla!
class FormatterBase:
	"""Base for all Scintilla formatters.

	Owns the mapping between named Style objects and Scintilla style
	numbers, pushes the style definitions into the control, and loads and
	saves the user's formatting preferences through the app profile.
	Subclasses must implement SetStyles() and HookFormatter().
	"""
	def __init__(self, scintilla):
		self.scintilla = scintilla
		# Format tuples: [1]=bold/italic bits, [2]=size in points*20,
		# [4]=foreground color, [5]/[7]=charset/face name (see _ReformatStyle).
		self.baseFormatFixed = (-402653169, 0, 200, 0, 0, 0, 49, 'Courier New')
		self.baseFormatProp = (-402653169, 0, 200, 0, 0, 0, 49, 'Arial')
		self.bUseFixed = 1
		self.styles = {} # Indexed by name
		self.styles_by_id = {} # Indexed by allocated ID.
		# Default Background
		self.default_background = None
		self._LoadBackground()
		self.SetStyles()
	def _LoadBackground( self ):
		# Load the default background from preferences, falling back to
		# the system window color.
		bg = int( self.LoadPreference( "Default Background", -1 ) )
		if bg != -1:
			self.default_background = bg
		if self.default_background is None:
			self.default_background = win32api.GetSysColor(win32con.COLOR_WINDOW)
	def GetDefaultBackground( self ):
		return self.default_background
	def HookFormatter(self, parent = None):
		raise NotImplementedError
	# Used by the IDLE extensions to quickly determine if a character is a string.
	def GetStringStyle(self, pos):
		"""Return the Style at *pos* if it is a string style, else None."""
		try:
			style = self.styles_by_id[self.scintilla.SCIGetStyleAt(pos)]
		except KeyError:
			# A style we dont know about - probably not even a .py file - can't be a string
			return None
		if style.name in self.string_style_names:
			return style
		return None
	def RegisterStyle(self, style, stylenum):
		"""Record *style* under the given Scintilla style number."""
		assert stylenum is not None, "We must have a style number"
		assert style.stylenum is None, "Style has already been registered"
		# Fix: this previously tested self.styles (which is keyed by style
		# *name*) with a style number, so reused numbers went undetected.
		assert stylenum not in self.styles_by_id, "We are reusing a style number!"
		style.stylenum = stylenum
		self.styles[style.name] = style
		self.styles_by_id[stylenum] = style
	def SetStyles(self):
		raise NotImplementedError
	def GetSampleText(self):
		return "Sample Text for the Format Dialog"
	def GetDefaultFormat(self):
		# The user can choose between a fixed-pitch and proportional base.
		if self.bUseFixed:
			return self.baseFormatFixed
		return self.baseFormatProp
	# Update the control with the new style format.
	def _ReformatStyle(self, style):
		"""Push one style's font, colors and effects into the control."""
		assert style.stylenum is not None, "Unregistered style."
		#print "Reformat style", style.name, style.stylenum
		scintilla=self.scintilla
		stylenum = style.stylenum
		# Now we have the style number, indirect for the actual style.
		if style.aliased is not None:
			style = self.styles[style.aliased]
		f=style.format
		if style.IsBasedOnDefault():
			baseFormat = self.GetDefaultFormat()
		else: baseFormat = f
		scintilla.SCIStyleSetFore(stylenum, f[4])
		scintilla.SCIStyleSetFont(stylenum, baseFormat[7], baseFormat[5])
		if f[1] & 1: scintilla.SCIStyleSetBold(stylenum, 1)
		else: scintilla.SCIStyleSetBold(stylenum, 0)
		if f[1] & 2: scintilla.SCIStyleSetItalic(stylenum, 1)
		else: scintilla.SCIStyleSetItalic(stylenum, 0)
		# Size is stored in points*20 (twips); Scintilla wants points.
		scintilla.SCIStyleSetSize(stylenum, int(baseFormat[2]/20))
		if style.background is not None:
			scintilla.SCIStyleSetBack(stylenum, style.background)
		else:
			scintilla.SCIStyleSetBack(stylenum, self.GetDefaultBackground() )
		scintilla.SCIStyleSetEOLFilled(stylenum, 1) # Only needed for unclosed strings.
	def GetStyleByNum(self, stylenum):
		return self.styles_by_id[stylenum]
	def ApplyFormattingStyles(self, bReload=1):
		"""(Re)apply every registered style to the control, optionally
		reloading preferences first."""
		if bReload:
			self.LoadPreferences()
		baseFormat = self.GetDefaultFormat()
		defaultStyle = Style("default", baseFormat)
		defaultStyle.stylenum = scintillacon.STYLE_DEFAULT
		self._ReformatStyle(defaultStyle)
		for style in self.styles.values():
			if style.aliased is None:
				style.NormalizeAgainstDefault(baseFormat)
			self._ReformatStyle(style)
		self.scintilla.InvalidateRect()
	# Some functions for loading and saving preferences. By default
	# an INI file (well, MFC maps this to the registry) is used.
	def LoadPreferences(self):
		# NOTE: preference values are eval()'d - they come from the user's
		# own profile/registry, not from untrusted input.
		self.baseFormatFixed = eval(self.LoadPreference("Base Format Fixed", str(self.baseFormatFixed)))
		self.baseFormatProp = eval(self.LoadPreference("Base Format Proportional", str(self.baseFormatProp)))
		self.bUseFixed = int(self.LoadPreference("Use Fixed", 1))
		for style in self.styles.values():
			new = self.LoadPreference(style.name, str(style.format))
			try:
				style.format = eval(new)
				bg = int(self.LoadPreference(style.name + " background", -1))
				if bg != -1:
					style.background = bg
				if style.background == self.default_background:
					style.background = None
			except Exception:
				# Narrowed from a bare 'except:'; a corrupt preference
				# entry should not abort loading the remaining styles.
				print("Error loading style data for %s" % style.name)
	def LoadPreference(self, name, default):
		return win32ui.GetProfileVal("Format", name, default)
	def SavePreferences(self):
		self.SavePreference("Base Format Fixed", str(self.baseFormatFixed))
		self.SavePreference("Base Format Proportional", str(self.baseFormatProp))
		self.SavePreference("Use Fixed", self.bUseFixed)
		for style in self.styles.values():
			if style.aliased is None:
				self.SavePreference(style.name, str(style.format))
				bg_name = style.name + " background"
				self.SavePreference(bg_name, style.background) # May be None
	def SavePreference(self, name, value):
		## LoadPreference uses -1 to indicate default
		if value is None:
			value=-1
		win32ui.WriteProfileVal("Format", name, value)
# An abstract formatter
# For all formatters we actually implement here.
# (as opposed to those formatters built in to Scintilla)
class Formatter(FormatterBase):
	"""A formatter implemented in Python (as opposed to one of the lexers
	built in to Scintilla).

	Subclasses override ColorizeString() and mark style runs with
	ColorSeg(); the results are flushed to the control in Colorize().
	"""
	def __init__(self, scintilla):
		self.bCompleteWhileIdle = 0
		self.bHaveIdleHandler = 0 # Dont currently have an idle handle
		self.nextstylenum = 0
		FormatterBase.__init__(self, scintilla)
	def HookFormatter(self, parent = None):
		if parent is None: parent = self.scintilla.GetParent() # was GetParentFrame()!?
		parent.HookNotify(self.OnStyleNeeded, scintillacon.SCN_STYLENEEDED)
	def OnStyleNeeded(self, std, extra):
		"""Notification handler: style from the last styled line up to the
		position Scintilla asks for."""
		notify = self.scintilla.SCIUnpackNotifyMessage(extra)
		endStyledChar = self.scintilla.SendScintilla(scintillacon.SCI_GETENDSTYLED)
		lineEndStyled = self.scintilla.LineFromChar(endStyledChar)
		endStyled = self.scintilla.LineIndex(lineEndStyled)
		#print "enPosPaint %d endStyledChar %d lineEndStyled %d endStyled %d" % (endPosPaint, endStyledChar, lineEndStyled, endStyled)
		self.Colorize(endStyled, notify.position)
	def ColorSeg(self, start, end, styleName):
		"""Mark characters start..end (inclusive) with the named style in
		the pending style buffer."""
		end = end+1
		# assert end-start>=0, "Can't have negative styling"
		stylenum = self.styles[styleName].stylenum
		while start<end:
			self.style_buffer[start]=chr(stylenum)
			start = start+1
		#self.scintilla.SCISetStyling(end - start + 1, stylenum)
	def RegisterStyle(self, style, stylenum = None):
		# Allocate the next free style number when none is given.
		if stylenum is None:
			stylenum = self.nextstylenum
			self.nextstylenum = self.nextstylenum + 1
		FormatterBase.RegisterStyle(self, style, stylenum)
	def ColorizeString(self, str, charStart, styleStart):
		# NOTE(review): Colorize() and PythonSourceFormatter use the
		# 2-argument form (str, styleStart); the charStart parameter here
		# appears vestigial - confirm before relying on it.
		# Fix: 'raise RuntimeError, "msg"' is Python-2-only syntax; the
		# call form below behaves identically on Python 2 and 3.
		raise RuntimeError("You must override this method")
	def Colorize(self, start=0, end=-1):
		"""Colorize the given range, resuming in whatever style was in
		effect just before *start*."""
		scintilla = self.scintilla
		stringVal = scintilla.GetTextRange(start, end)
		if start > 0:
			# Resume using the style of the preceding character.
			stylenum = scintilla.SCIGetStyleAt(start - 1)
			styleStart = self.GetStyleByNum(stylenum).name
		else:
			styleStart = None
		# trace("Coloring", start, end, end-start, len(stringVal), styleStart, self.scintilla.SCIGetCharAt(start))
		scintilla.SCIStartStyling(start, 31)
		# NOTE: the "c" typecode is Python 2 only.
		self.style_buffer = array.array("c", chr(0)*len(stringVal))
		self.ColorizeString(stringVal, styleStart)
		scintilla.SCISetStylingEx(self.style_buffer)
		self.style_buffer = None
		# trace("After styling, end styled is", self.scintilla.SCIGetEndStyled())
		if self.bCompleteWhileIdle and not self.bHaveIdleHandler and end!=-1 and end < scintilla.GetTextLength():
			self.bHaveIdleHandler = 1
			win32ui.GetApp().AddIdleHandler(self.DoMoreColoring)
		# Kicking idle makes the app seem slower when initially repainting!
		# win32ui.GetMainFrame().PostMessage(WM_KICKIDLE, 0, 0)
	def DoMoreColoring(self, handler, count):
		"""Idle handler: colorize one more line per idle tick until the
		whole document has been styled."""
		try:
			scintilla = self.scintilla
			endStyled = scintilla.SCIGetEndStyled()
			lineStartStyled = scintilla.LineFromChar(endStyled)
			start = scintilla.LineIndex(lineStartStyled)
			end = scintilla.LineIndex(lineStartStyled+1)
			textlen = scintilla.GetTextLength()
			if end < 0: end = textlen
			finished = end >= textlen
			self.Colorize(start, end)
		except (win32ui.error, AttributeError):
			# Window may have closed before we finished - no big deal!
			finished = 1
		if finished:
			self.bHaveIdleHandler = 0
			win32ui.GetApp().DeleteIdleHandler(handler)
		return not finished
# A Formatter that knows how to format Python source
from keyword import iskeyword, kwlist
wordstarts = '_0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
wordchars = '._0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
operators = '%^&*()-+=|{}[]:;<>,/?!.~'
STYLE_DEFAULT = "Whitespace"
STYLE_COMMENT = "Comment"
STYLE_COMMENT_BLOCK = "Comment Blocks"
STYLE_NUMBER = "Number"
STYLE_STRING = "String"
STYLE_SQSTRING = "SQ String"
STYLE_TQSSTRING = "TQS String"
STYLE_TQDSTRING = "TQD String"
STYLE_KEYWORD = "Keyword"
STYLE_CLASS = "Class"
STYLE_METHOD = "Method"
STYLE_OPERATOR = "Operator"
STYLE_IDENTIFIER = "Identifier"
STYLE_BRACE = "Brace/Paren - matching"
STYLE_BRACEBAD = "Brace/Paren - unmatched"
STYLE_STRINGEOL = "String with no terminator"
STRING_STYLES = [STYLE_STRING, STYLE_SQSTRING, STYLE_TQSSTRING, STYLE_TQDSTRING, STYLE_STRINGEOL]
# These styles can have any ID - they are not special to scintilla itself.
# However, if we use the built-in lexer, then we must use its style numbers
# so in that case, they _are_ special.
PYTHON_STYLES = [
(STYLE_DEFAULT, (0, 0, 200, 0, 0x808080), None, scintillacon.SCE_P_DEFAULT ),
(STYLE_COMMENT, (0, 2, 200, 0, 0x008000), None, scintillacon.SCE_P_COMMENTLINE ),
(STYLE_COMMENT_BLOCK,(0, 2, 200, 0, 0x808080), None, scintillacon.SCE_P_COMMENTBLOCK ),
(STYLE_NUMBER, (0, 0, 200, 0, 0x808000), None, scintillacon.SCE_P_NUMBER ),
(STYLE_STRING, (0, 0, 200, 0, 0x008080), None, scintillacon.SCE_P_STRING ),
(STYLE_SQSTRING, STYLE_STRING, None, scintillacon.SCE_P_CHARACTER ),
(STYLE_TQSSTRING, STYLE_STRING, None, scintillacon.SCE_P_TRIPLE ),
(STYLE_TQDSTRING, STYLE_STRING, None, scintillacon.SCE_P_TRIPLEDOUBLE),
(STYLE_STRINGEOL, (0, 0, 200, 0, 0x000000), 0x008080, scintillacon.SCE_P_STRINGEOL),
(STYLE_KEYWORD, (0, 1, 200, 0, 0x800000), None, scintillacon.SCE_P_WORD),
(STYLE_CLASS, (0, 1, 200, 0, 0xFF0000), None, scintillacon.SCE_P_CLASSNAME ),
(STYLE_METHOD, (0, 1, 200, 0, 0x808000), None, scintillacon.SCE_P_DEFNAME),
(STYLE_OPERATOR, (0, 0, 200, 0, 0x000000), None, scintillacon.SCE_P_OPERATOR),
(STYLE_IDENTIFIER, (0, 0, 200, 0, 0x000000), None, scintillacon.SCE_P_IDENTIFIER ),
]
# These styles _always_ have this specific style number, regardless of
# internal or external formatter.
SPECIAL_STYLES = [
(STYLE_BRACE, (0, 0, 200, 0, 0x000000), 0xffff80, scintillacon.STYLE_BRACELIGHT),
(STYLE_BRACEBAD, (0, 0, 200, 0, 0x000000), 0x8ea5f2, scintillacon.STYLE_BRACEBAD),
]
PythonSampleCode = """\
# Some Python
class Sample(Super):
def Fn(self):
\tself.v = 1024
dest = 'dest.html'
x = func(a + 1)|)
s = "I forget...
## A large
## comment block"""
class PythonSourceFormatter(Formatter):
	"""Colorizer for Python source implemented in Python itself.

	A hand-written scanning state machine (a straight translation of the
	original C++) used instead of Scintilla's built-in Python lexer.
	"""
	string_style_names = STRING_STYLES
	def GetSampleText(self):
		return PythonSampleCode
	def LoadStyles(self):
		pass
	def SetStyles(self):
		# Regular styles get dynamically allocated numbers; SPECIAL_STYLES
		# (brace matching) must keep Scintilla's fixed style ids.
		for name, format, bg, ignore in PYTHON_STYLES:
			self.RegisterStyle( Style(name, format, bg) )
		for name, format, bg, sc_id in SPECIAL_STYLES:
			self.RegisterStyle( Style(name, format, bg), sc_id )
	def ClassifyWord(self, cdoc, start, end, prevWord):
		"""Color cdoc[start:end+1] as class/method/number/keyword or plain
		identifier, and return the word (the caller's next prevWord)."""
		word = cdoc[start:end+1]
		attr = STYLE_IDENTIFIER
		if prevWord == "class":
			attr = STYLE_CLASS
		elif prevWord == "def":
			attr = STYLE_METHOD
		elif cdoc[start] in string.digits:
			attr = STYLE_NUMBER
		elif iskeyword(word):
			attr = STYLE_KEYWORD
		self.ColorSeg(start, end, attr)
		return word
	def ColorizeString(self, str, styleStart):
		if styleStart is None: styleStart = STYLE_DEFAULT
		return self.ColorizePythonCode(str, 0, styleStart)
	def ColorizePythonCode(self, cdoc, charStart, styleStart):
		"""Scan *cdoc* from *charStart*, emitting style runs via
		ColorSeg().  *styleStart* is the style state in effect at
		charStart, so a pass can resume mid-string (see Colorize())."""
		# Straight translation of C++, should do better
		lengthDoc = len(cdoc)
		if lengthDoc <= charStart: return
		prevWord = ""
		state = styleStart
		chPrev = chPrev2 = chPrev3 = ' '
		chNext = cdoc[charStart]
		chNext2 = cdoc[charStart]
		startSeg = i = charStart
		while i < lengthDoc:
			ch = chNext
			chNext = ' '
			if i+1 < lengthDoc: chNext = cdoc[i+1]
			chNext2 = ' '
			if i+2 < lengthDoc: chNext2 = cdoc[i+2]
			if state == STYLE_DEFAULT:
				if ch in wordstarts:
					self.ColorSeg(startSeg, i - 1, STYLE_DEFAULT)
					state = STYLE_KEYWORD
					startSeg = i
				elif ch == '#':
					self.ColorSeg(startSeg, i - 1, STYLE_DEFAULT)
					# "##" starts a comment block, "#" an ordinary comment.
					if chNext == '#':
						state = STYLE_COMMENT_BLOCK
					else:
						state = STYLE_COMMENT
					startSeg = i
				elif ch == '\"':
					self.ColorSeg(startSeg, i - 1, STYLE_DEFAULT)
					startSeg = i
					# NOTE: this assignment is dead - both branches below
					# overwrite state.
					state = STYLE_COMMENT
					if chNext == '\"' and chNext2 == '\"':
						i = i + 2
						state = STYLE_TQDSTRING
						ch = ' '
						chPrev = ' '
						chNext = ' '
						if i+1 < lengthDoc: chNext = cdoc[i+1]
					else:
						state = STYLE_STRING
				elif ch == '\'':
					self.ColorSeg(startSeg, i - 1, STYLE_DEFAULT)
					startSeg = i
					# NOTE: dead assignment, as above.
					state = STYLE_COMMENT
					if chNext == '\'' and chNext2 == '\'':
						i = i + 2
						state = STYLE_TQSSTRING
						ch = ' '
						chPrev = ' '
						chNext = ' '
						if i+1 < lengthDoc: chNext = cdoc[i+1]
					else:
						state = STYLE_SQSTRING
				elif ch in operators:
					self.ColorSeg(startSeg, i - 1, STYLE_DEFAULT)
					self.ColorSeg(i, i, STYLE_OPERATOR)
					startSeg = i+1
			elif state == STYLE_KEYWORD:
				# Inside a word; a non-word char ends it and may start a
				# comment, string or operator immediately.
				if ch not in wordchars:
					prevWord = self.ClassifyWord(cdoc, startSeg, i-1, prevWord)
					state = STYLE_DEFAULT
					startSeg = i
					if ch == '#':
						if chNext == '#':
							state = STYLE_COMMENT_BLOCK
						else:
							state = STYLE_COMMENT
					elif ch == '\"':
						if chNext == '\"' and chNext2 == '\"':
							i = i + 2
							state = STYLE_TQDSTRING
							ch = ' '
							chPrev = ' '
							chNext = ' '
							if i+1 < lengthDoc: chNext = cdoc[i+1]
						else:
							state = STYLE_STRING
					elif ch == '\'':
						if chNext == '\'' and chNext2 == '\'':
							i = i + 2
							state = STYLE_TQSSTRING
							ch = ' '
							chPrev = ' '
							chNext = ' '
							if i+1 < lengthDoc: chNext = cdoc[i+1]
						else:
							state = STYLE_SQSTRING
					elif ch in operators:
						self.ColorSeg(startSeg, i, STYLE_OPERATOR)
						startSeg = i+1
			elif state == STYLE_COMMENT or state == STYLE_COMMENT_BLOCK:
				if ch == '\r' or ch == '\n':
					self.ColorSeg(startSeg, i-1, state)
					state = STYLE_DEFAULT
					startSeg = i
			elif state == STYLE_STRING:
				if ch == '\\':
					# Skip over an escaped quote or backslash.
					if chNext == '\"' or chNext == '\'' or chNext == '\\':
						i = i + 1
						ch = chNext
						chNext = ' '
						if i+1 < lengthDoc: chNext = cdoc[i+1]
				elif ch == '\"':
					self.ColorSeg(startSeg, i, STYLE_STRING)
					state = STYLE_DEFAULT
					startSeg = i+1
			elif state == STYLE_SQSTRING:
				if ch == '\\':
					if chNext == '\"' or chNext == '\'' or chNext == '\\':
						i = i+1
						ch = chNext
						chNext = ' '
						if i+1 < lengthDoc: chNext = cdoc[i+1]
				elif ch == '\'':
					self.ColorSeg(startSeg, i, STYLE_SQSTRING)
					state = STYLE_DEFAULT
					startSeg = i+1
			elif state == STYLE_TQSSTRING:
				# Triple-quoted strings end on the third unescaped quote.
				if ch == '\'' and chPrev == '\'' and chPrev2 == '\'' and chPrev3 != '\\':
					self.ColorSeg(startSeg, i, STYLE_TQSSTRING)
					state = STYLE_DEFAULT
					startSeg = i+1
			elif state == STYLE_TQDSTRING and ch == '\"' and chPrev == '\"' and chPrev2 == '\"' and chPrev3 != '\\':
				self.ColorSeg(startSeg, i, STYLE_TQDSTRING)
				state = STYLE_DEFAULT
				startSeg = i+1
			chPrev3 = chPrev2
			chPrev2 = chPrev
			chPrev = ch
			i = i + 1
		# Flush whatever style run is still open at end of text.
		if startSeg < lengthDoc:
			if state == STYLE_KEYWORD:
				self.ClassifyWord(cdoc, startSeg, lengthDoc-1, prevWord)
			else:
				self.ColorSeg(startSeg, lengthDoc-1, state)
# These taken from the SciTE properties file.
# Maps lists of file extensions to the built-in Scintilla lexer to use.
source_formatter_extensions = [
	( ".py .pys .pyw".split(), scintillacon.SCLEX_PYTHON ),
	( ".html .htm .asp .shtml".split(), scintillacon.SCLEX_HTML ),
	# Fix: the original listed "c" without its leading dot, so ".c" files
	# never matched here and fell back to the Python lexer.
	( ".c .cc .cpp .cxx .h .hh .hpp .hxx .idl .odl .php3 .phtml .inc .js".split(), scintillacon.SCLEX_CPP ),
	( ".vbs .frm .ctl .cls".split(), scintillacon.SCLEX_VB ),
	( ".pl .pm .cgi .pod".split(), scintillacon.SCLEX_PERL ),
	( ".sql .spec .body .sps .spb .sf .sp".split(), scintillacon.SCLEX_SQL ),
	( ".tex .sty".split(), scintillacon.SCLEX_LATEX ),
	( ".xml .xul".split(), scintillacon.SCLEX_XML ),
	( ".err".split(), scintillacon.SCLEX_ERRORLIST ),
	( ".mak".split(), scintillacon.SCLEX_MAKEFILE ),
	( ".bat .cmd".split(), scintillacon.SCLEX_BATCH ),
]
class BuiltinSourceFormatter(FormatterBase):
	"""A formatter that delegates the actual styling to one of the lexers
	built in to Scintilla, selected by file extension."""
	def __init__(self, scintilla, ext):
		self.ext = ext
		# Fix: nextstylenum was never initialized in this class, so
		# RegisterStyle() with no explicit number raised AttributeError.
		self.nextstylenum = 0
		FormatterBase.__init__(self, scintilla)
	def Colorize(self, start=0, end=-1):
		# The built-in lexer does the work.
		self.scintilla.SendScintilla(scintillacon.SCI_COLOURISE, start, end)
	def RegisterStyle(self, style, stylenum = None):
		"""Register *style*, allocating the next style number if none is
		given."""
		assert style.stylenum is None, "Style has already been registered"
		if stylenum is None:
			stylenum = self.nextstylenum
			self.nextstylenum = self.nextstylenum + 1
		# Fix: the original tested self.styles (keyed by style *name*)
		# with a style number, so reused numbers went undetected.
		assert self.styles_by_id.get(stylenum) is None, "We are reusing a style number!"
		style.stylenum = stylenum
		self.styles[style.name] = style
		self.styles_by_id[stylenum] = style
	def HookFormatter(self, parent = None):
		sc = self.scintilla
		# Pick the lexer matching our extension, defaulting to Python.
		for exts, formatter in source_formatter_extensions:
			if self.ext in exts:
				formatter_use = formatter
				break
		else:
			formatter_use = scintillacon.SCLEX_PYTHON
		sc.SendScintilla(scintillacon.SCI_SETLEXER, formatter_use)
		# ' '.join replaces the Python-2-only string.join(); same result.
		keywords = ' '.join(kwlist)
		sc.SCISetKeywords(keywords)
class BuiltinPythonSourceFormatter(BuiltinSourceFormatter):
	"""Python formatting via Scintilla's built-in Python lexer."""
	sci_lexer_name = scintillacon.SCLEX_PYTHON
	string_style_names = STRING_STYLES
	def __init__(self, sc, ext = ".py"):
		BuiltinSourceFormatter.__init__(self, sc, ext)
	def SetStyles(self):
		# Both tables carry an explicit Scintilla style id, so every
		# style is registered under its fixed number.
		for table in (PYTHON_STYLES, SPECIAL_STYLES):
			for name, format, bg, sc_id in table:
				self.RegisterStyle( Style(name, format, bg), sc_id )
	def GetSampleText(self):
		return PythonSampleCode
| apache-2.0 |
albireox/marvin | python/marvin/api/spaxel.py | 2 | 12893 | #!/usr/bin/env python
# encoding: utf-8
#
# spaxel.py
#
# Licensed under a 3-clause BSD license.
#
# Revision history:
# 11 Apr 2016 J. Sánchez-Gallego
# Initial version
# from __future__ import division
# from __future__ import print_function
#
# import numpy as np
#
# from flask_classful import route
# from flask import jsonify
#
# from marvin.tools.spaxel import Spaxel
# from marvin.api.base import BaseView, arg_validate as av
# from marvin.core.exceptions import MarvinError
# from marvin.utils.general import parseIdentifier
#
#
# def _getSpaxel(name, x, y, **kwargs):
# """Retrieves a Marvin Spaxel object."""
#
# spaxel = None
# results = {}
#
# # Pop the release to remove a duplicate input to Maps
# release = kwargs.pop('release', None)
#
# # parse name into either mangaid or plateifu
# try:
# idtype = parseIdentifier(name)
# except Exception as e:
# results['error'] = 'Failed to parse input name {0}: {1}'.format(name, str(e))
# return spaxel, results
#
# try:
# if idtype == 'plateifu':
# plateifu = name
# mangaid = None
# elif idtype == 'mangaid':
# mangaid = name
# plateifu = None
# else:
# raise MarvinError('invalid plateifu or mangaid: {0}'.format(idtype))
#
# spaxel = Spaxel(x=x, y=y, mangaid=mangaid, plateifu=plateifu,
# release=release, **kwargs)
# results['status'] = 1
# except Exception as e:
# results['error'] = 'Failed to retrieve Spaxels {0}: {1}'.format(name, str(e))
#
# return spaxel, results
#
#
# class SpaxelView(BaseView):
# """Class describing API calls related to Spaxels."""
#
# route_base = '/spaxels/'
#
# @route('/<name>/spectra/<x>/<y>/', methods=['GET', 'POST'], endpoint='getSpectrum')
# @av.check_args()
# def spectrum(self, args, name, x, y):
# """Returns a dictionary with the DRP spectrum for a spaxel.
#
# Loads a DRP Cube and uses getSpaxel to retrieve the ``(x,y)``
# spaxel. Returns a dictionary with the spectrum for that spaxel.
#
# .. :quickref: Spaxel; Get a spectrum from a specific spaxel from a DRP cube
#
# :param name: The name of the object as plate-ifu or mangaid
# :param x: The x coordinate of the spaxel (origin is ``lower``)
# :param y: The y coordinate of the spaxel (origin is ``lower``)
# :form inconfig: json of any incoming parameters
# :resjson int status: status of response. 1 if good, -1 if bad.
# :resjson string error: error message, null if None
# :resjson json inconfig: json of incoming configuration
# :resjson json utahconfig: json of outcoming configuration
# :resjson string traceback: traceback of an error, null if None
# :resjson json data: dictionary of returned data
# :json list flux: the spectrum flux array
# :json list ivar: the spectrum ivar array
# :json list mask: the spectrum mask array
# :json list wavelength: the spectrum wavelength array
# :json list specres: the spectrum spectral resolution array
# :resheader Content-Type: application/json
# :statuscode 200: no error
# :statuscode 422: invalid input parameters
#
# **Example request**:
#
# .. sourcecode:: http
#
# GET /marvin/api/spaxels/8485-1901/spectra/10/10/ HTTP/1.1
# Host: api.sdss.org
# Accept: application/json, */*
#
# **Example response**:
#
# .. sourcecode:: http
#
# HTTP/1.1 200 OK
# Content-Type: application/json
# {
# "status": 1,
# "error": null,
# "inconfig": {"release": "MPL-5"},
# "utahconfig": {"release": "MPL-5", "mode": "local"},
# "traceback": null,
# "data": {"flux": [-0.001416, 0.0099, 0.0144, ...],
# "ivar": [134.613, 133.393, 132.094, ...],
# "mask": [0, 0, 0, ...],
# "wavelength": [3621.6, 3622.43, ..., 10353.8],
# "specres": [1026.83, 1027.07, 1027.3]
# }
# }
#
# """
#
# # Pop any args we don't want going into Spaxel
# args = self._pop_args(args, arglist=['name', 'x', 'y'])
#
# spaxel, results = _getSpaxel(name, x, y, maps=False, modelcube=False, **args)
#
# self.update_results(results)
#
# if spaxel is not None:
# self.results['data'] = {'flux': spaxel.spectrum.value.tolist(),
# 'ivar': spaxel.spectrum.ivar.tolist(),
# 'mask': spaxel.spectrum.mask.tolist(),
# 'wavelength': spaxel.spectrum.wavelength.value.tolist(),
# 'specres': spaxel.specres.tolist()}
#
# return jsonify(self.results)
#
# @route('/<name>/properties/<template>/<x>/<y>/',
# methods=['GET', 'POST'], endpoint='getProperties')
# @av.check_args()
# def properties(self, args, name, x, y, template):
# """Returns a dictionary with the DAP properties for a spaxel.
#
# Loads a DAP Maps and uses getSpaxel to retrieve the ``(x,y)``
# spaxel. Returns a dictionary with the properties for that spaxel.
#
# .. :quickref: Spaxel; Get DAP properties from a specific spaxel from a DAP Maps
#
# :param name: The name of the object as plate-ifu or mangaid
# :param x: The x coordinate of the spaxel (origin is ``lower``)
# :param y: The y coordinate of the spaxel (origin is ``lower``)
# :param template: The template associated with this maps. If none, default is used.
# :form inconfig: json of any incoming parameters
# :resjson int status: status of response. 1 if good, -1 if bad.
# :resjson string error: error message, null if None
# :resjson json inconfig: json of incoming configuration
# :resjson json utahconfig: json of outcoming configuration
# :resjson string traceback: traceback of an error, null if None
# :resjson json data: dictionary of returned data
# :json dict properties: the DAP properties for this spaxel
# :resheader Content-Type: application/json
# :statuscode 200: no error
# :statuscode 422: invalid input parameters
#
# **Example request**:
#
# .. sourcecode:: http
#
# GET /marvin/api/spaxels/8485-1901/properties/GAU-MILESHC/10/10/ HTTP/1.1
# Host: api.sdss.org
# Accept: application/json, */*
#
# **Example response**:
#
# .. sourcecode:: http
#
# HTTP/1.1 200 OK
# Content-Type: application/json
# {
# "status": 1,
# "error": null,
# "inconfig": {"release": "MPL-5"},
# "utahconfig": {"release": "MPL-5", "mode": "local"},
# "traceback": null,
# "data": {"properties": {
# "bin_area": {
# "channel": null,
# "description": "Area of each bin.",
# "ivar": null,
# "mask": null,
# "name": "bin_area",
# "unit": "arcsec^2",
# "value": 0.5
# },
# ...
# }
# }
# }
#
# """
#
# # Pop any args we don't want going into Spaxel
# args = self._pop_args(args, arglist=['name', 'x', 'y'])
#
# spaxel, results = _getSpaxel(name, x, y, cube=False, modelcube=False, **args)
#
# self.update_results(results)
#
# if spaxel is not None:
# spaxel_properties = {}
# for name in spaxel.properties:
# prop = spaxel.properties[name]
# spaxel_properties[name] = {}
# for key in ['name', 'value', 'ivar', 'mask', 'description']:
# propval = getattr(prop, key)
# if type(propval).__module__ == np.__name__:
# propval = np.asscalar(str(propval))
# spaxel_properties[name][key] = propval
# spaxel_properties[name]['channel'] = prop.channel.name if prop.channel else None
# spaxel_properties[name]['unit'] = prop.unit.to_string()
#
# self.results['data'] = {'properties': spaxel_properties}
#
# return jsonify(self.results)
#
# @route('/<name>/models/<template>/<x>/<y>/',
# methods=['GET', 'POST'], endpoint='getModels')
# @av.check_args()
# def getModels(self, args, name, x, y, template):
# """Returns a dictionary with the models for a spaxel.
#
# Loads a ModelCube and uses getSpaxel to retrieve the ``(x,y)``
# spaxel. Returns a dictionary with the models for that spaxel.
#
# .. :quickref: Spaxel; Get the models for a specific spaxel from a DAP ModelCube
#
# :param name: The name of the object as plate-ifu or mangaid
# :param x: The x coordinate of the spaxel (origin is ``lower``)
# :param y: The y coordinate of the spaxel (origin is ``lower``)
# :param template: The template associated with this maps. If none, default is used.
# :form inconfig: json of any incoming parameters
# :resjson int status: status of response. 1 if good, -1 if bad.
# :resjson string error: error message, null if None
# :resjson json inconfig: json of incoming configuration
# :resjson json utahconfig: json of outcoming configuration
# :resjson string traceback: traceback of an error, null if None
# :resjson json data: dictionary of returned data
# :json list flux_array: flux of the binned spectrum
# :json list flux_ivar: ivar of the binned spectrum
# :json list flux_mask: mask of the binned spectrum and model
# :json list model_array: best fitting model spectra
# :json list model_emline: model spectrum with only emission lines
# :json list model_emline_base: model of constant baseline fitted beneath emission lines
# :json list model_emline_mask: bitmask that applies only to emission-line modeling
# :json string bintype: the spectrum spectral resolution array
# :json string template: the spectrum spectral resolution array
# :resheader Content-Type: application/json
# :statuscode 200: no error
# :statuscode 422: invalid input parameters
#
# **Example request**:
#
# .. sourcecode:: http
#
# GET /marvin/api/spaxels/8485-1901/models/GAU-MILESHC/10/10/ HTTP/1.1
# Host: api.sdss.org
# Accept: application/json, */*
#
# **Example response**:
#
# .. sourcecode:: http
#
# HTTP/1.1 200 OK
# Content-Type: application/json
# {
# "status": 1,
# "error": null,
# "inconfig": {"release": "MPL-5"},
# "utahconfig": {"release": "MPL-5", "mode": "local"},
# "traceback": null,
# "data": {"bintype": "SPX",
# "template": "GAU-MILESHC",
# "flux_array": [-0.001416, 0.0099, 0.0144, ...],
# "flux_ivar": [134.613, 133.393, 132.094, ...],
# "flux_mask": [32, 32, 32, ...],
# "model_array": [0, 0, 0, ...],
# "model_emline": [0, 0, 0, ...],
# "model_emline_base": [0, 0, 0, ...],
# "model_emline_mask": [128, 128, 128, ...],
# }
# }
#
# """
#
# # Pop any args we don't want going into Spaxel
# args = self._pop_args(args, arglist=['name', 'x', 'y'])
#
# spaxel, results = _getSpaxel(name, x, y, cube=False, maps=False, **args)
#
# self.update_results(results)
#
# if spaxel is not None:
#
# self.results['data'] = {
# 'flux_array': spaxel.model_flux.value.tolist(),
# 'flux_ivar': spaxel.model_flux.ivar.tolist(),
# 'flux_mask': spaxel.model_flux.mask.tolist(),
# 'model_array': spaxel.model.value.tolist(),
# 'model_emline': spaxel.emline.value.tolist(),
# 'model_emline_base': spaxel.emline_base.value.tolist(),
# 'model_emline_mask': spaxel.emline.mask.tolist(),
# 'bintype': spaxel.bintype.name,
# 'template': spaxel.template.name}
#
# return jsonify(self.results)
| bsd-3-clause |
viggates/nova | nova/tests/scheduler/test_rpcapi.py | 16 | 2398 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.scheduler.rpcapi
"""
import mox
from oslo.config import cfg
from nova import context
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
CONF = cfg.CONF
class SchedulerRpcAPITestCase(test.NoDBTestCase):
    """Tests for nova.scheduler.rpcapi.SchedulerAPI.
    Uses mox record/replay, so the order of the stubbed calls below is
    significant: it mirrors the order SchedulerAPI is expected to make
    them.
    """
    def _test_scheduler_api(self, method, rpc_method, **kwargs):
        # Generic driver: invoke rpcapi.<method> and verify it performs a
        # single RPC 'call' or 'cast' with the expected topic, version,
        # fanout flag and arguments.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.assertIsNotNone(rpcapi.client)
        self.assertEqual(rpcapi.client.target.topic, CONF.scheduler_topic)
        # Only 'call' returns a value; 'cast' is fire-and-forget.
        expected_retval = 'foo' if rpc_method == 'call' else None
        # 'version'/'fanout' configure the expectation and must not be
        # forwarded to the RPC method itself.
        expected_version = kwargs.pop('version', None)
        expected_fanout = kwargs.pop('fanout', None)
        expected_kwargs = kwargs.copy()
        self.mox.StubOutWithMock(rpcapi, 'client')
        rpcapi.client.can_send_version(
            mox.IsA(str)).MultipleTimes().AndReturn(True)
        prepare_kwargs = {}
        if expected_fanout:
            prepare_kwargs['fanout'] = True
        if expected_version:
            prepare_kwargs['version'] = expected_version
        rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client)
        rpc_method = getattr(rpcapi.client, rpc_method)
        rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo')
        self.mox.ReplayAll()
        # NOTE(markmc): MultipleTimes() is OnceOrMore() not ZeroOrMore()
        rpcapi.client.can_send_version('I fool you mox')
        retval = getattr(rpcapi, method)(ctxt, **kwargs)
        self.assertEqual(retval, expected_retval)
    def test_select_destinations(self):
        self._test_scheduler_api('select_destinations', rpc_method='call',
                                 request_spec='fake_request_spec',
                                 filter_properties='fake_prop')
| apache-2.0 |
liw/daos | src/client/setup.py | 1 | 1475 | #
# setup.py for packaging pydaos python module.
#
# To use type:
#
# python3 setup.py install
#
# If run from within a compiled DAOS source tree this it will detect the
# install path automatically, otherwise it'll use the defaults.
import os
import sys
import json
def load_conf():
    """Walk up from this file's directory looking for '.build_vars.json'.

    Returns the parsed JSON content as a dict, or None when no such file
    exists anywhere up to the filesystem root.
    """
    directory = os.path.dirname(os.path.abspath(__file__))
    while True:
        conf_file = os.path.join(directory, '.build_vars.json')
        if os.path.exists(conf_file):
            # Use a context manager so the handle is always closed (the
            # original leaked it).
            with open(conf_file, 'r') as ofh:
                return json.load(ofh)
        parent = os.path.dirname(directory)
        if parent == directory:
            # Reached the filesystem root without finding the file.
            return None
        directory = parent
from setuptools import setup, find_packages, Extension

conf = load_conf()

# Arguments for building the pydaos C extension module.
args = {'sources': ['pydaos/pydaos_shim.c'],
        'libraries': ['daos', 'duns']}

if conf:
    args['include_dirs'] = [os.path.join(conf['PREFIX'], 'include')]
    if conf.get('CART_PREFIX', None):
        # Fix: extend() with a string added the path one *character* at a
        # time; append() adds the include directory as a single entry.
        args['include_dirs'].append(os.path.join(
            conf['CART_PREFIX'], 'include'))
    args['library_dirs'] = [os.path.join(conf['PREFIX'], 'lib64')]
    args['runtime_library_dirs'] = args['library_dirs']

args['define_macros'] = [('__USE_PYTHON3__', 1)]

module1 = Extension('pydaos.{}'.format("pydaos_shim"), **args)

setup(
    name='pydaos',
    version='0.2',
    packages=find_packages(),
    description='DAOS interface',
    ext_modules=[module1]
)
| apache-2.0 |
djaodjin/drop | src/tero/dparselog.py | 1 | 13478 | # Copyright (c) 2020, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse, datetime, gzip, itertools, json, logging, re, os, os.path, sys
import six
import pytz
from tero import __version__
LOGGER = logging.getLogger(__name__)
# http://stackoverflow.com/questions/1101508/how-to-parse-dates-with-0400-timezone-string-in-python/23122493#23122493
# http://stackoverflow.com/questions/1101508/how-to-parse-dates-with-0400-timezone-string-in-python/23122493#23122493
class FixedOffset(datetime.tzinfo):
    """Timezone at a fixed offset in minutes: `time = utc_time + utc_offset`."""

    def __init__(self, offset):
        self.__offset = datetime.timedelta(minutes=offset)
        hours, minutes = divmod(offset, 60)
        # POSIX-style zone name; the trailing number mirrors the deprecated
        # GMT+h naming convention, which carries the opposite sign (its
        # value is informational only and drops the minutes).
        self.__name = '<{:+03d}{:02d}>{:+d}'.format(hours, minutes, -hours)

    def utcoffset(self, dt=None):
        return self.__offset

    def tzname(self, dt=None):
        return self.__name

    def dst(self, dt=None):
        # A fixed offset never observes daylight saving time.
        return datetime.timedelta(0)

    def __repr__(self):
        total_minutes = self.utcoffset().total_seconds() // 60
        return 'FixedOffset(%d)' % total_minutes


def get_last_modified(item):
    """Key function returning an entry's 'LastModified' value
    (e.g. an S3 listing item)."""
    return item['LastModified']


def parse_date(dt_str):
    """Parse an access-log timestamp such as '25/Dec/2020:10:30:00 +0000'
    into a timezone-aware datetime."""
    stamp, zone = dt_str.split(' ')
    parsed = datetime.datetime.strptime(stamp, '%d/%b/%Y:%H:%M:%S')
    minutes = int(zone[-4:-2]) * 60 + int(zone[-2:])
    if zone.startswith('-'):
        minutes = -minutes
    return parsed.replace(tzinfo=FixedOffset(minutes))
def split_on_comma(http_x_forwarded_for):
    """Split an X-Forwarded-For header value into a list of addresses;
    nginx logs '-' when the header is absent, which maps to []."""
    if http_x_forwarded_for == '-':
        return []
    pieces = http_x_forwarded_for.split(',')
    return list(map(str.strip, pieces))
def convert_bytes_sent(value):
    """Interpret the $body_bytes_sent log field; '-' stands for zero."""
    return 0 if value == '-' else int(value)
def generate_regex(format_string, var_regex, regexps):
    """Build a compiled regex from an nginx-style log format string.

    Every ``$variable`` occurrence (as matched by *var_regex*) in
    *format_string* is replaced by the capturing group
    ``(regexps[variable])``; the literal text in between is escaped
    verbatim, so the resulting pattern captures one group per variable,
    in order of appearance.

    :param format_string: log format containing ``$var`` placeholders
    :param var_regex: pattern matching a single placeholder
    :param regexps: maps each placeholder (including ``$``) to its regex
    :return: the compiled pattern
    :raises KeyError: when a placeholder has no entry in *regexps*
    """
    # A single finditer() scan yields both the placeholder names and their
    # positions (the previous findall()+finditer() pair scanned twice and
    # silently assumed the two stayed aligned).
    var_matches = list(re.finditer(var_regex, format_string))
    format_vars = [match.group(0) for match in var_matches]
    var_match_positions = [(match.start(), match.end())
                           for match in var_matches]
    # Boundaries of the literal chunks around the placeholders; for K
    # placeholders there are always exactly K + 1 literal chunks (possibly
    # empty ones at either end).
    non_var_indexes = (
        [0] +
        list(itertools.chain(*var_match_positions)) +
        [len(format_string)]
    )
    grouped_non_var_indexes = [(non_var_indexes[i*2], non_var_indexes[i*2+1])
                               for i in range(len(non_var_indexes)//2)]
    non_var_strings = [format_string[start:end]
                       for start, end in grouped_non_var_indexes]
    escaped_non_var_strings = [re.escape(s) for s in non_var_strings]
    # One capturing group per placeholder (the enumerate() index in the
    # previous version was never used).
    named_regexps = ['(' + regexps[name] + ')' for name in format_vars]
    # Interleave literals and groups: lit0 grp0 lit1 grp1 ... litK.  This
    # replaces six.moves.zip_longest with an equivalent explicit loop.
    pieces = []
    for literal, group in zip(escaped_non_var_strings, named_regexps):
        pieces.append(literal)
        pieces.append(group)
    pieces.append(escaped_non_var_strings[-1])
    return re.compile(''.join(pieces))
class NginxLogParser(object):
    """
    We make sure nginx and gunicorn access logs have the same format.

    Builds a regex from the nginx ``log_format`` below and parses one
    access-log line at a time into a dict of typed fields.
    """

    def __init__(self):
        # The access-log layout; each $variable is substituted with a
        # capturing group from `regexps` by generate_regex().
        format_string = '$remote_addr$load_balancer_addr $http_host'\
                        ' $remote_user [$time_local]'\
                        ' "$request" $status $body_bytes_sent'\
                        ' "$http_referer" "$http_user_agent"'\
                        ' "$http_x_forwarded_for"'
        var_regex = r'\$[a-z_]+'
        # NOTE(review): this only matches fully-expanded IPv6 addresses
        # (eight groups, no '::' compression) — confirm that is acceptable.
        ipv6_regex = r'(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}'
        ipv4_regex = r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
        ip_num_regex = r'(?:%s)|(?:%s)' % (ipv4_regex, ipv6_regex)
        regexps = {
            '$remote_addr' : ip_num_regex,
            # Zero or more ", <ip>" continuations appended by load balancers.
            '$load_balancer_addr' : r'(?:,\s%s)*' % ip_num_regex,
            '$http_host' :
            # We cannot have parentheses in regex here?
                r'[a-z0-9.-]+|[a-z0-9.-]+:\d+?',
            '$remote_user' : r'[\w.@+-]+',
            '$time_local' : r'[^\[\]]+',
            '$request' : r'[^"]*',
            '$status' : r'[0-9]{3}',
            '$body_bytes_sent' : r'[0-9]+|-',
            '$http_referer' : r'[^"]+',
            '$http_user_agent' : r'[^"]*',
            '$http_x_forwarded_for' : r'[^"]+',
        }
        self.format_vars = re.findall(var_regex, format_string)
        self.regex = generate_regex(format_string, var_regex, regexps)

    def parse(self, to_parse):
        """Parse one access-log line into a dict of typed fields, or
        return None when the line does not match the expected format."""
        match = self.regex.match(to_parse)
        if match:
            parsed = dict(zip(self.format_vars, match.groups()))
        else:
            return None
        # Strip the leading '$' from the captured variable names.
        parsed = {k[1:]: v for k, v in six.iteritems(parsed)}
        # Convert the raw captured strings into typed values.
        field_types = {
            'status' : int,
            'body_bytes_sent': convert_bytes_sent,
            'time_local': parse_date,
            'http_x_forwarded_for': split_on_comma
        }
        for key, convert in six.iteritems(field_types):
            parsed[key] = convert(parsed[key])
        if (parsed['http_x_forwarded_for']
                and parsed['remote_addr'] in ['-', '127.0.0.1']):
            # To simplify processing later on, we replace the direct IP
            # the request is coming from (will be localhost for gunicorn
            # behind nginx anyway) by the IP of the browser client.
            parsed['remote_addr'] = parsed['http_x_forwarded_for'][0]
        # Break "$request" down into method and path when it looks like a
        # regular HTTP/1.x request line.
        request_regex = r'(?P<http_method>[A-Z]+) (?P<http_path>.*) HTTP/1.[01]'
        request_match = re.match(request_regex, parsed['request'])
        if request_match:
            parsed.update(request_match.groupdict())
        return parsed
class JsonEventParser(object):
    """
    Application logs

    Parses one JSON-formatted application-log line at a time; returns
    None for lines that do not contain valid JSON.
    """

    @staticmethod
    def parse(to_parse):
        """Parse one application-log line; return the event dict or None."""
        try:
            # Skip any prefix (timestamp, logger name, ...) before the JSON
            # payload.  When no '{' is present, find() returns -1 and the
            # slice keeps only the last character, so json.loads fails and
            # we fall through to the except branch below.
            to_parse = to_parse[to_parse.find('{'):]
            event = json.loads(to_parse)
            # Same typed-field conversions as NginxLogParser, applied only
            # to keys actually present in the event.
            field_types = {
                'status' : int,
                'body_bytes_sent': convert_bytes_sent,
                'time_local': parse_date,
                'http_x_forwarded_for': split_on_comma
            }
            for key, convert in six.iteritems(field_types):
                if key in event:
                    event[key] = convert(event[key])
        except ValueError:
            # Raised by json.loads on malformed input, and by the field
            # converters on malformed values.
            event = None
        return event
def error_event(fname, key, reason, extra=None):
    """Build an Elasticsearch bulk-index document describing a parse
    failure for S3 object *key*; *extra* fields are merged into the body.

    Note: *fname* is accepted for symmetry with the call sites but is not
    recorded in the document.
    """
    now = datetime.datetime.now()
    source = {
        'reason': reason,
        's3_key': key,
        's3_bucket' : 'djaodjin',
        'parse_time': now,
    }
    if extra:
        source.update(extra)
    index_name = 'parse-errors-%s' % datetime.datetime.strftime(now, '%Y%m%d')
    return {
        '_index': index_name,
        '_type': 'parse-error',
        '_source': source,
    }
def parse_logname(filename):
    """Split a log file name of the form
    ``<host>-<log_name>.log-<instance_id>-<YYYYMMDD>[.gz]``.

    Returns a ``(host, log_name, instance_id, log_date)`` tuple where
    `log_date` is a UTC-aware datetime; all four are None when the name
    does not match.
    """
    pattern = (r'(?P<host>\S+)-(?P<log_name>\S+)\.log-'
               r'(?P<instance_id>[^-]+)-(?P<log_date>[0-9]{8})(\.gz)?')
    look = re.match(pattern, filename)
    if not look:
        return None, None, None, None
    log_date = datetime.datetime.strptime(look.group('log_date'), '%Y%m%d')
    if log_date.tzinfo is None:
        # strptime always yields a naive datetime; attach UTC.
        log_date = log_date.replace(tzinfo=pytz.utc)
    return (look.group('host'), look.group('log_name'),
            look.group('instance_id'), log_date)
def generate_events(fileobj, key):
    """Yield Elasticsearch bulk-index actions for every line of a log file.

    :param fileobj: readable file object with the log content
    :param key: the S3 key (path) the file came from; its basename and
        parent folder determine which parser is used
    :yields: dicts with ``_id``/``_index``/``_type``/``_source``; lines
        that cannot be parsed yield error documents instead, and parsing
        bails out once more than 80% of over 100 lines have failed.
    """
    fname = os.path.basename(key)
    host, log_name, instance_id, log_date = parse_logname(fname)
    if not log_name:
        sys.stderr.write('warning: "%s" is not a log file?' % fname)
        yield error_event(fname, key, 'log filename didnt match regexp')
        return
    # Derive the high-level log type from the folder the file sits in.
    log_folder = os.path.basename(os.path.dirname(key))
    if log_folder == 'nginx':
        log_type = 'webfront'
    elif log_folder == 'gunicorn':
        if fname.startswith('djaodjin-access.log-'):
            log_type = 'djsession'
        else:
            log_type = 'customer'
    else:
        log_type = None
    index = 'logs-%s' % log_date.strftime('%Y%m%d')
    doc_type = 'log'
    # Pick the parser.  (A dead `parser = NginxLogParser()` assignment that
    # was immediately overwritten has been removed.)
    if log_folder == 'nginx':
        parser = NginxLogParser()
    elif log_folder == 'gunicorn':
        # gunicorn access logs share the nginx format; everything else
        # under gunicorn/ is a JSON application log.
        if log_name == 'access':
            parser = NginxLogParser()
        else:
            parser = JsonEventParser()
    else:
        sys.stderr.write("error: unknown log folder %s\n" % log_folder)
        yield error_event(fname, key, 'could not find parser for log folder',
                          {'log_folder': log_folder,
                           'log_date': log_date})
        return
    LOGGER.debug("using parser %s", parser)
    error_count = 0
    ok_count = 0
    for idx, line in enumerate(fileobj.readlines()):
        if hasattr(line, 'decode'):
            line = line.decode('ascii', errors='replace')
        line = line.strip()
        total_count = ok_count + error_count
        if total_count > 100 and (float(error_count)/total_count) > 0.8:
            sys.stderr.write(
                "error: too many errors for key '%s'. bailing" % str(key))
            yield error_event(fname, key, 'bailing because of too many errors.',
                              {'log_date': log_date,
                               'line': line})
            return
        try:
            event = parser.parse(line)
        except Exception as err:
            sys.stderr.write("error: %s in line '%s'\n" % (err, line))
            yield error_event(fname, key, 'could not parse log line',
                              {'line': line,
                               # BUG FIX: `err.message` does not exist on
                               # Python 3 exceptions; use str(err).
                               'exception_message': str(err),
                               'log_date': log_date,
                               'exception_type': type(err).__name__})
            # BUG FIX: exceptions now count toward the bail-out ratio; they
            # were previously ignored, so a stream of raising lines never
            # triggered the 80% check above.
            error_count += 1
            continue
        if event is None:
            sys.stderr.write(
                "error: parsing '%s' in '%s'\n" % (line, log_folder))
            yield error_event(fname, key, 'could not parse log line',
                              {'line': line,
                               'log_date': log_date,})
            error_count += 1
            continue
        ok_count += 1
        _id = '%s:%d' % (key, idx)
        # Previously 'log_name' was written by two successive update()
        # calls; set every provenance field once.
        event.update({
            'host': host,
            'log_name': log_name,
            'instance_id': instance_id,
            'log_date': log_date.strftime('%Y%m%d')
        })
        if log_type is not None:
            event['log_type'] = log_type
        yield {
            '_id': _id,
            '_index': index,
            '_type': doc_type,
            '_source': event
        }
def sanitize_filename(fname):
    """Reduce *fname* to a safe flat name: path separators become '_',
    characters outside [a-zA-Z_\\-.0-9] are dropped, and any leading run
    of non-alphanumeric characters (including dots) is stripped."""
    fname = fname.replace(os.path.sep, '_')
    fname = re.sub(r'[^a-zA-Z_\-.0-9]', '', fname)
    fname = re.sub(r'^[^a-zA-Z0-9]+', '', fname)
    # The previous version also stripped a single leading '.', but the
    # substitution above already removes every leading non-alphanumeric
    # character, so that branch was dead code and has been dropped.
    return fname
def main(args):
    """Command-line entry point: parse each log file given on the command
    line and write one JSON document per event to stdout.

    :param args: argument list (without the program name)
    :return: 1 on usage error, None otherwise
    """
    # BUG FIX: JSONSerializer was previously only imported under the
    # `if __name__ == '__main__'` guard, so calling main() from another
    # module raised NameError.  Importing here also keeps the
    # elasticsearch dependency optional for importers of this module.
    from elasticsearch.serializer import JSONSerializer
    parser = argparse.ArgumentParser(
        usage='%(prog)s [options] command\n\nVersion\n %(prog)s version '
        + str(__version__))
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + str(__version__))
    parser.add_argument('lognames', metavar='lognames', nargs='+',
                        help="log files to parse")
    options = parser.parse_args(args)
    if len(options.lognames) < 1:
        # Defensive only: argparse already rejects an empty list (nargs='+').
        sys.stderr.write("error: not enough arguments")
        parser.print_help()
        return 1
    serializer = JSONSerializer()
    for logname in options.lognames:
        with open(logname) as logfile:
            for event in generate_events(logfile, logname):
                # the elasticsearch serializer does have a
                # a dumps method, but we don't use it
                # because it turns off json.dumps' ensure_ascii
                # we want to enforce ascii because it's
                # not actually specified what encoding the
                # log file is in. We were also getting
                # invalid utf-8 sequences.
                sys.stdout.write(json.dumps(event, default=serializer.default))
                sys.stdout.write('\n')
if __name__ == '__main__':
    # Imported at module scope here (rather than at the top of the file) so
    # that JSONSerializer is bound before main() runs, while merely
    # importing this module does not require the elasticsearch package.
    from elasticsearch.serializer import JSONSerializer
    main(sys.argv[1:])
| bsd-2-clause |
sda2b/youtube-dl | youtube_dl/extractor/m6.py | 147 | 1952 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class M6IE(InfoExtractor):
    """Extractor for videos hosted on m6.fr."""
    IE_NAME = 'm6'
    _VALID_URL = r'http://(?:www\.)?m6\.fr/[^/]+/videos/(?P<id>\d+)-[^\.]+\.html'

    _TEST = {
        'url': 'http://www.m6.fr/emission-les_reines_du_shopping/videos/11323908-emeline_est_la_reine_du_shopping_sur_le_theme_ma_fete_d_8217_anniversaire.html',
        'md5': '242994a87de2c316891428e0176bcb77',
        'info_dict': {
            'id': '11323908',
            'ext': 'mp4',
            'title': 'Emeline est la Reine du Shopping sur le thème « Ma fête d’anniversaire ! »',
            'description': 'md5:1212ae8fb4b7baa4dc3886c5676007c2',
            'duration': 100,
        }
    }

    def _real_extract(self, url):
        # _match_id() applies _VALID_URL and returns its 'id' group,
        # replacing the manual re.match boilerplate.
        video_id = self._match_id(url)

        # The RSS feed lists video metadata plus one URL per quality level.
        rss = self._download_xml('http://ws.m6.fr/v1/video/info/m6/bonus/%s' % video_id, video_id,
                                 'Downloading video RSS')

        title = rss.find('./channel/item/title').text
        description = rss.find('./channel/item/description').text
        thumbnail = rss.find('./channel/item/visuel_clip_big').text
        duration = int(rss.find('./channel/item/duration').text)
        view_count = int(rss.find('./channel/item/nombre_vues').text)

        # Collect one format per advertised quality; absent qualities are
        # simply skipped.  Order is lowest to highest quality.
        formats = []
        for format_id in ['lq', 'sd', 'hq', 'hd']:
            video_url = rss.find('./channel/item/url_video_%s' % format_id)
            if video_url is None:
                continue
            formats.append({
                'url': video_url.text,
                'format_id': format_id,
            })

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }
| unlicense |
pombredanne/dGit | setup.py | 1 | 1762 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Application metadata used by the helpers and setup() below.
APP_NAME = 'legit'
APP_SCRIPT = './legit_r'
VERSION = '0.1.0'

# Grab requirements from reqs.txt (one dependency per line).
with open('reqs.txt') as f:
    required = f.readlines()

# Keyword arguments accumulated for the setup() call at the bottom.
settings = dict()

# Publish Helper: `python setup.py publish` uploads an sdist to PyPI.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

# Build Helper: `python setup.py build` produces a single-file Windows
# executable via py2exe.  NOTE: this whole file targets Python 2 (see the
# print statement below and the classifiers).
if sys.argv[-1] == 'build':
    try:
        import py2exe
    except ImportError:
        print 'py2exe is required to continue.'
        sys.exit(1)

    sys.argv.append('py2exe')

    settings.update(
        console=[{'script': APP_SCRIPT}],
        zipfile = None,
        options = {
            'py2exe': {
                'compressed': 1,
                'optimize': 0,
                'bundle_files': 1}})

settings.update(
    name=APP_NAME,
    version=VERSION,
    description='Sexy Git CLI, Inspired by GitHub for Mac.',
    long_description=open('README.rst').read(),
    author='Kenneth Reitz',
    author_email='me@kennethreitz.com',
    url='https://github.com/kennethreitz/legit',
    packages= ['legit',],
    install_requires=required,
    license='BSD',
    classifiers=(
        # 'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        # 'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ),
    entry_points={
        'console_scripts': [
            'legit = legit.cli:main',
        ],
    }
)

setup(**settings)
| bsd-3-clause |
kamcpp/tensorflow | tensorflow/python/framework/registry.py | 18 | 2935 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registry mechanism for "registering" classes/functions for general use.
This is typically used with a decorator that calls Register for adding
a class or function to a registry.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# Registry mechanism below is based on mapreduce.python.mrpython.Register.
_LOCATION_TAG = "location"  # key for the registration call site (a traceback frame)
_TYPE_TAG = "type"  # key for the registered object itself


class Registry(object):
  """Provides a registry for saving objects.

  Each entry maps a string name to the registered object plus the stack
  frame where the registration happened (used for duplicate-registration
  error messages).
  """

  def __init__(self, name):
    """Creates a new registry."""
    self._name = name  # human-readable registry name, used in messages
    self._registry = dict()  # name -> {_TYPE_TAG: obj, _LOCATION_TAG: frame}

  def register(self, candidate, name=None):
    """Registers a Python object "candidate" for the given "name".

    Args:
      candidate: The candidate object to add to the registry.
      name: An optional string specifying the registry key for the candidate.
            If None, candidate.__name__ will be used.
    Raises:
      KeyError: If same name is used twice.
    """
    if not name:
      name = candidate.__name__
    if name in self._registry:
      # Report where the first registration came from to ease debugging.
      (filename, line_number, function_name, _) = (
          self._registry[name][_LOCATION_TAG])
      raise KeyError("Registering two %s with name '%s' !"
                     "(Previous registration was in %s %s:%d)" %
                     (self._name, name, function_name, filename, line_number))
    logging.vlog(1, "Registering %s (%s) in %s.", name, candidate, self._name)
    # stack trace is [this_function, Register(), user_function,...]
    # so the user function is #2.
    stack = traceback.extract_stack()
    self._registry[name] = {_TYPE_TAG: candidate, _LOCATION_TAG: stack[2]}

  def lookup(self, name):
    """Looks up "name".

    Args:
      name: a string specifying the registry key for the candidate.
    Returns:
      Registered object if found
    Raises:
      LookupError: if "name" has not been registered.
    """
    # Accept bytes or unicode; normalize to str for the dict lookup.
    name = compat.as_str(name)
    if name in self._registry:
      return self._registry[name][_TYPE_TAG]
    else:
      raise LookupError(
          "%s registry has no entry for: %s" % (self._name, name))
| apache-2.0 |
luofei98/qgis | python/plugins/processing/algs/lidar/lastools/lascolor.py | 2 | 2285 | # -*- coding: utf-8 -*-
"""
***************************************************************************
lasclip.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
---------------------
Date : March 2014
Copyright : (C) 2014 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterRaster
class lascolor(LAStoolsAlgorithm):
    """Processing algorithm wrapping the LAStools 'lascolor' executable,
    which colors LAS/LAZ points from an ortho-photo raster."""

    ORTHO = "ORTHO"  # parameter key for the input ortho-photo

    def defineCharacteristics(self):
        """Declare the algorithm's name, group and parameters."""
        self.name = "lascolor"
        self.group = "LAStools"
        self.addParametersVerboseGUI()
        self.addParametersPointInputGUI()
        self.addParameter(ParameterRaster(lascolor.ORTHO, "Input ortho"))
        self.addParametersPointOutputGUI()

    def processAlgorithm(self, progress):
        """Assemble the lascolor command line and run it."""
        commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lascolor.exe")]
        self.addParametersVerboseCommands(commands)
        self.addParametersPointInputCommands(commands)
        ortho = self.getParameterValue(lascolor.ORTHO)
        # `is not None` instead of `!= None` (PEP 8); the code treats None
        # as "no ortho supplied".
        if ortho is not None:
            commands.append("-image")
            commands.append(ortho)
        self.addParametersPointOutputCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
| gpl-2.0 |
lovelysystems/pyjamas | examples/mail/TopPanel.py | 7 | 2027 | from pyjamas import Window
from pyjamas.ui.Composite import Composite
from pyjamas.ui.HTML import HTML
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.Widget import Widget
from pyjamas.ui import HasAlignment
from AboutDialog import AboutDialog
from Logger import Logger
class TopPanel(Composite):
    """Top banner of the mail example: welcome text plus 'Sign Out' and
    'About' links."""

    def __init__(self):
        Composite.__init__(self)
        # Links are rendered as HTML anchors; clicks are handled below.
        self.signOutLink = HTML("<a href='javascript:;'>Sign Out</a>")
        self.aboutLink = HTML("<a href='javascript:;'>About</a>")

        outer = HorizontalPanel()
        inner = VerticalPanel()

        outer.setHorizontalAlignment(HasAlignment.ALIGN_RIGHT)
        inner.setHorizontalAlignment(HasAlignment.ALIGN_RIGHT)

        links = HorizontalPanel()
        links.setSpacing(4)
        links.add(self.signOutLink)
        links.add(self.aboutLink)

        outer.add(inner)
        inner.add(HTML("<b>Welcome back, foo@example.com</b>"))
        inner.add(links)

        self.signOutLink.addClickListener(self)
        self.aboutLink.addClickListener(self)

        self.initWidget(outer)
        inner.setStyleName("mail-TopPanel")
        links.setStyleName("mail-TopPanelLinks")

    def onClick(self, sender):
        """Dispatch clicks from either link."""
        if (sender == self.signOutLink):
            Window.alert("If this were implemented, you would be signed out now.")
        elif (sender == self.aboutLink):
            # When the 'About' item is selected, show the AboutDialog.
            # Note that showing a dialog box does not block -- execution continues
            # normally, and the dialog fires an event when it is closed.
            dlg = AboutDialog()
            # Position it roughly in the middle of the screen.
            # NOTE(review): '/' here is integer division under Python 2
            # (which pyjamas targets) — confirm that is intended.
            left = (Window.getClientWidth() - 512) / 2
            top = (Window.getClientHeight() - 256) / 2
            Logger("TopPanel", "left: %d" % left)
            Logger("TopPanel", "top: %d" % top)
            dlg.setPopupPosition(left, top)
            dlg.show()
| apache-2.0 |
Facetracker-project/facetracker-core | lib/youtube-dl/youtube_dl/extractor/kankan.py | 124 | 1738 | from __future__ import unicode_literals
import re
import hashlib
from .common import InfoExtractor
def _md5(s):
    """Hex MD5 digest of a text string (a `def` instead of the previous
    lambda assignment, per PEP 8 E731); used to sign the stream URL."""
    return hashlib.md5(s.encode('utf-8')).hexdigest()


class KankanIE(InfoExtractor):
    """Extractor for videos hosted on kankan.com."""
    _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'

    _TEST = {
        'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
        'md5': '29aca1e47ae68fc28804aca89f29507e',
        'info_dict': {
            'id': '48863',
            'ext': 'flv',
            'title': 'Ready To Go',
        },
        'skip': 'Only available from China',
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
        # The page embeds stream URLs either in a `surls:[...]` array or a
        # single `lurl:'...flv'` entry.
        surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
        gcids = re.findall(r"http://.+?/.+?/(.+?)/", surls)
        # Use the last listed gcid, mirroring the original selection logic.
        gcid = gcids[-1]

        info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
        video_info_page = self._download_webpage(
            info_url, video_id, 'Downloading video url info')
        ip = self._search_regex(r'ip:"(.+?)"', video_info_page, 'video url ip')
        path = self._search_regex(r'path:"(.+?)"', video_info_page, 'video url path')
        param1 = self._search_regex(r'param1:(\d+)', video_info_page, 'param1')
        param2 = self._search_regex(r'param2:(\d+)', video_info_page, 'param2')
        # The CDN expects an md5 signature over a fixed salt and the two
        # parameters returned above.
        key = _md5('xl_mp43651' + param1 + param2)
        video_url = 'http://%s%s?key=%s&key1=%s' % (ip, path, key, param2)

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
        }
| gpl-2.0 |
pombredanne/readthedocs.org | readthedocs/restapi/views/integrations.py | 1 | 11162 | """Endpoints integrating with Github, Bitbucket, and other webhooks."""
from __future__ import absolute_import
import json
import logging
from builtins import object
from rest_framework import permissions
from rest_framework.views import APIView
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.exceptions import ParseError, NotFound
from django.shortcuts import get_object_or_404
from readthedocs.core.views.hooks import build_branches
from readthedocs.core.signals import (webhook_github, webhook_bitbucket,
webhook_gitlab)
from readthedocs.integrations.models import HttpExchange, Integration
from readthedocs.integrations.utils import normalize_request_payload
from readthedocs.projects.models import Project
import six
log = logging.getLogger(__name__)
GITHUB_PUSH = 'push'
GITLAB_PUSH = 'push'
BITBUCKET_PUSH = 'repo:push'
class WebhookMixin(object):
    """Base class for webhook consumers: resolves the project, dispatches
    to :py:meth:`handle_webhook` and records the HTTP exchange."""

    permission_classes = (permissions.AllowAny,)
    renderer_classes = (JSONRenderer,)
    # Set by WebhookView when dispatching to a known integration; otherwise
    # an integration is looked up (or created) from `integration_type`.
    integration = None
    integration_type = None

    def post(self, request, project_slug):
        """Set up webhook post view with request and project objects"""
        self.request = request
        self.project = None
        try:
            self.project = self.get_project(slug=project_slug)
        except Project.DoesNotExist:
            raise NotFound('Project not found')
        self.data = self.get_data()
        resp = self.handle_webhook()
        if resp is None:
            log.info('Unhandled webhook event')
            resp = {'detail': 'Unhandled webhook event'}
        return Response(resp)

    def get_project(self, **kwargs):
        """Hook point: subclasses may restrict which projects are visible."""
        return Project.objects.get(**kwargs)

    def finalize_response(self, req, *args, **kwargs):
        """If the project was set on POST, store an HTTP exchange"""
        resp = super(WebhookMixin, self).finalize_response(req, *args, **kwargs)
        if hasattr(self, 'project') and self.project:
            HttpExchange.objects.from_exchange(
                req,
                resp,
                related_object=self.get_integration(),
                payload=self.data,
            )
        return resp

    def get_data(self):
        """Normalize posted data"""
        return normalize_request_payload(self.request)

    def handle_webhook(self):
        """Handle webhook payload; return a response dict or None."""
        raise NotImplementedError

    def get_integration(self):
        """Get or create an inbound webhook to track webhook requests

        We shouldn't need this, but to support legacy webhooks, we can't assume
        that a webhook has ever been created on our side. Most providers don't
        pass the webhook ID in either, so we default to just finding *any*
        integration from the provider. This is not ideal, but the
        :py:class:`WebhookView` view solves this by performing a lookup on the
        integration instead of guessing.
        """
        # `integration` can be passed in as an argument to `as_view`, as it is
        # in `WebhookView`
        if self.integration is not None:
            return self.integration
        integration, _ = Integration.objects.get_or_create(
            project=self.project,
            integration_type=self.integration_type,
        )
        return integration

    def get_response_push(self, project, branches):
        """Build branches on push events and return API response

        Return a JSON response with the following::

            {
                "build_triggered": true,
                "project": "project_name",
                "versions": [...]
            }

        :param project: Project instance
        :type project: Project
        :param branches: List of branch names to build
        :type branches: list(str)
        """
        to_build, not_building = build_branches(project, branches)
        if not_building:
            log.info('Skipping project branches: project=%s branches=%s',
                     project, branches)
        # `bool()` instead of the redundant `True if ... else False`.
        triggered = bool(to_build)
        return {'build_triggered': triggered,
                'project': project.slug,
                'versions': list(to_build)}
class GitHubWebhookView(WebhookMixin, APIView):

    """Webhook consumer for GitHub

    Accepts webhook events from GitHub, 'push' events trigger builds. Expects the
    webhook event type will be included in HTTP header ``X-GitHub-Event``, and
    we will have a JSON payload.

    Expects the following JSON::

        {
            "ref": "branch-name",
            ...
        }
    """

    integration_type = Integration.GITHUB_WEBHOOK

    def get_data(self):
        # GitHub can deliver either a JSON body or a form-encoded body
        # whose 'payload' field holds the JSON document; decode the latter.
        if self.request.content_type == 'application/x-www-form-urlencoded':
            try:
                return json.loads(self.request.data['payload'])
            except (ValueError, KeyError):
                pass
        return super(GitHubWebhookView, self).get_data()

    def handle_webhook(self):
        # Get event and trigger other webhook events
        event = self.request.META.get('HTTP_X_GITHUB_EVENT', 'push')
        webhook_github.send(Project, project=self.project,
                            data=self.data, event=event)
        # Handle push events and trigger builds
        if event == GITHUB_PUSH:
            try:
                # 'refs/heads/<branch>' -> '<branch>'
                branches = [self.data['ref'].replace('refs/heads/', '')]
                return self.get_response_push(self.project, branches)
            except KeyError:
                raise ParseError('Parameter "ref" is required')


class GitLabWebhookView(WebhookMixin, APIView):

    """Webhook consumer for GitLab

    Accepts webhook events from GitLab, 'push' events trigger builds.

    Expects the following JSON::

        {
            "object_kind": "push",
            "ref": "branch-name",
            ...
        }
    """

    integration_type = Integration.GITLAB_WEBHOOK

    def handle_webhook(self):
        # Get event and trigger other webhook events
        event = self.request.data.get('object_kind', GITLAB_PUSH)
        webhook_gitlab.send(Project, project=self.project,
                            data=self.request.data, event=event)
        # Handle push events and trigger builds
        if event == GITLAB_PUSH:
            try:
                # 'refs/heads/<branch>' -> '<branch>'
                branches = [self.request.data['ref'].replace('refs/heads/', '')]
                return self.get_response_push(self.project, branches)
            except KeyError:
                raise ParseError('Parameter "ref" is required')


class BitbucketWebhookView(WebhookMixin, APIView):

    """Webhook consumer for Bitbucket

    Accepts webhook events from Bitbucket, 'repo:push' events trigger builds.

    Expects the following JSON::

        {
            "push": {
                "changes": [{
                    "new": {
                        "name": "branch-name",
                        ...
                    },
                    ...
                }],
                ...
            },
            ...
        }
    """

    integration_type = Integration.BITBUCKET_WEBHOOK

    def handle_webhook(self):
        # Get event and trigger other webhook events
        event = self.request.META.get('HTTP_X_EVENT_KEY', BITBUCKET_PUSH)
        webhook_bitbucket.send(Project, project=self.project,
                               data=self.request.data, event=event)
        # Handle push events and trigger builds
        if event == BITBUCKET_PUSH:
            try:
                # One push may carry several changesets; build every branch
                # named in the payload.
                changes = self.request.data['push']['changes']
                branches = [change['new']['name']
                            for change in changes]
                return self.get_response_push(self.project, branches)
            except KeyError:
                raise ParseError('Invalid request')
class IsAuthenticatedOrHasToken(permissions.IsAuthenticated):

    """Allow authenticated users and requests with token auth through

    This does not check for instance-level permissions, as the check uses
    methods from the view to determine if the token matches.
    """

    def has_permission(self, request, view):
        has_perm = (super(IsAuthenticatedOrHasToken, self)
                    .has_permission(request, view))
        # The token itself is validated later, in the view's get_project().
        return has_perm or 'token' in request.data


class APIWebhookView(WebhookMixin, APIView):

    """API webhook consumer

    Expects the following JSON::

        {
            "branches": ["master"]
        }
    """

    integration_type = Integration.API_WEBHOOK
    permission_classes = [IsAuthenticatedOrHasToken]

    def get_project(self, **kwargs):
        """Get authenticated user projects, or token authed projects

        Allow for a user to either be authed to receive a project, or require
        the integration token to be specified as a POST argument.
        """
        # If the user is not an admin of the project, fall back to token auth
        # NOTE(review): `is_authenticated()` is callable here (Django < 1.10
        # style); it became a property in later Django versions.
        if self.request.user.is_authenticated():
            try:
                return (Project.objects
                        .for_admin_user(self.request.user)
                        .get(**kwargs))
            except Project.DoesNotExist:
                pass
        # Recheck project and integration relationship during token auth check
        token = self.request.data.get('token')
        if token:
            integration = self.get_integration()
            obj = Project.objects.get(**kwargs)
            is_valid = (
                integration.project == obj and
                token == getattr(integration, 'token', None)
            )
            if is_valid:
                return obj
        raise Project.DoesNotExist()

    def handle_webhook(self):
        try:
            # Default to the project's configured branch when none is given;
            # accept either a single branch name or a list of names.
            branches = self.request.data.get(
                'branches',
                [self.project.get_default_branch()]
            )
            if isinstance(branches, six.string_types):
                branches = [branches]
            return self.get_response_push(self.project, branches)
        except TypeError:
            raise ParseError('Invalid request')


class WebhookView(APIView):

    """This is the main webhook view for webhooks with an ID

    The handling of each view is handed off to another view. This should only
    ever get webhook requests for established webhooks on our side. The other
    views can receive webhooks for unknown webhooks, as all legacy webhooks will
    be.
    """

    # Maps an integration type to the view class that consumes its payloads.
    VIEW_MAP = {
        Integration.GITHUB_WEBHOOK: GitHubWebhookView,
        Integration.GITLAB_WEBHOOK: GitLabWebhookView,
        Integration.BITBUCKET_WEBHOOK: BitbucketWebhookView,
        Integration.API_WEBHOOK: APIWebhookView,
    }

    def post(self, request, project_slug, integration_pk):
        """Set up webhook post view with request and project objects"""
        integration = get_object_or_404(
            Integration,
            project__slug=project_slug,
            pk=integration_pk,
        )
        view_cls = self.VIEW_MAP[integration.integration_type]
        # Pass the resolved integration down so the delegate view does not
        # have to guess which one the request belongs to.
        view = view_cls.as_view(integration=integration)
        return view(request, project_slug)
| mit |
NeovaHealth/odoo | addons/stock/wizard/stock_transfer_details.py | 52 | 8586 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from openerp.exceptions import Warning
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from datetime import datetime
class stock_transfer_details(models.TransientModel):
    """Transfer wizard: lets the user review and adjust the pack operations
    of a single picking before executing the transfer.
    """
    _name = 'stock.transfer_details'
    _description = 'Picking wizard'

    picking_id = fields.Many2one('stock.picking', 'Picking')
    # Operations with a product set are shown as "items"; operations that
    # only move a package (no product) are shown as "packs".
    item_ids = fields.One2many('stock.transfer_details_items', 'transfer_id', 'Items', domain=[('product_id', '!=', False)])
    packop_ids = fields.One2many('stock.transfer_details_items', 'transfer_id', 'Packs', domain=[('product_id', '=', False)])
    picking_source_location_id = fields.Many2one('stock.location', string="Head source location", related='picking_id.location_id', store=False, readonly=True)
    picking_destination_location_id = fields.Many2one('stock.location', string="Head destination location", related='picking_id.location_dest_id', store=False, readonly=True)

    def default_get(self, cr, uid, fields, context=None):
        """Prefill the wizard lines from the pack operations of the active
        picking, preparing the operations first if none exist yet.

        Kept in the old-style (cr, uid) signature: the framework still calls
        ``default_get`` that way at this version.
        """
        if context is None: context = {}
        res = super(stock_transfer_details, self).default_get(cr, uid, fields, context=context)
        picking_ids = context.get('active_ids', [])
        active_model = context.get('active_model')

        if not picking_ids or len(picking_ids) != 1:
            # Partial Picking Processing may only be done for one picking at a time
            return res

        # FIX: the original check was ``active_model in ('stock.picking')``.
        # ('stock.picking') is just a parenthesized string, so that performed a
        # substring test (e.g. active_model == 'stock' would have passed).
        # Compare for equality instead.
        assert active_model == 'stock.picking', 'Bad context propagation'
        picking_id, = picking_ids
        picking = self.pool.get('stock.picking').browse(cr, uid, picking_id, context=context)
        items = []
        packs = []
        if not picking.pack_operation_ids:
            picking.do_prepare_partial()
        for op in picking.pack_operation_ids:
            item = {
                'packop_id': op.id,
                'product_id': op.product_id.id,
                'product_uom_id': op.product_uom_id.id,
                'quantity': op.product_qty,
                'package_id': op.package_id.id,
                'lot_id': op.lot_id.id,
                'sourceloc_id': op.location_id.id,
                'destinationloc_id': op.location_dest_id.id,
                'result_package_id': op.result_package_id.id,
                'date': op.date,
                'owner_id': op.owner_id.id,
            }
            if op.product_id:
                items.append(item)
            elif op.package_id:
                packs.append(item)
        res.update(item_ids=items)
        res.update(packop_ids=packs)
        return res

    @api.one
    def do_detailed_transfer(self):
        """Write the wizard lines back onto pack operations, remove the
        operations the user deleted in the wizard, then run the transfer.

        :raises Warning: if the picking is not in a transferable state.
        """
        if self.picking_id.state not in ['assigned', 'partially_available']:
            raise Warning(_('You cannot transfer a picking in state \'%s\'.') % self.picking_id.state)

        processed_ids = []
        # Create new and update existing pack operations
        for lstits in [self.item_ids, self.packop_ids]:
            for prod in lstits:
                pack_datas = {
                    'product_id': prod.product_id.id,
                    'product_uom_id': prod.product_uom_id.id,
                    'product_qty': prod.quantity,
                    'package_id': prod.package_id.id,
                    'lot_id': prod.lot_id.id,
                    'location_id': prod.sourceloc_id.id,
                    'location_dest_id': prod.destinationloc_id.id,
                    'result_package_id': prod.result_package_id.id,
                    'date': prod.date if prod.date else datetime.now(),
                    'owner_id': prod.owner_id.id,
                }
                if prod.packop_id:
                    # no_recompute: avoid triggering recomputations per line
                    prod.packop_id.with_context(no_recompute=True).write(pack_datas)
                    processed_ids.append(prod.packop_id.id)
                else:
                    pack_datas['picking_id'] = self.picking_id.id
                    packop_id = self.env['stock.pack.operation'].create(pack_datas)
                    processed_ids.append(packop_id.id)

        # Delete the others
        packops = self.env['stock.pack.operation'].search(['&', ('picking_id', '=', self.picking_id.id), '!', ('id', 'in', processed_ids)])
        packops.unlink()

        # Execute the transfer of the picking
        self.picking_id.do_transfer()
        return True

    @api.multi
    def wizard_view(self):
        """Return the act_window dict that re-opens this wizard's form view."""
        view = self.env.ref('stock.view_stock_enter_transfer_details')

        return {
            'name': _('Enter transfer details'),
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'stock.transfer_details',
            'views': [(view.id, 'form')],
            'view_id': view.id,
            'target': 'new',
            'res_id': self.ids[0],
            'context': self.env.context,
        }
class stock_transfer_details_items(models.TransientModel):
    """A single line of the transfer wizard: either a product quantity to
    move, or a whole package to move (``product_id`` unset).
    """
    _name = 'stock.transfer_details_items'
    _description = 'Picking wizard items'

    transfer_id = fields.Many2one('stock.transfer_details', 'Transfer')
    packop_id = fields.Many2one('stock.pack.operation', 'Operation')
    product_id = fields.Many2one('product.product', 'Product')
    product_uom_id = fields.Many2one('product.uom', 'Product Unit of Measure')
    quantity = fields.Float('Quantity', digits=dp.get_precision('Product Unit of Measure'), default=1.0)
    package_id = fields.Many2one('stock.quant.package', 'Source package', domain="['|', ('location_id', 'child_of', sourceloc_id), ('location_id','=',False)]")
    lot_id = fields.Many2one('stock.production.lot', 'Lot/Serial Number')
    sourceloc_id = fields.Many2one('stock.location', 'Source Location', required=True)
    destinationloc_id = fields.Many2one('stock.location', 'Destination Location', required=True)
    result_package_id = fields.Many2one('stock.quant.package', 'Destination package', domain="['|', ('location_id', 'child_of', destinationloc_id), ('location_id','=',False)]")
    date = fields.Datetime('Date')
    owner_id = fields.Many2one('res.partner', 'Owner', help="Owner of the quants")

    @api.multi
    def split_quantities(self):
        """Split each line with quantity > 1 into (qty - 1) plus a new line
        of quantity 1, then re-open the wizard."""
        for det in self:
            if det.quantity > 1:
                det.quantity = (det.quantity - 1)
                new_id = det.copy(context=self.env.context)
                new_id.quantity = 1
                # The copy must get its own pack operation on transfer.
                new_id.packop_id = False
        if self and self[0]:
            return self[0].transfer_id.wizard_view()

    @api.multi
    def put_in_pack(self):
        """Assign every selected line that has no destination package to one
        newly created package, then re-open the wizard."""
        newpack = None
        for packop in self:
            if not packop.result_package_id:
                if not newpack:
                    # FIX: use the new-style environment API, consistent with
                    # the rest of this wizard, instead of the old
                    # pool/cr/uid call that was used here.
                    newpack = self.env['stock.quant.package'].create(
                        {'location_id': packop.destinationloc_id.id if packop.destinationloc_id else False})
                packop.result_package_id = newpack
        if self and self[0]:
            return self[0].transfer_id.wizard_view()

    @api.multi
    def product_id_change(self, product, uom=False):
        """Onchange helper: default the UoM from the selected product."""
        result = {}
        if product:
            prod = self.env['product.product'].browse(product)
            result['product_uom_id'] = prod.uom_id and prod.uom_id.id
        return {'value': result, 'domain': {}, 'warning': {}}

    @api.multi
    def source_package_change(self, sourcepackage):
        """Onchange helper: default the source location from the package."""
        result = {}
        if sourcepackage:
            pack = self.env['stock.quant.package'].browse(sourcepackage)
            result['sourceloc_id'] = pack.location_id and pack.location_id.id
        return {'value': result, 'domain': {}, 'warning': {}}
| agpl-3.0 |
kdheepak89/pelican-plugins | clean_summary/clean_summary.py | 58 | 1768 | """
Clean Summary
-------------
adds option to specify maximum number of images to appear in article summary
also adds option to include an image by default if one exists in your article
"""
from pelican import signals
from pelican.contents import Content, Article
from pelican.generators import ArticlesGenerator
from bs4 import BeautifulSoup
from six import text_type
def init(pelican):
    """Cache the plugin settings from the Pelican configuration."""
    global maximum_images
    global minimum_one
    settings = pelican.settings
    maximum_images = settings.get('CLEAN_SUMMARY_MAXIMUM', 0)
    minimum_one = settings.get('CLEAN_SUMMARY_MINIMUM_ONE', False)
def clean_summary(instance):
    """Trim the article summary to at most ``maximum_images`` images and,
    when CLEAN_SUMMARY_MINIMUM_ONE is set, pull the first image of the
    article body into an image-less summary.
    """
    # FIX: isinstance() also covers Article subclasses; the original
    # ``type(instance) == Article`` comparison silently skipped them.
    if not isinstance(instance, Article):
        return
    summary = BeautifulSoup(instance.summary, 'html.parser')
    images = summary.findAll('img')
    if (len(images) > maximum_images):
        for image in images[maximum_images:]:
            image.extract()
    if len(images) < 1 and minimum_one:  # try to find one
        content = BeautifulSoup(instance.content, 'html.parser')
        first_image = content.find('img')
        if first_image:
            summary.insert(0, first_image)
    instance._summary = text_type(summary)
def run_plugin(generators):
    """Apply clean_summary to every article once all generators have run."""
    article_generators = (g for g in generators
                          if isinstance(g, ArticlesGenerator))
    for generator in article_generators:
        for article in generator.articles:
            clean_summary(article)
def register():
    """Hook the plugin into Pelican's signal pipeline."""
    signals.initialized.connect(init)
    finalized = getattr(signals, 'all_generators_finalized', None)
    if finalized is not None:
        finalized.connect(run_plugin)
    else:
        # Older Pelican without all_generators_finalized.
        # NOTE: This may result in #314 so shouldn't really be relied on
        # https://github.com/getpelican/pelican-plugins/issues/314
        signals.content_object_init.connect(clean_summary)
| agpl-3.0 |
tonk/ansible | test/integration/targets/vault/test-vault-client.py | 139 | 1818 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Standard Ansible module metadata: this test helper is community-supported
# and in preview status.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
import argparse
import sys
# TODO: could read these from the files I suppose...
# Static vault-id -> password fixtures; --vault-id selects the key and the
# special name 'ansible' is the default (see main()).
secrets = {'vault-password': 'test-vault-password',
           'vault-password-wrong': 'hunter42',
           'vault-password-ansible': 'ansible',
           'password': 'password',
           'vault-client-password-1': 'password-1',
           'vault-client-password-2': 'password-2'}
def build_arg_parser():
    """Return the argparse parser for this fake vault-password client."""
    parser = argparse.ArgumentParser(
        description='Get a vault password from user keyring')
    parser.add_argument('--vault-id', dest='vault_id', action='store',
                        default=None,
                        help='name of the vault secret to get from keyring')
    parser.add_argument('--username', action='store', default=None,
                        help='the username whose keyring is queried')
    parser.add_argument('--set', dest='set_password', action='store_true',
                        default=False,
                        help='set the password instead of getting it')
    return parser
def get_secret(keyname):
    """Look up *keyname* in the static secrets table; None when absent."""
    return secrets.get(keyname)
def main():
    """Print the secret for the requested vault-id.

    Exit codes: 0 on success, 1 for the unsupported --set flag, 2 when the
    vault-id is unknown.
    """
    opts = build_arg_parser().parse_args()

    keyname = opts.vault_id or 'ansible'
    if opts.set_password:
        print('--set is not supported yet')
        sys.exit(1)

    secret = get_secret(keyname)
    if secret is None:
        sys.stderr.write('test-vault-client could not find key for vault-id="%s"\n' % keyname)
        # key not found rc=2
        return 2

    sys.stdout.write('%s\n' % secret)
    return 0
# Script entry point: the process exit status comes from main()
# (0 ok, 1 unsupported option, 2 unknown vault-id).
if __name__ == '__main__':
    sys.exit(main())
| gpl-3.0 |
TwoLaid/PyHDB | pyhdb/protocol/headers.py | 9 | 4818 | # Copyright 2014, 2015 SAP SE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import collections
import struct
###
from pyhdb.protocol.constants import type_codes
# Lightweight records for the fixed-size headers of the HANA wire protocol.
# Field naming follows the Command Network Protocol Reference document
# referenced elsewhere in this module.
MessageHeader = collections.namedtuple(
    'MessageHeader', 'session_id, packet_count, payload_length, varpartsize, num_segments, packet_options')
# Header of a request segment (client -> server).
RequestSegmentHeader = collections.namedtuple(
    'RequestSegmentHeader',
    'segment_length, segment_offset, num_parts, segment_number, segment_kind, message_type, commit, command_options')
# Header of a reply segment (server -> client).
ReplySegmentHeader = collections.namedtuple(
    'ReplySegmentHeader',
    'segment_length, segment_offset, num_parts, segment_number, segment_kind, function_code')
# Header of one part within a segment.
PartHeader = collections.namedtuple(
    'PartHeader',
    'part_kind, part_attributes, argument_count, bigargument_count, payload_size, remaining_buffer_size')
class BaseLobheader(object):
    """Shared constants for read/write LOB headers."""
    # Wire values of the LOB type discriminator (first header byte).
    BLOB_TYPE = 1
    CLOB_TYPE = 2
    NCLOB_TYPE = 3
    # Map pyhdb column type codes onto the wire discriminators above.
    LOB_TYPES = {type_codes.BLOB: BLOB_TYPE, type_codes.CLOB: CLOB_TYPE, type_codes.NCLOB: NCLOB_TYPE}

    # Bit masks for LOB options (field 2 in header):
    LOB_OPTION_ISNULL = 0x01
    LOB_OPTION_DATAINCLUDED = 0x02
    LOB_OPTION_LASTDATA = 0x04

    # Human-readable flag names used by ReadLobHeader.__str__().
    OPTIONS_STR = {
        LOB_OPTION_ISNULL: 'isnull',
        LOB_OPTION_DATAINCLUDED: 'data_included',
        LOB_OPTION_LASTDATA: 'last_data'
    }
class WriteLobHeader(BaseLobheader):
    """Write-LOB header structure used when sending data to Hana.
    Total header size is 10 bytes.
    Note that the lob data does not come immediately after the lob header but AFTER all rowdata headers
    have been written to the part header!!!

    00: TYPECODE: I1
    01: OPTIONS: I1     Options that further refine the descriptor
    02: LENGTH: I4      Length of bytes of data that follows
    06: POSITION: I4    Position P of the lob data in the part (startinb at the beginning of the part)
    ...
    P: LOB data
    """
    # typecode(B), options(B), length(I), position(I) - little endian.
    header_struct = struct.Struct('<BBII')
class ReadLobHeader(BaseLobheader):
    """Read-LOB header parsed when receiving data from Hana.

    32 bytes in total (the Command Network Protocol Reference is incomplete
    here). Offsets:

    00: TYPE: I1         type of data
    01: OPTIONS: I1      option flags; if 'isnull' is set nothing else follows
    02: RESERVED: I2     ignored
    04: CHARLENGTH: I8   length of string (ascii and unicode)
    12: BYTELENGTH: I8   number of bytes of the LOB
    20: LOCATORID: B8    8-byte locator id for the LOB
    28: CHUNKLENGTH: I4  bytes of the LOB chunk in this result set
    32: LOB data when CHUNKLENGTH > 0
    """
    header_struct_part1 = struct.Struct('<BB')        # lob type + options
    header_struct_part2 = struct.Struct('<2sQQ8sI')   # rest, absent for NULL lobs

    def __init__(self, payload):
        """Parse a read-LOB header from *payload* (a file-like object)."""
        prefix = payload.read(self.header_struct_part1.size)
        self.lob_type, self.options = self.header_struct_part1.unpack(prefix)
        if self.isnull():
            return
        remainder = payload.read(self.header_struct_part2.size)
        (_reserved, self.char_length, self.byte_length,
         self.locator_id, self.chunk_length) = \
            self.header_struct_part2.unpack(remainder)
        # Binary lobs are measured in bytes, character lobs in characters.
        if self.lob_type == self.BLOB_TYPE:
            self.total_lob_length = self.byte_length
        else:
            self.total_lob_length = self.char_length

    def isnull(self):
        """True when the LOB value is SQL NULL (no further header fields)."""
        return (self.options & self.LOB_OPTION_ISNULL) != 0

    def __str__(self):
        """Return a string of properly formatted header values."""
        active = [name for flag, name in sorted(self.OPTIONS_STR.items())
                  if flag & self.options]
        value = 'type: %d, options %d (%s)' % (
            self.lob_type, self.options, ', '.join(active))
        if not self.isnull():
            value += ', charlength: %d, bytelength: %d, locator_id: %r, chunklength: %d' % \
                     (self.char_length, self.byte_length, self.locator_id, self.chunk_length)
        return '<ReadLobHeader %s>' % value
| apache-2.0 |
AnySDK/Sample_CPP_Cocos2dx | cocos2d/build/android-build.py | 13 | 10206 | #!/usr/bin/python
# android-build.py
# Build android
import sys
import os, os.path
import shutil
from optparse import OptionParser
# Test projects shipped with cocos2d-x; the 'cpp'/'lua' command line aliases
# expand to these lists and 'all' expands to both.
CPP_SAMPLES = ['cpp-empty-test', 'cpp-tests', 'game-controller-test']
LUA_SAMPLES = ['lua-empty-test', 'lua-tests', 'lua-game-controller-test']
ALL_SAMPLES = CPP_SAMPLES + LUA_SAMPLES
def get_num_of_cpu():
    ''' The build process can be accelerated by running multiple concurrent job processes using the -j-option.
    '''
    try:
        platform = sys.platform
        if platform == 'win32':
            if 'NUMBER_OF_PROCESSORS' in os.environ:
                return int(os.environ['NUMBER_OF_PROCESSORS'])
            else:
                return 1
        else:
            # POSIX: query the CPU count via numpy's cpuinfo helper.
            # NOTE(review): numpy.distutils is deprecated in modern numpy;
            # this only needs to work on the (Python 2 era) build machine.
            from numpy.distutils import cpuinfo
            return cpuinfo.cpu._getNCPUs()
    except Exception:
        # Any failure (missing numpy, exotic platform) falls back to one job.
        print "Can't know cpuinfo, use default 1 cpu"
        return 1
def check_environment_variables():
    ''' Checking the environment NDK_ROOT, which will be used for building
    '''
    try:
        NDK_ROOT = os.environ['NDK_ROOT']
    except Exception:
        # Nothing can be built without the NDK; abort immediately.
        print "NDK_ROOT not defined. Please define NDK_ROOT in your environment"
        sys.exit(1)
    return NDK_ROOT
def check_environment_variables_sdk():
    ''' Checking the environment ANDROID_SDK_ROOT, which will be used for building
    '''
    try:
        SDK_ROOT = os.environ['ANDROID_SDK_ROOT']
    except Exception:
        # The SDK is required for the 'android update' / ant apk steps.
        print "ANDROID_SDK_ROOT not defined. Please define ANDROID_SDK_ROOT in your environment"
        sys.exit(1)
    return SDK_ROOT
def select_toolchain_version():
    # Intentionally a no-op at this version: the toolchain is chosen by
    # ndk-build's own defaults.
    pass
def caculate_built_samples(args):
    ''' Compute the samples to be built.
    'cpp' is shorthand for all cpp tests, 'lua' for all lua tests and
    'all' for everything. Duplicates are removed, e.g.:
        python android-build.py cpp hellocpp
    '''
    if 'all' in args:
        return ALL_SAMPLES

    targets = []
    for alias, samples in (('cpp', CPP_SAMPLES), ('lua', LUA_SAMPLES)):
        if alias in args:
            targets.extend(samples)
            args.remove(alias)
    targets.extend(args)
    # Deduplicate while keeping a plain list as result.
    return list(set(targets))
def do_build(cocos_root, ndk_root, app_android_root, ndk_build_param,sdk_root,android_platform,build_mode):
    '''Run ndk-build for one test project and, when android_platform is
    given, update the Android projects and build the apk with ant.
    Raises Exception when ndk-build or the project update commands fail.
    '''
    ndk_path = os.path.join(ndk_root, "ndk-build")
    # windows should use ";" to seperate module paths
    platform = sys.platform
    if platform == 'win32':
        ndk_module_path = 'NDK_MODULE_PATH=%s;%s/external;%s/cocos' % (cocos_root, cocos_root, cocos_root)
    else:
        ndk_module_path = 'NDK_MODULE_PATH=%s:%s/external:%s/cocos' % (cocos_root, cocos_root, cocos_root)
    num_of_cpu = get_num_of_cpu()
    # NDK_DEBUG is 1 for debug builds: the boolean comparison renders as 0/1.
    if ndk_build_param == None:
        command = '%s -j%d -C %s NDK_DEBUG=%d %s' % (ndk_path, num_of_cpu, app_android_root, build_mode=='debug', ndk_module_path)
    else:
        command = '%s -j%d -C %s NDK_DEBUG=%d %s %s' % (ndk_path, num_of_cpu, app_android_root, build_mode=='debug', ndk_build_param, ndk_module_path)
    print command
    if os.system(command) != 0:
        raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
    elif android_platform is not None:
        # Point the cocos java library project and the app project at the
        # requested android platform, then package the apk with ant.
        sdk_tool_path = os.path.join(sdk_root, "tools/android")
        cocoslib_path = os.path.join(cocos_root, "cocos/platform/android/java")
        command = '%s update lib-project -t %s -p %s' % (sdk_tool_path,android_platform,cocoslib_path)
        if os.system(command) != 0:
            raise Exception("update cocos lib-project [ " + cocoslib_path + " ] fails!")
        command = '%s update project -t %s -p %s -s' % (sdk_tool_path,android_platform,app_android_root)
        if os.system(command) != 0:
            raise Exception("update project [ " + app_android_root + " ] fails!")
        buildfile_path = os.path.join(app_android_root, "build.xml")
        # The ant result is deliberately not checked: apk packaging is optional.
        command = 'ant clean %s -f %s -Dsdk.dir=%s' % (build_mode,buildfile_path,sdk_root)
        os.system(command)
def copy_files(src, dst):
    '''Recursively copy the contents of *src* into the existing directory
    *dst*, skipping hidden entries and ".gz" files.
    Android can not package files ending with ".gz", and hidden entries
    (".svn", ".git", ...) must not leak into the apk assets.
    '''
    for item in os.listdir(src):
        # FIX: the hidden-name filter previously applied only to files, so
        # hidden directories were still recursed into and copied.
        if item.startswith('.'):
            continue
        path = os.path.join(src, item)
        if os.path.isfile(path):
            # Android can not package the file that ends with ".gz"
            if not item.endswith('.gz'):
                shutil.copy(path, dst)
        elif os.path.isdir(path):
            new_dst = os.path.join(dst, item)
            os.mkdir(new_dst)
            copy_files(path, new_dst)
def copy_file(src_file, dst):
    '''Copy a single regular file into *dst*, skipping hidden names and
    ".gz" archives (Android cannot package those).'''
    name_is_packagable = (not src_file.startswith('.')
                          and not src_file.endswith('.gz'))
    if name_is_packagable and os.path.isfile(src_file):
        shutil.copy(src_file, dst)
def copy_resources(target, app_android_root):
    '''Populate <proj.android>/assets with the resources and scripts the
    given test target needs at runtime. The assets directory is rebuilt
    from scratch on every call.
    '''
    # remove app_android_root/assets if it exists
    assets_dir = os.path.join(app_android_root, "assets")
    if os.path.isdir(assets_dir):
        shutil.rmtree(assets_dir)
    os.mkdir(assets_dir)
    # copy resources(cpp samples)
    if target in CPP_SAMPLES:
        resources_dir = os.path.join(app_android_root, "../Resources")
        if os.path.isdir(resources_dir):
            copy_files(resources_dir, assets_dir)
    # lua samples should copy lua script
    if target in LUA_SAMPLES:
        resources_dir = os.path.join(app_android_root, "../../res")
        assets_res_dir = os.path.join(assets_dir, "res")
        os.mkdir(assets_res_dir)
        # lua-tests shares its res content with cpp-tests (handled below).
        if target != "lua-tests":
            copy_files(resources_dir, assets_res_dir)
        src_dir = os.path.join(app_android_root, "../../src")
        assets_src_dir = os.path.join(assets_dir, "src")
        os.mkdir(assets_src_dir)
        copy_files(src_dir, assets_src_dir)
        # Refresh the shared lua-bindings runtime scripts under src/cocos.
        common_script_dir = os.path.join(app_android_root, "../../../../cocos/scripting/lua-bindings/script/")
        cocos_src_dir = os.path.join(assets_src_dir,"cocos")
        if os.path.exists(cocos_src_dir):
            shutil.rmtree(cocos_src_dir)
        os.mkdir(cocos_src_dir)
        copy_files(common_script_dir, cocos_src_dir)
        # luasocket keeps its lua files next to C sources; cherry-pick *.lua.
        luasocket_script_dir = os.path.join(app_android_root, "../../../../external/lua/luasocket")
        for root, dirs, files in os.walk(luasocket_script_dir):
            for f in files:
                if os.path.splitext(f)[1] == '.lua':
                    fall = os.path.join(root, f)
                    shutil.copy(fall, assets_dir)
        # lua-tests shared resources with cpp-tests
        if target == "lua-tests":
            resources_cocosbuilder_res_dir = os.path.join(resources_dir, "cocosbuilderRes")
            assets_cocosbuilder_res_dir = os.path.join(assets_res_dir, "cocosbuilderRes")
            os.mkdir(assets_cocosbuilder_res_dir)
            copy_files(resources_cocosbuilder_res_dir, assets_cocosbuilder_res_dir)
            resources_dir = os.path.join(app_android_root, "../../../cpp-tests/Resources")
            copy_files(resources_dir, assets_res_dir)
        if target == "lua-game-controller-test":
            print("coming generator game controller")
            resources_dir = os.path.join(app_android_root, "../../../game-controller-test/Resources")
            copy_files(resources_dir, assets_res_dir)
def build_samples(target,ndk_build_param,android_platform,build_mode):
ndk_root = check_environment_variables()
sdk_root = None
select_toolchain_version()
build_targets = caculate_built_samples(target)
current_dir = os.path.dirname(os.path.realpath(__file__))
cocos_root = os.path.join(current_dir, "..")
if android_platform is not None:
sdk_root = check_environment_variables_sdk()
if android_platform.isdigit():
android_platform = 'android-'+android_platform
else:
print 'please use vaild android platform'
exit(1)
if build_mode is None:
build_mode = 'debug'
elif build_mode != 'release':
build_mode = 'debug'
app_android_root = ''
target_proj_path_map = {
"cpp-empty-test": "tests/cpp-empty-test/proj.android",
"game-controller-test": "tests/game-controller-test/proj.android",
"cpp-tests": "tests/cpp-tests/proj.android",
"lua-empty-test": "tests/lua-empty-test/project/proj.android",
"lua-tests": "tests/lua-tests/project/proj.android",
"lua-game-controller-test": "tests/lua-game-controller-test/project/proj.android"
}
for target in build_targets:
if target in target_proj_path_map:
app_android_root = os.path.join(cocos_root, target_proj_path_map[target])
else:
print 'unknown target: %s' % target
continue
copy_resources(target, app_android_root)
do_build(cocos_root, ndk_root, app_android_root, ndk_build_param,sdk_root,android_platform,build_mode)
# -------------- main --------------
if __name__ == '__main__':
#parse the params
usage = """
This script is mainy used for building tests built-in with cocos2d-x.
Usage: %prog [options] [cpp-empty-test|cpp-tests|lua-empty-test|lua-tests|cpp|lua|all]
If you are new to cocos2d-x, I recommend you start with cpp-empty-test, lua-empty-test.
You can combine these targets like this:
python android-build.py -p 10 cpp-empty-test lua-empty-test
Note: You should install ant to generate apk while building the andriod tests. But it is optional. You can generate apk with eclipse.
"""
parser = OptionParser(usage=usage)
parser.add_option("-n", "--ndk", dest="ndk_build_param",
help='Parameter for ndk-build')
parser.add_option("-p", "--platform", dest="android_platform",
help='Parameter for android-update. Without the parameter,the script just build dynamic library for the projects. Valid android-platform are:[10|11|12|13|14|15|16|17|18|19]')
parser.add_option("-b", "--build", dest="build_mode",
help='The build mode for java project,debug[default] or release. Get more information,please refer to http://developer.android.com/tools/building/building-cmdline.html')
(opts, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
else:
try:
build_samples(args, opts.ndk_build_param,opts.android_platform,opts.build_mode)
except Exception as e:
print e
sys.exit(1)
| mit |
marcoantoniooliveira/labweb | tests/integration/catalogue/reviews/form_tests.py | 13 | 1890 | from django.test import TestCase
from django_dynamic_fixture import G
from oscar.core.compat import get_user_model
from oscar.apps.catalogue.reviews import forms, models
from oscar.test.factories import create_product
User = get_user_model()
class TestReviewForm(TestCase):
    """Checks the cleaning behaviour of ProductReviewForm."""

    def setUp(self):
        self.product = create_product()
        self.reviewer = G(User)
        # Note the leading whitespace in the title: the form should strip it.
        self.data = dict(
            title=' This product is lovely',
            body='I really like this cheese',
            score=0,
            name='JR Hartley',
            email='hartley@example.com',
        )

    def test_cleans_title(self):
        form = forms.ProductReviewForm(product=self.product,
                                       user=self.reviewer,
                                       data=self.data)
        self.assertTrue(form.is_valid())
        saved_review = form.save()
        self.assertEqual("This product is lovely", saved_review.title)
class TestVoteForm(TestCase):
    """Checks the voting rules enforced by VoteForm."""

    def setUp(self):
        self.product = create_product()
        self.reviewer = G(User)
        self.voter = G(User)
        self.review = self.product.reviews.create(
            title='This is nice',
            score=3,
            body="This is the body",
            user=self.reviewer)

    def _upvote_form(self, user):
        # Helper: build an up-vote form on the fixture review for *user*.
        return forms.VoteForm(self.review, user, data={'delta': 1})

    def test_allows_real_users_to_vote(self):
        self.assertTrue(self._upvote_form(self.voter).is_valid())

    def test_prevents_users_from_voting_more_than_once(self):
        self.review.vote_up(self.voter)
        form = self._upvote_form(self.voter)
        self.assertFalse(form.is_valid())
        self.assertTrue(len(form.errors['__all__']) > 0)

    def test_prevents_users_voting_on_their_own_reviews(self):
        form = self._upvote_form(self.reviewer)
        self.assertFalse(form.is_valid())
        self.assertTrue(len(form.errors['__all__']) > 0)
| bsd-3-clause |
grlee77/nipype | nipype/external/cloghandler.py | 16 | 15087 | # Copyright 2008 Lowell Alleman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" cloghandler.py: A smart replacement for the standard RotatingFileHandler
ConcurrentRotatingFileHandler: This class is a log handler which is a drop-in
replacement for the python standard log handler 'RotateFileHandler', the primary
difference being that this handler will continue to write to the same file if
the file cannot be rotated for some reason, whereas the RotatingFileHandler will
strictly adhere to the maximum file size. Unfortunately, if you are using the
RotatingFileHandler on Windows, you will find that once an attempted rotation
fails, all subsequent log messages are dropped. The other major advantage of
this module is that multiple processes can safely write to a single log file.
To put it another way: This module's top priority is preserving your log
records, whereas the standard library attempts to limit disk usage, which can
potentially drop log messages. If you are trying to determine which module to
use, there are number of considerations: What is most important: strict disk
space usage or preservation of log messages? What OSes are you supporting? Can
you afford to have processes blocked by file locks?
Concurrent access is handled by using file locks, which should ensure that log
messages are not dropped or clobbered. This means that a file lock is acquired
and released for every log message that is written to disk. (On Windows, you may
also run into a temporary situation where the log file must be opened and closed
for each log message.) This can have potentially performance implications. In my
testing, performance was more than adequate, but if you need a high-volume or
low-latency solution, I suggest you look elsewhere.
This module currently only support the 'nt' and 'posix' platforms due to the
usage of the portalocker module. I do not have access to any other platforms
for testing, patches are welcome.
See the README file for an example usage of this module.
"""
__version__ = "$Id: cloghandler.py 6175 2009-11-02 18:40:35Z lowell $"
__author__ = "Lowell Alleman"
__all__ = [
"ConcurrentRotatingFileHandler",
]
import os
import sys
from random import randint
from logging import Handler
from logging.handlers import BaseRotatingHandler
try:
import codecs
except ImportError:
codecs = None
# Question/TODO: Should we have a fallback mode for when portalocker cannot be
# loaded? Even without file locking we would still be better off than with the
# standard RotatingFileHandler class, since we do some rename checking that
# should prevent some of the file clobbering the builtin class allows.
# sibling module that handles all the ugly platform-specific details of file locking
from .portalocker import lock, unlock, LOCK_EX, LOCK_NB, LockException
# A client can set this to true to automatically convert relative paths to
# absolute paths (which will also hide the absolute path warnings)
FORCE_ABSOLUTE_PATH = False
class ConcurrentRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file to the
next when the current file reaches a certain size. Multiple processes can
write to the log file concurrently, but this may mean that the file will
exceed the given size.
"""
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, debug=True, supress_abs_warn=False):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.

        On Windows, it is not possible to rename a file that is currently opened
        by another process. This means that it is not possible to rotate the
        log files if multiple processes is using the same log file. In this
        case, the current log file will continue to grow until the rotation can
        be completed successfully. In order for rotation to be possible, all of
        the other processes need to close the file first. A mechanism, called
        "degraded" mode, has been created for this scenario. In degraded mode,
        the log file is closed after each log message is written. So once all
        processes have entered degraded mode, the next rotate log attempt should
        be successful and then normal logging can be resumed.

        This log handler assumes that all concurrent processes logging to a
        single file will are using only this class, and that the exact same
        parameters are provided to each instance of this class. If, for
        example, two different processes are using this class, but with
        different values for 'maxBytes' or 'backupCount', then odd behavior is
        expected. The same is true if this class is used by one application, but
        the RotatingFileHandler is used by another.

        NOTE: You should always provide 'filename' as an absolute path, since
        this class will need to re-open the file during rotation. If your
        application call os.chdir() then subsequent log files could be created
        in the wrong directory.
        """
        # The question of absolute paths: I'm not sure what the 'right thing' is
        # to do here. RotatingFileHander simply ignores this possibility. I was
        # going call os.path.abspath(), but that potentially limits uses. For
        # example, on Linux (any posix system?) you can rename a directory of a
        # running app, and the app wouldn't notice as long as it only opens new
        # files using relative paths. But since that's not a "normal" thing to
        # do, and having an app call os.chdir() is a much more likely scenario
        # that should be supported. For the moment, we are just going to warn
        # the user if they provide a relative path and do some other voodoo
        # logic that you'll just have to review for yourself.

        # if the given filename contains no path, we make an absolute path
        if not os.path.isabs(filename):
            if FORCE_ABSOLUTE_PATH or \
               not os.path.split(filename)[0]:
                filename = os.path.abspath(filename)
            elif not supress_abs_warn:
                from warnings import warn
                warn("The given 'filename' should be an absolute path. If your "
                     "application calls os.chdir(), your logs may get messed up. "
                     "Use 'supress_abs_warn=True' to hide this message.")
        try:
            BaseRotatingHandler.__init__(self, filename, mode, encoding)
        except TypeError: # Due to a different logging release without encoding support (Python 2.4.1 and earlier?)
            BaseRotatingHandler.__init__(self, filename, mode)
            self.encoding = encoding

        # _rotateFailed tracks "degraded" mode (see the docstring above).
        self._rotateFailed = False
        self.maxBytes = maxBytes
        self.backupCount = backupCount
        # Prevent multiple extensions on the lock file (Only handles the normal "*.log" case.)
        if filename.endswith(".log"):
            lock_file = filename[:-4]
        else:
            lock_file = filename
        # The ".lock" sidecar file is the cross-process synchronization point.
        self.stream_lock = open(lock_file + ".lock", "w")

        # For debug mode, swap out the "_degrade()" method with a more a verbose one.
        if debug:
            self._degrade = self._degrade_debug
def _openFile(self, mode):
if self.encoding:
self.stream = codecs.open(self.baseFilename, mode, self.encoding)
else:
self.stream = open(self.baseFilename, mode)
    def acquire(self):
        """ Acquire thread and file locks. Also re-opening log file when running
        in 'degraded' mode. """
        # handle thread lock
        Handler.acquire(self)
        # Cross-process exclusion: block until we own the ".lock" file too.
        lock(self.stream_lock, LOCK_EX)
        if self.stream.closed:
            # Degraded mode closed the stream in release(); reopen it now
            # that we hold both locks.
            self._openFile(self.mode)
    def release(self):
        """ Release file and thread locks. Flush stream and take care of closing
        stream in 'degraded' mode. """
        try:
            if not self.stream.closed:
                self.stream.flush()
                # In degraded mode the file is closed after every record so
                # another process gets a chance to rotate it.
                if self._rotateFailed:
                    self.stream.close()
        except IOError:
            # Flush failed; still close in degraded mode so rotation can proceed.
            if self._rotateFailed:
                self.stream.close()
        finally:
            # Unlock the file even if flushing/closing blew up, and always
            # release the thread lock last (reverse of acquire() order).
            try:
                unlock(self.stream_lock)
            finally:
                # release thread lock
                Handler.release(self)
def close(self):
    """Flush and close the underlying stream, then run the base
    Handler teardown."""
    stream = self.stream
    if not stream.closed:
        stream.flush()
        stream.close()
    Handler.close(self)
def flush(self):
    """Intentionally a no-op.

    release() already flushes while the inter-process file lock is
    held.  Flushing here would require re-acquiring both locks per
    handle() call, and would open a window between stream.write() and
    stream.flush() in which another process could interleave writes
    into the log file.
    """
    pass
def _degrade(self, degrade, msg, *args):
    """Record whether rotation is currently degraded.

    *msg*/*args* are accepted for interface parity with
    _degrade_debug() but are not reported here.
    """
    del msg, args  # only the debug variant prints these
    self._rotateFailed = degrade
def _degrade_debug(self, degrade, msg, *args):
    """Verbose drop-in for _degrade(): announce state transitions.

    Installed in place of _degrade() when the handler is constructed
    with debug=True.  Only transitions are reported; repeated calls
    with the same state stay silent.
    """
    if degrade and not self._rotateFailed:
        sys.stderr.write("Degrade mode - ENTERING - (pid=%d) %s\n" %
                         (os.getpid(), msg % args))
        self._rotateFailed = True
    elif not degrade and self._rotateFailed:
        sys.stderr.write("Degrade mode - EXITING - (pid=%d) %s\n" %
                         (os.getpid(), msg % args))
        self._rotateFailed = False
def doRollover(self):
    """
    Do a rollover, as described in __init__().

    The current log is first renamed to a unique temp name (the only
    step that can fail if another process holds the file open, e.g. on
    Windows), then the numbered backups are shifted up and the temp
    file becomes ".1".  On rename failure the handler enters degraded
    mode instead of raising.
    """
    if self.backupCount <= 0:
        # Don't keep any backups, just overwrite the existing backup file
        # Locking doesn't much matter here; since we are overwriting it anyway
        self.stream.close()
        self._openFile("w")
        return
    self.stream.close()
    try:
        # Attempt to rename logfile to tempname: There is a slight race-condition here, but it seems unavoidable
        tmpname = None
        while not tmpname or os.path.exists(tmpname):
            tmpname = "%s.rotate.%08d" % (self.baseFilename, randint(0,99999999))
        try:
            # Do a rename test to determine if we can successfully rename the log file
            os.rename(self.baseFilename, tmpname)
        except (IOError, OSError):
            exc_value = sys.exc_info()[1]
            # enter degraded mode; release() will close the stream so the
            # next acquire() re-opens the (unrotated) file
            self._degrade(True, "rename failed. File in use? "
                          "exception=%s", exc_value)
            return
        # Q: Is there some way to protect this code from a KeboardInterupt?
        # This isn't necessarily a data loss issue, but it certainly would
        # break the rotation process during my stress testing.
        # There is currently no mechanism in place to handle the situation
        # where one of these log files cannot be renamed. (Example, user
        # opens "logfile.3" in notepad)
        # Shift backups: N-1 -> N, ..., 1 -> 2 (highest first).
        for i in range(self.backupCount - 1, 0, -1):
            sfn = "%s.%d" % (self.baseFilename, i)
            dfn = "%s.%d" % (self.baseFilename, i + 1)
            if os.path.exists(sfn):
                #print "%s -> %s" % (sfn, dfn)
                if os.path.exists(dfn):
                    os.remove(dfn)
                os.rename(sfn, dfn)
        dfn = self.baseFilename + ".1"
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(tmpname, dfn)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self._degrade(False, "Rotation completed")
    finally:
        # Always re-open a fresh log file, even after a failure above.
        self._openFile(self.mode)
def shouldRollover(self, record):
    """Return True if the log file needs rotating.

    Unlike the standard library's RotatingFileHandler, the pending
    record's length is not considered, so the file may briefly exceed
    maxBytes.
    """
    del record  # unused; kept for the Handler interface
    if not self._shouldRollover():
        return False
    # Another process may already have rotated (we could be looking at
    # what is now log.1); reopen the current file and check again.
    self.stream.close()
    self._openFile(self.mode)
    return self._shouldRollover()
def _shouldRollover(self):
    """Size check against self.maxBytes; maxBytes <= 0 disables rotation."""
    if self.maxBytes <= 0:
        return False
    try:
        # Seek to EOF first: a non-POSIX-compliant Windows quirk means
        # tell() is not reliable without it.
        self.stream.seek(0, 2)
    except IOError:
        return True
    if self.stream.tell() >= self.maxBytes:
        return True
    self._degrade(False, "Rotation done or not needed at this time")
    return False
# Publish this class to the "logging.handlers" module so that it can be used
# from a logging config file via logging.config.fileConfig().
import logging.handlers
logging.handlers.ConcurrentRotatingFileHandler = ConcurrentRotatingFileHandler
| bsd-3-clause |
mzl9039/spark | dev/create-release/generate-contributors.py | 69 | 11338 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script automates the process of creating release notes.
import os
import re
import sys
from releaseutils import *
# You must set the following before use!
# Defaults below are only examples; invalid tags are re-prompted for below.
JIRA_API_BASE = os.environ.get("JIRA_API_BASE", "https://issues.apache.org/jira")
RELEASE_TAG = os.environ.get("RELEASE_TAG", "v1.2.0-rc2")
PREVIOUS_RELEASE_TAG = os.environ.get("PREVIOUS_RELEASE_TAG", "v1.1.0")

# If the release tags are not provided, prompt the user to provide them
while not tag_exists(RELEASE_TAG):
    RELEASE_TAG = raw_input("Please provide a valid release tag: ")
while not tag_exists(PREVIOUS_RELEASE_TAG):
    print("Please specify the previous release tag.")
    PREVIOUS_RELEASE_TAG = raw_input(
        "For instance, if you are releasing v1.2.0, you should specify v1.1.0: ")

# Gather commits found in the new tag but not in the old tag.
# This filters commits based on both the git hash and the PR number.
# If either is present in the old tag, then we ignore the commit.
print("Gathering new commits between tags %s and %s" % (PREVIOUS_RELEASE_TAG, RELEASE_TAG))
release_commits = get_commits(RELEASE_TAG)
previous_release_commits = get_commits(PREVIOUS_RELEASE_TAG)

# Index the previous release by hash and by PR number for O(1) membership tests.
previous_release_hashes = set()
previous_release_prs = set()
for old_commit in previous_release_commits:
    previous_release_hashes.add(old_commit.get_hash())
    if old_commit.get_pr_number():
        previous_release_prs.add(old_commit.get_pr_number())

# Keep only commits absent (by hash and by PR) from the previous release.
new_commits = []
for this_commit in release_commits:
    this_hash = this_commit.get_hash()
    this_pr_number = this_commit.get_pr_number()
    if this_hash in previous_release_hashes:
        continue
    if this_pr_number and this_pr_number in previous_release_prs:
        continue
    new_commits.append(this_commit)
if not new_commits:
    sys.exit("There are no new commits between %s and %s!" % (PREVIOUS_RELEASE_TAG, RELEASE_TAG))

# Prompt the user for confirmation that the commit range is correct
print("\n==================================================================================")
print("JIRA server: %s" % JIRA_API_BASE)
print("Release tag: %s" % RELEASE_TAG)
print("Previous release tag: %s" % PREVIOUS_RELEASE_TAG)
print("Number of commits in this range: %s" % len(new_commits))
print
def print_indented(_list):
    """Print every element of *_list* on its own line, slightly indented."""
    for entry in _list:
        print(" %s" % entry)
# Optionally dump the full commit list, then give the user a chance to bail.
if yesOrNoPrompt("Show all commits?"):
    print_indented(new_commits)
print("==================================================================================\n")
if not yesOrNoPrompt("Does this look correct?"):
    sys.exit("Ok, exiting")

# Filter out special commits -- buckets filled by the classification loop below.
releases = []
maintenance = []
reverts = []
nojiras = []
filtered_commits = []
def is_release(commit_title):
    """Return a truthy value if the commit is release-machinery noise.

    Matches "[release]" tagged commits, the automated version-bump
    commits produced by the release scripts, and CHANGES.txt updates.
    """
    lowered = commit_title.lower()
    # Raw string for the regex: "\[" in a plain literal is an invalid
    # escape sequence (DeprecationWarning since Python 3.6).
    return (re.findall(r"\[release\]", lowered) or
            "preparing spark release" in lowered or
            "preparing development version" in lowered or
            "CHANGES.txt" in commit_title)
def is_maintenance(commit_title):
    """True for housekeeping commits (maintenance work, manual PR closes)."""
    lowered = commit_title.lower()
    return "maintenance" in lowered or "manually close" in lowered
def has_no_jira(commit_title):
    """Return True when the title references no SPARK-NNNN JIRA issue."""
    return re.search(r"SPARK-[0-9]+", commit_title.upper()) is None
def is_revert(commit_title):
    """True if the commit reverts another commit."""
    lowered = commit_title.lower()
    return "revert" in lowered
def is_docs(commit_title):
    """Truthy if the commit looks documentation-related ("doc"/"docs"
    or programming-guide changes)."""
    lowered = commit_title.lower()
    return re.findall("docs*", lowered) or "programming guide" in lowered
# Classify every new commit into exactly one bucket; only
# filtered_commits proceeds to the JIRA lookup phase.
for c in new_commits:
    t = c.get_title()
    if not t:
        continue
    elif is_release(t):
        releases.append(c)
    elif is_maintenance(t):
        maintenance.append(c)
    elif is_revert(t):
        reverts.append(c)
    elif is_docs(t):
        filtered_commits.append(c)  # docs may not have JIRA numbers
    elif has_no_jira(t):
        nojiras.append(c)
    else:
        filtered_commits.append(c)
# Warn against ignored commits
if releases or maintenance or reverts or nojiras:
    print("\n==================================================================================")
    if releases:
        print("Found %d release commits" % len(releases))
    if maintenance:
        print("Found %d maintenance commits" % len(maintenance))
    if reverts:
        print("Found %d revert commits" % len(reverts))
    if nojiras:
        print("Found %d commits with no JIRA" % len(nojiras))
    print("* Warning: these commits will be ignored.\n")
    # Optionally dump each ignored bucket so the user can eyeball them.
    if yesOrNoPrompt("Show ignored commits?"):
        if releases:
            print("Release (%d)" % len(releases))
            print_indented(releases)
        if maintenance:
            print("Maintenance (%d)" % len(maintenance))
            print_indented(maintenance)
        if reverts:
            print("Revert (%d)" % len(reverts))
            print_indented(reverts)
        if nojiras:
            print("No JIRA (%d)" % len(nojiras))
            print_indented(nojiras)
    print("==================== Warning: the above commits will be ignored ==================\n")
# Last chance to bail out before the (slow) per-commit JIRA lookups begin.
prompt_msg = "%d commits left to process after filtering. Ok to proceed?" % len(filtered_commits)
if not yesOrNoPrompt(prompt_msg):
    sys.exit("Ok, exiting.")
# Keep track of warnings to tell the user at the end
warnings = []

# Mapping from the invalid author name to its associated JIRA issues
# E.g. andrewor14 -> set("SPARK-2413", "SPARK-3551", "SPARK-3471")
invalid_authors = {}

# Populate a map that groups issues and components by author
# It takes the form: Author name -> { Contribution type -> Spark components }
# For instance,
# {
#   'Andrew Or': {
#     'bug fixes': ['windows', 'core', 'web ui'],
#     'improvements': ['core']
#   },
#   'Tathagata Das' : {
#     'bug fixes': ['streaming']
#     'new feature': ['streaming']
#   }
# }
#
author_info = {}
jira_options = {"server": JIRA_API_BASE}
jira_client = JIRA(options=jira_options)
print("\n=========================== Compiling contributor list ===========================")
# Main loop: for each kept commit, resolve its JIRA issues and fold the
# (issue type, components) pairs into author_info.
for commit in filtered_commits:
    _hash = commit.get_hash()
    title = commit.get_title()
    issues = re.findall("SPARK-[0-9]+", title.upper())
    author = commit.get_author()
    date = get_date(_hash)
    # If the author name is invalid, keep track of it along
    # with all associated issues so we can translate it later
    if is_valid_author(author):
        author = capitalize_author(author)
    else:
        if author not in invalid_authors:
            invalid_authors[author] = set()
        for issue in issues:
            invalid_authors[author].add(issue)
    # Parse components from the commit title, if any
    commit_components = find_components(title, _hash)
    # Populate or merge an issue into author_info[author]
    # NOTE: closes over the loop's `author` variable, so it must be
    # (re)defined inside the loop body.
    def populate(issue_type, components):
        components = components or [CORE_COMPONENT]  # assume core if no components provided
        if author not in author_info:
            author_info[author] = {}
        if issue_type not in author_info[author]:
            author_info[author][issue_type] = set()
        for component in components:
            author_info[author][issue_type].add(component)
    # Find issues and components associated with this commit
    for issue in issues:
        try:
            jira_issue = jira_client.issue(issue)
            jira_type = jira_issue.fields.issuetype.name
            jira_type = translate_issue_type(jira_type, issue, warnings)
            jira_components = [translate_component(c.name, _hash, warnings)
                               for c in jira_issue.fields.components]
            all_components = set(jira_components + commit_components)
            populate(jira_type, all_components)
        except Exception as e:
            # best-effort: a failed JIRA lookup should not abort the run
            print("Unexpected error:", e)
    # For docs without an associated JIRA, manually add it ourselves
    if is_docs(title) and not issues:
        populate("documentation", commit_components)
    print(" Processed commit %s authored by %s on %s" % (_hash, author, date))
print("==================================================================================\n")
# Write to contributors file ordered by author names
# Each line takes the format " * Author name -- semi-colon delimited contributions"
# e.g. * Andrew Or -- Bug fixes in Windows, Core, and Web UI; improvements in Core
# e.g. * Tathagata Das -- Bug fixes and new features in Streaming
contributors_file = open(contributors_file_name, "w")
# NOTE: Python 2 idiom -- keys() returns a sortable list there.
authors = author_info.keys()
authors.sort()
for author in authors:
    contribution = ""
    components = set()
    issue_types = set()
    for issue_type, comps in author_info[author].items():
        components.update(comps)
        issue_types.add(issue_type)
    # If there is only one component, mention it only once
    # e.g. Bug fixes, improvements in MLlib
    if len(components) == 1:
        contribution = "%s in %s" % (nice_join(issue_types), next(iter(components)))
    # Otherwise, group contributions by issue types instead of modules
    # e.g. Bug fixes in MLlib, Core, and Streaming; documentation in YARN
    else:
        contributions = ["%s in %s" % (issue_type, nice_join(comps))
                         for issue_type, comps in author_info[author].items()]
        contribution = "; ".join(contributions)
    # Do not use python's capitalize() on the whole string to preserve case
    assert contribution
    contribution = contribution[0].capitalize() + contribution[1:]
    # If the author name is invalid, use an intermediate format that
    # can be translated through translate-contributors.py later
    # E.g. andrewor14/SPARK-3425/SPARK-1157/SPARK-6672
    if author in invalid_authors and invalid_authors[author]:
        author = author + "/" + "/".join(invalid_authors[author])
    # Only the author (not the contribution summary) is written out;
    # the full line format above is kept for reference.
    # line = " * %s -- %s" % (author, contribution)
    line = author
    contributors_file.write(line + "\n")
contributors_file.close()
print("Contributors list is successfully written to %s!" % contributors_file_name)

# Prompt the user to translate author names if necessary
if invalid_authors:
    warnings.append("Found the following invalid authors:")
    for a in invalid_authors:
        warnings.append("\t%s" % a)
    warnings.append("Please run './translate-contributors.py' to translate them.")

# Log any warnings encountered in the process
if warnings:
    print("\n============ Warnings encountered while creating the contributor list ============")
    for w in warnings:
        print(w)
    print("Please correct these in the final contributors list at %s." % contributors_file_name)
    print("==================================================================================\n")
| apache-2.0 |
yoer/hue | desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_dateparse.py | 108 | 1917 | from __future__ import unicode_literals
from datetime import date, time, datetime
from django.utils.dateparse import parse_date, parse_time, parse_datetime
from django.utils import unittest
from django.utils.tzinfo import FixedOffset
class DateParseTests(unittest.TestCase):
    """Exercise django.utils.dateparse's parse_date/parse_time/parse_datetime.

    Convention under test: a syntactically unrecognised string returns
    None, while a well-formed string with out-of-range values raises
    ValueError.
    """

    def test_parse_date(self):
        # Valid inputs, zero-padded and not.
        for text, expected in [('2012-04-23', date(2012, 4, 23)),
                               ('2012-4-9', date(2012, 4, 9))]:
            self.assertEqual(parse_date(text), expected)
        # Invalid inputs
        self.assertEqual(parse_date('20120423'), None)
        self.assertRaises(ValueError, parse_date, '2012-04-56')

    def test_parse_time(self):
        # Valid inputs, with and without seconds/microseconds.
        for text, expected in [('09:15:00', time(9, 15)),
                               ('10:10', time(10, 10)),
                               ('10:20:30.400', time(10, 20, 30, 400000)),
                               ('4:8:16', time(4, 8, 16))]:
            self.assertEqual(parse_time(text), expected)
        # Invalid inputs
        self.assertEqual(parse_time('091500'), None)
        self.assertRaises(ValueError, parse_time, '09:15:90')

    def test_parse_datetime(self):
        # Valid inputs: naive, UTC ("Z"), and fixed-offset variants.
        cases = [
            ('2012-04-23T09:15:00', datetime(2012, 4, 23, 9, 15)),
            ('2012-4-9 4:8:16', datetime(2012, 4, 9, 4, 8, 16)),
            ('2012-04-23T09:15:00Z',
             datetime(2012, 4, 23, 9, 15, 0, 0, FixedOffset(0))),
            ('2012-4-9 4:8:16-0320',
             datetime(2012, 4, 9, 4, 8, 16, 0, FixedOffset(-200))),
            ('2012-04-23T10:20:30.400+02:30',
             datetime(2012, 4, 23, 10, 20, 30, 400000, FixedOffset(150))),
        ]
        for text, expected in cases:
            self.assertEqual(parse_datetime(text), expected)
        # Invalid inputs
        self.assertEqual(parse_datetime('20120423091500'), None)
        self.assertRaises(ValueError, parse_datetime, '2012-04-56T09:15:90')
| apache-2.0 |
huntxu/neutron | neutron/tests/unit/tests/common/test_net_helpers.py | 7 | 3466 | # Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants as n_const
from neutron.tests import base
from neutron.tests.common import net_helpers
# Canned ``ss`` output used as a fixture: the parsing helper under test
# is expected to extract the local-address port numbers from it.
ss_output = """
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 10 127.0.0.1:6640 *:*
LISTEN 0 128 *:46675 *:*
LISTEN 0 128 *:22 *:*
LISTEN 0 128 *:5432 *:*
LISTEN 0 128 *:3260 *:*
LISTEN 0 50 *:3306 *:*
ESTAB 0 36 10.0.0.202:22 10.0.0.44:45258
ESTAB 0 0 127.0.0.1:32965 127.0.0.1:4369
ESTAB 0 0 10.0.0.202:22 10.0.0.44:36104
LISTEN 0 128 :::80 :::*
LISTEN 0 128 :::4369 :::*
LISTEN 0 128 :::22 :::*
LISTEN 0 128 :::5432 :::*
LISTEN 0 128 :::3260 :::*
LISTEN 0 128 :::5672 :::*
ESTAB 0 0 ::ffff:127.0.0.1:4369 ::ffff:127.0.0.1:32965
"""

# Template for synthesizing extra LISTEN rows on arbitrary ports.
ss_output_template = """
LISTEN 0 10 127.0.0.1:%d *:*
"""
class PortAllocationTestCase(base.DietTestCase):
    """Tests for net_helpers' free-port discovery based on ``ss`` output."""

    def test__get_source_ports_from_ss_output(self):
        # The helper should return the set of local ports from the fixture,
        # regardless of address family or socket state.
        result = net_helpers._get_source_ports_from_ss_output(ss_output)
        expected = {6640, 46675, 5432, 3260, 3306, 22, 32965,
                    4369, 5672, 80}
        self.assertEqual(expected, result)

    def test_get_free_namespace_port(self):
        # Mark every port from 1024 up to (but excluding) 32767 as used, so
        # the only free port below the kernel's local range is 32767.
        ss_output2 = ss_output
        for p in range(1024, 32767):
            ss_output2 += ss_output_template % p
        with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') \
                as ipwrapper, \
                mock.patch('neutron.agent.linux.utils.execute') as ex:
            m = mock.MagicMock()
            # ``ss`` run inside the namespace reports the synthetic output.
            m.netns.execute.return_value = ss_output2
            ipwrapper.return_value = m
            # Kernel's ip_local_port_range: 32768-61000.
            local_port_range_start = 32768
            ex.return_value = "%s\t61000" % local_port_range_start
            result = net_helpers.get_free_namespace_port(
                n_const.PROTO_NAME_TCP)
            self.assertEqual((local_port_range_start - 1), result)

    def test_get_unused_port(self):
        with mock.patch('neutron.agent.linux.utils.execute') as ex:
            ex.return_value = "2048\t61000"
            # 1025-2047 are excluded, so the highest available port below
            # the local range start is 1024.
            result = net_helpers.get_unused_port(set(range(1025, 2048)))
            self.assertEqual(1024, result)
| apache-2.0 |
wehkamp/ansible-modules-extras | monitoring/pagerduty.py | 12 | 7181 | #!/usr/bin/python
DOCUMENTATION = '''
module: pagerduty
short_description: Create PagerDuty maintenance windows
description:
- This module will let you create PagerDuty maintenance windows
version_added: "1.2"
author:
- "Andrew Newdigate (@suprememoocow)"
- "Dylan Silva (@thaumos)"
- "Justin Johns"
requirements:
- PagerDuty API access
options:
state:
description:
- Create a maintenance window or get a list of ongoing windows.
required: true
default: null
choices: [ "running", "started", "ongoing" ]
aliases: []
name:
description:
- PagerDuty unique subdomain.
required: true
default: null
choices: []
aliases: []
user:
description:
- PagerDuty user ID.
required: true
default: null
choices: []
aliases: []
passwd:
description:
- PagerDuty user password.
required: true
default: null
choices: []
aliases: []
token:
description:
- A pagerduty token, generated on the pagerduty site. Can be used instead of
user/passwd combination.
required: true
default: null
choices: []
aliases: []
version_added: '1.8'
requester_id:
description:
- ID of user making the request. Only needed when using a token and creating a maintenance_window.
required: true
default: null
choices: []
aliases: []
version_added: '1.8'
service:
description:
- PagerDuty service ID.
required: false
default: null
choices: []
aliases: []
hours:
description:
- Length of maintenance window in hours.
required: false
default: 1
choices: []
aliases: []
minutes:
description:
- Maintenance window in minutes (this is added to the hours).
required: false
default: 0
choices: []
aliases: []
version_added: '1.8'
desc:
description:
- Short description of maintenance window.
required: false
default: Created by Ansible
choices: []
aliases: []
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
notes:
- This module does not yet have support to end maintenance windows.
'''
EXAMPLES='''
# List ongoing maintenance windows using a user/passwd
- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing
# List ongoing maintenance windows using a token
- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing
# Create a 1 hour maintenance window for service FOO123, using a user/passwd
- pagerduty: name=companyabc
user=example@example.com
passwd=password123
state=running
service=FOO123
# Create a 5 minute maintenance window for service FOO123, using a token
- pagerduty: name=companyabc
token=xxxxxxxxxxxxxx
hours=0
minutes=5
state=running
service=FOO123
# Create a 4 hour maintenance window for service FOO123 with the description "deployment".
- pagerduty: name=companyabc
user=example@example.com
passwd=password123
state=running
service=FOO123
hours=4
desc=deployment
'''
import datetime
import base64
def auth_header(user, passwd, token):
    """Return the value for the HTTP Authorization header.

    A PagerDuty API token, when supplied, takes precedence over HTTP
    basic credentials.
    """
    if token:
        return "Token token=%s" % token
    # HTTP basic auth: base64("user:passwd"), trailing newline stripped.
    encoded = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
    return "Basic %s" % encoded
def ongoing(module, name, user, passwd, token):
    """List the currently ongoing PagerDuty maintenance windows.

    Returns (False, response body) on HTTP 200; otherwise fails the
    Ansible module (fail_json does not return).
    """
    url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing"
    headers = {"Authorization": auth_header(user, passwd, token)}
    response, info = fetch_url(module, url, headers=headers)
    if info['status'] != 200:
        module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
    # First element False == "not failed" (checked as rc by the caller).
    return False, response.read()
def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc):
    """Create a PagerDuty maintenance window starting now.

    The window runs for hours + minutes from the current UTC time and
    covers the given service ID.  Returns (False, response body) on
    HTTP 200; otherwise fails the Ansible module.
    """
    now = datetime.datetime.utcnow()
    later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
    # PagerDuty expects ISO-8601 UTC timestamps with a literal "Z" suffix.
    start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
    end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
    url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows"
    headers = {
        'Authorization': auth_header(user, passwd, token),
        'Content-Type' : 'application/json',
    }
    request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': [service]}}
    # requester_id is a top-level parameter, mandatory for token auth.
    if requester_id:
        request_data['requester_id'] = requester_id
    else:
        if token:
            module.fail_json(msg="requester_id is required when using a token")
    data = json.dumps(request_data)
    response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
    if info['status'] != 200:
        module.fail_json(msg="failed to create the window: %s" % info['msg'])
    # First element False == "not failed" (checked as rc by the caller).
    return False, response.read()
def main():
    """Ansible entry point: validate arguments, then create or list
    PagerDuty maintenance windows depending on ``state``."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=True, choices=['running', 'started', 'ongoing']),
            name=dict(required=True),
            user=dict(required=False),
            passwd=dict(required=False),
            token=dict(required=False),
            service=dict(required=False),
            requester_id=dict(required=False),
            hours=dict(default='1', required=False),
            minutes=dict(default='0', required=False),
            desc=dict(default='Created by Ansible', required=False),
            validate_certs=dict(default='yes', type='bool'),
        )
    )

    state = module.params['state']
    name = module.params['name']
    user = module.params['user']
    passwd = module.params['passwd']
    # was assigned twice in the original (once more below desc); once is enough
    token = module.params['token']
    service = module.params['service']
    hours = module.params['hours']
    minutes = module.params['minutes']
    desc = module.params['desc']
    requester_id = module.params['requester_id']

    # NOTE(review): this accepts user without passwd (and vice versa); the
    # error message suggests the intent was requiring both, i.e.
    # `not (user and passwd)` -- kept as-is to preserve existing behavior.
    if not token and not (user or passwd):
        module.fail_json(msg="neither user and passwd nor token specified")

    if state == "running" or state == "started":
        if not service:
            module.fail_json(msg="service not specified")
        (rc, out) = create(module, name, user, passwd, token, requester_id,
                           service, hours, minutes, desc)

    if state == "ongoing":
        (rc, out) = ongoing(module, name, user, passwd, token)

    if rc != 0:
        module.fail_json(msg="failed", result=out)

    module.exit_json(msg="success", result=out)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/chardet/euckrfreq.py | 342 | 13546 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Idea Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ration = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
# Distribution-ratio threshold used when scoring EUC-KR candidate text
# (see the sampling statistics in the comment block above).
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0

# Number of entries in the EUCKR_CHAR_TO_FREQ_ORDER table that follows.
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table ,
EUCKR_CHAR_TO_FREQ_ORDER = (
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
)
| apache-2.0 |
chanijjani/TizenRT | external/protobuf/python/google/protobuf/internal/json_format_test.py | 19 | 41456 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.json_format."""
__author__ = 'jieluo@google.com (Jie Luo)'
import json
import math
import sys
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import any_pb2
from google.protobuf import duration_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import struct_pb2
from google.protobuf import timestamp_pb2
from google.protobuf import wrappers_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import well_known_types
from google.protobuf import json_format
from google.protobuf.util import json_format_proto3_pb2
class JsonFormatBase(unittest.TestCase):
  """Shared fixtures and helpers for the proto3 JSON-format tests."""

  def FillAllFields(self, message):
    """Set every singular, repeated and enum field of *message* to a
    known value, including extreme/special values (infinity, max ints).
    Also stashes the filled message on self.message for later checks."""
    # Singular fields.
    message.int32_value = 20
    message.int64_value = -20
    message.uint32_value = 3120987654
    message.uint64_value = 12345678900
    message.float_value = float('-inf')
    message.double_value = 3.1415
    message.bool_value = True
    message.string_value = 'foo'
    message.bytes_value = b'bar'
    message.message_value.value = 10
    message.enum_value = json_format_proto3_pb2.BAR
    # Repeated fields: extend() is equivalent to successive append()s.
    message.repeated_int32_value.extend([0x7FFFFFFF, -2147483648])
    message.repeated_int64_value.extend([9007199254740992, -9007199254740992])
    message.repeated_uint32_value.extend([0xFFFFFFF, 0x7FFFFFF])
    message.repeated_uint64_value.extend([9007199254740992, 9007199254740991])
    message.repeated_float_value.append(0)
    message.repeated_double_value.extend([1E-15, float('inf')])
    message.repeated_bool_value.extend([True, False])
    message.repeated_string_value.extend(['Few symbols!#$,;', 'bar'])
    message.repeated_bytes_value.extend([b'foo', b'bar'])
    message.repeated_message_value.add().value = 10
    message.repeated_message_value.add().value = 11
    message.repeated_enum_value.extend([json_format_proto3_pb2.FOO,
                                        json_format_proto3_pb2.BAR])
    self.message = message

  def CheckParseBack(self, message, parsed_message):
    """Serialize *message* to JSON, parse it into *parsed_message*, and
    assert the round trip is lossless."""
    serialized = json_format.MessageToJson(message)
    json_format.Parse(serialized, parsed_message)
    self.assertEqual(message, parsed_message)

  def CheckError(self, text, error_message):
    """Assert that parsing *text* into a fresh TestMessage raises
    json_format.ParseError matching the regex *error_message*."""
    target = json_format_proto3_pb2.TestMessage()
    self.assertRaisesRegexp(
        json_format.ParseError,
        error_message,
        json_format.Parse, text, target)
class JsonFormatTest(JsonFormatBase):
  def testEmptyMessageToJson(self):
    """A default-valued message serializes to the empty JSON object."""
    message = json_format_proto3_pb2.TestMessage()
    self.assertEqual(json_format.MessageToJson(message),
                     '{}')
    parsed_message = json_format_proto3_pb2.TestMessage()
    self.CheckParseBack(message, parsed_message)
  def testPartialMessageToJson(self):
    """Only explicitly set fields appear in the default JSON output."""
    message = json_format_proto3_pb2.TestMessage(
        string_value='test',
        repeated_int32_value=[89, 4])
    self.assertEqual(json.loads(json_format.MessageToJson(message)),
                     json.loads('{"stringValue": "test", '
                                '"repeatedInt32Value": [89, 4]}'))
    parsed_message = json_format_proto3_pb2.TestMessage()
    self.CheckParseBack(message, parsed_message)
  def testAllFieldsToJson(self):
    """Every field type maps to its proto3 JSON form (64-bit ints and
    non-finite floats as strings, bytes as base64) and parses back."""
    message = json_format_proto3_pb2.TestMessage()
    text = ('{"int32Value": 20, '
            '"int64Value": "-20", '
            '"uint32Value": 3120987654,'
            '"uint64Value": "12345678900",'
            '"floatValue": "-Infinity",'
            '"doubleValue": 3.1415,'
            '"boolValue": true,'
            '"stringValue": "foo",'
            '"bytesValue": "YmFy",'
            '"messageValue": {"value": 10},'
            '"enumValue": "BAR",'
            '"repeatedInt32Value": [2147483647, -2147483648],'
            '"repeatedInt64Value": ["9007199254740992", "-9007199254740992"],'
            '"repeatedUint32Value": [268435455, 134217727],'
            '"repeatedUint64Value": ["9007199254740992", "9007199254740991"],'
            '"repeatedFloatValue": [0],'
            '"repeatedDoubleValue": [1e-15, "Infinity"],'
            '"repeatedBoolValue": [true, false],'
            '"repeatedStringValue": ["Few symbols!#$,;", "bar"],'
            '"repeatedBytesValue": ["Zm9v", "YmFy"],'
            '"repeatedMessageValue": [{"value": 10}, {"value": 11}],'
            '"repeatedEnumValue": ["FOO", "BAR"]'
            '}')
    self.FillAllFields(message)
    self.assertEqual(
        json.loads(json_format.MessageToJson(message)),
        json.loads(text))
    parsed_message = json_format_proto3_pb2.TestMessage()
    json_format.Parse(text, parsed_message)
    self.assertEqual(message, parsed_message)
  def testUnknownEnumToJsonAndBack(self):
    """Enum values with no declared name serialize as bare integers."""
    text = '{\n  "enumValue": 999\n}'
    message = json_format_proto3_pb2.TestMessage()
    message.enum_value = 999
    self.assertEqual(json_format.MessageToJson(message),
                     text)
    parsed_message = json_format_proto3_pb2.TestMessage()
    json_format.Parse(text, parsed_message)
    self.assertEqual(message, parsed_message)
  def testExtensionToJsonAndBack(self):
    """MessageSet extensions survive a JSON round trip."""
    message = unittest_mset_pb2.TestMessageSetContainer()
    ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
    ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
    message.message_set.Extensions[ext1].i = 23
    message.message_set.Extensions[ext2].str = 'foo'
    message_text = json_format.MessageToJson(
        message
    )
    parsed_message = unittest_mset_pb2.TestMessageSetContainer()
    json_format.Parse(message_text, parsed_message)
    self.assertEqual(message, parsed_message)
def testExtensionErrors(self):
self.CheckError('{"[extensionField]": {}}',
'Message type proto3.TestMessage does not have extensions')
  def testExtensionToDictAndBack(self):
    """MessageSet extensions survive a dict round trip
    (MessageToDict / ParseDict)."""
    message = unittest_mset_pb2.TestMessageSetContainer()
    ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
    ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
    message.message_set.Extensions[ext1].i = 23
    message.message_set.Extensions[ext2].str = 'foo'
    message_dict = json_format.MessageToDict(
        message
    )
    parsed_message = unittest_mset_pb2.TestMessageSetContainer()
    json_format.ParseDict(message_dict, parsed_message)
    self.assertEqual(message, parsed_message)
  def testExtensionSerializationDictMatchesProto3Spec(self):
    """See go/proto3-json-spec for spec.

    Extension fields are keyed by their bracketed full name, e.g.
    '[package.MessageName.extensionName]', in the produced dict.
    """
    message = unittest_mset_pb2.TestMessageSetContainer()
    ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
    ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
    message.message_set.Extensions[ext1].i = 23
    message.message_set.Extensions[ext2].str = 'foo'
    message_dict = json_format.MessageToDict(
        message
    )
    golden_dict = {
        'messageSet': {
            '[protobuf_unittest.'
            'TestMessageSetExtension1.messageSetExtension]': {
                'i': 23,
            },
            '[protobuf_unittest.'
            'TestMessageSetExtension2.messageSetExtension]': {
                'str': u'foo',
            },
        },
    }
    self.assertEqual(golden_dict, message_dict)
  def testExtensionSerializationJsonMatchesProto3Spec(self):
    """See go/proto3-json-spec for spec.

    Same as the dict variant above, but compares the JSON text output
    against a golden string (parsed to ignore whitespace/ordering).
    """
    message = unittest_mset_pb2.TestMessageSetContainer()
    ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
    ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
    message.message_set.Extensions[ext1].i = 23
    message.message_set.Extensions[ext2].str = 'foo'
    message_text = json_format.MessageToJson(
        message
    )
    ext1_text = ('protobuf_unittest.TestMessageSetExtension1.'
                 'messageSetExtension')
    ext2_text = ('protobuf_unittest.TestMessageSetExtension2.'
                 'messageSetExtension')
    golden_text = ('{"messageSet": {'
                   '    "[%s]": {'
                   '        "i": 23'
                   '    },'
                   '    "[%s]": {'
                   '        "str": "foo"'
                   '    }'
                   '}}') % (ext1_text, ext2_text)
    self.assertEqual(json.loads(golden_text), json.loads(message_text))
  def testJsonEscapeString(self):
    """Control characters and U+2028/U+2029 separators are escaped in
    output; unicode escapes in input parse correctly."""
    message = json_format_proto3_pb2.TestMessage()
    if sys.version_info[0] < 3:
      # Python 2: raw UTF-8 bytes are accepted directly in a str.
      message.string_value = '&\n<\"\r>\b\t\f\\\001/\xe2\x80\xa8\xe2\x80\xa9'
    else:
      # Python 3: decode the separator bytes to append as text.
      message.string_value = '&\n<\"\r>\b\t\f\\\001/'
      message.string_value += (b'\xe2\x80\xa8\xe2\x80\xa9').decode('utf-8')
    self.assertEqual(
        json_format.MessageToJson(message),
        '{\n  "stringValue": '
        '"&\\n<\\\"\\r>\\b\\t\\f\\\\\\u0001/\\u2028\\u2029"\n}')
    parsed_message = json_format_proto3_pb2.TestMessage()
    self.CheckParseBack(message, parsed_message)
    text = u'{"int32Value": "\u0031"}'
    json_format.Parse(text, message)
    self.assertEqual(message.int32_value, 1)
  def testAlwaysSeriliaze(self):
    """MessageToJson(message, True) prints every field, set or unset."""
    # NOTE(review): method name misspells "Serialize"; kept as-is so the
    # test name stays stable for discovery/history.
    message = json_format_proto3_pb2.TestMessage(
        string_value='foo')
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads('{'
                   '"repeatedStringValue": [],'
                   '"stringValue": "foo",'
                   '"repeatedBoolValue": [],'
                   '"repeatedUint32Value": [],'
                   '"repeatedInt32Value": [],'
                   '"enumValue": "FOO",'
                   '"int32Value": 0,'
                   '"floatValue": 0,'
                   '"int64Value": "0",'
                   '"uint32Value": 0,'
                   '"repeatedBytesValue": [],'
                   '"repeatedUint64Value": [],'
                   '"repeatedDoubleValue": [],'
                   '"bytesValue": "",'
                   '"boolValue": false,'
                   '"repeatedEnumValue": [],'
                   '"uint64Value": "0",'
                   '"doubleValue": 0,'
                   '"repeatedFloatValue": [],'
                   '"repeatedInt64Value": [],'
                   '"repeatedMessageValue": []}'))
    parsed_message = json_format_proto3_pb2.TestMessage()
    self.CheckParseBack(message, parsed_message)
def testIntegersRepresentedAsFloat(self):
message = json_format_proto3_pb2.TestMessage()
json_format.Parse('{"int32Value": -2.147483648e9}', message)
self.assertEqual(message.int32_value, -2147483648)
json_format.Parse('{"int32Value": 1e5}', message)
self.assertEqual(message.int32_value, 100000)
json_format.Parse('{"int32Value": 1.0}', message)
self.assertEqual(message.int32_value, 1)
  def testMapFields(self):
    """Map fields serialize with string-converted keys; always-print
    mode shows empty maps as {}."""
    message = json_format_proto3_pb2.TestNestedMap()
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads('{'
                   '"boolMap": {},'
                   '"int32Map": {},'
                   '"int64Map": {},'
                   '"uint32Map": {},'
                   '"uint64Map": {},'
                   '"stringMap": {},'
                   '"mapMap": {}'
                   '}'))
    message.bool_map[True] = 1
    message.bool_map[False] = 2
    message.int32_map[1] = 2
    message.int32_map[2] = 3
    message.int64_map[1] = 2
    message.int64_map[2] = 3
    message.uint32_map[1] = 2
    message.uint32_map[2] = 3
    message.uint64_map[1] = 2
    message.uint64_map[2] = 3
    message.string_map['1'] = 2
    # 'null' is deliberately used as a plain string key here.
    message.string_map['null'] = 3
    message.map_map['1'].bool_map[True] = 3
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, False)),
        json.loads('{'
                   '"boolMap": {"false": 2, "true": 1},'
                   '"int32Map": {"1": 2, "2": 3},'
                   '"int64Map": {"1": 2, "2": 3},'
                   '"uint32Map": {"1": 2, "2": 3},'
                   '"uint64Map": {"1": 2, "2": 3},'
                   '"stringMap": {"1": 2, "null": 3},'
                   '"mapMap": {"1": {"boolMap": {"true": 3}}}'
                   '}'))
    parsed_message = json_format_proto3_pb2.TestNestedMap()
    self.CheckParseBack(message, parsed_message)
  def testOneofFields(self):
    """Only the set oneof member is printed; setting it to 0 still
    counts as set."""
    message = json_format_proto3_pb2.TestOneof()
    # Always print does not affect oneof fields.
    self.assertEqual(
        json_format.MessageToJson(message, True),
        '{}')
    message.oneof_int32_value = 0
    self.assertEqual(
        json_format.MessageToJson(message, True),
        '{\n'
        '  "oneofInt32Value": 0\n'
        '}')
    parsed_message = json_format_proto3_pb2.TestOneof()
    self.CheckParseBack(message, parsed_message)
  def testSurrogates(self):
    """Valid surrogate pairs decode to one code point; unpaired
    surrogates are parse errors."""
    # Test correct surrogate handling.
    message = json_format_proto3_pb2.TestMessage()
    json_format.Parse('{"stringValue": "\\uD83D\\uDE01"}', message)
    self.assertEqual(message.string_value,
                     b'\xF0\x9F\x98\x81'.decode('utf-8', 'strict'))
    # Error case: unpaired high surrogate.
    self.CheckError(
        '{"stringValue": "\\uD83D"}',
        r'Invalid \\uXXXX escape|Unpaired.*surrogate')
    # Unpaired low surrogate.
    self.CheckError(
        '{"stringValue": "\\uDE01"}',
        r'Invalid \\uXXXX escape|Unpaired.*surrogate')
  def testTimestampMessage(self):
    """Timestamps serialize to RFC 3339 UTC strings and accept
    arbitrary UTC offsets when parsing."""
    message = json_format_proto3_pb2.TestTimestamp()
    message.value.seconds = 0
    message.value.nanos = 0
    message.repeated_value.add().seconds = 20
    message.repeated_value[0].nanos = 1
    message.repeated_value.add().seconds = 0
    message.repeated_value[1].nanos = 10000
    message.repeated_value.add().seconds = 100000000
    message.repeated_value[2].nanos = 0
    # Maximum time
    message.repeated_value.add().seconds = 253402300799
    message.repeated_value[3].nanos = 999999999
    # Minimum time
    message.repeated_value.add().seconds = -62135596800
    message.repeated_value[4].nanos = 0
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads('{'
                   '"value": "1970-01-01T00:00:00Z",'
                   '"repeatedValue": ['
                   '  "1970-01-01T00:00:20.000000001Z",'
                   '  "1970-01-01T00:00:00.000010Z",'
                   '  "1973-03-03T09:46:40Z",'
                   '  "9999-12-31T23:59:59.999999999Z",'
                   '  "0001-01-01T00:00:00Z"'
                   ']'
                   '}'))
    parsed_message = json_format_proto3_pb2.TestTimestamp()
    self.CheckParseBack(message, parsed_message)
    # Offsets are normalized to seconds-since-epoch on parse.
    text = (r'{"value": "1970-01-01T00:00:00.01+08:00",'
            r'"repeatedValue":['
            r'  "1970-01-01T00:00:00.01+08:30",'
            r'  "1970-01-01T00:00:00.01-01:23"]}')
    json_format.Parse(text, parsed_message)
    self.assertEqual(parsed_message.value.seconds, -8 * 3600)
    self.assertEqual(parsed_message.value.nanos, 10000000)
    self.assertEqual(parsed_message.repeated_value[0].seconds, -8.5 * 3600)
    self.assertEqual(parsed_message.repeated_value[1].seconds, 3600 + 23 * 60)
  def testDurationMessage(self):
    """Durations serialize to a decimal seconds string with an 's'
    suffix, including negative and extreme values."""
    message = json_format_proto3_pb2.TestDuration()
    message.value.seconds = 1
    message.repeated_value.add().seconds = 0
    message.repeated_value[0].nanos = 10
    message.repeated_value.add().seconds = -1
    message.repeated_value[1].nanos = -1000
    message.repeated_value.add().seconds = 10
    message.repeated_value[2].nanos = 11000000
    message.repeated_value.add().seconds = -315576000000
    message.repeated_value.add().seconds = 315576000000
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads('{'
                   '"value": "1s",'
                   '"repeatedValue": ['
                   '  "0.000000010s",'
                   '  "-1.000001s",'
                   '  "10.011s",'
                   '  "-315576000000s",'
                   '  "315576000000s"'
                   ']'
                   '}'))
    parsed_message = json_format_proto3_pb2.TestDuration()
    self.CheckParseBack(message, parsed_message)
  def testFieldMaskMessage(self):
    """FieldMask paths serialize to a single comma-joined string."""
    message = json_format_proto3_pb2.TestFieldMask()
    message.value.paths.append('foo.bar')
    message.value.paths.append('bar')
    self.assertEqual(
        json_format.MessageToJson(message, True),
        '{\n'
        '  "value": "foo.bar,bar"\n'
        '}')
    parsed_message = json_format_proto3_pb2.TestFieldMask()
    self.CheckParseBack(message, parsed_message)
  def testWrapperMessage(self):
    """Wrapper types (google.protobuf.*Value) serialize as their bare
    wrapped value, not as a nested object."""
    message = json_format_proto3_pb2.TestWrapper()
    message.bool_value.value = False
    message.int32_value.value = 0
    message.string_value.value = ''
    message.bytes_value.value = b''
    message.repeated_bool_value.add().value = True
    message.repeated_bool_value.add().value = False
    message.repeated_int32_value.add()
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads('{\n'
                   '  "int32Value": 0,'
                   '  "boolValue": false,'
                   '  "stringValue": "",'
                   '  "bytesValue": "",'
                   '  "repeatedBoolValue": [true, false],'
                   '  "repeatedInt32Value": [0],'
                   '  "repeatedUint32Value": [],'
                   '  "repeatedFloatValue": [],'
                   '  "repeatedDoubleValue": [],'
                   '  "repeatedBytesValue": [],'
                   '  "repeatedInt64Value": [],'
                   '  "repeatedUint64Value": [],'
                   '  "repeatedStringValue": []'
                   '}'))
    parsed_message = json_format_proto3_pb2.TestWrapper()
    self.CheckParseBack(message, parsed_message)
  def testStructMessage(self):
    """Struct fields serialize as free-form JSON objects, including
    nested structs, lists and null values."""
    message = json_format_proto3_pb2.TestStruct()
    message.value['name'] = 'Jim'
    message.value['age'] = 10
    message.value['attend'] = True
    message.value['email'] = None
    message.value.get_or_create_struct('address')['city'] = 'SFO'
    message.value['address']['house_number'] = 1024
    struct_list = message.value.get_or_create_list('list')
    struct_list.extend([6, 'seven', True, False, None])
    struct_list.add_struct()['subkey2'] = 9
    message.repeated_value.add()['age'] = 11
    message.repeated_value.add()
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, False)),
        json.loads(
            '{'
            '  "value": {'
            '    "address": {'
            '      "city": "SFO", '
            '      "house_number": 1024'
            '    }, '
            '    "age": 10, '
            '    "name": "Jim", '
            '    "attend": true, '
            '    "email": null, '
            '    "list": [6, "seven", true, false, null, {"subkey2": 9}]'
            '  },'
            '  "repeatedValue": [{"age": 11}, {}]'
            '}'))
    parsed_message = json_format_proto3_pb2.TestStruct()
    self.CheckParseBack(message, parsed_message)
  def testValueMessage(self):
    """google.protobuf.Value serializes as the contained JSON value;
    an unset Value serializes as null."""
    message = json_format_proto3_pb2.TestValue()
    message.value.string_value = 'hello'
    message.repeated_value.add().number_value = 11.1
    message.repeated_value.add().bool_value = False
    message.repeated_value.add().null_value = 0
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, False)),
        json.loads(
            '{'
            '  "value": "hello",'
            '  "repeatedValue": [11.1, false, null]'
            '}'))
    parsed_message = json_format_proto3_pb2.TestValue()
    self.CheckParseBack(message, parsed_message)
    # Can't parse back if the Value message is not set.
    message.repeated_value.add()
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, False)),
        json.loads(
            '{'
            '  "value": "hello",'
            '  "repeatedValue": [11.1, false, null, null]'
            '}'))
    message.Clear()
    json_format.Parse('{"value": null}', message)
    self.assertEqual(message.value.WhichOneof('kind'), 'null_value')
  def testListValueMessage(self):
    """google.protobuf.ListValue serializes as a plain JSON array."""
    message = json_format_proto3_pb2.TestListValue()
    message.value.values.add().number_value = 11.1
    message.value.values.add().null_value = 0
    message.value.values.add().bool_value = True
    message.value.values.add().string_value = 'hello'
    message.value.values.add().struct_value['name'] = 'Jim'
    message.repeated_value.add().values.add().number_value = 1
    message.repeated_value.add()
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, False)),
        json.loads(
            '{"value": [11.1, null, true, "hello", {"name": "Jim"}]\n,'
            '"repeatedValue": [[1], []]}'))
    parsed_message = json_format_proto3_pb2.TestListValue()
    self.CheckParseBack(message, parsed_message)
  def testAnyMessage(self):
    """Any fields expand the packed message inline with an '@type' key,
    and '@type' is always printed first."""
    message = json_format_proto3_pb2.TestAny()
    value1 = json_format_proto3_pb2.MessageType()
    value2 = json_format_proto3_pb2.MessageType()
    value1.value = 1234
    value2.value = 5678
    message.value.Pack(value1)
    message.repeated_value.add().Pack(value1)
    message.repeated_value.add().Pack(value2)
    message.repeated_value.add()
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads(
            '{\n'
            '  "repeatedValue": [ {\n'
            '    "@type": "type.googleapis.com/proto3.MessageType",\n'
            '    "value": 1234\n'
            '  }, {\n'
            '    "@type": "type.googleapis.com/proto3.MessageType",\n'
            '    "value": 5678\n'
            '  },\n'
            '  {}],\n'
            '  "value": {\n'
            '    "@type": "type.googleapis.com/proto3.MessageType",\n'
            '    "value": 1234\n'
            '  }\n'
            '}\n'))
    parsed_message = json_format_proto3_pb2.TestAny()
    self.CheckParseBack(message, parsed_message)
    # Must print @type first
    test_message = json_format_proto3_pb2.TestMessage(
        bool_value=True,
        int32_value=20,
        int64_value=-20,
        uint32_value=20,
        uint64_value=20,
        double_value=3.14,
        string_value='foo')
    message.Clear()
    message.value.Pack(test_message)
    self.assertEqual(
        json_format.MessageToJson(message, False)[0:68],
        '{\n'
        '  "value": {\n'
        '    "@type": "type.googleapis.com/proto3.TestMessage"')
  def testWellKnownInAnyMessage(self):
    """Well-known types packed in an Any serialize with '@type' plus a
    'value' key holding their special JSON form."""
    message = any_pb2.Any()
    int32_value = wrappers_pb2.Int32Value()
    int32_value.value = 1234
    message.Pack(int32_value)
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads(
            '{\n'
            '  "@type": \"type.googleapis.com/google.protobuf.Int32Value\",\n'
            '  "value": 1234\n'
            '}\n'))
    parsed_message = any_pb2.Any()
    self.CheckParseBack(message, parsed_message)
    timestamp = timestamp_pb2.Timestamp()
    message.Pack(timestamp)
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads(
            '{\n'
            '  "@type": "type.googleapis.com/google.protobuf.Timestamp",\n'
            '  "value": "1970-01-01T00:00:00Z"\n'
            '}\n'))
    self.CheckParseBack(message, parsed_message)
    duration = duration_pb2.Duration()
    duration.seconds = 1
    message.Pack(duration)
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads(
            '{\n'
            '  "@type": "type.googleapis.com/google.protobuf.Duration",\n'
            '  "value": "1s"\n'
            '}\n'))
    self.CheckParseBack(message, parsed_message)
    field_mask = field_mask_pb2.FieldMask()
    field_mask.paths.append('foo.bar')
    field_mask.paths.append('bar')
    message.Pack(field_mask)
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads(
            '{\n'
            '  "@type": "type.googleapis.com/google.protobuf.FieldMask",\n'
            '  "value": "foo.bar,bar"\n'
            '}\n'))
    self.CheckParseBack(message, parsed_message)
    struct_message = struct_pb2.Struct()
    struct_message['name'] = 'Jim'
    message.Pack(struct_message)
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads(
            '{\n'
            '  "@type": "type.googleapis.com/google.protobuf.Struct",\n'
            '  "value": {"name": "Jim"}\n'
            '}\n'))
    self.CheckParseBack(message, parsed_message)
    # An Any nested inside an Any keeps its own '@type' inside 'value'.
    nested_any = any_pb2.Any()
    int32_value.value = 5678
    nested_any.Pack(int32_value)
    message.Pack(nested_any)
    self.assertEqual(
        json.loads(json_format.MessageToJson(message, True)),
        json.loads(
            '{\n'
            '  "@type": "type.googleapis.com/google.protobuf.Any",\n'
            '  "value": {\n'
            '    "@type": "type.googleapis.com/google.protobuf.Int32Value",\n'
            '    "value": 5678\n'
            '  }\n'
            '}\n'))
    self.CheckParseBack(message, parsed_message)
  def testParseNull(self):
    """A JSON null clears a field back to its default; null is invalid
    inside repeated fields."""
    message = json_format_proto3_pb2.TestMessage()
    parsed_message = json_format_proto3_pb2.TestMessage()
    self.FillAllFields(parsed_message)
    json_format.Parse('{"int32Value": null, '
                      '"int64Value": null, '
                      '"uint32Value": null,'
                      '"uint64Value": null,'
                      '"floatValue": null,'
                      '"doubleValue": null,'
                      '"boolValue": null,'
                      '"stringValue": null,'
                      '"bytesValue": null,'
                      '"messageValue": null,'
                      '"enumValue": null,'
                      '"repeatedInt32Value": null,'
                      '"repeatedInt64Value": null,'
                      '"repeatedUint32Value": null,'
                      '"repeatedUint64Value": null,'
                      '"repeatedFloatValue": null,'
                      '"repeatedDoubleValue": null,'
                      '"repeatedBoolValue": null,'
                      '"repeatedStringValue": null,'
                      '"repeatedBytesValue": null,'
                      '"repeatedMessageValue": null,'
                      '"repeatedEnumValue": null'
                      '}',
                      parsed_message)
    self.assertEqual(message, parsed_message)
    # Null and {} should have different behavior for sub message.
    self.assertFalse(parsed_message.HasField('message_value'))
    json_format.Parse('{"messageValue": {}}', parsed_message)
    self.assertTrue(parsed_message.HasField('message_value'))
    # Null is not allowed to be used as an element in repeated field.
    self.assertRaisesRegexp(
        json_format.ParseError,
        'Failed to parse repeatedInt32Value field: '
        'null is not allowed to be used as an element in a repeated field.',
        json_format.Parse,
        '{"repeatedInt32Value":[1, null]}',
        parsed_message)
    self.CheckError('{"repeatedMessageValue":[null]}',
                    'Failed to parse repeatedMessageValue field: null is not'
                    ' allowed to be used as an element in a repeated field.')
def testNanFloat(self):
message = json_format_proto3_pb2.TestMessage()
message.float_value = float('nan')
text = '{\n "floatValue": "NaN"\n}'
self.assertEqual(json_format.MessageToJson(message), text)
parsed_message = json_format_proto3_pb2.TestMessage()
json_format.Parse(text, parsed_message)
self.assertTrue(math.isnan(parsed_message.float_value))
def testParseEmptyText(self):
self.CheckError('',
r'Failed to load JSON: (Expecting value)|(No JSON).')
  def testParseEnumValue(self):
    """Enums parse from numbers or names; proto3 accepts unknown
    numeric enums while proto2 rejects them."""
    message = json_format_proto3_pb2.TestMessage()
    text = '{"enumValue": 0}'
    json_format.Parse(text, message)
    text = '{"enumValue": 1}'
    json_format.Parse(text, message)
    self.CheckError(
        '{"enumValue": "baz"}',
        'Failed to parse enumValue field: Invalid enum value baz '
        'for enum type proto3.EnumType.')
    # Proto3 accepts numeric unknown enums.
    text = '{"enumValue": 12345}'
    json_format.Parse(text, message)
    # Proto2 does not accept unknown enums.
    message = unittest_pb2.TestAllTypes()
    self.assertRaisesRegexp(
        json_format.ParseError,
        'Failed to parse optionalNestedEnum field: Invalid enum value 12345 '
        'for enum type protobuf_unittest.TestAllTypes.NestedEnum.',
        json_format.Parse, '{"optionalNestedEnum": 12345}', message)
def testParseBadIdentifer(self):
self.CheckError('{int32Value: 1}',
(r'Failed to load JSON: Expecting property name'
r'( enclosed in double quotes)?: line 1'))
self.CheckError('{"unknownName": 1}',
'Message type "proto3.TestMessage" has no field named '
'"unknownName".')
  def testIgnoreUnknownField(self):
    """With ignore_unknown_fields=True, unknown names parse without
    error, including inside an Any payload."""
    text = '{"unknownName": 1}'
    parsed_message = json_format_proto3_pb2.TestMessage()
    json_format.Parse(text, parsed_message, ignore_unknown_fields=True)
    text = ('{\n'
            '  "repeatedValue": [ {\n'
            '    "@type": "type.googleapis.com/proto3.MessageType",\n'
            '    "unknownName": 1\n'
            '  }]\n'
            '}\n')
    parsed_message = json_format_proto3_pb2.TestAny()
    json_format.Parse(text, parsed_message, ignore_unknown_fields=True)
def testDuplicateField(self):
# Duplicate key check is not supported for python2.6
if sys.version_info < (2, 7):
return
self.CheckError('{"int32Value": 1,\n"int32Value":2}',
'Failed to load JSON: duplicate key int32Value.')
  def testInvalidBoolValue(self):
    """bool fields accept only bare true/false, not 1 or quoted strings."""
    self.CheckError('{"boolValue": 1}',
                    'Failed to parse boolValue field: '
                    'Expected true or false without quotes.')
    self.CheckError('{"boolValue": "true"}',
                    'Failed to parse boolValue field: '
                    'Expected true or false without quotes.')
  def testInvalidIntegerValue(self):
    """Non-decimal, fractional, padded, or out-of-range ints are rejected."""
    message = json_format_proto3_pb2.TestMessage()
    # Hex literals are not valid JSON numbers.
    text = '{"int32Value": 0x12345}'
    self.assertRaises(json_format.ParseError,
                      json_format.Parse, text, message)
    self.CheckError('{"int32Value": 1.5}',
                    'Failed to parse int32Value field: '
                    'Couldn\'t parse integer: 1.5.')
    # Leading zeros make the JSON itself malformed.
    self.CheckError('{"int32Value": 012345}',
                    (r'Failed to load JSON: Expecting \'?,\'? delimiter: '
                     r'line 1.'))
    # Surrounding whitespace inside a quoted number is not trimmed.
    self.CheckError('{"int32Value": " 1 "}',
                    'Failed to parse int32Value field: '
                    'Couldn\'t parse integer: " 1 ".')
    self.CheckError('{"int32Value": "1 "}',
                    'Failed to parse int32Value field: '
                    'Couldn\'t parse integer: "1 ".')
    self.CheckError('{"int32Value": 12345678901234567890}',
                    'Failed to parse int32Value field: Value out of range: '
                    '12345678901234567890.')
    self.CheckError('{"uint32Value": -1}',
                    'Failed to parse uint32Value field: '
                    'Value out of range: -1.')
  def testInvalidFloatValue(self):
    """Special float names are case-sensitive: "NaN", not "nan"."""
    self.CheckError('{"floatValue": "nan"}',
                    'Failed to parse floatValue field: Couldn\'t '
                    'parse float "nan", use "NaN" instead.')
  def testInvalidBytesValue(self):
    """bytes fields require correctly padded base64 input."""
    self.CheckError('{"bytesValue": "AQI"}',
                    'Failed to parse bytesValue field: Incorrect padding.')
    self.CheckError('{"bytesValue": "AQI*"}',
                    'Failed to parse bytesValue field: Incorrect padding.')
  def testInvalidRepeated(self):
    """A repeated field must be given as a JSON array, not a scalar."""
    self.CheckError('{"repeatedInt32Value": 12345}',
                    (r'Failed to parse repeatedInt32Value field: repeated field'
                     r' repeatedInt32Value must be in \[\] which is 12345.'))
  def testInvalidMap(self):
    """Map keys must match the key type; map values must be JSON objects."""
    message = json_format_proto3_pb2.TestMap()
    # "null" is not a parseable int32 key.
    text = '{"int32Map": {"null": 2, "2": 3}}'
    self.assertRaisesRegexp(
        json_format.ParseError,
        'Failed to parse int32Map field: invalid literal',
        json_format.Parse, text, message)
    # JSON object keys must be quoted strings.
    text = '{"int32Map": {1: 2, "2": 3}}'
    self.assertRaisesRegexp(
        json_format.ParseError,
        (r'Failed to load JSON: Expecting property name'
         r'( enclosed in double quotes)?: line 1'),
        json_format.Parse, text, message)
    text = '{"boolMap": {"null": 1}}'
    self.assertRaisesRegexp(
        json_format.ParseError,
        'Failed to parse boolMap field: Expected "true" or "false", not null.',
        json_format.Parse, text, message)
    # Duplicate key detection needs python >= 2.7.
    if sys.version_info < (2, 7):
      return
    # "\u0061" unescapes to "a", so these two keys collide.
    text = r'{"stringMap": {"a": 3, "\u0061": 2}}'
    self.assertRaisesRegexp(
        json_format.ParseError,
        'Failed to load JSON: duplicate key a',
        json_format.Parse, text, message)
    text = r'{"stringMap": 0}'
    self.assertRaisesRegexp(
        json_format.ParseError,
        'Failed to parse stringMap field: Map field string_map must be '
        'in a dict which is 0.',
        json_format.Parse, text, message)
  def testInvalidTimestamp(self):
    """Out-of-range dates, excess precision, and bad offsets are rejected."""
    message = json_format_proto3_pb2.TestTimestamp()
    text = '{"value": "10000-01-01T00:00:00.00Z"}'
    self.assertRaisesRegexp(
        json_format.ParseError,
        'time data \'10000-01-01T00:00:00\' does not match'
        ' format \'%Y-%m-%dT%H:%M:%S\'.',
        json_format.Parse, text, message)
    # More than 9 fractional digits cannot be represented as nanos.
    text = '{"value": "1970-01-01T00:00:00.0123456789012Z"}'
    self.assertRaisesRegexp(
        well_known_types.ParseError,
        'nanos 0123456789012 more than 9 fractional digits.',
        json_format.Parse, text, message)
    # Timezone offsets must be of the form +HH:MM.
    text = '{"value": "1972-01-01T01:00:00.01+08"}'
    self.assertRaisesRegexp(
        well_known_types.ParseError,
        (r'Invalid timezone offset value: \+08.'),
        json_format.Parse, text, message)
    # Time smaller than minimum time.
    text = '{"value": "0000-01-01T00:00:00Z"}'
    self.assertRaisesRegexp(
        json_format.ParseError,
        'Failed to parse value field: year (0 )?is out of range.',
        json_format.Parse, text, message)
    # Time bigger than maximum time.
    message.value.seconds = 253402300800
    self.assertRaisesRegexp(
        OverflowError,
        'date value out of range',
        json_format.MessageToJson, message)
  def testInvalidOneof(self):
    """Setting two members of the same oneof in one message is an error."""
    message = json_format_proto3_pb2.TestOneof()
    text = '{"oneofInt32Value": 1, "oneofStringValue": "2"}'
    self.assertRaisesRegexp(
        json_format.ParseError,
        'Message type "proto3.TestOneof"'
        ' should not have multiple "oneof_value" oneof fields.',
        json_format.Parse, text, message)
  def testInvalidListValue(self):
    """A ListValue field must be given as a JSON array."""
    message = json_format_proto3_pb2.TestListValue()
    text = '{"value": 1234}'
    self.assertRaisesRegexp(
        json_format.ParseError,
        r'Failed to parse value field: ListValue must be in \[\] which is 1234',
        json_format.Parse, text, message)
  def testInvalidStruct(self):
    """A Struct field must be given as a JSON object."""
    message = json_format_proto3_pb2.TestStruct()
    text = '{"value": 1234}'
    self.assertRaisesRegexp(
        json_format.ParseError,
        'Failed to parse value field: Struct must be in a dict which is 1234',
        json_format.Parse, text, message)
  def testInvalidAny(self):
    """Any messages need a resolvable @type plus the wrapped value."""
    message = any_pb2.Any()
    # @type alone, without the packed value, fails on lookup of "value".
    text = '{"@type": "type.googleapis.com/google.protobuf.Int32Value"}'
    self.assertRaisesRegexp(
        KeyError,
        'value',
        json_format.Parse, text, message)
    # A value without @type cannot be unpacked.
    text = '{"value": 1234}'
    self.assertRaisesRegexp(
        json_format.ParseError,
        '@type is missing when parsing any message.',
        json_format.Parse, text, message)
    text = '{"@type": "type.googleapis.com/MessageNotExist", "value": 1234}'
    self.assertRaisesRegexp(
        TypeError,
        'Can not find message descriptor by type_url: '
        'type.googleapis.com/MessageNotExist.',
        json_format.Parse, text, message)
    # Only last part is to be used: b/25630112
    text = (r'{"@type": "incorrect.googleapis.com/google.protobuf.Int32Value",'
            r'"value": 1234}')
    json_format.Parse(text, message)
  def testPreservingProtoFieldNames(self):
    """preserving_proto_field_name keeps snake_case; parsing accepts both."""
    message = json_format_proto3_pb2.TestMessage()
    message.int32_value = 12345
    self.assertEqual('{\n "int32Value": 12345\n}',
                     json_format.MessageToJson(message))
    self.assertEqual('{\n "int32_value": 12345\n}',
                     json_format.MessageToJson(message, False, True))
    # When including_default_value_fields is True.
    message = json_format_proto3_pb2.TestTimestamp()
    self.assertEqual('{\n "repeatedValue": []\n}',
                     json_format.MessageToJson(message, True, False))
    self.assertEqual('{\n "repeated_value": []\n}',
                     json_format.MessageToJson(message, True, True))
    # Parsers accept both original proto field names and lowerCamelCase names.
    message = json_format_proto3_pb2.TestMessage()
    json_format.Parse('{"int32Value": 54321}', message)
    self.assertEqual(54321, message.int32_value)
    json_format.Parse('{"int32_value": 12345}', message)
    self.assertEqual(12345, message.int32_value)
  def testIndent(self):
    """indent=0 still emits newlines but no leading spaces."""
    message = json_format_proto3_pb2.TestMessage()
    message.int32_value = 12345
    self.assertEqual('{\n"int32Value": 12345\n}',
                     json_format.MessageToJson(message, indent=0))
  def testParseDict(self):
    """ParseDict fills a message from an already-parsed Python dict."""
    expected = 12345
    js_dict = {'int32Value': expected}
    message = json_format_proto3_pb2.TestMessage()
    json_format.ParseDict(js_dict, message)
    self.assertEqual(expected, message.int32_value)
  def testMessageToDict(self):
    """MessageToDict produces a plain dict with lowerCamelCase keys."""
    message = json_format_proto3_pb2.TestMessage()
    message.int32_value = 12345
    expected = {'int32Value': 12345}
    self.assertEqual(expected,
                     json_format.MessageToDict(message))
  def testJsonName(self):
    """A custom json_name option ("@value") is honored both ways."""
    message = json_format_proto3_pb2.TestCustomJsonName()
    message.value = 12345
    self.assertEqual('{\n "@value": 12345\n}',
                     json_format.MessageToJson(message))
    parsed_message = json_format_proto3_pb2.TestCustomJsonName()
    self.CheckParseBack(message, parsed_message)
  def testSortKeys(self):
    """sort_keys=True must match json.dumps(..., sort_keys=True) output."""
    # Testing sort_keys is not perfectly working, as by random luck we could
    # get the output sorted. We just use a selection of names.
    message = json_format_proto3_pb2.TestMessage(bool_value=True,
                                                 int32_value=1,
                                                 int64_value=3,
                                                 uint32_value=4,
                                                 string_value='bla')
    self.assertEqual(
        json_format.MessageToJson(message, sort_keys=True),
        # We use json.dumps() instead of a hardcoded string due to differences
        # between Python 2 and Python 3.
        json.dumps({'boolValue': True, 'int32Value': 1, 'int64Value': '3',
                    'uint32Value': 4, 'stringValue': 'bla'},
                   indent=2, sort_keys=True))
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
alexdebrie/moto | moto/ec2/responses/virtual_private_gateways.py | 14 | 4556 | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import filters_from_querystring
class VirtualPrivateGateways(BaseResponse):
    """Renders mocked EC2 VPN-gateway API responses.

    Each method reads its parameters from ``self.querystring``, delegates
    to ``self.ec2_backend``, and renders the matching XML template below.
    """

    def attach_vpn_gateway(self):
        """Attach an existing VPN gateway to a VPC."""
        vpn_gateway_id = self.querystring.get('VpnGatewayId')[0]
        vpc_id = self.querystring.get('VpcId')[0]
        attachment = self.ec2_backend.attach_vpn_gateway(
            vpn_gateway_id,
            vpc_id
        )
        template = self.response_template(ATTACH_VPN_GATEWAY_RESPONSE)
        return template.render(attachment=attachment)

    def create_vpn_gateway(self):
        """Create a VPN gateway of the requested type (e.g. ipsec.1)."""
        type = self.querystring.get('Type', None)[0]
        vpn_gateway = self.ec2_backend.create_vpn_gateway(type)
        template = self.response_template(CREATE_VPN_GATEWAY_RESPONSE)
        return template.render(vpn_gateway=vpn_gateway)

    def delete_vpn_gateway(self):
        """Delete a VPN gateway and render the deleted gateway."""
        vpn_gateway_id = self.querystring.get('VpnGatewayId')[0]
        vpn_gateway = self.ec2_backend.delete_vpn_gateway(vpn_gateway_id)
        template = self.response_template(DELETE_VPN_GATEWAY_RESPONSE)
        return template.render(vpn_gateway=vpn_gateway)

    def describe_vpn_gateways(self):
        """List VPN gateways matching the request's filters."""
        filters = filters_from_querystring(self.querystring)
        vpn_gateways = self.ec2_backend.get_all_vpn_gateways(filters)
        template = self.response_template(DESCRIBE_VPN_GATEWAYS_RESPONSE)
        return template.render(vpn_gateways=vpn_gateways)

    def detach_vpn_gateway(self):
        """Detach a VPN gateway from a VPC."""
        vpn_gateway_id = self.querystring.get('VpnGatewayId')[0]
        vpc_id = self.querystring.get('VpcId')[0]
        attachment = self.ec2_backend.detach_vpn_gateway(
            vpn_gateway_id,
            vpc_id
        )
        template = self.response_template(DETACH_VPN_GATEWAY_RESPONSE)
        return template.render(attachment=attachment)
CREATE_VPN_GATEWAY_RESPONSE = """
<CreateVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2014-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpnGateway>
<vpnGatewayId>{{ vpn_gateway.id }}</vpnGatewayId>
<state>available</state>
<type>{{ vpn_gateway.type }}</type>
<availabilityZone>us-east-1a</availabilityZone>
<attachments/>
<tagSet>
{% for tag in vpn_gateway.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</vpnGateway>
</CreateVpnGatewayResponse>"""
# Jinja2 template for the DescribeVpnGateways response (2014-10-01 schema).
# Fixes two template bugs: <type> previously rendered the gateway *id*
# instead of its type, and an empty <tagSet/> was emitted immediately
# before the real <tagSet> element.
DESCRIBE_VPN_GATEWAYS_RESPONSE = """
<DescribeVpnGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2014-10-01/">
  <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
  <vpnGatewaySet>
    {% for vpn_gateway in vpn_gateways %}
      <item>
        <vpnGatewayId>{{ vpn_gateway.id }}</vpnGatewayId>
        <state>available</state>
        <type>{{ vpn_gateway.type }}</type>
        <availabilityZone>us-east-1a</availabilityZone>
        <attachments>
          {% for attachment in vpn_gateway.attachments.values() %}
            <item>
              <vpcId>{{ attachment.vpc_id }}</vpcId>
              <state>{{ attachment.state }}</state>
            </item>
          {% endfor %}
        </attachments>
        <tagSet>
          {% for tag in vpn_gateway.get_tags() %}
            <item>
              <resourceId>{{ tag.resource_id }}</resourceId>
              <resourceType>{{ tag.resource_type }}</resourceType>
              <key>{{ tag.key }}</key>
              <value>{{ tag.value }}</value>
            </item>
          {% endfor %}
        </tagSet>
      </item>
    {% endfor %}
  </vpnGatewaySet>
</DescribeVpnGatewaysResponse>"""
ATTACH_VPN_GATEWAY_RESPONSE = """
<AttachVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2014-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<attachment>
<vpcId>{{ attachment.vpc_id }}</vpcId>
<state>{{ attachment.state }}</state>
</attachment>
</AttachVpnGatewayResponse>"""
DELETE_VPN_GATEWAY_RESPONSE = """
<DeleteVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2014-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteVpnGatewayResponse>
"""
DETACH_VPN_GATEWAY_RESPONSE = """
<DetachVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2014-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DetachVpnGatewayResponse>
"""
| apache-2.0 |
scorpionis/docklet | client/venv/lib/python3.5/site-packages/requests/packages/urllib3/request.py | 714 | 5988 | from __future__ import absolute_import
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # Methods whose fields belong in the URL query string rather than the body.
    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # Fixed: this previously did ``raise NotImplemented(...)`` which fails
        # with ``TypeError: 'NotImplementedType' object is not callable``.
        # ``NotImplemented`` is a comparison singleton; the exception class is
        # ``NotImplementedError``.
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, headers=None,
                           **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': headers}
        extra_kw.update(urlopen_kw)

        if fields:
            url += '?' + urlencode(fields)

        return self.urlopen(method, url, **extra_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimick behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': {}}

        if fields:
            if 'body' in urlopen_kw:
                raise TypeError(
                    "request got values for both 'fields' and 'body', can only specify one.")

            if encode_multipart:
                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
            else:
                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'

            extra_kw['body'] = body
            extra_kw['headers'] = {'Content-Type': content_type}

        # Instance-default headers apply last so per-call headers win on merge.
        extra_kw['headers'].update(headers)
        extra_kw.update(urlopen_kw)

        return self.urlopen(method, url, **extra_kw)
| bsd-3-clause |
ai-se/XTREE | src/tools/axe/table.py | 2 | 3428 | from __future__ import division
from lib import *
from demos import *
from counts import *
from fi import *
import re
import sys
from settings import *
sys.dont_write_bytecode = True
def rows(file,
         sep= The.reader.sep,
         bad= The.reader.bad):
  """Read comma-seperated rows that might be split
  over many lines. Finds strings that can compile
  to nums. Kills comments and white space."""
  # NOTE(review): defaults come from the global `The` settings object and are
  # captured once, at def time — TODO confirm that is intended.
  n,kept = 0,""
  for line in open(file):
    # Strip characters matching the `bad` pattern (comments/whitespace).
    now = re.sub(bad,"",line)
    kept += now
    if kept:
      # A record is complete once the accumulated text no longer ends
      # in the separator; a trailing `sep` means the row continues.
      if not now[-1] == sep:
        yield n, map(atom,kept.split(sep))
        n += 1
        kept = ""
def row(file,skip= The.reader.skip):
  "Leaps over any columns marked 'skip'."
  # The columns to keep are decided once, from the header (first) record.
  todo = None
  for n,line in rows(file):
    todo = todo or [col for col,name in enumerate(line)
                    if not skip in name]
    yield n, [ line[col] for col in todo ]
## Read Headers and Rows
def table(source, rows = True, contents = row):
  """Build a table from *source*: record 0 defines headers, the rest
  are data rows (stored only when *rows* is true)."""
  built = table0(source)
  for recno, cells in contents(source):
    if recno == 0:
      head(cells, built)
    else:
      body(cells, built, rows)
  return built
def centroid(tbl, selections=False):
  """Centroids of every header column; only selected ones if *selections*."""
  out = []
  for header in tbl.headers:
    if not selections or header.selected:
      out.append(header.centroid())
  return out
## Create Table
def table0(source):
  """Return an empty table record: header groups (dependent/independent,
  numeric/symbolic, ...), row storage, and a name->header index."""
  return Thing(
    source = source,
    depen=[], indep=[], nums =[], syms=[],
    more =[], less =[], klass=[], headers=[],
    _rows=[], at ={}, patterns= The.reader.patterns)
def head(cells,t,numc=The.reader.numc):
  """Turn the first record's *cells* into header objects on table *t*."""
  for col,cell in enumerate(cells):
    # Column names containing the numeric marker (numc) become Num headers,
    # everything else becomes Sym.
    this = Num if numc in cell else Sym
    # NOTE(review): this sets `rank` on the *class*, not the instance —
    # TODO confirm that shared state is intended.
    this.rank = 0
    header = this()
    header.col, header.name = col,cell
    t.at[cell] = header
    # File headers route each column into the right group lists
    # (depen/indep/more/less/...) via the configured name patterns.
    for pattern,val in t.patterns.items():
      if re.search(pattern,cell):
        where = val(t)
        where += [header]
  return t
def body(cells,t,keep=True):
  """Feed one data record into *t*'s headers; store it as a Row if *keep*."""
  for n,header in enumerate(t.headers):
    cell = cells[header.col]
    if not cell == The.reader.missing:
      # `header + cell` updates that column's summary statistics
      # (operator overloaded on Num/Sym).
      header + cell
  if keep:
    new = Row(cells)
    t._rows += [new]
class Row(Thing):
  """One table record: raw cells plus scratch fields used by clustering."""
  def __init__(i,cells):
    i.newId()
    i.cells = cells
    i.pos = []        # cluster/projection positions, filled in later
    i.x0,i.y0= 0,0    # 2-D coordinates, filled in later
def discreteTable(f,contents=lambda x: row(x)):
  """Load *f* like table(), then discretize its numeric columns."""
  rows, t = [], table0(f)
  for n,cells in contents(f):
    if n==0 : head(cells,t)
    else    : rows += [cells]
  return discreteNums(t,rows)
def discreteNums(tbl,therows):
  """Replace numeric independent values with their ediv() range labels."""
  for num in tbl.indep:
    if not num in tbl.depen:
      if isinstance(num,Num):
        # Entropy-based discretization, supervised by the class column.
        for cut in ediv(therows,
                        num=lambda x:x[num.col],
                        sym=lambda x:x[tbl.klass[0].col]):
          for row in cut._has:
            row[num.col] = cut.range
  return clone(tbl, discrete=True, rows=therows)
def clone(tbl1,rows=[],discrete=False,keepSelections=False) :
  """Copy *tbl1*'s structure (and optionally *rows*) into a new table.

  NOTE(review): the mutable default ``rows=[]`` is shared across calls;
  harmless only while callers never mutate it — TODO confirm.
  """
  def ok(x):
    # When discretizing, drop the '$' numeric marker from header names
    # (but leave 'skip/' style names alone).
    if x[-1]=="/": return x
    return x.replace("$",'') if discrete else x
  tbl2= head([ok(h.name) for h in tbl1.headers],
             table0('copy of '+tbl1.source))
  if keepSelections:
    for h in tbl1.headers:
      tbl2.headers[h.col].selected = h.selected
  for cells in rows: body(cells, tbl2, True)
  return tbl2
@demo
def tabled(f='data/weather2.csv'):
  """Demo: load a table and pretty-print its independent columns."""
  t=table(f)
  for x in t.indep: rprintln(x)
  rprintln(t)
@demo
def tableCopied(f='data/weather2.csv'):
  """Demo: clone a table and show both tables' numeric summaries."""
  t0=table(f)
  t1=clone(t0)
  rprintln([t0.nums,t1.nums]);
# Run the demo named on the command line.
# NOTE(review): eval() on command-line text executes arbitrary code; only
# acceptable because this is a developer-facing demo script.
if __name__ == '__main__': eval(cmd())
| mit |
tylertian/Openstack | openstack F/horizon/horizon/dashboards/nova/access_and_security/urls.py | 9 | 1330 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls.defaults import *
from .floating_ips import urls as fip_urls
from .keypairs import urls as keypair_urls
from .security_groups import urls as sec_group_urls
from .views import IndexView
# URL routes for the Access & Security panel: the index view plus three
# nested URL groups (keypairs, floating IPs, security groups), each mounted
# under its own reverse-lookup namespace.
urlpatterns = patterns('',
    url(r'^$', IndexView.as_view(), name='index'),
    url(r'keypairs/', include(keypair_urls, namespace='keypairs')),
    url(r'floating_ips/', include(fip_urls, namespace='floating_ips')),
    url(r'security_groups/',
        include(sec_group_urls, namespace='security_groups')),
)
| apache-2.0 |
jelmer/samba | third_party/waf/wafadmin/3rdparty/prefork.py | 32 | 5895 | #! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2015 (ita)
#
# prefer the waf 1.8 version
"""
The full samba build can be faster by ~10%, but there are a few limitations:
* only one build process should be run at a time as the servers would use the same ports
* only one build command is going to be called ("waf build configure build" would not work)
def build(bld):
mod = Utils.load_tool('prefork')
mod.build(bld)
...
(build declarations after)
"""
import os, re, socket, threading, sys, subprocess, time, atexit, traceback
try:
import SocketServer
except ImportError:
import socketserver as SocketServer
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
import cPickle
except ImportError:
import pickle as cPickle
# Wire-protocol constants shared by the server (script mode) and the
# client (waf tool mode) halves of this file.
DEFAULT_PORT = 51200
HEADER_SIZE = 128   # every header is padded to exactly this many bytes
REQ = 'REQ'         # header tag: run a pickled subprocess request
RES = 'RES'         # header tag: a command result follows
BYE = 'BYE'         # header tag: close this connection
def make_header(params):
    """Join *params* with commas and pad to the fixed HEADER_SIZE width.

    On Python 3 the header is encoded to latin-1 bytes so it can be
    written straight to a binary socket file.
    """
    joined = ','.join(params)
    if sys.hexversion > 0x3000000:
        joined = joined.encode('iso8859-1')
    padded = joined.ljust(HEADER_SIZE)
    assert len(padded) == HEADER_SIZE
    return padded
# Headers may only contain the characters make_header() can produce.
re_valid_query = re.compile('^[a-zA-Z0-9_, ]+$')
class req(SocketServer.StreamRequestHandler):
    """Serves one client connection: reads fixed-width command headers
    and executes the pickled subprocess requests they describe."""

    def handle(self):
        # Process commands until the peer disconnects or sends BYE
        # (both surface here as an exception).
        while 1:
            try:
                self.process_command()
            except Exception as e:
                print(e)
                break

    def process_command(self):
        """Read one HEADER_SIZE-byte header and dispatch on its tag."""
        query = self.rfile.read(HEADER_SIZE)
        if not query:
            return
        #print(len(query))
        assert(len(query) == HEADER_SIZE)
        if sys.hexversion > 0x3000000:
            query = query.decode('iso8859-1')
        #print "%r" % query
        if not re_valid_query.match(query):
            raise ValueError('Invalid query %r' % query)
        query = query.strip().split(',')
        if query[0] == REQ:
            self.run_command(query[1:])
        elif query[0] == BYE:
            raise ValueError('Exit')
        else:
            raise ValueError('Invalid query %r' % query)

    def run_command(self, query):
        """Unpickle the request kwargs, run the subprocess, reply with
        a RES header followed by an optional pickled (out, err, exc)."""
        size = int(query[0])
        data = self.rfile.read(size)
        assert(len(data) == size)
        kw = cPickle.loads(data)

        # run command
        ret = out = err = exc = None
        cmd = kw['cmd']
        del kw['cmd']
        #print(cmd)

        try:
            if kw['stdout'] or kw['stderr']:
                p = subprocess.Popen(cmd, **kw)
                (out, err) = p.communicate()
                ret = p.returncode
            else:
                ret = subprocess.Popen(cmd, **kw).wait()
        except Exception as e:
            # Report failures to the client instead of crashing the server.
            ret = -1
            exc = str(e) + traceback.format_exc()

        # write the results
        if out or err or exc:
            data = (out, err, exc)
            data = cPickle.dumps(data, -1)
        else:
            data = ''
        params = [RES, str(ret), str(len(data))]
        self.wfile.write(make_header(params))
        if data:
            self.wfile.write(data)
def create_server(conn, cls):
    """Run a blocking TCP server on *conn* handling requests with `req`."""
    #SocketServer.ThreadingTCPServer.allow_reuse_address = True
    #server = SocketServer.ThreadingTCPServer(conn, req)

    SocketServer.TCPServer.allow_reuse_address = True
    server = SocketServer.TCPServer(conn, req)
    #server.timeout = 6000 # seconds
    server.serve_forever(poll_interval=0.001)
# Script mode: started as a subprocess by the waf-tool half below; serves
# command requests on the port given as argv[1] (or the default).
if __name__ == '__main__':
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
    else:
        port = DEFAULT_PORT
    #conn = (socket.gethostname(), port)
    conn = ("127.0.0.1", port)
    #print("listening - %r %r\n" % conn)
    create_server(conn, req)
# Waf-tool mode: when imported (not run as a script), monkey-patch waf's
# Runner/Utils so each task consumer thread talks to its own prefork server.
else:
    import Runner, Utils

    def init_task_pool(self):
        # lazy creation, and set a common pool for all task consumers
        pool = self.pool = []
        for i in range(self.numjobs):
            consumer = Runner.get_pool()
            pool.append(consumer)
            consumer.idx = i
        self.ready = Queue(0)
        def setq(consumer):
            consumer.ready = self.ready
            # Tag the consumer thread with its index so exec_command can
            # pick the matching server connection.
            try:
                threading.current_thread().idx = consumer.idx
            except Exception as e:
                print(e)
        for x in pool:
            x.ready.put(setq)
        return pool
    Runner.Parallel.init_task_pool = init_task_pool

    PORT = 51200

    def make_server(idx):
        """Spawn this very file as a server subprocess on PORT+idx."""
        port = PORT + idx
        cmd = [sys.executable, os.path.abspath(__file__), str(port)]
        proc = subprocess.Popen(cmd)
        proc.port = port
        return proc

    def make_conn(srv):
        """Open a TCP connection to the server subprocess *srv*."""
        #port = PORT + idx
        port = srv.port
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.connect(('127.0.0.1', port))
        return conn

    # One server process and one connection per consumer thread, indexed
    # by the thread's idx.
    SERVERS = []
    CONNS = []
    def close_all():
        # Best-effort cleanup at interpreter exit.
        while CONNS:
            conn = CONNS.pop()
            try:
                conn.close()
            except:
                pass
        while SERVERS:
            srv = SERVERS.pop()
            try:
                srv.kill()
            except:
                pass
    atexit.register(close_all)

    def put_data(conn, data):
        conn.send(data)

    def read_data(conn, siz):
        """Read exactly *siz* bytes (asserts on short/closed reads)."""
        ret = conn.recv(siz)
        if not ret:
            print("closed connection?")
        assert(len(ret) == siz)
        return ret

    def exec_command(cmd, **kw):
        """Replacement for Utils.exec_command: ship the command to this
        thread's server, wait for the RES reply, replay output into the
        log, and return the exit status."""
        if 'log' in kw:
            log = kw['log']
            kw['stdout'] = kw['stderr'] = subprocess.PIPE
            del(kw['log'])
        else:
            kw['stdout'] = kw['stderr'] = None
        kw['shell'] = isinstance(cmd, str)

        idx = threading.current_thread().idx
        kw['cmd'] = cmd

        data = cPickle.dumps(kw, -1)
        params = [REQ, str(len(data))]
        header = make_header(params)

        conn = CONNS[idx]

        put_data(conn, header)
        put_data(conn, data)

        data = read_data(conn, HEADER_SIZE)
        if sys.hexversion > 0x3000000:
            data = data.decode('iso8859-1')

        lst = data.split(',')
        ret = int(lst[1])
        dlen = int(lst[2])

        out = err = None
        if dlen:
            data = read_data(conn, dlen)
            (out, err, exc) = cPickle.loads(data)
            if exc:
                raise Utils.WafError('Execution failure: %s' % exc)
            if out:
                log.write(out)
            if err:
                log.write(err)

        return ret

    def __init__(self):
        """Replacement TaskConsumer.__init__: start a private server and
        connect to it (retrying briefly) before starting the thread."""
        threading.Thread.__init__(self)

        # identifier of the current thread
        self.idx = len(SERVERS)

        # create a server and wait for the connection
        srv = make_server(self.idx)
        SERVERS.append(srv)

        conn = None
        for x in range(30):
            try:
                conn = make_conn(srv)
                break
            except socket.error:
                time.sleep(0.01)
        if not conn:
            raise ValueError('Could not start the server!')
        CONNS.append(conn)

        self.setDaemon(1)
        self.start()
    Runner.TaskConsumer.__init__ = __init__

    def build(bld):
        # dangerous, there is no other command hopefully
        Utils.exec_command = exec_command
| gpl-3.0 |
chaeplin/p2pool-drk | wstools/TimeoutSocket.py | 293 | 5293 | """Based on code from timeout_socket.py, with some tweaks for compatibility.
These tweaks should really be rolled back into timeout_socket, but it's
not totally clear who is maintaining it at this point. In the meantime,
we'll use a different module name for our tweaked version to avoid any
confusion.
The original timeout_socket is by:
Scott Cotton <scott@chronis.pobox.com>
Lloyd Zusman <ljz@asfast.com>
Phil Mayes <pmayes@olivebr.com>
Piers Lauder <piers@cs.su.oz.au>
Radovan Garabik <garabik@melkor.dnp.fmph.uniba.sk>
"""
ident = "$Id$"
import string, socket, select, errno
WSAEINVAL = getattr(errno, 'WSAEINVAL', 10022)
class TimeoutSocket:
    """A socket imposter that supports timeout limits.

    Wraps a real (non-blocking) socket and uses select() to bound every
    connect/send/recv by ``timeout`` seconds, raising TimeoutError when
    the limit is hit.  Also provides a minimal file-like API (read,
    readline, makefile, ...) so it can stand in for socket.makefile().

    NOTE(review): this is Python 2 code (``except E, e`` syntax, apply()).
    """

    def __init__(self, timeout=20, sock=None):
        self.timeout = float(timeout)
        self.inbuf = ''
        if sock is None:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock = sock
        self.sock.setblocking(0)
        self._rbuf = ''   # read-ahead buffer for the file-like methods
        self._wbuf = ''

    def __getattr__(self, name):
        # Delegate to real socket attributes.
        return getattr(self.sock, name)

    def connect(self, *addr):
        """Connect with a timeout; returns 1 on success."""
        timeout = self.timeout
        sock = self.sock
        try:
            # Non-blocking mode
            sock.setblocking(0)
            apply(sock.connect, addr)
            sock.setblocking(timeout != 0)
            return 1
        except socket.error,why:
            if not timeout:
                raise
            sock.setblocking(1)
            if len(why.args) == 1:
                code = 0
            else:
                code, why = why
            if code not in (
                errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK
                ):
                raise
            # Connection in progress: wait until writable, then retry to
            # learn the final outcome.
            r,w,e = select.select([],[sock],[],timeout)
            if w:
                try:
                    apply(sock.connect, addr)
                    return 1
                except socket.error,why:
                    if len(why.args) == 1:
                        code = 0
                    else:
                        code, why = why
                    # EISCONN (or WSAEINVAL on Windows) means the earlier
                    # connect actually completed.
                    if code in (errno.EISCONN, WSAEINVAL):
                        return 1
                    raise
        raise TimeoutError('socket connect() timeout.')

    def send(self, data, flags=0):
        """Send all of *data* in 8KB slices, each bounded by the timeout."""
        total = len(data)
        next = 0
        while 1:
            r, w, e = select.select([],[self.sock], [], self.timeout)
            if w:
                buff = data[next:next + 8192]
                sent = self.sock.send(buff, flags)
                next = next + sent
                if next == total:
                    return total
                continue
            raise TimeoutError('socket send() timeout.')

    def recv(self, amt, flags=0):
        """Receive up to *amt* bytes, or raise TimeoutError."""
        if select.select([self.sock], [], [], self.timeout)[0]:
            return self.sock.recv(amt, flags)
        raise TimeoutError('socket recv() timeout.')

    buffsize = 4096   # default chunk size for the file-like methods
    handles = 1       # makefile() reference count shared with close()

    def makefile(self, mode="r", buffsize=-1):
        # The object acts as its own "file"; just bump the refcount.
        self.handles = self.handles + 1
        self.mode = mode
        return self

    def close(self):
        # Only really close once every makefile() handle is closed.
        self.handles = self.handles - 1
        if self.handles == 0 and self.sock.fileno() >= 0:
            self.sock.close()

    def read(self, n=-1):
        """Read *n* bytes (or everything until EOF when n < 0)."""
        if not isinstance(n, type(1)):
            n = -1
        if n >= 0:
            k = len(self._rbuf)
            if n <= k:
                # Satisfied entirely from the read-ahead buffer.
                data = self._rbuf[:n]
                self._rbuf = self._rbuf[n:]
                return data
            n = n - k
            L = [self._rbuf]
            self._rbuf = ""
            while n > 0:
                new = self.recv(max(n, self.buffsize))
                if not new: break
                k = len(new)
                if k > n:
                    L.append(new[:n])
                    self._rbuf = new[n:]
                    break
                L.append(new)
                n = n - k
            return "".join(L)
        # Read-to-EOF path: grow the chunk size geometrically.
        k = max(4096, self.buffsize)
        L = [self._rbuf]
        self._rbuf = ""
        while 1:
            new = self.recv(k)
            if not new: break
            L.append(new)
            k = min(k*2, 1024**2)
        return "".join(L)

    def readline(self, limit=-1):
        """Read one line (up to *limit* bytes when limit >= 0)."""
        data = ""
        i = self._rbuf.find('\n')
        while i < 0 and not (0 < limit <= len(self._rbuf)):
            new = self.recv(self.buffsize)
            if not new: break
            i = new.find('\n')
            if i >= 0: i = i + len(self._rbuf)
            self._rbuf = self._rbuf + new
        if i < 0: i = len(self._rbuf)
        else: i = i+1
        if 0 <= limit < len(self._rbuf): i = limit
        data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
        return data

    def readlines(self, sizehint = 0):
        """Read lines until EOF (or until roughly *sizehint* bytes)."""
        total = 0
        list = []
        while 1:
            line = self.readline()
            if not line: break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list

    def writelines(self, list):
        self.send(''.join(list))

    def write(self, data):
        self.send(data)

    def flush(self):
        # Writes are unbuffered; nothing to flush.
        pass
class TimeoutError(Exception):
    """Raised when a TimeoutSocket operation exceeds its time limit."""
    pass
| gpl-3.0 |
happyleavesaoc/home-assistant | tests/components/zwave/test_workaround.py | 32 | 2679 | """Test Z-Wave workarounds."""
from homeassistant.components.zwave import const, workaround
from tests.mock.zwave import MockNode, MockValue
def test_get_device_no_component_mapping():
    """Test that None is returned."""
    value = MockValue(data=0, node=MockNode(manufacturer_id=' '))
    assert workaround.get_device_component_mapping(value) is None
def test_get_device_component_mapping():
    """Test that component is returned."""
    fibaro_node = MockNode(manufacturer_id='010f', product_type='0b00')
    alarm_value = MockValue(
        data=0, node=fibaro_node,
        command_class=const.COMMAND_CLASS_SENSOR_ALARM)
    mapped = workaround.get_device_component_mapping(alarm_value)
    assert mapped == 'binary_sensor'
def test_get_device_component_mapping_mti():
    """Test that component is returned."""
    # GE Fan controller
    fan_value = MockValue(
        data=0,
        node=MockNode(manufacturer_id='0063', product_type='4944',
                      product_id='3034'),
        command_class=const.COMMAND_CLASS_SWITCH_MULTILEVEL)
    assert workaround.get_device_component_mapping(fan_value) == 'fan'

    # GE Dimmer
    dimmer_value = MockValue(
        data=0,
        node=MockNode(manufacturer_id='0063', product_type='4944',
                      product_id='3031'),
        command_class=const.COMMAND_CLASS_SWITCH_MULTILEVEL)
    assert workaround.get_device_component_mapping(dimmer_value) is None
def test_get_device_no_mapping():
    """Test that no device mapping is returned."""
    value = MockValue(data=0, node=MockNode(manufacturer_id=' '))
    assert workaround.get_device_mapping(value) is None
def test_get_device_mapping_mt():
    """A manufacturer/type match resolves to its device mapping."""
    mt_value = MockValue(
        data=0,
        node=MockNode(manufacturer_id='0047', product_type='5a52'))
    assert workaround.get_device_mapping(mt_value) == 'workaround_no_position'
def test_get_device_mapping_mtii():
    """A manufacturer/type/id plus value-index match resolves correctly."""
    mtii_value = MockValue(
        data=0, index=0,
        node=MockNode(manufacturer_id='013c', product_type='0002',
                      product_id='0002'))
    assert workaround.get_device_mapping(mtii_value) == 'trigger_no_off_event'
def test_get_device_mapping_mti_instance():
    """Instance-keyed mappings only apply to the matching instance."""
    instance_node = MockNode(manufacturer_id='013c', product_type='0001',
                             product_id='0005')
    first = MockValue(data=0, node=instance_node, instance=1)
    assert workaround.get_device_mapping(first) == 'refresh_node_on_update'
    second = MockValue(data=0, node=instance_node, instance=2)
    assert workaround.get_device_mapping(second) is None
| apache-2.0 |
jshlbrd/python-drawer | laika-bro-client/laika-bro-client.py | 1 | 4378 | from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
import schedule
import json
import time
import os
import multiprocessing as mp
from argparse import ArgumentParser
from laikaboss.objectmodel import ExternalObject, ExternalVars
from laikaboss.constants import level_minimal
from laikaboss.clientLib import Client
'''
The queue where file paths to-be-scanned are
stored.
'''
# Process-safe shared queue: the watchdog handler enqueues paths and the
# laika_worker subprocesses dequeue them (None acts as a stop sentinel).
file_queue = mp.Queue()
'''
Function that handles inserting file paths
into the queue.
'''
class laika_watcher(FileSystemEventHandler):
    """Watchdog handler that enqueues newly created files for scanning."""

    def on_created(self, event):
        # Directory-creation events carry nothing scannable; skip them.
        if event.is_directory:
            return
        file_queue.put(event.src_path)
'''
Function that takes a Bro supplied filename
and turns it into JSON metadata for Laika BOSS.
'''
def fname_to_json(fname):
    """Parse a Bro-generated filename into Laika BOSS metadata.

    Expects basenames of the form
    ``<log_source>_<file_id>_<src_addr>_<dst_addr>``.

    Returns a plain dict; the previous ``json.loads(json.dumps(...))``
    round-trip was redundant since the dict is already JSON-compatible.
    """
    # os.path.basename also copes with paths that contain no '/', which
    # the old rsplit('/', 1)[1] would crash on with an IndexError.
    parts = os.path.basename(fname).split('_')
    return {
        "log_source": parts[0],
        "file_id": parts[1],
        "src_addr": parts[2],
        "dst_addr": parts[3],
    }
'''
Function that defines the worker routine
for each subprocess.
'''
def laika_worker(broker):
client = Client(broker, async=True)
for fname in iter(file_queue.get, None):
with open(fname, 'rb') as f:
file_buffer = f.read()
externalObject = ExternalObject(buffer=file_buffer,
externalVars=ExternalVars(filename=fname,
source='bro',
extMetaData=fname_to_json(fname)),
level='level_minimal')
client.send(externalObject)
os.remove(fname)
'''
Function that kicks off the workers.
'''
def kick(broker, num_workers, worker_timeout):
    """Spawn a pool of worker processes to drain the pending-file queue."""
    if file_queue.qsize() == 0:
        return
    # One None sentinel per worker so every process can leave its loop.
    for _ in xrange(num_workers):
        file_queue.put(None)
    workers = [mp.Process(target=laika_worker, args=(broker,))
               for _ in xrange(num_workers)]
    for worker in workers:
        worker.start()
    # Bounded join helps reap workers that hang during transfer.
    for worker in workers:
        worker.join(worker_timeout)
if __name__ == '__main__':
    parser = ArgumentParser(description=
        '''
        Prototype client to send files from a Bro sensor to a server running Laika BOSS (laikad). This script monitors a directory for files extracted by Bro and sends them to the Laika server. The laikad service is expected to be in asynchronous mode. This script requires a companion Bro script that extracts files with a specific filename pattern to a directory of the user's choice. Files will be deleted from the Bro sensor after being sent to the Laika server.
        ''')
    parser.add_argument('-a', '--address', action='store', dest='broker', default='tcp://localhost:5558',
                        help='Laika BOSS broker address. (Default: tcp://localhost:5558)')
    parser.add_argument('-f', '--file-path', action='store', dest="fpath", default="",
                        help='Path to the monitored directory. Files in this directory will be deleted. (No default)')
    parser.add_argument('-w', '--workers', action='store', type=int, dest="num_processors",
                        help='Number of worker processes to use during file transfer. (Default: number of cores available on system * 2)')
    parser.add_argument('-st', '--schedule-time', action='store', type=int, dest="schedule_time", default=5,
                        help='Number of minutes for the scheduler to kick off new file transfers. (Default: Every 5 minutes)')
    # Bug fix: the help text previously claimed a 60 second default while
    # the actual default below is 30 seconds.
    parser.add_argument('-wt', '--worker-timeout', action='store', type=int, dest="worker_timeout", default=30,
                        help='Number of seconds for each worker process to timeout if it does not finish file transfer (this assists with reaping zombie processes). (Default: 30 seconds)')
    args = parser.parse_args()
    if args.num_processors:
        num_workers = args.num_processors
    else:
        num_workers = mp.cpu_count() * 2
    schedule.every(args.schedule_time).minutes.do(kick, args.broker, num_workers, args.worker_timeout)
    event_handler = laika_watcher()
    observer = Observer()
    observer.schedule(event_handler, args.fpath, recursive=False)
    observer.start()
    # Bug fix: the old unconditional `while 1` loop never exited, which
    # made observer.join() unreachable and left no clean shutdown path.
    # Ctrl-C now stops the observer thread before joining it.
    try:
        while True:
            schedule.run_pending()
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
| apache-2.0 |
ilyes14/scikit-learn | sklearn/utils/tests/test_class_weight.py | 90 | 12846 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
    # Demo/regression test for compute_class_weight.
    labels = np.asarray([2, 2, 2, 3, 3, 4])
    present = np.unique(labels)
    # Deprecated "auto" preset still works but warns.
    cw = assert_warns(DeprecationWarning,
                      compute_class_weight, "auto", present, labels)
    assert_almost_equal(cw.sum(), present.shape)
    assert_true(cw[0] < cw[1] < cw[2])
    # "balanced" preserves the total effect of the samples.
    cw = compute_class_weight("balanced", present, labels)
    counts = np.bincount(labels)[2:]
    assert_almost_equal(np.dot(cw, counts), labels.shape[0])
    assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
    # Both presets must raise when y lacks some requested class labels.
    wanted = np.arange(4)
    labels = np.asarray([0, 0, 0, 1, 1, 2])
    assert_raises(ValueError, compute_class_weight, "auto", wanted, labels)
    assert_raises(ValueError, compute_class_weight, "balanced", wanted, labels)
def test_compute_class_weight_dict():
    known = np.arange(3)
    labels = np.asarray([0, 0, 1, 2])
    # Explicit user-supplied weights must be passed through untouched.
    cw = compute_class_weight({0: 1.0, 1: 2.0, 2: 3.0}, known, labels)
    assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
    # A weight keyed on a label missing from `classes` raises ValueError.
    assert_raise_message(ValueError, 'Class label 4 not present.',
                         compute_class_weight,
                         {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}, known, labels)
    assert_raise_message(ValueError, 'Class label -1 not present.',
                         compute_class_weight,
                         {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}, known, labels)
def test_compute_class_weight_invariance():
    # Test that results with class_weight="balanced" is invariant wrt
    # class imbalance if the number of samples is identical.
    # The test uses a balanced two class dataset with 100 datapoints.
    # It creates three versions, one where class 1 is duplicated
    # resulting in 150 points of class 1 and 50 of class 0,
    # one where there are 50 points in class 1 and 150 in class 0,
    # and one where there are 100 points of each class (this one is balanced
    # again).
    # With balancing class weights, all three should give the same model.
    X, y = make_blobs(centers=2, random_state=0)
    # create dataset where class 1 is duplicated twice
    X_1 = np.vstack([X] + [X[y == 1]] * 2)
    y_1 = np.hstack([y] + [y[y == 1]] * 2)
    # create dataset where class 0 is duplicated twice
    X_0 = np.vstack([X] + [X[y == 0]] * 2)
    y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
    X_ = np.vstack([X] * 2)
    y_ = np.hstack([y] * 2)
    # results should be identical
    logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
    logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
    logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
    assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
    assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
    # Test compute_class_weight when labels are negative
    # Test with balanced class labels.
    classes = np.array([-2, -1, 0])
    y = np.asarray([-1, -1, 0, 0, -2, -2])
    # The deprecated "auto" preset warns; its weights sum to n_classes.
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([1., 1., 1.]))
    cw = compute_class_weight("balanced", classes, y)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([1., 1., 1.]))
    # Test with unbalanced class labels.
    y = np.asarray([-1, 0, 0, -2, -2, -2])
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      classes, y)
    assert_almost_equal(cw.sum(), classes.shape)
    assert_equal(len(cw), len(classes))
    assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
    cw = compute_class_weight("balanced", classes, y)
    assert_equal(len(cw), len(classes))
    # Shift labels by 2 so bincount can index the negative classes.
    class_counts = np.bincount(y + 2)
    # "balanced" preserves the total effect of the samples.
    assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
    assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
    # compute_class_weight must honor an unordered `classes` array.
    unordered = np.array([1, 0, 3])
    labels = np.asarray([1, 0, 0, 3, 3, 3])
    cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
                      unordered, labels)
    assert_almost_equal(cw.sum(), unordered.shape)
    assert_equal(len(cw), len(unordered))
    assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
    cw = compute_class_weight("balanced", unordered, labels)
    # Index counts by the unordered class array to line them up with cw.
    counts = np.bincount(labels)[unordered]
    assert_almost_equal(np.dot(cw, counts), labels.shape[0])
    assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
    # Test (and demo) compute_sample_weight.
    # Test with balanced classes
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with user-defined weights
    sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
    # Test with column vector of balanced classes
    y = np.asarray([[1], [1], [1], [2], [2], [2]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with unbalanced classes
    y = np.asarray([1, 1, 1, 2, 2, 2, 3])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    # expected_auto / expected_balanced are reused (squared) by the
    # multi-output cases at the bottom of this test.
    expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
    assert_array_almost_equal(sample_weight, expected_auto)
    sample_weight = compute_sample_weight("balanced", y)
    expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
    assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
    # Test with `None` weights
    sample_weight = compute_sample_weight(None, y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
    # Test with multi-output of balanced classes
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with multi-output with user-defined weights
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
    assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
    # Test with multi-output of unbalanced classes
    # (the two outputs carry identical imbalance, hence the expectation
    # is the square of the single-output expectation)
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, expected_auto ** 2)
    sample_weight = compute_sample_weight("balanced", y)
    assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
    # Test compute_sample_weight with subsamples specified.
    # Test with balanced classes and all samples present
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with column vector of balanced classes and all samples present
    y = np.asarray([[1], [1], [1], [2], [2], [2]])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y)
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
    # Test with a subsample (only the first four samples are counted)
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning,
                                 compute_sample_weight, "auto", y, range(4))
    assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
    sample_weight = compute_sample_weight("balanced", y, range(4))
    assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
                                              2. / 3, 2., 2., 2.])
    # Test with a bootstrap subsample (indices may repeat)
    y = np.asarray([1, 1, 1, 2, 2, 2])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, [0, 1, 1, 2, 2, 3])
    expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
    assert_array_almost_equal(sample_weight, expected_auto)
    sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
    expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
    assert_array_almost_equal(sample_weight, expected_balanced)
    # Test with a bootstrap subsample for multi-output
    # (two identical outputs, hence the single-output expectation squared)
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, [0, 1, 1, 2, 2, 3])
    assert_array_almost_equal(sample_weight, expected_auto ** 2)
    sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
    assert_array_almost_equal(sample_weight, expected_balanced ** 2)
    # Test with a missing class: range(6) excludes the last sample, so the
    # class it introduces gets weight 0.
    y = np.asarray([1, 1, 1, 2, 2, 2, 3])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
    # Test with a missing class for multi-output
    y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
    sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
                                 "auto", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
    sample_weight = compute_sample_weight("balanced", y, range(6))
    assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
    # compute_sample_weight must reject invalid presets and weight specs.
    y_single = np.asarray([1, 1, 1, 2, 2, 2])
    y_multi = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
    # Unknown preset string, with and without subsample / multi-output.
    assert_raises(ValueError, compute_sample_weight, "ni", y_single)
    assert_raises(ValueError, compute_sample_weight, "ni", y_single,
                  range(4))
    assert_raises(ValueError, compute_sample_weight, "ni", y_multi)
    assert_raises(ValueError, compute_sample_weight, "ni", y_multi,
                  range(4))
    # Explicit dict weights are incompatible with a subsample.
    assert_raises(ValueError,
                  compute_sample_weight, {1: 2, 2: 1}, y_single, range(4))
    # Multi-output requires a preset or a list of dicts...
    assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_multi)
    # ...and the list must match the number of outputs.
    assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_multi)
| bsd-3-clause |
mach0/QGIS | tests/src/python/test_provider_spatialite.py | 17 | 72889 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSpatialiteProvider
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Vincent Mora'
__date__ = '09/07/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
import qgis # NOQA
import os
import re
import sys
import shutil
import tempfile
from osgeo import ogr
from datetime import datetime
from qgis.core import (QgsProviderRegistry,
QgsDataSourceUri,
QgsVectorLayer,
QgsVectorDataProvider,
QgsPointXY,
QgsFeature,
QgsGeometry,
QgsProject,
QgsFieldConstraints,
QgsVectorLayerUtils,
QgsSettings,
QgsDefaultValue,
QgsFeatureRequest,
QgsRectangle,
QgsVectorLayerExporter,
QgsWkbTypes)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
from providertestbase import ProviderTestCase
from qgis.PyQt.QtCore import QObject, QVariant, QByteArray
from qgis.utils import spatialite_connect
# Pass no_exit=True: for some reason this crashes sometimes on exit on Travis
start_app(True)

# Root directory of the shared QGIS unit-test data.
TEST_DATA_DIR = unitTestDataPath()
def count_opened_filedescriptors(filename_to_test):
    """Count how many file descriptors of this process point at
    *filename_to_test*, matched by basename of the /proc/<pid>/fd link
    target.

    Returns -1 on platforms without /proc support (non-Linux).
    """
    if not sys.platform.startswith('linux'):
        return -1
    open_files_dirname = '/proc/%d/fd' % os.getpid()
    # Hoisted out of the loop: the target basename never changes.
    target = os.path.basename(filename_to_test)
    count = 0
    for filename in os.listdir(open_files_dirname):
        full_filename = open_files_dirname + '/' + filename
        try:
            link = os.readlink(full_filename)
        except OSError:
            # The descriptor may have been closed between listdir() and
            # readlink(); the old exists()-then-readlink() check had the
            # same race but would crash if it lost it.  Skip instead.
            continue
        if os.path.basename(link) == target:
            count += 1
    return count
class TestQgsSpatialiteProvider(unittest.TestCase, ProviderTestCase):
@classmethod
def setUpClass(cls):
    """Run before all tests.

    Opens the shared reference database (provider/spatialite.db) for the
    base ProviderTestCase suite, then builds a scratch SpatiaLite
    database (test.sqlite in the system temp dir) containing the many
    special-case tables used by the individual tests below.
    """
    print(' ### Setup Spatialite Provider Test Class')
    # setup provider for base tests
    cls.vl = QgsVectorLayer(
        'dbname=\'{}/provider/spatialite.db\' table="somedata" (geom) sql='.format(
            TEST_DATA_DIR), 'test',
        'spatialite')
    assert (cls.vl.isValid())
    cls.source = cls.vl.dataProvider()
    cls.vl_poly = QgsVectorLayer(
        'dbname=\'{}/provider/spatialite.db\' table="somepolydata" (geom) sql='.format(
            TEST_DATA_DIR), 'test',
        'spatialite')
    assert (cls.vl_poly.isValid())
    cls.poly_provider = cls.vl_poly.dataProvider()
    # create test db
    cls.dbname = os.path.join(tempfile.gettempdir(), "test.sqlite")
    if os.path.exists(cls.dbname):
        os.remove(cls.dbname)
    con = spatialite_connect(cls.dbname, isolation_level=None)
    cur = con.cursor()
    # All DDL/DML below runs inside a single explicit transaction,
    # committed at the end.
    cur.execute("BEGIN")
    sql = "SELECT InitSpatialMetadata()"
    cur.execute(sql)
    # simple table with primary key
    sql = "CREATE TABLE test_pg (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_pg', 'geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_pg (id, name, geometry) "
    sql += "VALUES (1, 'toto 1', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    # table with Z dimension geometry
    sql = "CREATE TABLE test_z (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_z', 'geometry', 4326, 'POINT', 'XYZ')"
    cur.execute(sql)
    sql = "INSERT INTO test_z (id, name, geometry) "
    sql += "VALUES (1, 'toto 2', GeomFromText('POINT Z (0 0 1)', 4326))"
    cur.execute(sql)
    # table with M value geometry
    sql = "CREATE TABLE test_m (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_m', 'geometry', 4326, 'POINT', 'XYM')"
    cur.execute(sql)
    sql = "INSERT INTO test_m (id, name, geometry) "
    sql += "VALUES (1, 'toto 3', GeomFromText('POINT M (0 0 1)', 4326))"
    cur.execute(sql)
    # table with Z dimension and M value geometry
    sql = "CREATE TABLE test_zm (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_zm', 'geometry', 4326, 'POINT', 'XYZM')"
    cur.execute(sql)
    sql = "INSERT INTO test_zm (id, name, geometry) "
    sql += "VALUES (1, 'toto 1', GeomFromText('POINT ZM (0 0 1 1)', 4326))"
    cur.execute(sql)
    # table with multiple column primary key
    sql = "CREATE TABLE test_pg_mk (id INTEGER NOT NULL, name TEXT NOT NULL, PRIMARY KEY(id,name))"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_pg_mk', 'geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_pg_mk (id, name, geometry) "
    sql += "VALUES (1, 'toto 1', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    # simple table with primary key
    sql = "CREATE TABLE test_q (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_q', 'geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_q (id, name, geometry) "
    sql += "VALUES (11, 'toto 11', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_q (id, name, geometry) "
    sql += "VALUES (21, 'toto 12', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    # simple table with a geometry column named 'Geometry'
    sql = "CREATE TABLE test_n (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_n', 'Geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_n (id, name, geometry) "
    sql += "VALUES (1, 'toto 1', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_n (id, name, geometry) "
    sql += "VALUES (2, 'toto 1', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    # table with different array types, stored as JSON
    sql = "CREATE TABLE test_arrays (id INTEGER NOT NULL PRIMARY KEY, strings JSONSTRINGLIST NOT NULL, ints JSONINTEGERLIST NOT NULL, reals JSONREALLIST NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_arrays', 'Geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_arrays (id, strings, ints, reals, geometry) "
    sql += "VALUES (1, '[\"toto\",\"tutu\"]', '[1,-2,724562]', '[1.0, -232567.22]', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    # table with different array types, stored as JSON
    sql = "CREATE TABLE test_arrays_write (id INTEGER NOT NULL PRIMARY KEY, array JSONARRAY NOT NULL, strings JSONSTRINGLIST NOT NULL, ints JSONINTEGERLIST NOT NULL, reals JSONREALLIST NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_arrays_write', 'Geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    # 2 tables with relations
    sql = "PRAGMA foreign_keys = ON;"
    cur.execute(sql)
    sql = "CREATE TABLE test_relation_a(artistid INTEGER PRIMARY KEY, artistname TEXT);"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_relation_a', 'Geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "CREATE TABLE test_relation_b(trackid INTEGER, trackname TEXT, trackartist INTEGER, FOREIGN KEY(trackartist) REFERENCES test_relation_a(artistid));"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_relation_b', 'Geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    # table to test auto increment
    sql = "CREATE TABLE test_autoincrement(id INTEGER PRIMARY KEY AUTOINCREMENT, num INTEGER);"
    cur.execute(sql)
    sql = "INSERT INTO test_autoincrement (num) VALUES (123);"
    cur.execute(sql)
    # tables with constraints
    sql = "CREATE TABLE test_constraints(id INTEGER PRIMARY KEY, num INTEGER NOT NULL, desc TEXT UNIQUE, desc2 TEXT, num2 INTEGER NOT NULL UNIQUE)"
    cur.execute(sql)
    # simple table with defaults
    sql = "CREATE TABLE test_defaults (id INTEGER NOT NULL PRIMARY KEY, name TEXT DEFAULT 'qgis ''is good', number INTEGER DEFAULT 5, number2 REAL DEFAULT 5.7, no_default REAL)"
    cur.execute(sql)
    # simple table with catgorized points
    sql = "CREATE TABLE test_filter (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_filter', 'geometry', 4326, 'POINT', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (1, 'ext', GeomFromText('POINT(0 0)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (2, 'ext', GeomFromText('POINT(0 3)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (3, 'ext', GeomFromText('POINT(3 3)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (4, 'ext', GeomFromText('POINT(3 0)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (5, 'int', GeomFromText('POINT(1 1)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (6, 'int', GeomFromText('POINT(1 2)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (7, 'int', GeomFromText('POINT(2 2)', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_filter (id, name, geometry) "
    sql += "VALUES (8, 'int', GeomFromText('POINT(2 1)', 4326))"
    cur.execute(sql)
    # bigint table
    sql = "CREATE TABLE test_bigint (id BIGINT, value INT)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_bigint', 'position', 4326, 'LINESTRING', 'XYM')"
    cur.execute(sql)
    sql = """
    INSERT INTO test_bigint (id, value, position) VALUES
    (987654321012345, 1, ST_GeomFromtext('LINESTRINGM(10.416255 55.3786316 1577093516, 10.516255 55.4786316 157709)', 4326) ),
    (987654321012346, 2, ST_GeomFromtext('LINESTRINGM(10.316255 55.3786316 1577093516, 11.216255 56.3786316 157709)', 4326) )"""
    cur.execute(sql)
    # no fields table
    sql = "CREATE TABLE \"test_nofields\"(pkuid integer primary key autoincrement)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_nofields', 'geometry', 4326, 'POINT', 'XY')"
    cur.execute(sql)
    # constraints check table
    sql = "CREATE TABLE \"check_constraint\"(pkuid integer primary key autoincrement, i_will_fail_on_no_name TEXT CHECK (i_will_fail_on_no_name != 'no name'))"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('check_constraint', 'geometry', 4326, 'POINT', 'XY')"
    cur.execute(sql)
    sql = """
    INSERT INTO check_constraint (pkuid, geometry, i_will_fail_on_no_name) VALUES(1, ST_GeomFromtext('POINT(10.416255 55.3786316)', 4326), 'I have a name'),
    (2, ST_GeomFromtext('POINT(9.416255 45.3786316)', 4326), 'I have a name too');
    """
    cur.execute(sql)
    # Unique and not null constraints
    sql = "CREATE TABLE \"unique_not_null_constraints\"(pkuid integer primary key autoincrement, \"unique\" TEXT UNIQUE, \"not_null\" TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('unique_not_null_constraints', 'geometry', 4326, 'POINT', 'XY')"
    cur.execute(sql)
    # blob test table
    sql = "CREATE TABLE blob_table ( id INTEGER NOT NULL PRIMARY KEY, fld1 BLOB )"
    cur.execute(sql)
    sql = """
    INSERT INTO blob_table VALUES
    (1, X'0053514C697465'),
    (2, NULL),
    (3, X'53514C697465')
    """
    cur.execute(sql)
    # Transaction tables
    sql = "CREATE TABLE \"test_transactions1\"(pkuid integer primary key autoincrement)"
    cur.execute(sql)
    sql = "CREATE TABLE \"test_transactions2\"(pkuid integer primary key autoincrement)"
    cur.execute(sql)
    sql = "INSERT INTO \"test_transactions2\" VALUES (NULL)"
    cur.execute(sql)
    # Commit all test data
    cur.execute("COMMIT")
    con.close()
    # Temp directories created by getSource(); removed in tearDownClass.
    cls.dirs_to_cleanup = []
@classmethod
def tearDownClass(cls):
    """Run after all tests"""
    # Release the provider layers explicitly.
    del cls.vl
    del cls.vl_poly
    # for the time being, keep the file to check with qgis
    # if os.path.exists(cls.dbname) :
    #     os.remove(cls.dbname)
    # Remove the per-test temp copies made by getSource().
    for tmp_dir in cls.dirs_to_cleanup:
        shutil.rmtree(tmp_dir, True)
def getSource(self):
    # Copy the reference database into a fresh temp dir so each call
    # hands back an independently editable datasource.
    work_dir = tempfile.mkdtemp()
    self.dirs_to_cleanup.append(work_dir)
    db_copy = os.path.join(work_dir, 'spatialite.db')
    shutil.copy(os.path.join(TEST_DATA_DIR, 'provider', 'spatialite.db'),
                db_copy)
    return QgsVectorLayer(
        'dbname=\'{}\' table="somedata" (geom) sql='.format(db_copy),
        'test', 'spatialite')
def getEditableLayerWithCheckConstraint(self):
    """Returns the layer for attribute change CHECK constraint violation"""
    # Backed by the check_constraint table created in setUpClass, whose
    # CHECK rejects i_will_fail_on_no_name = 'no name'.
    vl = QgsVectorLayer(
        'dbname=\'{}\' table="check_constraint" (geometry) sql='.format(
            self.dbname), 'check_constraint',
        'spatialite')
    return vl
def getEditableLayerWithUniqueNotNullConstraints(self):
    """Returns the layer for UNIQUE and NOT NULL constraints detection"""
    # Backed by the unique_not_null_constraints table created in
    # setUpClass (one UNIQUE column, one NOT NULL column).
    vl = QgsVectorLayer(
        'dbname=\'{}\' table="unique_not_null_constraints" (geometry) sql='.format(
            self.dbname), 'unique_not_null_constraints',
        'spatialite')
    return vl
def treat_time_as_string(self):
    # Tell the shared provider test suite to compare time values as
    # plain strings for this provider.
    return True
def getEditableLayer(self):
    # Editable copies come from getSource(), which clones the reference
    # database into a private temp directory.
    return self.getSource()
def setUp(self):
    """Run before each test."""
    # No per-test fixtures are needed; everything lives in setUpClass.
    pass
def tearDown(self):
    """Run after each test."""
    # Nothing to clean per test; tearDownClass handles the temp dirs.
    pass
def enableCompiler(self):
    """Turn on expression compilation for the shared provider tests."""
    QgsSettings().setValue('/qgis/compileExpressions', True)
    # True indicates this provider supports compiled expressions
    # (presumably checked by the base test class — verify).
    return True
def disableCompiler(self):
    """Turn off expression compilation for the shared provider tests."""
    QgsSettings().setValue('/qgis/compileExpressions', False)
def uncompiledFilters(self):
    # Filter expressions that this provider does not compile to SQL;
    # the shared provider tests handle these separately (base-class
    # contract).
    return set(['cnt = 10 ^ 2',
                '"name" ~ \'[OP]ra[gne]+\'',
                'sqrt(pk) >= 2',
                'radians(cnt) < 2',
                'degrees(pk) <= 200',
                'cos(pk) < 0',
                'sin(pk) < 0',
                'tan(pk) < 0',
                'acos(-1) < pk',
                'asin(1) < pk',
                'atan(3.14) < pk',
                'atan2(3.14, pk) < 1',
                'exp(pk) < 10',
                'ln(pk) <= 1',
                'log(3, pk) <= 1',
                'log10(pk) < 0.5',
                'floor(3.14) <= pk',
                'ceil(3.14) <= pk',
                'pk < pi()',
                'floor(cnt / 66.67) <= 2',
                'ceil(cnt / 66.67) <= 2',
                'pk < pi() / 2',
                'x($geometry) < -70',
                'y($geometry) > 70',
                'xmin($geometry) < -70',
                'ymin($geometry) > 70',
                'xmax($geometry) < -70',
                'ymax($geometry) > 70',
                'disjoint($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))',
                'intersects($geometry,geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'))',
                'contains(geom_from_wkt( \'Polygon ((-72.2 66.1, -65.2 66.1, -65.2 72.0, -72.2 72.0, -72.2 66.1))\'),$geometry)',
                'distance($geometry,geom_from_wkt( \'Point (-70 70)\')) > 7',
                'intersects($geometry,geom_from_gml( \'<gml:Polygon srsName="EPSG:4326"><gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>-72.2,66.1 -65.2,66.1 -65.2,72.0 -72.2,72.0 -72.2,66.1</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs></gml:Polygon>\'))',
                'x($geometry) < -70',
                'y($geometry) > 79',
                'xmin($geometry) < -70',
                'ymin($geometry) < 76',
                'xmax($geometry) > -68',
                'ymax($geometry) > 80',
                'area($geometry) > 10',
                'perimeter($geometry) < 12',
                'relate($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\')) = \'FF2FF1212\'',
                'relate($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\'), \'****F****\')',
                'crosses($geometry,geom_from_wkt( \'Linestring (-68.2 82.1, -66.95 82.1, -66.95 79.05)\'))',
                'overlaps($geometry,geom_from_wkt( \'Polygon ((-68.2 82.1, -66.95 82.1, -66.95 79.05, -68.2 79.05, -68.2 82.1))\'))',
                'within($geometry,geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
                'overlaps(translate($geometry,-1,-1),geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
                'overlaps(buffer($geometry,1),geom_from_wkt( \'Polygon ((-75.1 76.1, -75.1 81.6, -68.8 81.6, -68.8 76.1, -75.1 76.1))\'))',
                'intersects(centroid($geometry),geom_from_wkt( \'Polygon ((-74.4 78.2, -74.4 79.1, -66.8 79.1, -66.8 78.2, -74.4 78.2))\'))',
                'intersects(point_on_surface($geometry),geom_from_wkt( \'Polygon ((-74.4 78.2, -74.4 79.1, -66.8 79.1, -66.8 78.2, -74.4 78.2))\'))',
                '"dt" = to_datetime(\'000www14ww13ww12www4ww5ww2020\',\'zzzwwwsswwmmwwhhwwwdwwMwwyyyy\')',
                '"dt" <= format_date(make_datetime(2020, 5, 4, 12, 13, 14), \'yyyy-MM-dd hh:mm:ss\')',
                '"dt" < format_date(make_date(2020, 5, 4), \'yyyy-MM-dd hh:mm:ss\')',
                '"dt" = format_date(to_datetime(\'000www14ww13ww12www4ww5ww2020\',\'zzzwwwsswwmmwwhhwwwdwwMwwyyyy\'),\'yyyy-MM-dd hh:mm:ss\')',
                'to_time("time") >= make_time(12, 14, 14)',
                'to_time("time") = to_time(\'000www14ww13ww12www\',\'zzzwwwsswwmmwwhhwww\')',
                '"date" = to_date(\'www4ww5ww2020\',\'wwwdwwMwwyyyy\')'
                ])
def partiallyCompiledFilters(self):
    """Return filter expressions the provider can only partially compile."""
    # LIKE patterns (and NOT LIKE) need client-side re-evaluation.
    filters = (
        '"name" NOT LIKE \'Ap%\'',
        'name LIKE \'Apple\'',
        'name LIKE \'aPple\'',
        'name LIKE \'Ap_le\'',
        'name LIKE \'Ap\\_le\'',
    )
    return set(filters)
def test_SplitFeature(self):
    """Split the test_pg polygon twice and expect four resulting features."""
    vl = QgsVectorLayer("dbname=%s table=test_pg (geometry)" %
                        self.dbname, "test_pg", "spatialite")
    self.assertTrue(vl.isValid())
    self.assertTrue(vl.isSpatial())
    vl.startEditing()
    # Two perpendicular cut lines; splitFeatures() returns 0 on success.
    cuts = (
        [QgsPointXY(0.75, -0.5), QgsPointXY(0.75, 1.5)],
        [QgsPointXY(-0.5, 0.25), QgsPointXY(1.5, 0.25)],
    )
    for cut in cuts:
        self.assertEqual(vl.splitFeatures(cut, 0), 0)
    self.assertTrue(vl.commitChanges())
    self.assertEqual(vl.featureCount(), 4)
def test_SplitFeatureWithMultiKey(self):
    """Split features on a table addressed through a multi-column key."""
    vl = QgsVectorLayer("dbname=%s table=test_pg_mk (geometry)" %
                        self.dbname, "test_pg_mk", "spatialite")
    self.assertTrue(vl.isValid())
    self.assertTrue(vl.isSpatial())
    vl.startEditing()
    # splitFeatures() returns 0 on success for each cut line.
    cuts = (
        [QgsPointXY(0.5, -0.5), QgsPointXY(0.5, 1.5)],
        [QgsPointXY(-0.5, 0.5), QgsPointXY(1.5, 0.5)],
    )
    for cut in cuts:
        self.assertEqual(vl.splitFeatures(cut, 0), 0)
    self.assertTrue(vl.commitChanges())
def test_queries(self):
    """Test loading of query-based layers"""
    # a query with a geometry, but no unique id
    # the id will be autoincremented
    l = QgsVectorLayer("dbname=%s table='(select * from test_q)' (geometry)" % self.dbname, "test_pg_query1",
                       "spatialite")
    self.assertTrue(l.isValid())
    # the id() is autoincremented
    sum_id1 = sum(f.id() for f in l.getFeatures())
    # the attribute 'id' works
    sum_id2 = sum(f.attributes()[0] for f in l.getFeatures())
    self.assertEqual(sum_id1, 32)  # 11 + 21
    self.assertEqual(sum_id2, 32)  # 11 + 21
    # and now with an id declared
    l = QgsVectorLayer("dbname=%s table='(select * from test_q)' (geometry) key='id'" % self.dbname,
                       "test_pg_query1", "spatialite")
    self.assertTrue(l.isValid())
    # With key='id' the feature ids come from the declared column, so the
    # sums must match the autoincremented case above.
    sum_id1 = sum(f.id() for f in l.getFeatures())
    sum_id2 = sum(f.attributes()[0] for f in l.getFeatures())
    self.assertEqual(sum_id1, 32)
    self.assertEqual(sum_id2, 32)
    # a query, but no geometry
    l = QgsVectorLayer("dbname=%s table='(select id,name from test_q)' key='id'" % self.dbname, "test_pg_query1",
                       "spatialite")
    self.assertTrue(l.isValid())
    sum_id1 = sum(f.id() for f in l.getFeatures())
    sum_id2 = sum(f.attributes()[0] for f in l.getFeatures())
    self.assertEqual(sum_id1, 32)
    self.assertEqual(sum_id2, 32)
def test_zm(self):
    """Test Z dimension and M value"""
    # (table name, expect Z, expect M); each table's feature 1 carries
    # z and/or m ordinates equal to 1.0.
    cases = (
        ("test_z", True, False),
        ("test_m", False, True),
        ("test_zm", True, True),
    )
    for table, expect_z, expect_m in cases:
        lyr = QgsVectorLayer("dbname=%s table='%s' (geometry) key='id'" %
                             (self.dbname, table), table, "spatialite")
        self.assertTrue(lyr.isValid())
        if expect_z:
            self.assertTrue(QgsWkbTypes.hasZ(lyr.wkbType()))
        if expect_m:
            self.assertTrue(QgsWkbTypes.hasM(lyr.wkbType()))
        geom = lyr.getFeature(1).geometry().constGet()
        if expect_z:
            self.assertEqual(geom.z(), 1.0)
        if expect_m:
            self.assertEqual(geom.m(), 1.0)
def test_case(self):
    """Test case sensitivity issues"""
    lyr = QgsVectorLayer("dbname=%s table='test_n' (geometry) key='id'" %
                         self.dbname, "test_n1", "spatialite")
    self.assertTrue(lyr.isValid())
    provider_fields = lyr.dataProvider().fields()
    self.assertEqual(provider_fields.count(), 2)
    # The geometry column must not leak into the attribute fields.
    field_names = [fld.name() for fld in provider_fields]
    self.assertNotIn('Geometry', field_names)
def test_invalid_iterator(self):
    """ Test invalid iterator """
    # Work on a scratch copy so the corruption never touches self.dbname.
    corrupt_dbname = self.dbname + '.corrupt'
    shutil.copy(self.dbname, corrupt_dbname)
    layer = QgsVectorLayer("dbname=%s table=test_pg (geometry)" %
                           corrupt_dbname, "test_pg", "spatialite")
    # Corrupt the database
    with open(corrupt_dbname, 'wb') as f:
        f.write(b'')
    # Requesting features from the now-corrupt file must not crash.
    layer.getFeatures()
    # Drop the layer so its handle on the file is released before unlink.
    layer = None
    os.unlink(corrupt_dbname)
def testNoDanglingFileDescriptorAfterCloseVariant1(self):
    ''' Test that when closing the provider all file handles are released '''
    temp_dbname = self.dbname + '.no_dangling_test1'
    shutil.copy(self.dbname, temp_dbname)
    vl = QgsVectorLayer("dbname=%s table=test_n (geometry)" %
                        temp_dbname, "test_n", "spatialite")
    self.assertTrue(vl.isValid())
    # The iterator will take one extra connection
    myiter = vl.getFeatures()
    print((vl.featureCount()))
    # Consume one feature but the iterator is still opened
    f = next(myiter)
    self.assertTrue(f.isValid())
    if sys.platform.startswith('linux'):
        self.assertEqual(count_opened_filedescriptors(temp_dbname), 2)
    # does NOT release one file descriptor, because shared with the iterator
    del vl
    # Non portable, but Windows testing is done with trying to unlink
    if sys.platform.startswith('linux'):
        self.assertEqual(count_opened_filedescriptors(temp_dbname), 2)
    # The iterator must remain usable after the layer is deleted.
    f = next(myiter)
    self.assertTrue(f.isValid())
    # Should release one file descriptor
    del myiter
    # Non portable, but Windows testing is done with trying to unlink
    if sys.platform.startswith('linux'):
        self.assertEqual(count_opened_filedescriptors(temp_dbname), 0)
    # Check that deletion works well (can only fail on Windows)
    os.unlink(temp_dbname)
    self.assertFalse(os.path.exists(temp_dbname))
def testNoDanglingFileDescriptorAfterCloseVariant2(self):
    ''' Test that when closing the provider all file handles are released '''
    temp_dbname = self.dbname + '.no_dangling_test2'
    shutil.copy(self.dbname, temp_dbname)
    vl = QgsVectorLayer("dbname=%s table=test_n (geometry)" %
                        temp_dbname, "test_n", "spatialite")
    # (the original asserted isValid() twice; once is enough)
    self.assertTrue(vl.isValid())
    # Consume all features so the iterator closes itself.
    myiter = vl.getFeatures()
    for feature in myiter:
        pass
    # The iterator is closed
    if sys.platform.startswith('linux'):
        self.assertEqual(count_opened_filedescriptors(temp_dbname), 2)
    # Should release one file descriptor
    del vl
    # Non portable, but Windows testing is done with trying to unlink
    if sys.platform.startswith('linux'):
        self.assertEqual(count_opened_filedescriptors(temp_dbname), 0)
    # Check that deletion works well (can only fail on Windows)
    os.unlink(temp_dbname)
    self.assertFalse(os.path.exists(temp_dbname))
def test_arrays(self):
    """Test loading of layers with arrays"""
    l = QgsVectorLayer("dbname=%s table=test_arrays (geometry)" %
                       self.dbname, "test_arrays", "spatialite")
    self.assertTrue(l.isValid())
    features = [f for f in l.getFeatures()]
    self.assertEqual(len(features), 1)
    # String arrays are exposed as a QVariant.StringList field.
    strings_field = l.fields().field('strings')
    self.assertEqual(strings_field.typeName(), 'jsonstringlist')
    self.assertEqual(strings_field.type(), QVariant.StringList)
    self.assertEqual(strings_field.subType(), QVariant.String)
    strings = features[0].attributes()[1]
    self.assertEqual(strings, ['toto', 'tutu'])
    # Integer arrays are exposed as a generic list of long longs.
    ints_field = l.fields().field('ints')
    self.assertEqual(ints_field.typeName(), 'jsonintegerlist')
    self.assertEqual(ints_field.type(), QVariant.List)
    self.assertEqual(ints_field.subType(), QVariant.LongLong)
    ints = features[0].attributes()[2]
    self.assertEqual(ints, [1, -2, 724562])
    # Real arrays are exposed as a generic list of doubles.
    reals_field = l.fields().field('reals')
    self.assertEqual(reals_field.typeName(), 'jsonreallist')
    self.assertEqual(reals_field.type(), QVariant.List)
    self.assertEqual(reals_field.subType(), QVariant.Double)
    reals = features[0].attributes()[3]
    self.assertEqual(reals, [1.0, -232567.22])
    # Round-trip: write a feature whose strings exercise quoting and
    # escaping edge cases, then read it back unchanged.
    new_f = QgsFeature(l.fields())
    new_f['id'] = 2
    new_f['strings'] = ['simple', '"doubleQuote"', "'quote'", 'back\\slash']
    new_f['ints'] = [1, 2, 3, 4]
    new_f['reals'] = [1e67, 1e-56]
    r, fs = l.dataProvider().addFeatures([new_f])
    self.assertTrue(r)
    read_back = l.getFeature(new_f['id'])
    self.assertEqual(read_back['id'], new_f['id'])
    self.assertEqual(read_back['strings'], new_f['strings'])
    self.assertEqual(read_back['ints'], new_f['ints'])
    self.assertEqual(read_back['reals'], new_f['reals'])
def test_arrays_write(self):
    """Test writing of layers with arrays"""
    l = QgsVectorLayer("dbname=%s table=test_arrays_write (geometry)" %
                       self.dbname, "test_arrays", "spatialite")
    self.assertTrue(l.isValid())
    # First feature: homogeneous string array plus quoting edge cases.
    new_f = QgsFeature(l.fields())
    new_f['id'] = 2
    new_f['array'] = ['simple', '"doubleQuote"', "'quote'", 'back\\slash']
    new_f['strings'] = ['simple', '"doubleQuote"', "'quote'", 'back\\slash']
    new_f['ints'] = [1, 2, 3, 4]
    new_f['reals'] = [1e67, 1e-56]
    r, fs = l.dataProvider().addFeatures([new_f])
    self.assertTrue(r)
    read_back = l.getFeature(new_f['id'])
    self.assertEqual(read_back['id'], new_f['id'])
    self.assertEqual(read_back['array'], new_f['array'])
    self.assertEqual(read_back['strings'], new_f['strings'])
    self.assertEqual(read_back['ints'], new_f['ints'])
    self.assertEqual(read_back['reals'], new_f['reals'])
    # Second feature: mixed-type array (int, float, strings) must also
    # round-trip unchanged.
    new_f = QgsFeature(l.fields())
    new_f['id'] = 3
    new_f['array'] = [1, 1.2345, '"doubleQuote"', "'quote'", 'back\\slash']
    new_f['strings'] = ['simple', '"doubleQuote"', "'quote'", 'back\\slash']
    new_f['ints'] = [1, 2, 3, 4]
    new_f['reals'] = [1e67, 1e-56]
    r, fs = l.dataProvider().addFeatures([new_f])
    self.assertTrue(r)
    read_back = l.getFeature(new_f['id'])
    self.assertEqual(read_back['id'], new_f['id'])
    self.assertEqual(read_back['array'], new_f['array'])
    self.assertEqual(read_back['strings'], new_f['strings'])
    self.assertEqual(read_back['ints'], new_f['ints'])
    self.assertEqual(read_back['reals'], new_f['reals'])
    # (removed a dead trailing `read_back = l.getFeature(...)` whose result
    # was never used)
def test_discover_relation(self):
    """Foreign-key relations between two layers are discovered by the
    project relation manager."""
    artist = QgsVectorLayer("dbname=%s table=test_relation_a (geometry)" % self.dbname, "test_relation_a",
                            "spatialite")
    self.assertTrue(artist.isValid())
    track = QgsVectorLayer("dbname=%s table=test_relation_b (geometry)" % self.dbname, "test_relation_b",
                           "spatialite")
    self.assertTrue(track.isValid())
    # discoverRelations() works on layers registered in the project.
    QgsProject.instance().addMapLayer(artist)
    QgsProject.instance().addMapLayer(track)
    try:
        relMgr = QgsProject.instance().relationManager()
        relations = relMgr.discoverRelations([], [artist, track])
        relations = {r.name(): r for r in relations}
        self.assertEqual({'fk_test_relation_b_0'}, set(relations.keys()))
        a2t = relations['fk_test_relation_b_0']
        self.assertTrue(a2t.isValid())
        self.assertEqual('test_relation_b', a2t.referencingLayer().name())
        self.assertEqual('test_relation_a', a2t.referencedLayer().name())
        self.assertEqual([2], a2t.referencingFields())
        self.assertEqual([0], a2t.referencedFields())
    finally:
        # Always detach the layers from the project, even on failure.
        QgsProject.instance().removeMapLayer(track.id())
        QgsProject.instance().removeMapLayer(artist.id())
def testNotNullConstraint(self):
    """NOT NULL constraints are reported by the provider and mirrored on
    the layer's fields with provider origin."""
    vl = QgsVectorLayer("dbname=%s table=test_constraints key='id'" % self.dbname, "test_constraints",
                        "spatialite")
    self.assertTrue(vl.isValid())
    self.assertEqual(len(vl.fields()), 5)
    # test some bad field indexes
    self.assertEqual(vl.dataProvider().fieldConstraints(-1),
                     QgsFieldConstraints.Constraints())
    self.assertEqual(vl.dataProvider().fieldConstraints(
        1001), QgsFieldConstraints.Constraints())
    # Fields 0, 1 and 4 are expected to report NOT NULL; 2 and 3 not.
    self.assertTrue(vl.dataProvider().fieldConstraints(0) &
                    QgsFieldConstraints.ConstraintNotNull)
    self.assertTrue(vl.dataProvider().fieldConstraints(1) &
                    QgsFieldConstraints.ConstraintNotNull)
    self.assertFalse(vl.dataProvider().fieldConstraints(2)
                     & QgsFieldConstraints.ConstraintNotNull)
    self.assertFalse(vl.dataProvider().fieldConstraints(3)
                     & QgsFieldConstraints.ConstraintNotNull)
    self.assertTrue(vl.dataProvider().fieldConstraints(4) &
                    QgsFieldConstraints.ConstraintNotNull)
    # test that constraints have been saved to fields correctly
    fields = vl.fields()
    self.assertTrue(fields.at(0).constraints().constraints()
                    & QgsFieldConstraints.ConstraintNotNull)
    self.assertEqual(fields.at(0).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintOriginProvider)
    self.assertTrue(fields.at(1).constraints().constraints()
                    & QgsFieldConstraints.ConstraintNotNull)
    self.assertEqual(fields.at(1).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintOriginProvider)
    self.assertFalse(fields.at(2).constraints().constraints()
                     & QgsFieldConstraints.ConstraintNotNull)
    self.assertFalse(fields.at(3).constraints().constraints()
                     & QgsFieldConstraints.ConstraintNotNull)
    self.assertTrue(fields.at(4).constraints().constraints()
                    & QgsFieldConstraints.ConstraintNotNull)
    self.assertEqual(fields.at(4).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintOriginProvider)
def testUniqueConstraint(self):
    """UNIQUE constraints are reported by the provider and mirrored on the
    layer's fields with provider origin."""
    vl = QgsVectorLayer("dbname=%s table=test_constraints key='id'" % self.dbname, "test_constraints",
                        "spatialite")
    self.assertTrue(vl.isValid())
    self.assertEqual(len(vl.fields()), 5)
    # test some bad field indexes
    self.assertEqual(vl.dataProvider().fieldConstraints(-1),
                     QgsFieldConstraints.Constraints())
    self.assertEqual(vl.dataProvider().fieldConstraints(
        1001), QgsFieldConstraints.Constraints())
    # Fields 0, 2 and 4 are expected to report UNIQUE; 1 and 3 not.
    self.assertTrue(vl.dataProvider().fieldConstraints(0)
                    & QgsFieldConstraints.ConstraintUnique)
    self.assertFalse(vl.dataProvider().fieldConstraints(1)
                     & QgsFieldConstraints.ConstraintUnique)
    self.assertTrue(vl.dataProvider().fieldConstraints(2)
                    & QgsFieldConstraints.ConstraintUnique)
    self.assertFalse(vl.dataProvider().fieldConstraints(3)
                     & QgsFieldConstraints.ConstraintUnique)
    self.assertTrue(vl.dataProvider().fieldConstraints(4)
                    & QgsFieldConstraints.ConstraintUnique)
    # test that constraints have been saved to fields correctly
    fields = vl.fields()
    self.assertTrue(fields.at(0).constraints().constraints()
                    & QgsFieldConstraints.ConstraintUnique)
    self.assertEqual(fields.at(0).constraints().constraintOrigin(QgsFieldConstraints.ConstraintUnique),
                     QgsFieldConstraints.ConstraintOriginProvider)
    self.assertFalse(fields.at(1).constraints().constraints()
                     & QgsFieldConstraints.ConstraintUnique)
    self.assertTrue(fields.at(2).constraints().constraints()
                    & QgsFieldConstraints.ConstraintUnique)
    self.assertEqual(fields.at(2).constraints().constraintOrigin(QgsFieldConstraints.ConstraintUnique),
                     QgsFieldConstraints.ConstraintOriginProvider)
    self.assertFalse(fields.at(3).constraints().constraints()
                     & QgsFieldConstraints.ConstraintUnique)
    self.assertTrue(fields.at(4).constraints().constraints()
                    & QgsFieldConstraints.ConstraintUnique)
    self.assertEqual(fields.at(4).constraints().constraintOrigin(QgsFieldConstraints.ConstraintUnique),
                     QgsFieldConstraints.ConstraintOriginProvider)
def testSkipConstraintCheck(self):
    """Unique-constraint checks are skipped only for the 'Autogenerate'
    clause value, not for ordinary literals."""
    vl = QgsVectorLayer("dbname=%s table=test_autoincrement" % self.dbname,
                        "test_autoincrement", "spatialite")
    self.assertTrue(vl.isValid())
    provider = vl.dataProvider()
    self.assertTrue(provider.skipConstraintCheck(
        0, QgsFieldConstraints.ConstraintUnique, str("Autogenerate")))
    self.assertFalse(provider.skipConstraintCheck(
        0, QgsFieldConstraints.ConstraintUnique, 123))
# This test would fail. It would require turning on WAL
def XXXXXtestLocking(self):
    """Disabled (XXXXX prefix keeps the runner from collecting it): edits a
    feature while an open iterator holds a read connection, then commits.
    Per the note above, it would require WAL mode to pass."""
    temp_dbname = self.dbname + '.locking'
    shutil.copy(self.dbname, temp_dbname)
    vl = QgsVectorLayer("dbname=%s table=test_n (geometry)" %
                        temp_dbname, "test_n", "spatialite")
    self.assertTrue(vl.isValid())
    self.assertTrue(vl.startEditing())
    self.assertTrue(vl.changeGeometry(
        1, QgsGeometry.fromWkt('POLYGON((0 0,1 0,1 1,0 1,0 0))')))
    # The iterator will take one extra connection
    myiter = vl.getFeatures()
    # Consume one feature but the iterator is still opened
    f = next(myiter)
    self.assertTrue(f.isValid())
    # Committing while the iterator is open is the scenario under test.
    self.assertTrue(vl.commitChanges())
def testDefaultValues(self):
    """Provider-side default literals are reported per field index."""
    lyr = QgsVectorLayer("dbname=%s table='test_defaults' key='id'" %
                         self.dbname, "test_defaults", "spatialite")
    self.assertTrue(lyr.isValid())
    provider = lyr.dataProvider()
    # Text, integer and real defaults declared on the table.
    expected = {1: "qgis 'is good", 2: 5, 3: 5.7}
    for idx, value in expected.items():
        self.assertEqual(provider.defaultValue(idx), value)
    # Field 4 has no usable default.
    self.assertFalse(provider.defaultValue(4))
def testVectorLayerUtilsCreateFeatureWithProviderDefaultLiteral(self):
    """Provider default literals populate new features, but explicit
    attributes and layer-level default expressions take precedence."""
    vl = QgsVectorLayer("dbname=%s table='test_defaults' key='id'" %
                        self.dbname, "test_defaults", "spatialite")
    self.assertEqual(vl.dataProvider().defaultValue(2), 5)
    f = QgsVectorLayerUtils.createFeature(vl)
    self.assertEqual(f.attributes(), [None, "qgis 'is good", 5, 5.7, None])
    # check that provider default literals do not take precedence over passed attribute values
    f = QgsVectorLayerUtils.createFeature(
        vl, attributes={1: 'qgis is great', 0: 3})
    self.assertEqual(f.attributes(), [3, "qgis is great", 5, 5.7, None])
    # test that vector layer default value expression overrides provider default literal
    vl.setDefaultValueDefinition(3, QgsDefaultValue("4*3"))
    f = QgsVectorLayerUtils.createFeature(
        vl, attributes={1: 'qgis is great', 0: 3})
    self.assertEqual(f.attributes(), [3, "qgis is great", 5, 12, None])
def testCreateAttributeIndex(self):
    """Attribute indexes can be created and land on the expected columns.

    Fix over the original: the second PRAGMA loop rebound ``res`` (the list
    being iterated) and reused ``row`` as both the outer loop variable and
    an inner comprehension variable — it happened to work only because the
    ``for`` statement keeps a reference to the original list. Distinct
    names remove the shadowing hazard.
    """
    vl = QgsVectorLayer("dbname=%s table='test_defaults' key='id'" %
                        self.dbname, "test_defaults", "spatialite")
    self.assertTrue(vl.dataProvider().capabilities() &
                    QgsVectorDataProvider.CreateAttributeIndex)
    # Out-of-range field indexes must be rejected.
    self.assertFalse(vl.dataProvider().createAttributeIndex(-1))
    self.assertFalse(vl.dataProvider().createAttributeIndex(100))
    self.assertTrue(vl.dataProvider().createAttributeIndex(1))
    con = spatialite_connect(self.dbname, isolation_level=None)
    cur = con.cursor()
    rs = cur.execute(
        "SELECT * FROM sqlite_master WHERE type='index' AND tbl_name='test_defaults'")
    res = [row for row in rs]
    self.assertEqual(len(res), 1)
    index_name = res[0][1]
    rs = cur.execute("PRAGMA index_info({})".format(index_name))
    info = [row for row in rs]
    self.assertEqual(len(info), 1)
    self.assertEqual(info[0][2], 'name')
    # second index
    self.assertTrue(vl.dataProvider().createAttributeIndex(2))
    rs = cur.execute(
        "SELECT * FROM sqlite_master WHERE type='index' AND tbl_name='test_defaults'")
    res = [row for row in rs]
    self.assertEqual(len(res), 2)
    indexed_columns = []
    for index_row in res:
        index_name = index_row[1]
        info = list(cur.execute("PRAGMA index_info({})".format(index_name)))
        self.assertEqual(len(info), 1)
        indexed_columns.append(info[0][2])
    self.assertEqual(set(indexed_columns), set(['name', 'number']))
    con.close()
def testSubsetStringRegexp(self):
    """Check that the provider supports the REGEXP syntax"""
    uri = "dbname=%s table='test_filter' (geometry) key='id'" % self.dbname
    vl = QgsVectorLayer(uri, 'test', 'spatialite')
    self.assertTrue(vl.isValid())
    # Match names containing three consecutive characters from {t, x, e}.
    vl.setSubsetString('"name" REGEXP \'[txe]{3}\'')
    self.assertEqual(vl.featureCount(), 4)
    del vl
def testSubsetStringExtent_bug17863(self):
    """Check that the extent is correct when applied in the ctor and when
    modified after a subset string is set """
    def _lessdigits(s):
        # Truncate coordinates to 3 decimals so extent comparisons are
        # robust against float formatting noise.
        return re.sub(r'(\d+\.\d{3})\d+', r'\1', s)
    testPath = "dbname=%s table='test_filter' (geometry) key='id'" % self.dbname
    subSetString = '"name" = \'int\''
    subSet = ' sql=%s' % subSetString
    # unfiltered
    vl = QgsVectorLayer(testPath, 'test', 'spatialite')
    self.assertTrue(vl.isValid())
    self.assertEqual(vl.featureCount(), 8)
    unfiltered_extent = _lessdigits(vl.extent().toString())
    self.assertNotEqual('Empty', unfiltered_extent)
    del (vl)
    # filter after construction ...
    subSet_vl2 = QgsVectorLayer(testPath, 'test', 'spatialite')
    self.assertEqual(_lessdigits(
        subSet_vl2.extent().toString()), unfiltered_extent)
    self.assertEqual(subSet_vl2.featureCount(), 8)
    # ... apply filter now!
    subSet_vl2.setSubsetString(subSetString)
    self.assertEqual(subSet_vl2.featureCount(), 4)
    self.assertEqual(subSet_vl2.subsetString(), subSetString)
    # The extent must shrink once the filter is active.
    self.assertNotEqual(_lessdigits(
        subSet_vl2.extent().toString()), unfiltered_extent)
    filtered_extent = _lessdigits(subSet_vl2.extent().toString())
    del (subSet_vl2)
    # filtered in constructor
    subSet_vl = QgsVectorLayer(
        testPath + subSet, 'subset_test', 'spatialite')
    self.assertEqual(subSet_vl.subsetString(), subSetString)
    self.assertTrue(subSet_vl.isValid())
    # This was failing in bug 17863
    self.assertEqual(subSet_vl.featureCount(), 4)
    self.assertEqual(_lessdigits(
        subSet_vl.extent().toString()), filtered_extent)
    self.assertNotEqual(_lessdigits(
        subSet_vl.extent().toString()), unfiltered_extent)
    # Clearing the filter must restore the unfiltered count and extent.
    self.assertTrue(subSet_vl.setSubsetString(''))
    self.assertEqual(subSet_vl.featureCount(), 8)
    self.assertEqual(_lessdigits(
        subSet_vl.extent().toString()), unfiltered_extent)
def testDecodeUri(self):
    """Check that the provider URI decoding returns expected values"""
    filename = '/home/to/path/test.db'
    uri = 'dbname=\'{}\' table="test" (geometry) key=testkey sql=1=1'.format(filename)
    components = QgsProviderRegistry.instance().decodeUri('spatialite', uri)
    # Every URI part must be recovered under its canonical component key.
    expected = {
        'path': filename,
        'layerName': 'test',
        'subset': '1=1',
        'geometryColumn': 'geometry',
        'keyColumn': 'testkey',
    }
    for key, value in expected.items():
        self.assertEqual(components[key], value)
def testEncodeUri(self):
    """Check that the provider URI encoding returns expected values"""
    filename = '/home/to/path/test.db'
    # Component map covering path, layer, subset, geometry and key column.
    parts = {
        'path': filename,
        'layerName': 'test',
        'subset': '1=1',
        'geometryColumn': 'geometry',
        'keyColumn': 'testkey',
    }
    encoded = QgsProviderRegistry.instance().encodeUri('spatialite', parts)
    expected = 'dbname=\'{}\' key=\'testkey\' table="test" (geometry) sql=1=1'.format(filename)
    self.assertEqual(encoded, expected)
def testPKNotInt(self):
    """ Check when primary key is not an integer """
    # create test db
    tmpdir = tempfile.mkdtemp()
    self.dirs_to_cleanup.append(tmpdir)
    dbname = os.path.join(tmpdir, "test_pknotint.sqlite")
    con = spatialite_connect(dbname, isolation_level=None)
    cur = con.cursor()
    # try the two different types of index creation
    for index_creation_method in ['CreateSpatialIndex', 'CreateMbrCache']:
        table_name = "pk_is_string_{}".format(index_creation_method)
        cur.execute("BEGIN")
        sql = "SELECT InitSpatialMetadata()"
        cur.execute(sql)
        # create table with spatial index and pk is string
        sql = "CREATE TABLE {}(id VARCHAR PRIMARY KEY NOT NULL, name TEXT NOT NULL);"
        cur.execute(sql.format(table_name))
        sql = "SELECT AddGeometryColumn('{}', 'geometry', 4326, 'POINT', 'XY')"
        cur.execute(sql.format(table_name))
        sql = "SELECT {}('{}', 'geometry')"
        cur.execute(sql.format(index_creation_method, table_name))
        sql = "insert into {} ('id', 'name', 'geometry') values( 'test_id', 'test_name', st_geomfromtext('POINT(1 2)', 4326))"
        cur.execute(sql.format(table_name))
        cur.execute("COMMIT")
        testPath = "dbname={} table='{}' (geometry)".format(
            dbname, table_name)
        vl = QgsVectorLayer(testPath, 'test', 'spatialite')
        self.assertTrue(vl.isValid())
        self.assertEqual(vl.featureCount(), 1)
        # make spatial request to force the index use
        request = QgsFeatureRequest(QgsRectangle(0, 0, 2, 3))
        feature = next(vl.getFeatures(request), None)
        self.assertTrue(feature)
        # With a string pk, the feature id is expected to be synthesized
        # (first feature -> id 1) rather than taken from the pk column.
        self.assertEqual(feature.id(), 1)
        point = feature.geometry().asPoint()
        self.assertTrue(point)
        self.assertEqual(point.x(), 1)
        self.assertEqual(point.y(), 2)
    con.close()
def testLoadStyle(self):
    """Check that we can store and load a style"""
    # create test db
    dbname = os.path.join(tempfile.gettempdir(), "test_loadstyle.sqlite")
    if os.path.exists(dbname):
        os.remove(dbname)
    con = spatialite_connect(dbname, isolation_level=None)
    cur = con.cursor()
    cur.execute("BEGIN")
    sql = "SELECT InitSpatialMetadata()"
    cur.execute(sql)
    # simple table with primary key
    sql = "CREATE TABLE test_pg (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_pg', 'geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_pg (id, name, geometry) "
    sql += "VALUES (1, 'toto', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    cur.execute("COMMIT")
    con.close()
    testPath = "dbname=%s table='test_pg' (geometry) key='id'" % dbname
    vl = QgsVectorLayer(testPath, 'test', 'spatialite')
    self.assertTrue(vl.isValid())
    self.assertEqual(vl.featureCount(), 1)
    # No style stored yet: loading the default style must fail ...
    err, ok = vl.loadDefaultStyle()
    self.assertFalse(ok)
    # ... and succeed once one has been saved as the default.
    vl.saveStyleToDatabase('my_style', 'My description', True, '')
    err, ok = vl.loadDefaultStyle()
    self.assertTrue(ok)
def _aliased_sql_helper(self, dbname):
    """Open each aliased/nested SELECT below as a query layer against
    *dbname* and assert it loads, is spatial and has features.

    The queries exercise table aliases (with and without AS), nested
    subqueries, quoted identifiers containing spaces, views, self-joins
    and trailing newlines before the closing parenthesis.
    """
    queries = (
        '(SELECT * FROM (SELECT * from \\"some view\\"))',
        '(SELECT * FROM \\"some view\\")',
        '(select sd.* from somedata as sd left join somedata as sd2 on ( sd2.name = sd.name ))',
        '(select sd.* from \\"somedata\\" as sd left join \\"somedata\\" as sd2 on ( sd2.name = sd.name ))',
        "(SELECT * FROM somedata as my_alias1\n)",
        "(SELECT * FROM somedata as my_alias2)",
        "(SELECT * FROM somedata AS my_alias3)",
        '(SELECT * FROM \\"somedata\\" as my_alias4\n)',
        '(SELECT * FROM (SELECT * FROM \\"somedata\\"))',
        '(SELECT my_alias5.* FROM (SELECT * FROM \\"somedata\\") AS my_alias5)',
        '(SELECT my_alias6.* FROM (SELECT * FROM \\"somedata\\" as my_alias\n) AS my_alias6)',
        '(SELECT my_alias7.* FROM (SELECT * FROM \\"somedata\\" as my_alias\n) AS my_alias7\n)',
        '(SELECT my_alias8.* FROM (SELECT * FROM \\"some data\\") AS my_alias8)',
        '(SELECT my_alias9.* FROM (SELECT * FROM \\"some data\\" as my_alias\n) AS my_alias9)',
        '(SELECT my_alias10.* FROM (SELECT * FROM \\"some data\\" as my_alias\n) AS my_alias10\n)',
        '(select sd.* from \\"some data\\" as sd left join \\"some data\\" as sd2 on ( sd2.name = sd.name ))',
        '(SELECT * FROM \\"some data\\" as my_alias11\n)',
        '(SELECT * FROM \\"some data\\" as my_alias12)',
        '(SELECT * FROM \\"some data\\" AS my_alias13)',
        '(SELECT * from \\"some data\\" AS my_alias14\n)',
        '(SELECT * FROM (SELECT * from \\"some data\\"))',
    )
    for sql in queries:
        vl = QgsVectorLayer('dbname=\'{}\' table="{}" (geom) sql='.format(
            dbname, sql), 'test', 'spatialite')
        self.assertTrue(
            vl.isValid(), 'dbname: {} - sql: {}'.format(dbname, sql))
        self.assertTrue(vl.featureCount() > 1)
        self.assertTrue(vl.isSpatial())
def testPkLessQuery(self):
    """Test if features in queries with/without pk can be retrieved by id"""
    # create test db
    dbname = os.path.join(tempfile.gettempdir(), "test_pkless.sqlite")
    if os.path.exists(dbname):
        os.remove(dbname)
    con = spatialite_connect(dbname, isolation_level=None)
    cur = con.cursor()
    cur.execute("BEGIN")
    sql = "SELECT InitSpatialMetadata()"
    cur.execute(sql)
    # simple table with primary key
    sql = "CREATE TABLE \"test pk\" (id INTEGER NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test pk', 'geometry', 4326, 'POINT', 'XY')"
    cur.execute(sql)
    # Rows carry ids 11..20 so that pk-derived and synthesized feature ids
    # differ by a known offset.
    for i in range(11, 21):
        sql = "INSERT INTO \"test pk\" (id, name, geometry) "
        sql += "VALUES ({id}, 'name {id}', GeomFromText('POINT({id} {id})', 4326))".format(id=i)
        cur.execute(sql)
    def _make_table(table_name):
        # simple table without primary key
        sql = "CREATE TABLE \"%s\" (name TEXT NOT NULL)" % table_name
        cur.execute(sql)
        sql = "SELECT AddGeometryColumn('%s', 'geom', 4326, 'POINT', 'XY')" % table_name
        cur.execute(sql)
        for i in range(11, 21):
            sql = "INSERT INTO \"%s\" (name, geom) " % table_name
            sql += "VALUES ('name {id}', GeomFromText('POINT({id} {id})', 4326))".format(id=i)
            cur.execute(sql)
    _make_table("somedata")
    _make_table("some data")
    sql = "CREATE VIEW \"some view\" AS SELECT * FROM \"somedata\""
    cur.execute(sql)
    cur.execute("COMMIT")
    con.close()
    def _check_features(vl, offset):
        # offset is the difference between the row's 'id' value (11..20)
        # and the feature id QGIS assigns: 0 when the pk is used, 10 when
        # ids are synthesized starting at 1.
        self.assertEqual(vl.featureCount(), 10)
        i = 11
        for f in vl.getFeatures():
            self.assertTrue(f.isValid())
            self.assertTrue(vl.getFeature(i - offset).isValid())
            self.assertEqual(vl.getFeature(i - offset)
                             ['name'], 'name {id}'.format(id=i))
            self.assertEqual(f.id(), i - offset)
            self.assertEqual(f['name'], 'name {id}'.format(id=i))
            self.assertEqual(f.geometry().asWkt(),
                             'Point ({id} {id})'.format(id=i))
            i += 1
    vl_pk = QgsVectorLayer('dbname=\'%s\' table="(select * from \\"test pk\\")" (geometry) sql=' % dbname, 'pk',
                           'spatialite')
    self.assertTrue(vl_pk.isValid())
    _check_features(vl_pk, 0)
    vl_no_pk = QgsVectorLayer('dbname=\'%s\' table="(select * from somedata)" (geom) sql=' % dbname, 'pk',
                              'spatialite')
    self.assertTrue(vl_no_pk.isValid())
    _check_features(vl_no_pk, 10)
    vl_no_pk = QgsVectorLayer('dbname=\'%s\' table="(select * from \\"some data\\")" (geom) sql=' % dbname, 'pk',
                              'spatialite')
    self.assertTrue(vl_no_pk.isValid())
    _check_features(vl_no_pk, 10)
    # Test regression when sending queries with aliased tables from DB manager
    self._aliased_sql_helper(dbname)
def testAliasedQueries(self):
    """Test regression when sending queries with aliased tables from DB manager"""
    # Run the shared aliased-SQL checks against the bundled test database.
    self._aliased_sql_helper(TEST_DATA_DIR + '/provider/spatialite.db')
def testTextPks(self):
    """Test regression when retrieving features from tables with text PKs, see #21176"""
    # create test db
    dbname = os.path.join(tempfile.gettempdir(), "test_text_pks.sqlite")
    if os.path.exists(dbname):
        os.remove(dbname)
    con = spatialite_connect(dbname, isolation_level=None)
    cur = con.cursor()
    cur.execute("BEGIN")
    sql = "SELECT InitSpatialMetadata()"
    cur.execute(sql)
    # simple table with primary key
    sql = "CREATE TABLE test_pg (id TEXT NOT NULL PRIMARY KEY, name TEXT NOT NULL)"
    cur.execute(sql)
    sql = "SELECT AddGeometryColumn('test_pg', 'geometry', 4326, 'POLYGON', 'XY')"
    cur.execute(sql)
    sql = "INSERT INTO test_pg (id, name, geometry) "
    sql += "VALUES ('one', 'toto', GeomFromText('POLYGON((0 0,1 0,1 1,0 1,0 0))', 4326))"
    cur.execute(sql)
    sql = "INSERT INTO test_pg (id, name, geometry) "
    sql += "VALUES ('two', 'bogo', GeomFromText('POLYGON((0 0,2 0,2 2,0 2,0 0))', 4326))"
    cur.execute(sql)
    cur.execute("COMMIT")
    con.close()
    def _test_db(testPath):
        # Features from a text-pk table must be retrievable both by
        # iteration and by their (synthesized) feature id / fid filter.
        vl = QgsVectorLayer(testPath, 'test', 'spatialite')
        self.assertTrue(vl.isValid())
        f = next(vl.getFeatures())
        self.assertTrue(f.isValid())
        fid = f.id()
        self.assertTrue(fid > 0)
        self.assertTrue(vl.getFeature(fid).isValid())
        f2 = next(vl.getFeatures(QgsFeatureRequest().setFilterFid(fid)))
        self.assertTrue(f2.isValid())
        self.assertEqual(f2.id(), f.id())
        self.assertEqual(f2.geometry().asWkt(), f.geometry().asWkt())
        for f in vl.getFeatures():
            self.assertTrue(f.isValid())
            self.assertTrue(vl.getFeature(f.id()).isValid())
            self.assertEqual(vl.getFeature(f.id()).id(), f.id())
    # Exercise all four URI spellings: with/without geometry, with/without
    # an explicit key.
    testPath = "dbname=%s table='test_pg' (geometry) key='id'" % dbname
    _test_db(testPath)
    testPath = "dbname=%s table='test_pg' (geometry)" % dbname
    _test_db(testPath)
    testPath = "dbname=%s table='test_pg' key='id'" % dbname
    _test_db(testPath)
    testPath = "dbname=%s table='test_pg'" % dbname
    _test_db(testPath)
def testGeometryTypes(self):
    """Test creating db with various geometry types"""
    # create test db
    dbname = os.path.join(tempfile.gettempdir(),
                          "testGeometryTypes.sqlite")
    if os.path.exists(dbname):
        os.remove(dbname)
    con = spatialite_connect(dbname, isolation_level=None)
    cur = con.cursor()
    cur.execute("BEGIN")
    sql = "SELECT InitSpatialMetadata()"
    cur.execute(sql)
    cur.execute("COMMIT")
    con.close()
    # (memory-layer type string, sample WKT, expected wkbType after export)
    # Note the 25D variants are expected to come back as their Z
    # equivalents.
    tests = [('Point', 'Point (0 0)', QgsWkbTypes.Point),
             ('PointZ', 'PointZ (0 0 10)', QgsWkbTypes.PointZ),
             ('Point25D', 'PointZ (0 0 10)', QgsWkbTypes.PointZ),
             ('MultiPoint', 'MultiPoint (0 0, 0 1)', QgsWkbTypes.MultiPoint),
             ('MultiPointZ', 'MultiPointZ ((0 0 10, 0 1 10))',
              QgsWkbTypes.MultiPointZ),
             ('MultiPoint25D', 'MultiPointZ ((0 0 10, 0 1 10))',
              QgsWkbTypes.MultiPointZ),
             ('LineString', 'LineString (0 0, 0 1)', QgsWkbTypes.LineString),
             ('LineStringZ', 'LineStringZ (0 0 10, 0 1 10)',
              QgsWkbTypes.LineStringZ),
             ('LineString25D', 'LineStringZ (0 0 10, 0 1 10)',
              QgsWkbTypes.LineStringZ),
             ('MultiLineString', 'MultiLineString (0 0, 0 1)',
              QgsWkbTypes.MultiLineString),
             ('MultiLineStringZ', 'MultiLineStringZ ((0 0 10, 0 1 10))',
              QgsWkbTypes.MultiLineStringZ),
             ('MultiLineString25D', 'MultiLineStringZ ((0 0 10, 0 1 10))',
              QgsWkbTypes.MultiLineStringZ),
             ('Polygon', 'Polygon ((0 0, 0 1, 1 1, 1 0, 0 0))', QgsWkbTypes.Polygon),
             ('PolygonZ', 'PolygonZ ((0 0 10, 0 1 10, 1 1 10, 1 0 10, 0 0 10))',
              QgsWkbTypes.PolygonZ),
             ('Polygon25D', 'PolygonZ ((0 0 10, 0 1 10, 1 1 10, 1 0 10, 0 0 10))',
              QgsWkbTypes.PolygonZ),
             ('MultiPolygon', 'MultiPolygon (((0 0, 0 1, 1 1, 1 0, 0 0)))',
              QgsWkbTypes.MultiPolygon),
             ('MultiPolygonZ', 'MultiPolygonZ (((0 0 10, 0 1 10, 1 1 10, 1 0 10, 0 0 10)))',
              QgsWkbTypes.MultiPolygonZ),
             ('MultiPolygon25D', 'MultiPolygonZ (((0 0 10, 0 1 10, 1 1 10, 1 0 10, 0 0 10)))',
              QgsWkbTypes.MultiPolygonZ)
             ]
    for typeStr, wkt, qgisType in tests:
        # Build a one-feature memory layer of the requested type ...
        ml = QgsVectorLayer(
            (typeStr + '?crs=epsg:4326&field=id:int'),
            typeStr,
            'memory')
        provider = ml.dataProvider()
        ft = QgsFeature()
        ft.setGeometry(QgsGeometry.fromWkt(wkt))
        res, features = provider.addFeatures([ft])
        layer = typeStr
        uri = "dbname=%s table='%s' (geometry)" % (dbname, layer)
        # ... export it into the SpatiaLite db ...
        write_result, error_message = QgsVectorLayerExporter.exportLayer(ml,
                                                                         uri,
                                                                         'spatialite',
                                                                         ml.crs(),
                                                                         False,
                                                                         {},
                                                                         )
        self.assertEqual(
            write_result, QgsVectorLayerExporter.NoError, error_message)
        # ... and verify the stored layer reports the expected wkb type.
        vl = QgsVectorLayer(uri, typeStr, 'spatialite')
        self.assertTrue(vl.isValid())
        self.assertEqual(vl.wkbType(), qgisType)
def testBigint(self):
"""Test unique values bigint, see GH #33585"""
l = QgsVectorLayer("dbname=%s table='test_bigint' (position) key='id'" % self.dbname, "test_bigint",
"spatialite")
self.assertTrue(l.isValid())
self.assertEqual(l.uniqueValues(1), {1, 2})
self.assertEqual(l.uniqueValues(0), {987654321012345, 987654321012346})
    def testSpatialiteDefaultValues(self):
        """Test whether in spatialite table with default values like CURRENT_TIMESTAMP or
        (datetime('now','localtime')) they are respected. See GH #33383"""
        # Create the test table in a throw-away sqlite file.
        dbname = os.path.join(tempfile.gettempdir(),
                              "test_default_values.sqlite")
        if os.path.exists(dbname):
            os.remove(dbname)
        con = spatialite_connect(dbname, isolation_level=None)
        cur = con.cursor()
        cur.execute("BEGIN")
        sql = "SELECT InitSpatialMetadata()"
        cur.execute(sql)
        # simple table with primary key; columns exercise every kind of
        # DEFAULT: expression, keyword, numeric literal, text literal.
        sql = """
        CREATE TABLE test_table_default_values (
            `id` integer primary key autoincrement,
            comment text,
            created_at_01 text DEFAULT (datetime('now','localtime')),
            created_at_02 text DEFAULT CURRENT_TIMESTAMP,
            anumber INTEGER DEFAULT 123,
            atext TEXT default 'My default'
        )
        """
        cur.execute(sql)
        cur.execute("COMMIT")
        con.close()
        vl = QgsVectorLayer("dbname='%s' table='test_table_default_values'" % dbname, 'test_table_default_values',
                            'spatialite')
        self.assertTrue(vl.isValid())
        # Save it for the test: timestamps produced below must start with
        # today's date.
        now = datetime.now()
        # Test default values
        dp = vl.dataProvider()
        # FIXME: should it be None?
        self.assertTrue(dp.defaultValue(0).isNull())
        self.assertIsNone(dp.defaultValue(1))
        # FIXME: This fails because there is no backend-side evaluation in this provider
        # self.assertTrue(dp.defaultValue(2).startswith(now.strftime('%Y-%m-%d')))
        self.assertTrue(dp.defaultValue(
            3).startswith(now.strftime('%Y-%m-%d')))
        self.assertEqual(dp.defaultValue(4), 123)
        self.assertEqual(dp.defaultValue(5), 'My default')
        # Default *clauses*: only column 0 (autoincrement pk) and column 2
        # (expression default, not evaluated client-side) report one.
        self.assertEqual(dp.defaultValueClause(0), 'Autogenerate')
        self.assertEqual(dp.defaultValueClause(1), '')
        self.assertEqual(dp.defaultValueClause(
            2), "datetime('now','localtime')")
        self.assertEqual(dp.defaultValueClause(3), "CURRENT_TIMESTAMP")
        self.assertEqual(dp.defaultValueClause(4), '')
        self.assertEqual(dp.defaultValueClause(5), '')
        # Build a feature using the provider defaults where available.
        feature = QgsFeature(vl.fields())
        for idx in range(vl.fields().count()):
            default = vl.dataProvider().defaultValue(idx)
            if not default:
                feature.setAttribute(idx, 'A comment')
            else:
                feature.setAttribute(idx, default)
        self.assertTrue(vl.dataProvider().addFeature(feature))
        del (vl)
        # Verify: reopen the layer and check the stored attributes.
        vl2 = QgsVectorLayer("dbname='%s' table='test_table_default_values'" % dbname, 'test_table_default_values',
                            'spatialite')
        self.assertTrue(vl2.isValid())
        feature = next(vl2.getFeatures())
        self.assertEqual(feature.attribute(1), 'A comment')
        self.assertTrue(feature.attribute(
            2).startswith(now.strftime('%Y-%m-%d')))
        self.assertTrue(feature.attribute(
            3).startswith(now.strftime('%Y-%m-%d')))
        self.assertEqual(feature.attribute(4), 123)
        self.assertEqual(feature.attribute(5), 'My default')
    def testSpatialiteAspatialMultipleAdd(self):
        """Add multiple features in aspatial table. See GH #34379"""
        # Create the test table in a throw-away sqlite file.
        dbname = os.path.join(tempfile.gettempdir(),
                              "test_aspatial_multiple_edits.sqlite")
        if os.path.exists(dbname):
            os.remove(dbname)
        con = spatialite_connect(dbname, isolation_level=None)
        cur = con.cursor()
        cur.execute("BEGIN")
        sql = "SELECT InitSpatialMetadata()"
        cur.execute(sql)
        # simple table with primary key (no geometry column: aspatial)
        sql = """
        CREATE TABLE "test_aspatial_multiple_edits"(pkuid integer primary key autoincrement,"id" integer,"note" text)
        """
        cur.execute(sql)
        cur.execute("COMMIT")
        con.close()
        vl = QgsVectorLayer("dbname='%s' table='test_aspatial_multiple_edits'" % dbname, 'test_aspatial_multiple_edits',
                            'spatialite')
        self.assertTrue(vl.isValid())
        self.assertTrue(vl.startEditing())
        # Two features added in the same edit session (the regression was
        # that the second add failed).
        f1 = QgsFeature(vl.fields())
        f1.setAttribute('note', 'a note')
        f1.setAttribute('id', 123)
        f2 = QgsFeature(vl.fields())
        f2.setAttribute('note', 'another note')
        f2.setAttribute('id', 456)
        self.assertTrue(vl.addFeatures([f1, f2]))
        self.assertTrue(vl.commitChanges())
        # Verify: pkuid autoincrements from 1, attributes stored in order.
        self.assertEqual(vl.getFeature(1).attributes(), [1, 123, 'a note'])
        self.assertEqual(vl.getFeature(2).attributes(),
                         [2, 456, 'another note'])
def testAddFeatureNoFields(self):
"""Test regression #34696"""
vl = QgsVectorLayer("dbname=%s table='test_nofields' (geometry)" %
self.dbname, "test_nofields", "spatialite")
self.assertTrue(vl.isValid())
self.assertTrue(vl.startEditing())
f = QgsFeature(vl.fields())
g = QgsGeometry.fromWkt('point(9 45)')
f.setGeometry(g)
self.assertTrue(vl.addFeatures([f]))
self.assertTrue(vl.commitChanges())
vl = QgsVectorLayer("dbname=%s table='test_nofields' (geometry)" %
self.dbname, "test_nofields", "spatialite")
self.assertEqual(vl.featureCount(), 1)
self.assertEqual(vl.getFeature(
1).geometry().asWkt().upper(), 'POINT (9 45)')
    def testBLOBType(self):
        """Test binary field"""
        vl = QgsVectorLayer('dbname=%s table="blob_table" sql=' % self.dbname, "testBLOBType", "spatialite")
        self.assertTrue(vl.isValid())
        # BLOB columns must be exposed as QByteArray fields.
        fields = vl.dataProvider().fields()
        self.assertEqual(fields.at(fields.indexFromName('fld1')).type(), QVariant.ByteArray)
        values = {feat['id']: feat['fld1'] for feat in vl.getFeatures()}
        # Row 1 contains an embedded NUL byte, row 2 is empty.
        expected = {
            1: QByteArray(b'\x00SQLite'),
            2: QByteArray(),
            3: QByteArray(b'SQLite')
        }
        self.assertEqual(values, expected)
        # change attribute value of feature 1, field index 1
        self.assertTrue(vl.dataProvider().changeAttributeValues(
            {1: {1: QByteArray(b'bbbvx')}}))
        values = {feat['id']: feat['fld1'] for feat in vl.getFeatures()}
        expected = {
            1: QByteArray(b'bbbvx'),
            2: QByteArray(),
            3: QByteArray(b'SQLite')
        }
        self.assertEqual(values, expected)
        # add feature with a binary attribute
        f = QgsFeature()
        f.setAttributes([4, QByteArray(b'cccc')])
        self.assertTrue(vl.dataProvider().addFeature(f))
        values = {feat['id']: feat['fld1'] for feat in vl.getFeatures()}
        expected = {
            1: QByteArray(b'bbbvx'),
            2: QByteArray(),
            3: QByteArray(b'SQLite'),
            4: QByteArray(b'cccc')
        }
        self.assertEqual(values, expected)
    def testTransaction(self):
        """Test spatialite transactions"""
        # Build a scratch spatialite DB with two point layers via OGR:
        # lyr1 has one feature, lyr2 has two.
        tmpfile = tempfile.mktemp('.db')
        ds = ogr.GetDriverByName('SQLite').CreateDataSource(
            tmpfile, options=['SPATIALITE=YES'])
        lyr = ds.CreateLayer('lyr1', geom_type=ogr.wkbPoint)
        f = ogr.Feature(lyr.GetLayerDefn())
        f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 1)'))
        lyr.CreateFeature(f)
        lyr = ds.CreateLayer('lyr2', geom_type=ogr.wkbPoint)
        f = ogr.Feature(lyr.GetLayerDefn())
        f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(2 3)'))
        lyr.CreateFeature(f)
        f = ogr.Feature(lyr.GetLayerDefn())
        f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(4 5)'))
        lyr.CreateFeature(f)
        ds = None
        uri1 = QgsDataSourceUri()
        uri1.setDatabase(tmpfile)
        uri1.setTable('lyr1')
        uri2 = QgsDataSourceUri()
        uri2.setDatabase(tmpfile)
        uri2.setTable('lyr2')
        vl1 = QgsVectorLayer(uri1.uri(), 'test', 'spatialite')
        self.assertTrue(vl1.isValid())
        vl2 = QgsVectorLayer(uri2.uri(), 'test', 'spatialite')
        self.assertTrue(vl2.isValid())
        # prepare a project with transactions enabled
        p = QgsProject()
        p.setAutoTransaction(True)
        p.addMapLayers([vl1, vl2])
        self.assertTrue(vl1.startEditing())
        self.assertIsNotNone(vl1.dataProvider().transaction())
        self.assertTrue(vl1.deleteFeature(1))
        # An iterator opened on the layer should see the feature deleted
        self.assertEqual(
            len([f for f in vl1.getFeatures(QgsFeatureRequest())]), 0)
        # But not if opened from another connection
        vl1_external = QgsVectorLayer(uri1.uri(), 'test', 'spatialite')
        self.assertTrue(vl1_external.isValid())
        self.assertEqual(
            len([f for f in vl1_external.getFeatures(QgsFeatureRequest())]), 1)
        del vl1_external
        self.assertTrue(vl1.commitChanges())
        # Should still get zero features on vl1
        self.assertEqual(
            len([f for f in vl1.getFeatures(QgsFeatureRequest())]), 0)
        self.assertEqual(
            len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 2)
        # Test undo/redo: delete both features of lyr2, then undo twice,
        # then redo once, leaving exactly one feature.
        self.assertTrue(vl2.startEditing())
        self.assertIsNotNone(vl2.dataProvider().transaction())
        self.assertTrue(vl2.editBuffer().deleteFeature(1))
        self.assertEqual(
            len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 1)
        self.assertTrue(vl2.editBuffer().deleteFeature(2))
        self.assertEqual(
            len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 0)
        vl2.undoStack().undo()
        self.assertEqual(
            len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 1)
        vl2.undoStack().undo()
        self.assertEqual(
            len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 2)
        vl2.undoStack().redo()
        self.assertEqual(
            len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 1)
        self.assertTrue(vl2.commitChanges())
        self.assertEqual(
            len([f for f in vl2.getFeatures(QgsFeatureRequest())]), 1)
        del vl1
        del vl2
        # A fresh connection must see the committed state (one feature).
        vl2_external = QgsVectorLayer(uri2.uri(), 'test', 'spatialite')
        self.assertTrue(vl2_external.isValid())
        self.assertEqual(
            len([f for f in vl2_external.getFeatures(QgsFeatureRequest())]), 1)
        del vl2_external
    def testTransactions(self):
        """Test autogenerate"""
        vl = QgsVectorLayer("dbname=%s table=test_transactions1 ()" %
                            self.dbname, "test_transactions1", "spatialite")
        self.assertTrue(vl.isValid())
        vl2 = QgsVectorLayer("dbname=%s table=test_transactions2 ()" %
                             self.dbname, "test_transactions2", "spatialite")
        # NOTE(review): this re-checks vl, duplicating the assert above —
        # it was probably intended to be the vl2 check that follows.
        self.assertTrue(vl.isValid())
        self.assertTrue(vl2.isValid())
        self.assertEqual(vl.featureCount(), 0)
        self.assertEqual(vl2.featureCount(), 1)
        # With auto-transactions + default value evaluation enabled, the
        # provider must report evaluated pk defaults instead of clauses.
        project = QgsProject()
        project.setAutoTransaction(True)
        project.addMapLayers([vl, vl2])
        project.setEvaluateDefaultValues(True)
        self.assertTrue(vl.startEditing())
        self.assertEqual(vl2.dataProvider().defaultValueClause(0), '')
        self.assertEqual(vl2.dataProvider().defaultValue(0), 2)
        self.assertEqual(vl.dataProvider().defaultValueClause(0), '')
        self.assertEqual(vl.dataProvider().defaultValue(0), 1)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
Yen-Chung-En/2015cdb_W12 | static/Brython3.1.1-20150328-091302/Lib/jqueryui/__init__.py | 603 | 3671 | """Wrapper around the jQuery UI library
Exposes a single object, jq, to manipulate the widgets designed in the library
This object supports :
- subscription : jq[elt_id] returns an object matching the element with the
specified id
- a method get(**kw). The only keyword currently supported is "selector". The
method returns a list of instances of the class Element, each instance wraps
the elements matching the CSS selector passed
jq(selector="button") : returns instances of Element for all button tags
The value can be a list or tuple of CSS selector strings :
jq(selector=("input[type=submit]","a")) : instances of Element for all
"input" tags with attribute "type" set to "submit" + "a" tags (anchors)
Instances of Element have the same interface as the selections made by the
jQuery function $, with the additional methods provided by jQuery UI. For
instance, to turn an element into a dialog :
jq[elt_id].dialog()
When jQuery UI methods expect a Javascript object, they can be passed as
key/value pairs :
jq['tags'].autocomplete(source=availableTags)
"""
from browser import html, document, window
import javascript
# Directory holding this module; the CSS/JS assets ship alongside it.
_path = __file__[:__file__.rfind('/')]+'/'
document <= html.LINK(rel="stylesheet",
    href=_path+'css/smoothness/jquery-ui.css')
# The scripts must be loaded in blocking mode, by using the function
# load(script_url[, names]) in module javascript
# If we just add them to the document with script tags, eg :
#
# document <= html.SCRIPT(script_url)
# _jqui = window.jQuery.noConflict(True)
#
# the name "jQuery" is not in the Javascript namespace until the script is
# fully loaded in the page, so "window.jQuery" raises an exception
# Load jQuery and put name 'jQuery' in the global Javascript namespace
javascript.load(_path+'jquery-1.11.2.js', ['jQuery'])
javascript.load(_path+'jquery-ui.js')
# Run jQuery in no-conflict mode so it does not clobber any other "$".
_jqui = window.jQuery.noConflict(True)
# DOM event names: accessing one of these as an attribute on an Element
# returns a shortcut for bind() (see Element.__getattr__).
_events = ['abort',
           'beforeinput',
           'blur',
           'click',
           'compositionstart',
           'compositionupdate',
           'compositionend',
           'dblclick',
           'error',
           'focus',
           'focusin',
           'focusout',
           'input',
           'keydown',
           'keyup',
           'load',
           'mousedown',
           'mouseenter',
           'mouseleave',
           'mousemove',
           'mouseout',
           'mouseover',
           'mouseup',
           'resize',
           'scroll',
           'select',
           'unload']
class JQFunction:
    """Callable proxy around a jQuery method.

    Keyword arguments are packed into a single dict because jQuery UI
    expects its options as one Javascript object, not named arguments.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kw):
        # Pass the keyword dict as one extra positional argument when present.
        if not kw:
            return self.func(*args)
        return self.func(*args, kw)
class Element:
    """Wrapper around the objects returned by jQuery selections."""

    def __init__(self, item):
        self.item = item

    def bind(self, event, callback):
        """Binds an event on the element to function callback."""
        getattr(self.item, event)(callback)

    def __getattr__(self, attr):
        value = getattr(self.item, attr)
        if attr in _events:
            # elt.click(f) behaves like elt.bind('click', f)
            return lambda callback: self.bind(attr, callback)
        if callable(value):
            value = JQFunction(value)
        return value
class jq:
    """Entry point mirroring the jQuery "$" function.

    ``jq.get(selector=...)`` wraps every element matched by one or more
    CSS selectors in an :class:`Element`; ``jq.get(element=...)`` wraps a
    single element. ``jq[elt_id]`` returns the Element with that id.
    """

    @staticmethod
    def get(**selectors):
        items = []
        for k, v in selectors.items():
            if k == 'selector':
                # Accept a single CSS selector string or a list/tuple of
                # selector strings.
                # BUG FIX: isinstance() requires a type or a *tuple* of
                # types as second argument; the original passed the list
                # [list, tuple], which raises TypeError at runtime.
                if isinstance(v, (list, tuple)):
                    values = v
                else:
                    values = [v]
                for value in values:
                    items.append(Element(_jqui(value)))
            elif k == 'element':
                # NOTE(review): replaces the result with a single Element
                # (not a list), matching the original behaviour.
                items = Element(_jqui(v))
        return items

    @staticmethod
    def __getitem__(element_id):
        # jq[elt_id] -> Element for the element with that id.
        # NOTE(review): subscription on the class itself relies on
        # Brython's attribute lookup; in CPython the dunder would be
        # looked up on the metaclass — confirm if porting.
        return jq.get(selector='#' + element_id)[0]
| agpl-3.0 |
fidodaj/project3 | server/lib/werkzeug/test.py | 308 | 33874 | # -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from io import BytesIO
try:
from urllib2 import Request as U2Request
except ImportError:
from urllib.request import Request as U2Request
try:
from http.cookiejar import CookieJar
except ImportError: # Py2
from cookielib import CookieJar
from werkzeug._compat import iterlists, iteritems, itervalues, to_native, \
string_types, text_type, reraise, wsgi_encoding_dance, \
make_literal_wrapper
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \
url_unparse, url_parse
from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
from werkzeug.utils import dump_cookie
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
                            boundary=None, charset='utf-8'):
    """Encode a dict of values (either strings or file descriptors or
    :class:`FileStorage` objects.) into a multipart encoded string stored
    in a file descriptor.

    Returns a ``(stream, length, boundary)`` tuple; the stream is rewound
    to position 0. While the encoded payload stays under *threshold*
    bytes it is kept in memory; once it grows past that (and
    *use_tempfile* is true) it spills to a temporary file.
    """
    if boundary is None:
        boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
    # Mutable cell shared with the writer closure:
    # [0] current stream, [1] bytes written so far, [2] spilled-to-disk flag.
    _closure = [BytesIO(), 0, False]
    if use_tempfile:
        def write_binary(string):
            stream, total_length, on_disk = _closure
            if on_disk:
                stream.write(string)
            else:
                length = len(string)
                if length + _closure[1] <= threshold:
                    stream.write(string)
                else:
                    # Crossing the threshold: migrate the in-memory buffer
                    # to a temp file and continue writing there.
                    new_stream = TemporaryFile('wb+')
                    new_stream.write(stream.getvalue())
                    new_stream.write(string)
                    _closure[0] = new_stream
                    _closure[2] = True
                # Length tracking only matters until the spill happens.
                _closure[1] = total_length + length
    else:
        write_binary = _closure[0].write
    def write(string):
        write_binary(string.encode(charset))
    if not isinstance(values, MultiDict):
        values = MultiDict(values)
    for key, values in iterlists(values):
        for value in values:
            write('--%s\r\nContent-Disposition: form-data; name="%s"' %
                  (boundary, key))
            reader = getattr(value, 'read', None)
            if reader is not None:
                # File-like value: emit a file part, streamed in chunks.
                filename = getattr(value, 'filename',
                                   getattr(value, 'name', None))
                content_type = getattr(value, 'content_type', None)
                if content_type is None:
                    content_type = filename and \
                        mimetypes.guess_type(filename)[0] or \
                        'application/octet-stream'
                if filename is not None:
                    write('; filename="%s"\r\n' % filename)
                else:
                    write('\r\n')
                write('Content-Type: %s\r\n\r\n' % content_type)
                while 1:
                    chunk = reader(16384)
                    if not chunk:
                        break
                    write_binary(chunk)
            else:
                # Plain value: coerce to native string and emit inline.
                if isinstance(value, string_types):
                    value = to_native(value, charset)
                else:
                    value = str(value)
                write('\r\n\r\n' + value)
            write('\r\n')
    write('--%s--\r\n' % boundary)
    length = int(_closure[0].tell())
    _closure[0].seek(0)
    return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
    """Like `stream_encode_multipart` but returns a tuple in the form
    (``boundary``, ``data``) where data is a bytestring.
    """
    stream, _, boundary = stream_encode_multipart(
        values, use_tempfile=False, boundary=boundary, charset=charset)
    data = stream.read()
    return boundary, data
def File(fd, filename=None, mimetype=None):
    """Backwards compat."""
    from warnings import warn
    message = ('werkzeug.test.File is deprecated, use the '
               'EnvironBuilder or FileStorage instead')
    warn(DeprecationWarning(message))
    return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
def get_all(self, name, default=None):
rv = []
for k, v in self.headers:
if k.lower() == name.lower():
rv.append(v)
return rv or default or []
class _TestCookieResponse(object):
    """Minimal stand-in for ``httplib.HTTPResponse``.

    ``cookielib`` only needs the ``info()`` accessor, which must return a
    headers object supporting ``getheaders``/``get_all``.
    """

    def __init__(self, headers):
        self.headers = _TestCookieHeaders(headers)

    def info(self):
        return self.headers
class _TestCookieJar(CookieJar):
"""A cookielib.CookieJar modified to inject and read cookie headers from
and to wsgi environments, and wsgi application responses.
"""
def inject_wsgi(self, environ):
"""Inject the cookies as client headers into the server's wsgi
environment.
"""
cvals = []
for cookie in self:
cvals.append('%s=%s' % (cookie.name, cookie.value))
if cvals:
environ['HTTP_COOKIE'] = '; '.join(cvals)
def extract_wsgi(self, environ, headers):
"""Extract the server's set-cookie headers as cookies into the
cookie jar.
"""
self.extract_cookies(
_TestCookieResponse(headers),
U2Request(get_current_url(environ)),
)
def _iter_data(data):
    """Iterates over a dict or multidict yielding all keys and values.
    This is used to iterate over the data passed to the
    :class:`EnvironBuilder`.
    """
    if isinstance(data, MultiDict):
        for key, group in iterlists(data):
            for item in group:
                yield key, item
        return
    # Plain mapping: a list value is expanded into one pair per element.
    for key, group in iteritems(data):
        if not isinstance(group, list):
            yield key, group
        else:
            for item in group:
                yield key, item
class EnvironBuilder(object):
    """This class can be used to conveniently create a WSGI environment
    for testing purposes. It can be used to quickly create WSGI environments
    or request objects from arbitrary data.
    The signature of this class is also used in some other places as of
    Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
    :meth:`Client.open`). Because of this most of the functionality is
    available through the constructor alone.
    Files and regular form data can be manipulated independently of each
    other with the :attr:`form` and :attr:`files` attributes, but are
    passed with the same argument to the constructor: `data`.
    `data` can be any of these values:
    - a `str`: If it's a string it is converted into a :attr:`input_stream`,
      the :attr:`content_length` is set and you have to provide a
      :attr:`content_type`.
    - a `dict`: If it's a dict the keys have to be strings and the values
      any of the following objects:
      - a :class:`file`-like object. These are converted into
        :class:`FileStorage` objects automatically.
      - a tuple. The :meth:`~FileMultiDict.add_file` method is called
        with the tuple items as positional arguments.
    .. versionadded:: 0.6
       `path` and `base_url` can now be unicode strings that are encoded using
       the :func:`iri_to_uri` function.
    :param path: the path of the request. In the WSGI environment this will
                 end up as `PATH_INFO`. If the `query_string` is not defined
                 and there is a question mark in the `path` everything after
                 it is used as query string.
    :param base_url: the base URL is a URL that is used to extract the WSGI
                     URL scheme, host (server name + server port) and the
                     script root (`SCRIPT_NAME`).
    :param query_string: an optional string or dict with URL parameters.
    :param method: the HTTP method to use, defaults to `GET`.
    :param input_stream: an optional input stream. Do not specify this and
                         `data`. As soon as an input stream is set you can't
                         modify :attr:`args` and :attr:`files` unless you
                         set the :attr:`input_stream` to `None` again.
    :param content_type: The content type for the request. As of 0.5 you
                         don't have to provide this when specifying files
                         and form data via `data`.
    :param content_length: The content length for the request. You don't
                           have to specify this when providing data via
                           `data`.
    :param errors_stream: an optional error stream that is used for
                          `wsgi.errors`. Defaults to :data:`stderr`.
    :param multithread: controls `wsgi.multithread`. Defaults to `False`.
    :param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
    :param run_once: controls `wsgi.run_once`. Defaults to `False`.
    :param headers: an optional list or :class:`Headers` object of headers.
    :param data: a string or dict of form data. See explanation above.
    :param environ_base: an optional dict of environment defaults.
    :param environ_overrides: an optional dict of environment overrides.
    :param charset: the charset used to encode unicode data.
    """
    #: the server protocol to use. defaults to HTTP/1.1
    server_protocol = 'HTTP/1.1'
    #: the wsgi version to use. defaults to (1, 0)
    wsgi_version = (1, 0)
    #: the default request class for :meth:`get_request`
    request_class = BaseRequest
    def __init__(self, path='/', base_url=None, query_string=None,
                 method='GET', input_stream=None, content_type=None,
                 content_length=None, errors_stream=None, multithread=False,
                 multiprocess=False, run_once=False, headers=None, data=None,
                 environ_base=None, environ_overrides=None, charset='utf-8'):
        path_s = make_literal_wrapper(path)
        # A "?" in the path is split off into the query string unless one
        # was given explicitly.
        if query_string is None and path_s('?') in path:
            path, query_string = path.split(path_s('?'), 1)
        self.charset = charset
        self.path = iri_to_uri(path)
        if base_url is not None:
            base_url = url_fix(iri_to_uri(base_url, charset), charset)
        self.base_url = base_url
        if isinstance(query_string, (bytes, text_type)):
            self.query_string = query_string
        else:
            if query_string is None:
                query_string = MultiDict()
            elif not isinstance(query_string, MultiDict):
                query_string = MultiDict(query_string)
            self.args = query_string
        self.method = method
        if headers is None:
            headers = Headers()
        elif not isinstance(headers, Headers):
            headers = Headers(headers)
        self.headers = headers
        if content_type is not None:
            self.content_type = content_type
        if errors_stream is None:
            errors_stream = sys.stderr
        self.errors_stream = errors_stream
        self.multithread = multithread
        self.multiprocess = multiprocess
        self.run_once = run_once
        self.environ_base = environ_base
        self.environ_overrides = environ_overrides
        self.input_stream = input_stream
        self.content_length = content_length
        self.closed = False
        if data:
            # `data` and `input_stream` are mutually exclusive.
            if input_stream is not None:
                raise TypeError('can\'t provide input stream and data')
            if isinstance(data, text_type):
                data = data.encode(self.charset)
            if isinstance(data, bytes):
                self.input_stream = BytesIO(data)
                if self.content_length is None:
                    self.content_length = len(data)
            else:
                # Dict-like data: route each value to files or form.
                for key, value in _iter_data(data):
                    if isinstance(value, (tuple, dict)) or \
                       hasattr(value, 'read'):
                        self._add_file_from_data(key, value)
                    else:
                        self.form.setlistdefault(key).append(value)
    def _add_file_from_data(self, key, value):
        """Called in the EnvironBuilder to add files from the data dict."""
        if isinstance(value, tuple):
            self.files.add_file(key, *value)
        elif isinstance(value, dict):
            from warnings import warn
            warn(DeprecationWarning('it\'s no longer possible to pass dicts '
                                    'as `data`. Use tuples or FileStorage '
                                    'objects instead'), stacklevel=2)
            value = dict(value)
            mimetype = value.pop('mimetype', None)
            if mimetype is not None:
                value['content_type'] = mimetype
            self.files.add_file(key, **value)
        else:
            self.files.add_file(key, value)
    def _get_base_url(self):
        return url_unparse((self.url_scheme, self.host,
                            self.script_root, '', '')).rstrip('/') + '/'
    def _set_base_url(self, value):
        if value is None:
            scheme = 'http'
            netloc = 'localhost'
            script_root = ''
        else:
            scheme, netloc, script_root, qs, anchor = url_parse(value)
            if qs or anchor:
                raise ValueError('base url must not contain a query string '
                                 'or fragment')
        self.script_root = script_root.rstrip('/')
        self.host = netloc
        self.url_scheme = scheme
    base_url = property(_get_base_url, _set_base_url, doc='''
        The base URL is a URL that is used to extract the WSGI
        URL scheme, host (server name + server port) and the
        script root (`SCRIPT_NAME`).''')
    del _get_base_url, _set_base_url
    def _get_content_type(self):
        ct = self.headers.get('Content-Type')
        # No explicit header and no raw stream: guess from form/files.
        if ct is None and not self._input_stream:
            if self.method in ('POST', 'PUT', 'PATCH'):
                if self._files:
                    return 'multipart/form-data'
                return 'application/x-www-form-urlencoded'
            return None
        return ct
    def _set_content_type(self, value):
        if value is None:
            self.headers.pop('Content-Type', None)
        else:
            self.headers['Content-Type'] = value
    content_type = property(_get_content_type, _set_content_type, doc='''
        The content type for the request. Reflected from and to the
        :attr:`headers`. Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.''')
    del _get_content_type, _set_content_type
    def _get_content_length(self):
        return self.headers.get('Content-Length', type=int)
    def _set_content_length(self, value):
        if value is None:
            self.headers.pop('Content-Length', None)
        else:
            self.headers['Content-Length'] = str(value)
    content_length = property(_get_content_length, _set_content_length, doc='''
        The content length as integer. Reflected from and to the
        :attr:`headers`. Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.''')
    del _get_content_length, _set_content_length
    # Factory for the `form`/`files` properties: both are lazily created
    # and become unavailable while a raw input stream is set.
    def form_property(name, storage, doc):
        key = '_' + name
        def getter(self):
            if self._input_stream is not None:
                raise AttributeError('an input stream is defined')
            rv = getattr(self, key)
            if rv is None:
                rv = storage()
                setattr(self, key, rv)
            return rv
        def setter(self, value):
            self._input_stream = None
            setattr(self, key, value)
        return property(getter, setter, doc)
    form = form_property('form', MultiDict, doc='''
        A :class:`MultiDict` of form values.''')
    files = form_property('files', FileMultiDict, doc='''
        A :class:`FileMultiDict` of uploaded files. You can use the
        :meth:`~FileMultiDict.add_file` method to add new files to the
        dict.''')
    del form_property
    def _get_input_stream(self):
        return self._input_stream
    def _set_input_stream(self, value):
        self._input_stream = value
        self._form = self._files = None
    input_stream = property(_get_input_stream, _set_input_stream, doc='''
        An optional input stream. If you set this it will clear
        :attr:`form` and :attr:`files`.''')
    del _get_input_stream, _set_input_stream
    def _get_query_string(self):
        if self._query_string is None:
            if self._args is not None:
                return url_encode(self._args, charset=self.charset)
            return ''
        return self._query_string
    def _set_query_string(self, value):
        self._query_string = value
        self._args = None
    query_string = property(_get_query_string, _set_query_string, doc='''
        The query string. If you set this to a string :attr:`args` will
        no longer be available.''')
    del _get_query_string, _set_query_string
    def _get_args(self):
        if self._query_string is not None:
            raise AttributeError('a query string is defined')
        if self._args is None:
            self._args = MultiDict()
        return self._args
    def _set_args(self, value):
        self._query_string = None
        self._args = value
    args = property(_get_args, _set_args, doc='''
        The URL arguments as :class:`MultiDict`.''')
    del _get_args, _set_args
    @property
    def server_name(self):
        """The server name (read-only, use :attr:`host` to set)"""
        return self.host.split(':', 1)[0]
    @property
    def server_port(self):
        """The server port as integer (read-only, use :attr:`host` to set)"""
        pieces = self.host.split(':', 1)
        if len(pieces) == 2 and pieces[1].isdigit():
            return int(pieces[1])
        elif self.url_scheme == 'https':
            return 443
        return 80
    def __del__(self):
        try:
            self.close()
        except Exception:
            pass
    def close(self):
        """Closes all files. If you put real :class:`file` objects into the
        :attr:`files` dict you can call this method to automatically close
        them all in one go.
        """
        if self.closed:
            return
        try:
            files = itervalues(self.files)
        except AttributeError:
            files = ()
        for f in files:
            try:
                f.close()
            except Exception:
                pass
        self.closed = True
    def get_environ(self):
        """Return the built environ."""
        input_stream = self.input_stream
        content_length = self.content_length
        content_type = self.content_type
        # Determine the request body: an explicit stream wins; otherwise
        # one is synthesized from the form/files data.
        if input_stream is not None:
            start_pos = input_stream.tell()
            input_stream.seek(0, 2)
            end_pos = input_stream.tell()
            input_stream.seek(start_pos)
            content_length = end_pos - start_pos
        elif content_type == 'multipart/form-data':
            values = CombinedMultiDict([self.form, self.files])
            input_stream, content_length, boundary = \
                stream_encode_multipart(values, charset=self.charset)
            content_type += '; boundary="%s"' % boundary
        elif content_type == 'application/x-www-form-urlencoded':
            #py2v3 review
            values = url_encode(self.form, charset=self.charset)
            values = values.encode('ascii')
            content_length = len(values)
            input_stream = BytesIO(values)
        else:
            input_stream = _empty_stream
        result = {}
        if self.environ_base:
            result.update(self.environ_base)
        def _path_encode(x):
            return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
        qs = wsgi_encoding_dance(self.query_string)
        result.update({
            'REQUEST_METHOD': self.method,
            'SCRIPT_NAME': _path_encode(self.script_root),
            'PATH_INFO': _path_encode(self.path),
            'QUERY_STRING': qs,
            'SERVER_NAME': self.server_name,
            'SERVER_PORT': str(self.server_port),
            'HTTP_HOST': self.host,
            'SERVER_PROTOCOL': self.server_protocol,
            'CONTENT_TYPE': content_type or '',
            'CONTENT_LENGTH': str(content_length or '0'),
            'wsgi.version': self.wsgi_version,
            'wsgi.url_scheme': self.url_scheme,
            'wsgi.input': input_stream,
            'wsgi.errors': self.errors_stream,
            'wsgi.multithread': self.multithread,
            'wsgi.multiprocess': self.multiprocess,
            'wsgi.run_once': self.run_once
        })
        # Custom headers become HTTP_* keys; overrides are applied last.
        for key, value in self.headers.to_wsgi_list():
            result['HTTP_%s' % key.upper().replace('-', '_')] = value
        if self.environ_overrides:
            result.update(self.environ_overrides)
        return result
    def get_request(self, cls=None):
        """Returns a request with the data. If the request class is not
        specified :attr:`request_class` is used.
        :param cls: The request wrapper to use.
        """
        if cls is None:
            cls = self.request_class
        return cls(self.get_environ())
class ClientRedirectError(Exception):
    """Raised by the :class:`Client` when ``follow_redirects=True`` and a
    redirect loop is detected.
    """
class Client(object):
"""This class allows to send requests to a wrapped application.
The response wrapper can be a class or factory function that takes
three arguments: app_iter, status and headers. The default response
wrapper just returns a tuple.
Example::
class ClientResponse(BaseResponse):
...
client = Client(MyApplication(), response_wrapper=ClientResponse)
The use_cookies parameter indicates whether cookies should be stored and
sent for subsequent requests. This is True by default, but passing False
will disable this behaviour.
    If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True`; otherwise no external redirects
    are allowed.
.. versionadded:: 0.5
`use_cookies` is new in this version. Older versions did not provide
builtin cookie support.
"""
def __init__(self, application, response_wrapper=None, use_cookies=True,
allow_subdomain_redirects=False):
self.application = application
self.response_wrapper = response_wrapper
if use_cookies:
self.cookie_jar = _TestCookieJar()
else:
self.cookie_jar = None
self.allow_subdomain_redirects = allow_subdomain_redirects
def set_cookie(self, server_name, key, value='', max_age=None,
expires=None, path='/', domain=None, secure=None,
httponly=False, charset='utf-8'):
"""Sets a cookie in the client's cookie jar. The server name
is required and has to match the one that is also passed to
the open call.
"""
assert self.cookie_jar is not None, 'cookies disabled'
header = dump_cookie(key, value, max_age, expires, path, domain,
secure, httponly, charset)
environ = create_environ(path, base_url='http://' + server_name)
headers = [('Set-Cookie', header)]
self.cookie_jar.extract_wsgi(environ, headers)
def delete_cookie(self, server_name, key, path='/', domain=None):
"""Deletes a cookie in the test client."""
self.set_cookie(server_name, key, expires=0, max_age=0,
path=path, domain=domain)
def run_wsgi_app(self, environ, buffered=False):
"""Runs the wrapped WSGI app with the given environment."""
if self.cookie_jar is not None:
self.cookie_jar.inject_wsgi(environ)
rv = run_wsgi_app(self.application, environ, buffered=buffered)
if self.cookie_jar is not None:
self.cookie_jar.extract_wsgi(environ, rv[2])
return rv
def resolve_redirect(self, response, new_location, environ, buffered=False):
"""Resolves a single redirect and triggers the request again
directly on this redirect client.
"""
scheme, netloc, script_root, qs, anchor = url_parse(new_location)
base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/'
cur_server_name = netloc.split(':', 1)[0].split('.')
real_server_name = get_host(environ).rsplit(':', 1)[0].split('.')
if self.allow_subdomain_redirects:
allowed = cur_server_name[-len(real_server_name):] == real_server_name
else:
allowed = cur_server_name == real_server_name
if not allowed:
raise RuntimeError('%r does not support redirect to '
'external targets' % self.__class__)
# For redirect handling we temporarily disable the response
# wrapper. This is not threadsafe but not a real concern
# since the test client must not be shared anyways.
old_response_wrapper = self.response_wrapper
self.response_wrapper = None
try:
return self.open(path=script_root, base_url=base_url,
query_string=qs, as_tuple=True,
buffered=buffered)
finally:
self.response_wrapper = old_response_wrapper
def open(self, *args, **kwargs):
"""Takes the same arguments as the :class:`EnvironBuilder` class with
some additions: You can provide a :class:`EnvironBuilder` or a WSGI
environment as only argument instead of the :class:`EnvironBuilder`
arguments and two optional keyword arguments (`as_tuple`, `buffered`)
that change the type of the return value or the way the application is
executed.
.. versionchanged:: 0.5
If a dict is provided as file in the dict for the `data` parameter
the content type has to be called `content_type` now instead of
`mimetype`. This change was made for consistency with
:class:`werkzeug.FileWrapper`.
The `follow_redirects` parameter was added to :func:`open`.
Additional parameters:
:param as_tuple: Returns a tuple in the form ``(environ, result)``
:param buffered: Set this to True to buffer the application run.
This will automatically close the application for
you as well.
:param follow_redirects: Set this to True if the `Client` should
follow HTTP redirects.
"""
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
environ = None
if not kwargs and len(args) == 1:
if isinstance(args[0], EnvironBuilder):
environ = args[0].get_environ()
elif isinstance(args[0], dict):
environ = args[0]
if environ is None:
builder = EnvironBuilder(*args, **kwargs)
try:
environ = builder.get_environ()
finally:
builder.close()
response = self.run_wsgi_app(environ, buffered=buffered)
# handle redirects
redirect_chain = []
while 1:
status_code = int(response[1].split(None, 1)[0])
if status_code not in (301, 302, 303, 305, 307) \
or not follow_redirects:
break
new_location = response[2]['location']
new_redirect_entry = (new_location, status_code)
if new_redirect_entry in redirect_chain:
raise ClientRedirectError('loop detected')
redirect_chain.append(new_redirect_entry)
environ, response = self.resolve_redirect(response, new_location,
environ, buffered=buffered)
if self.response_wrapper is not None:
response = self.response_wrapper(*response)
if as_tuple:
return environ, response
return response
def get(self, *args, **kw):
"""Like open but method is enforced to GET."""
kw['method'] = 'GET'
return self.open(*args, **kw)
def patch(self, *args, **kw):
"""Like open but method is enforced to PATCH."""
kw['method'] = 'PATCH'
return self.open(*args, **kw)
def post(self, *args, **kw):
"""Like open but method is enforced to POST."""
kw['method'] = 'POST'
return self.open(*args, **kw)
def head(self, *args, **kw):
"""Like open but method is enforced to HEAD."""
kw['method'] = 'HEAD'
return self.open(*args, **kw)
def put(self, *args, **kw):
"""Like open but method is enforced to PUT."""
kw['method'] = 'PUT'
return self.open(*args, **kw)
def delete(self, *args, **kw):
"""Like open but method is enforced to DELETE."""
kw['method'] = 'DELETE'
return self.open(*args, **kw)
def options(self, *args, **kw):
"""Like open but method is enforced to OPTIONS."""
kw['method'] = 'OPTIONS'
return self.open(*args, **kw)
def trace(self, *args, **kw):
"""Like open but method is enforced to TRACE."""
kw['method'] = 'TRACE'
return self.open(*args, **kw)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.application
)
def create_environ(*args, **kwargs):
    """Create a new WSGI environ dict based on the values passed.  The first
    parameter should be the path of the request which defaults to '/'.  The
    second one can either be an absolute path (in that case the host is
    localhost:80) or a full path to the request with scheme, netloc port and
    the path to the script.

    This accepts the same arguments as the :class:`EnvironBuilder`
    constructor.

    .. versionchanged:: 0.5
       This function is now a thin wrapper over :class:`EnvironBuilder` which
       was added in 0.5.  The `headers`, `environ_base`, `environ_overrides`
       and `charset` parameters were added.
    """
    builder = EnvironBuilder(*args, **kwargs)
    try:
        environ = builder.get_environ()
    finally:
        # Release any resources the builder holds, whether or not
        # building the environ succeeded.
        builder.close()
    return environ
def run_wsgi_app(app, environ, buffered=False):
    """Return a tuple in the form (app_iter, status, headers) of the
    application output.  This works best if you pass it an application that
    returns an iterator all the time.

    Sometimes applications may use the `write()` callable returned
    by the `start_response` function.  This tries to resolve such edge
    cases automatically.  But if you don't get the expected output you
    should set `buffered` to `True` which enforces buffering.

    If passed an invalid WSGI application the behavior of this function is
    undefined.  Never pass non-conforming WSGI applications to this function.

    :param app: the application to execute.
    :param buffered: set to `True` to enforce buffering.
    :return: tuple in the form ``(app_iter, status, headers)``
    """
    environ = _get_environ(environ)
    # `response` receives [status, headers] once start_response is called;
    # `buffer` collects anything emitted through the legacy write() callable.
    response = []
    buffer = []

    def start_response(status, headers, exc_info=None):
        # Per the WSGI spec, re-raise when the application hands us exc_info.
        if exc_info is not None:
            reraise(*exc_info)
        response[:] = [status, headers]
        # The write() callable we hand back simply buffers the data.
        return buffer.append

    app_iter = app(environ, start_response)

    # when buffering we emit the close call early and convert the
    # application iterator into a regular list
    if buffered:
        close_func = getattr(app_iter, 'close', None)
        try:
            app_iter = list(app_iter)
        finally:
            # close() must run even if consuming the iterator raised.
            if close_func is not None:
                close_func()

    # otherwise we iterate the application iter until we have
    # a response, chain the already received data with the already
    # collected data and wrap it in a new `ClosingIterator` if
    # we have a close callable.
    else:
        # Advance until start_response has been called, buffering any
        # write()-emitted data along the way.
        while not response:
            buffer.append(next(app_iter))
        if buffer:
            close_func = getattr(app_iter, 'close', None)
            app_iter = chain(buffer, app_iter)
            if close_func is not None:
                app_iter = ClosingIterator(app_iter, close_func)

    return app_iter, response[0], Headers(response[1])
| apache-2.0 |
Distrotech/yum | docs/sphinxdocs/rstgenerator.py | 8 | 7753 | #! /usr/bin/python
import sys, re, os
def generateFile(input_directory, file_name, output_directory,
                 package_heirarchy=None, module_name=None):
    """Generate a rst file telling sphinx to just generate documentation
    for the public interface automatically.  Output will be written to
    *module_name*.rst in *output_directory*.

    :param input_directory: a string specifying the directory containing the
       source code file
    :param file_name: the name of the python source code file to generate
       a sphinx rst file describing
    :param output_directory: a string specifying the directory where
       the generated rst file should be placed.  If *output_directory* does
       not already exist, it will be created
    :param package_heirarchy: a list of strings, where each name is
       the name of a package, in the order of the hierarchy
    :param module_name: the name of the module.  If not given, the .py is
       removed from *file_name* to produce the module_name
    """
    # Stick all output into a list of strings, then just join it and write
    # it all in one go.
    output = []

    # Create the output directory if it doesn't already exist.  Note that
    # if the directory is created between the check and the creation, it
    # might cause issues, but I don't think this likely at all to happen.
    if not os.path.exists(output_directory):
        try:
            os.makedirs(output_directory)
        except OSError as e:
            print("Error creating the output directory")
            print(e.args)

    try:
        # `with` guarantees the handle is closed on every path (the old
        # code leaked it when an error occurred mid-read).
        with open(os.path.join(input_directory, file_name), 'r') as f:
            # Do the module output.  The dot is escaped now: the old
            # pattern '(\w+).py$' let '.' match any character.
            if not module_name:
                module_name = re.search(r'(\w+)\.py$', file_name).group(1)

            # Append the package names, if there are any.
            full_module_name = module_name
            if package_heirarchy:
                full_module_name = '.'.join(package_heirarchy) + '.' + module_name

            output.append(full_module_name)
            output.append('=' * len(full_module_name))
            output.append('.. automodule:: %s\n' % full_module_name)

            # Read the file, and do output for classes.
            class_reg = re.compile(r'^class (\w+)')
            func_reg = re.compile(r'^def ((?:[a-zA-Z0-9]+_)*[a-zA-Z0-9]+)')

            # We don't need a blank line between autofunction directives, but
            # we do need one between autofunctions and headings etc. for
            # classes.  This keeps track if we're switching from autofunctions
            # to classes, so we can add that blank line.
            finding_functions = False
            for line in f:
                # Search for classes.
                match = class_reg.match(line)
                if match is not None:
                    if finding_functions:
                        output.append('')
                        finding_functions = False
                    class_name = match.group(1)
                    output.append(class_name)
                    output.append('-' * len(class_name))
                    output.append('''.. autoclass:: %s
    :members:
    :show-inheritance:
''' % class_name)
                # Search for top level functions.
                else:
                    match = func_reg.match(line)
                    if match is not None:
                        output.append('.. autofunction:: ' + match.group(1))
                        finding_functions = True
    except IOError as e:
        print("Error opening the input file : " +
              os.path.join(input_directory, file_name))
        print(e.args[1])
    else:
        # Write the output.
        output_file_name = os.path.join(output_directory, module_name) + '.rst'
        try:
            with open(output_file_name, 'w') as out_file:
                out_file.write('\n'.join(output))
        except IOError as e:
            print("Error opening the output file : " + output_file_name)
            print(e.args[1])
def generateIndex(module_list, output_directory):
    """Create an index.rst file for sphinx in the given directory.

    Note: *module_list* is sorted in place as a side effect.

    :param module_list: a list of the names of the modules to list in
       the index file
    :param output_directory: the directory to create the index file in
    """
    # Sort the module_list so the generated toctree is alphabetical.
    module_list.sort()
    try:
        # `with` guarantees the handle is closed (the old code never
        # closed the file it opened).
        with open(os.path.join(output_directory, 'index.rst'), 'w') as f:
            # Do the output.
            f.write(""".. Yum documentation master file, created by
sphinx-quickstart on Mon Jun 27 14:01:20 2011.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Yum's documentation!
===============================
Contents:
.. toctree::
:maxdepth: 2
""")
            f.write('\n '.join(module_list))
            f.write("""
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
""")
    except IOError as e:
        print("Error opening the output file.")
        print(e.args[1])
def generateAll(source_directory, output_directory):
    """Walk *source_directory*, generate a sphinx .rst stub for every
    python module found, and finish with an index.rst listing them all.

    :param source_directory: root directory of the source tree to document
    :param output_directory: root directory for the generated .rst files,
       mirroring the input directory structure
    """
    # Keep a set of directory paths that are packages.  This is
    # useful so that later we will be able to figure out full
    # module names.
    packages = set()

    # Keep a list of tuples containing python module names and
    # relative paths, so that we can build the index file later.
    modules = []

    # Walk the directory tree
    for dirpath, dirnames, filenames in os.walk(source_directory, topdown=True):
        # Add the current directory to packages if __init__.py exists
        if '__init__.py' in filenames:
            packages.add(dirpath)

        # Find the hierarchy of packages that we are currently in, by
        # recursing up towards the filesystem root.
        package_heirarchy = []
        dirpath_i = dirpath
        while dirpath_i != '/':
            if dirpath_i in packages:
                dirpath_i, tail = os.path.split(dirpath_i)
                package_heirarchy.insert(0, tail)
            else:
                break

        # Find the relative output directory, mirroring the input
        # directory structure
        relative_output_directory = ''
        if not os.path.samefile(dirpath, source_directory):
            relative_output_directory = os.path.relpath(dirpath,
                                                        source_directory)

        # Don't recurse into directories that are hidden, or for docs.
        # NOTE: assign to the slice so os.walk sees the pruned list; the
        # old code removed entries while iterating the same list, which
        # silently skipped the entry following each removed one.
        dirnames[:] = [d for d in dirnames
                       if d != "docs" and not d.startswith(".")]

        # Generate the rst for a file if it is a python source code file
        for file_name in filenames:
            # Skip file names that contain dashes, since they're not
            # valid module names, so we won't be able to import them
            # to generate the documentation anyway
            if '-' in file_name:
                continue
            if file_name.endswith('.py'):
                module_name = file_name.partition('.')[0]
                modules.append(os.path.join(relative_output_directory,
                                            module_name))
                generateFile(dirpath, file_name,
                             os.path.join(output_directory,
                                          relative_output_directory),
                             package_heirarchy, module_name)

    # Create the index.rst file
    generateIndex(modules, output_directory)
# When run as a script, document the package rooted at the current working
# directory, writing the generated .rst files alongside it.
if __name__ == "__main__":
    generateAll(os.getcwd(), os.getcwd())
| gpl-2.0 |
bocon13/buck | scripts/top_down_stress_tester.py | 25 | 6514 | import argparse
import itertools
import json
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import zipfile
# Location of the on-disk buck cache that the stress tester inspects.
CACHE_DIR = 'buck-cache'


class CacheEntry(object):
    """Plain record describing one cached artifact; get_cache_entry()
    assigns the attributes (target, rule_key, deps, path)."""
def get_cache_entry(path):
    """Read one buck cache artifact (a zip file) into a CacheEntry."""
    with zipfile.ZipFile(path) as archive:
        # Index the archive members by basename for easy lookup.
        members = {os.path.basename(n): n for n in archive.namelist()}
        entry = CacheEntry()
        entry.target = archive.read(members['TARGET']).strip()
        entry.rule_key = archive.read(members['RULE_KEY']).strip()
        entry.deps = json.loads(archive.read(members['DEPS']))
        entry.path = path
    return entry
def get_cache_inventory():
    """Map target name -> CacheEntry for every artifact in CACHE_DIR."""
    entries = (get_cache_entry(os.path.join(CACHE_DIR, name))
               for name in os.listdir(CACHE_DIR))
    return {entry.target: entry for entry in entries}
def get_missing_cache_entries(inventory):
    """
    Find and return all entries missing in the cache.

    :param inventory: dict mapping target name -> CacheEntry
    :return: dict of the entries whose artifact file no longer exists
    """
    missing_entries = {}
    # .values() works on both Python 2 and 3; .itervalues() is 2-only.
    for entry in inventory.values():
        if not os.path.exists(entry.path):
            missing_entries[entry.target] = entry
    return missing_entries
def clear_cache():
    """Delete the local buck cache directory (no error if it is absent)."""
    # shutil.rmtree replaces shelling out to `rm -rf`: portable and no
    # subprocess overhead; ignore_errors keeps rm -rf's tolerance of a
    # missing directory.
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
def clear_output():
    """Delete buck's output directory (no error if it is absent)."""
    # Same rationale as clear_cache(): portable, no subprocess.
    shutil.rmtree('buck-out', ignore_errors=True)
def run_buck(buck, *args):
    """Run the given buck binary with the given arguments, pointing its
    cache at CACHE_DIR, and return the command's stdout.

    Long argument lists (> 30) are written to a temporary @-file instead
    of being passed on the command line.
    """
    logging.info('Running {} {}'.format(buck, ' '.join(args)))

    # BUG FIX: `args` arrives as a tuple; convert to a list so the
    # .append() calls below work (the old code called .append() on the
    # tuple, raising AttributeError on every invocation).
    args = list(args)

    # Always create a temp file, in case we need to serialize the
    # arguments to it.
    with tempfile.NamedTemporaryFile() as f:

        # Point cache to a known location.
        args.append('--config')
        args.append('cache.dir=' + CACHE_DIR)

        # If the command would be too long, put the args into a file and
        # execute that.
        if len(args) > 30:
            for arg in args:
                f.write(arg)
                f.write(os.linesep)
            f.flush()
            args = ['@' + f.name]

        return subprocess.check_output([buck] + args)
def preorder_traversal(roots, deps, callback):
    """
    Execute the given callback during a preorder traversal of the graph.

    `callback(node, chain)` receives each node exactly once, along with
    the list of ancestors through which it was first reached.
    """
    # Nodes already handed to the callback; guarantees single visits.
    visited = set()

    def visit(node, chain):
        if node in visited:
            return
        visited.add(node)
        callback(node, chain)
        # Children see the current node appended to the ancestor chain.
        child_chain = chain + [node]
        for child in deps[node]:
            visit(child, child_chain)

    for root in roots:
        visit(root, [])
def build(buck, targets):
    """
    Verify that each of the actions that run when building the given targets
    run correctly using a top-down build.

    For every rule found in the cache, its cache entry is deleted and a
    rebuild is triggered via the nearest UI-visible ancestor target; the
    cache must be full again afterwards.
    """
    # Now run a build to populate the cache.
    logging.info('Running a build to populate the cache')
    run_buck(buck, 'build', *targets)

    # Find all targets reachable via the UI.
    out = run_buck(buck, 'audit', 'dependencies', '--transitive', *targets)
    ui_targets = set(out.splitlines())
    ui_targets.update(targets)

    # Grab an inventory of the cache and use it to form a dependency map.
    cache_inventory = get_cache_inventory()
    dependencies = {n.target: n.deps for n in cache_inventory.itervalues()}

    # Keep track of all the processed nodes so we can print progress info.
    processed = set()

    # The callback to run for each build rule.
    def handle(current, chain):
        logging.info(
            'Processing {} ({}/{})'
            .format(current, len(processed), len(dependencies.keys())))
        processed.add(current)

        # Empty the previous builds output.
        logging.info('Removing output from previous build')
        clear_output()

        # Remove the cache entry for this target.
        entry = cache_inventory[current]
        os.remove(entry.path)
        logging.info(' removed {} => {}'.format(current, entry.path))

        # Now run the build using the closest UI visible ancestor target.
        logging.info('Running the build to check ' + current)
        for node in itertools.chain([current], reversed(chain)):
            if node in ui_targets:
                run_buck(buck, 'build', '--just-build', current, node)
                break
        else:
            assert False, 'couldn\'t find target in UI: ' + node

        # We should *always* end with a full cache.
        logging.info('Verifying cache...')
        missing = get_missing_cache_entries(cache_inventory)
        assert len(missing) == 0, '\n'.join(sorted(missing.keys()))

    preorder_traversal(targets, dependencies, handle)
def test(buck, targets):
    """
    Test that we can run tests when pulling from the cache.

    Builds all `*_test` rules reachable from *targets*, wipes the build
    output (so artifacts must come from the cache), then runs the tests.
    """
    # Find all test targets.
    test_targets = set()
    out = run_buck(buck, 'targets', '--json', *targets)
    for info in json.loads(out):
        if info['buck.type'].endswith('_test'):
            test_targets.add(
                '//' + info['buck.base_path'] + ':' + info['name'])
    if not test_targets:
        raise Exception('no test targets')

    # Now run a build to populate the cache.
    logging.info('Running a build to populate the cache')
    run_buck(buck, 'build', *test_targets)

    # Empty the build output.
    logging.info('Removing output from build')
    clear_output()

    # Now run the test
    run_buck(buck, 'test', *test_targets)
def main(argv):
    """Entry point: parse the command line, reset cache/output state, and
    dispatch to build() or test().

    NOTE: the order below matters -- `buck clean` runs before the output
    and cache directories are wiped, so each run starts from scratch.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--buck', default='buck')
    parser.add_argument('command', choices=('build', 'test'))
    parser.add_argument('targets', metavar='target', nargs='+')
    args = parser.parse_args(argv[1:])

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(message)s',
        datefmt='%m/%d/%Y %I:%M:%S %p')

    # Resolve any aliases in the top-level targets.
    out = run_buck(args.buck, 'targets', *args.targets)
    targets = set(out.splitlines())

    # Clear the cache and output directories to start with a clean slate.
    logging.info('Clearing output and cache')
    run_buck(args.buck, 'clean')
    clear_output()
    clear_cache()

    # Run the subcommand
    if args.command == 'build':
        build(args.buck, targets)
    elif args.command == 'test':
        test(args.buck, targets)
    else:
        raise Exception('unknown command: ' + args.command)
if __name__ == '__main__':
    # Guard the entry point: the old module-level sys.exit(main(...))
    # executed (and exited the interpreter) on a mere import.
    sys.exit(main(sys.argv))
| apache-2.0 |
avastu/zulip | zerver/management/commands/initialize_voyager_db.py | 121 | 2246 | from __future__ import absolute_import
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from zerver.models import UserProfile, Stream, Recipient, \
Subscription, Realm, get_client, email_to_username
from django.conf import settings
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.actions import set_default_streams, do_create_realm
from optparse import make_option
# NOTE(review): presumably set to None so the initialization actions below
# don't try to contact a running Tornado server -- confirm.
settings.TORNADO_SERVER = None
def create_users(name_list, bot=False):
    """Collect an account tuple for every (full_name, email) pair in
    *name_list* and hand them, with the realm map, to bulk_create_users().

    :param name_list: iterable of (full_name, email) pairs
    :param bot: passed through to bulk_create_users()
    """
    # Map every existing realm by its domain.
    realms = {realm.domain: realm for realm in Realm.objects.all()}

    user_set = set()
    for full_name, email in name_list:
        user_set.add((email, full_name, email_to_username(email), True))
    bulk_create_users(realms, user_set, bot)
class Command(BaseCommand):
    # Django management command that seeds a fresh Voyager database with
    # the base realms, internal bots, and site record.
    help = "Populate an initial database for Zulip Voyager"

    # optparse-style options (older Django); accepts `--extra-users N`.
    option_list = BaseCommand.option_list + (
        make_option('--extra-users',
                    dest='extra_users',
                    type='int',
                    default=0,
                    help='The number of extra users to create'),
        )

    def handle(self, **options):
        """Create the initial realms, bots, clients, streams and site."""
        Realm.objects.create(domain="zulip.com")
        names = [(settings.FEEDBACK_BOT_NAME, settings.FEEDBACK_BOT)]
        create_users(names, bot=True)
        # Ensure the standard client records exist.
        get_client("website")
        get_client("API")
        # Expand each internal-bot template with the configured bot domain.
        internal_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
                         for bot in settings.INTERNAL_BOTS]
        create_users(internal_bots, bot=True)
        # Set the owners for these bots to the bots themselves
        bots = UserProfile.objects.filter(email__in=[bot_info[1] for bot_info in internal_bots])
        for bot in bots:
            bot.bot_owner = bot
            bot.save()
        # Create the admin realm and give it its default streams.
        (admin_realm, _) = do_create_realm(settings.ADMIN_DOMAIN,
                                           settings.ADMIN_DOMAIN, True)
        set_default_streams(admin_realm, ["social", "engineering"])
        self.stdout.write("Successfully populated database with initial data.\n")
        # Point the django.contrib.sites record at the configured host.
        site = Site.objects.get_current()
        site.domain = settings.EXTERNAL_HOST
        site.save()
| apache-2.0 |
tsdmgz/ansible | lib/ansible/modules/windows/win_command.py | 23 | 4430 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Ansible, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata: a supported core module in "preview"
# status, metadata schema version 1.1.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'core'}

# The YAML below is parsed by ansible-doc and the docs build; it describes
# the module, its options and caveats.  Do not change the string content
# without regenerating the docs.
DOCUMENTATION = r'''
---
module: win_command
short_description: Executes a command on a remote Windows node
version_added: 2.2
description:
- The C(win_command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($env:HOME) and operations
like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell)
module if you need these features).
- For non-Windows targets, use the M(command) module instead.
options:
free_form:
description:
- the C(win_command) module takes a free form command to run. There is no parameter actually named 'free form'.
See the examples!
required: true
creates:
description:
- a path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
removes:
description:
- a path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
chdir:
description:
- set the specified path as the current working directory before executing a command.
stdin:
description:
- Set the stdin of the command directly to the specified value.
version_added: '2.5'
notes:
- If you want to run a command through a shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(win_shell) module instead. The
C(win_command) module is much more secure as it's not affected by the user's
environment.
- C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not
exist, use this.
- For non-Windows targets, use the M(command) module instead.
author:
- Matt Davis
'''

# Usage examples shown by ansible-doc.
EXAMPLES = r'''
- name: Save the result of 'whoami' in 'whoami_out'
win_command: whoami
register: whoami_out
- name: Run command that only runs if folder exists and runs from a specific folder
win_command: wbadmin -backupTarget:C:\backup\
args:
chdir: C:\somedir\
creates: C:\backup\
- name: Run an executable and send data to the stdin for the executable
win_command: powershell.exe -
args:
stdin: Write-Host test
'''

# Schema of the values the module returns to the caller.
RETURN = r'''
msg:
description: changed
returned: always
type: boolean
sample: True
start:
description: The command execution start time
returned: always
type: string
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: string
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: string
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: string
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: string
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: string
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
'''
| gpl-3.0 |
ehogan/iris | lib/iris/tests/test_grib_load.py | 8 | 28851 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import datetime
from distutils.version import StrictVersion
import cf_units
import numpy as np
import iris
import iris.exceptions
from iris.tests import mock
import iris.tests.stock
import iris.util
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import iris.plot as iplt
import iris.quickplot as qplt
if tests.GRIB_AVAILABLE:
import gribapi
import iris.fileformats.grib
def _mock_gribapi_fetch(message, key):
"""
Fake the gribapi key-fetch.
Fetch key-value from the fake message (dictionary).
If the key is not present, raise the diagnostic exception.
"""
if key in message:
return message[key]
else:
raise _mock_gribapi.GribInternalError
def _mock_gribapi__grib_is_missing(grib_message, keyname):
"""
Fake the gribapi key-existence enquiry.
Return whether the key exists in the fake message (dictionary).
"""
return (keyname not in grib_message)
def _mock_gribapi__grib_get_native_type(grib_message, keyname):
"""
Fake the gribapi type-discovery operation.
Return type of key-value in the fake message (dictionary).
If the key is not present, raise the diagnostic exception.
"""
if keyname in grib_message:
return type(grib_message[keyname])
raise _mock_gribapi.GribInternalError(keyname)
if tests.GRIB_AVAILABLE:
    # Construct a mock object to mimic the gribapi for GribWrapper testing.
    _mock_gribapi = mock.Mock(spec=gribapi)
    _mock_gribapi.GribInternalError = Exception

    # Route the key-access functions through the fake-message helpers
    # defined above, so a plain dict can stand in for a grib message.
    _mock_gribapi.grib_get_long = mock.Mock(side_effect=_mock_gribapi_fetch)
    _mock_gribapi.grib_get_string = mock.Mock(side_effect=_mock_gribapi_fetch)
    _mock_gribapi.grib_get_double = mock.Mock(side_effect=_mock_gribapi_fetch)
    _mock_gribapi.grib_get_double_array = mock.Mock(
        side_effect=_mock_gribapi_fetch)
    _mock_gribapi.grib_is_missing = mock.Mock(
        side_effect=_mock_gribapi__grib_is_missing)
    _mock_gribapi.grib_get_native_type = mock.Mock(
        side_effect=_mock_gribapi__grib_get_native_type)

# define seconds in an hour, for general test usage
_hour_secs = 3600.0
class FakeGribMessage(dict):
    """
    A 'fake grib message' object, for testing GribWrapper construction.

    Behaves as a dictionary, containing key-values for message keys.
    """
    def __init__(self, **kwargs):
        """
        Create a fake message object.

        General keys can be set/add as required via **kwargs.
        The keys 'edition' and 'time_code' are specially managed.
        """
        # Start with a bare dictionary
        dict.__init__(self)
        # Extract specially-recognised keys.
        edition = kwargs.pop('edition', 1)
        time_code = kwargs.pop('time_code', None)
        # Set the minimally required keys.
        self._init_minimal_message(edition=edition)
        # Also set a time-code, if given.
        if time_code is not None:
            self.set_timeunit_code(time_code)
        # Finally, add any remaining passed key-values.
        self.update(**kwargs)

    def _init_minimal_message(self, edition=1):
        # Set values for all the required keys.
        # 'edition' controls the edition-specific keys.
        # NOTE(review): 9999 appears to be a "don't care" placeholder for
        # parameter-identity keys the tests ignore -- confirm.
        self.update({
            'Ni': 1,
            'Nj': 1,
            'numberOfValues': 1,
            'alternativeRowScanning': 0,
            'centre': 'ecmf',
            'year': 2007,
            'month': 3,
            'day': 23,
            'hour': 12,
            'minute': 0,
            'indicatorOfUnitOfTimeRange': 1,
            'shapeOfTheEarth': 6,
            'gridType': 'rotated_ll',
            'angleOfRotation': 0.0,
            'iDirectionIncrementInDegrees': 0.036,
            'jDirectionIncrementInDegrees': 0.036,
            'iScansNegatively': 0,
            'jScansPositively': 1,
            'longitudeOfFirstGridPointInDegrees': -5.70,
            'latitudeOfFirstGridPointInDegrees': -4.452,
            'jPointsAreConsecutive': 0,
            'values': np.array([[1.0]]),
            'indicatorOfParameter': 9999,
            'parameterNumber': 9999,
        })
        # Add edition-dependent settings.
        self['edition'] = edition
        if edition == 1:
            self.update({
                'startStep': 24,
                'timeRangeIndicator': 1,
                'P1': 2, 'P2': 0,
                # time unit - needed AS WELL as 'indicatorOfUnitOfTimeRange'
                'unitOfTime': 1,
                'table2Version': 9999,
            })
        if edition == 2:
            self.update({
                'iDirectionIncrementGiven': 1,
                'jDirectionIncrementGiven': 1,
                'uvRelativeToGrid': 0,
                'forecastTime': 24,
                'productDefinitionTemplateNumber': 0,
                'stepRange': 24,
                'discipline': 9999,
                'parameterCategory': 9999,
                'tablesVersion': 4
            })

    def set_timeunit_code(self, timecode):
        # Do timecode setting (somewhat edition-dependent).
        self['indicatorOfUnitOfTimeRange'] = timecode
        if self['edition'] == 1:
            # for some odd reason, GRIB1 code uses *both* of these
            # NOTE kludge -- the 2 keys are really the same thing
            self['unitOfTime'] = timecode
@tests.skip_data
@tests.skip_grib
class TestGribLoad(tests.GraphicsTest):
    def setUp(self):
        # Turn the module-level hindcast workaround flag on for the
        # duration of each test; tearDown switches it back off.
        iris.fileformats.grib.hindcast_workaround = True
    def tearDown(self):
        # Restore the module default so other test modules are unaffected.
        iris.fileformats.grib.hindcast_workaround = False
    def test_load(self):
        """Load assorted GRIB1/GRIB2 files and compare against stored CML."""
        cubes = iris.load(tests.get_data_path(('GRIB', 'rotated_uk',
                                               "uk_wrongparam.grib1")))
        self.assertCML(cubes, ("grib_load", "rotated.cml"))

        cubes = iris.load(tests.get_data_path(('GRIB', "time_processed",
                                               "time_bound.grib1")))
        self.assertCML(cubes, ("grib_load", "time_bound_grib1.cml"))

        cubes = iris.load(tests.get_data_path(('GRIB', "time_processed",
                                               "time_bound.grib2")))
        self.assertCML(cubes, ("grib_load", "time_bound_grib2.cml"))

        cubes = iris.load(tests.get_data_path(('GRIB', "3_layer_viz",
                                               "3_layer.grib2")))
        # NOTE(review): reorders the cubes to a fixed sequence before
        # comparing -- presumably load order is not guaranteed; confirm.
        cubes = iris.cube.CubeList([cubes[1], cubes[0], cubes[2]])
        self.assertCML(cubes, ("grib_load", "3_layer.cml"))
def test_load_masked(self):
gribfile = tests.get_data_path(
('GRIB', 'missing_values', 'missing_values.grib2'))
cubes = iris.load(gribfile)
self.assertCML(cubes, ('grib_load', 'missing_values_grib2.cml'))
@tests.skip_plot
def test_y_fastest(self):
cubes = iris.load(tests.get_data_path(("GRIB", "y_fastest",
"y_fast.grib2")))
self.assertCML(cubes, ("grib_load", "y_fastest.cml"))
iplt.contourf(cubes[0])
plt.gca().coastlines()
plt.title("y changes fastest")
self.check_graphic()
@tests.skip_plot
def test_ij_directions(self):
def old_compat_load(name):
cube = iris.load(tests.get_data_path(('GRIB', 'ij_directions',
name)))[0]
return [cube]
cubes = old_compat_load("ipos_jpos.grib2")
self.assertCML(cubes, ("grib_load", "ipos_jpos.cml"))
iplt.contourf(cubes[0])
plt.gca().coastlines()
plt.title("ipos_jpos cube")
self.check_graphic()
cubes = old_compat_load("ipos_jneg.grib2")
self.assertCML(cubes, ("grib_load", "ipos_jneg.cml"))
iplt.contourf(cubes[0])
plt.gca().coastlines()
plt.title("ipos_jneg cube")
self.check_graphic()
cubes = old_compat_load("ineg_jneg.grib2")
self.assertCML(cubes, ("grib_load", "ineg_jneg.cml"))
iplt.contourf(cubes[0])
plt.gca().coastlines()
plt.title("ineg_jneg cube")
self.check_graphic()
cubes = old_compat_load("ineg_jpos.grib2")
self.assertCML(cubes, ("grib_load", "ineg_jpos.cml"))
iplt.contourf(cubes[0])
plt.gca().coastlines()
plt.title("ineg_jpos cube")
self.check_graphic()
def test_shape_of_earth(self):
def old_compat_load(name):
cube = iris.load(tests.get_data_path(('GRIB', 'shape_of_earth',
name)))[0]
return cube
# pre-defined sphere
cube = old_compat_load("0.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_0.cml"))
# custom sphere
cube = old_compat_load("1.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_1.cml"))
# IAU65 oblate sphere
cube = old_compat_load("2.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_2.cml"))
# custom oblate spheroid (km)
cube = old_compat_load("3.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_3.cml"))
# IAG-GRS80 oblate spheroid
cube = old_compat_load("4.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_4.cml"))
# WGS84
cube = old_compat_load("5.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_5.cml"))
# pre-defined sphere
cube = old_compat_load("6.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_6.cml"))
# custom oblate spheroid (m)
cube = old_compat_load("7.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_7.cml"))
# grib1 - same as grib2 shape 6, above
cube = old_compat_load("global.grib1")
self.assertCML(cube, ("grib_load", "earth_shape_grib1.cml"))
@tests.skip_plot
def test_polar_stereo_grib1(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "polar_stereo", "ST4.2013052210.01h")))
self.assertCML(cube, ("grib_load", "polar_stereo_grib1.cml"))
qplt.contourf(cube, norm=LogNorm())
plt.gca().coastlines()
plt.gca().gridlines()
plt.title("polar stereo grib1")
self.check_graphic()
@tests.skip_plot
def test_polar_stereo_grib2(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "polar_stereo",
"CMC_glb_TMP_ISBL_1015_ps30km_2013052000_P006.grib2")))
self.assertCML(cube, ("grib_load", "polar_stereo_grib2.cml"))
qplt.contourf(cube)
plt.gca().coastlines()
plt.gca().gridlines()
plt.title("polar stereo grib2")
self.check_graphic()
@tests.skip_plot
def test_lambert_grib1(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "lambert", "lambert.grib1")))
self.assertCML(cube, ("grib_load", "lambert_grib1.cml"))
qplt.contourf(cube)
plt.gca().coastlines()
plt.gca().gridlines()
plt.title("lambert grib1")
self.check_graphic()
@tests.skip_plot
def test_lambert_grib2(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "lambert", "lambert.grib2")))
self.assertCML(cube, ("grib_load", "lambert_grib2.cml"))
qplt.contourf(cube)
plt.gca().coastlines()
plt.gca().gridlines()
plt.title("lambert grib2")
self.check_graphic()
def test_regular_gg_grib1(self):
cube = iris.load_cube(tests.get_data_path(
('GRIB', 'gaussian', 'regular_gg.grib1')))
self.assertCML(cube, ('grib_load', 'regular_gg_grib1.cml'))
def test_regular_gg_grib2(self):
cube = iris.load_cube(tests.get_data_path(
('GRIB', 'gaussian', 'regular_gg.grib2')))
self.assertCML(cube, ('grib_load', 'regular_gg_grib2.cml'))
def test_reduced_ll(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "reduced", "reduced_ll.grib1")))
self.assertCML(cube, ("grib_load", "reduced_ll_grib1.cml"))
def test_reduced_gg(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "reduced", "reduced_gg.grib2")))
self.assertCML(cube, ("grib_load", "reduced_gg_grib2.cml"))
def test_reduced_missing(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "reduced", "reduced_ll_missing.grib1")))
self.assertCML(cube, ("grib_load", "reduced_ll_missing_grib1.cml"))
@tests.skip_grib
class TestGribTimecodes(tests.IrisTest):
    """Unit-handling tests for GRIB time codes, run against a mocked gribapi.

    Fixes applied: the per-method local variable previously named ``tests``
    shadowed the imported ``tests`` module and is renamed ``cases``; the
    explicit-class call ``TestGribTimecodes._run_timetests(self, ...)`` is
    replaced by the idiomatic ``self._run_timetests(...)``; an unused local
    binding inside ``assertRaises`` is removed.  Behaviour is unchanged.
    """
    def _run_timetests(self, test_set):
        # Check the unit-handling for given units-codes and editions.
        # Operates on lists of cases for various time-units and grib-editions.
        # Format: (edition, code, expected-exception,
        #          equivalent-seconds, description-string)
        with mock.patch('iris.fileformats.grib.gribapi', _mock_gribapi):
            for test_controls in test_set:
                (
                    grib_edition, timeunit_codenum,
                    expected_error,
                    timeunit_secs, timeunit_str
                ) = test_controls
                # Construct a suitable fake test message.
                message = FakeGribMessage(
                    edition=grib_edition,
                    time_code=timeunit_codenum
                )
                if expected_error:
                    # Expect GribWrapper construction to fail.
                    with self.assertRaises(type(expected_error)) as ar_context:
                        iris.fileformats.grib.GribWrapper(message)
                    self.assertEqual(
                        ar_context.exception.args,
                        expected_error.args)
                    continue
                # 'ELSE'...
                # Expect the wrapper construction to work.
                # Make a GribWrapper object and test it.
                wrapped_msg = iris.fileformats.grib.GribWrapper(message)
                # Check the units string.
                forecast_timeunit = wrapped_msg._forecastTimeUnit
                self.assertEqual(
                    forecast_timeunit, timeunit_str,
                    'Bad unit string for edition={ed:01d}, '
                    'unitcode={code:01d} : '
                    'expected="{wanted}" GOT="{got}"'.format(
                        ed=grib_edition,
                        code=timeunit_codenum,
                        wanted=timeunit_str,
                        got=forecast_timeunit
                    )
                )
                # Check the data-starttime calculation.
                interval_start_to_end = (
                    wrapped_msg._phenomenonDateTime
                    - wrapped_msg._referenceDateTime
                )
                if grib_edition == 1:
                    interval_from_units = wrapped_msg.P1
                else:
                    interval_from_units = wrapped_msg.forecastTime
                interval_from_units *= datetime.timedelta(0, timeunit_secs)
                self.assertEqual(
                    interval_start_to_end, interval_from_units,
                    'Inconsistent start time offset for edition={ed:01d}, '
                    'unitcode={code:01d} : '
                    'from-unit="{unit_str}" '
                    'from-phenom-minus-ref="{e2e_str}"'.format(
                        ed=grib_edition,
                        code=timeunit_codenum,
                        unit_str=interval_from_units,
                        e2e_str=interval_start_to_end
                    )
                )
    # Test groups of testcases for various time-units and grib-editions.
    # Case format: (edition, code, expected-exception,
    #               equivalent-seconds, description-string)
    def test_timeunits_common(self):
        cases = (
            (1, 0, None, 60.0, 'minutes'),
            (1, 1, None, _hour_secs, 'hours'),
            (1, 2, None, 24.0 * _hour_secs, 'days'),
            (1, 10, None, 3.0 * _hour_secs, '3 hours'),
            (1, 11, None, 6.0 * _hour_secs, '6 hours'),
            (1, 12, None, 12.0 * _hour_secs, '12 hours'),
        )
        self._run_timetests(cases)
    @staticmethod
    def _err_bad_timeunit(code):
        # The exception expected for an unsupported time-unit code.
        return iris.exceptions.NotYetImplementedError(
            'Unhandled time unit for forecast '
            'indicatorOfUnitOfTimeRange : {code}'.format(code=code)
        )
    def test_timeunits_grib1_specific(self):
        cases = (
            (1, 13, None, 0.25 * _hour_secs, '15 minutes'),
            (1, 14, None, 0.5 * _hour_secs, '30 minutes'),
            (1, 254, None, 1.0, 'seconds'),
            (1, 111, TestGribTimecodes._err_bad_timeunit(111), 1.0, '??'),
        )
        self._run_timetests(cases)
    def test_timeunits_grib2_specific(self):
        cases = (
            (2, 13, None, 1.0, 'seconds'),
            # check the extra grib1 keys FAIL
            (2, 14, TestGribTimecodes._err_bad_timeunit(14), 0.0, '??'),
            (2, 254, TestGribTimecodes._err_bad_timeunit(254), 0.0, '??'),
        )
        self._run_timetests(cases)
    def test_timeunits_calendar(self):
        # Calendar-based units are deliberately unsupported.
        cases = (
            (1, 3, TestGribTimecodes._err_bad_timeunit(3), 0.0, 'months'),
            (1, 4, TestGribTimecodes._err_bad_timeunit(4), 0.0, 'years'),
            (1, 5, TestGribTimecodes._err_bad_timeunit(5), 0.0, 'decades'),
            (1, 6, TestGribTimecodes._err_bad_timeunit(6), 0.0, '30 years'),
            (1, 7, TestGribTimecodes._err_bad_timeunit(7), 0.0, 'centuries'),
        )
        self._run_timetests(cases)
    def test_timeunits_invalid(self):
        cases = (
            (1, 111, TestGribTimecodes._err_bad_timeunit(111), 1.0, '??'),
            (2, 27, TestGribTimecodes._err_bad_timeunit(27), 1.0, '??'),
        )
        self._run_timetests(cases)
    def test_load_probability_forecast(self):
        # Test GribWrapper interpretation of PDT 4.9 data.
        # NOTE:
        # Currently Iris has only partial support for PDT 4.9.
        # Though it can load the data, key metadata (thresholds) is lost.
        # At present, we are not testing for this.
        # Make a testing grib message in memory, with gribapi.
        grib_message = gribapi.grib_new_from_samples('GRIB2')
        gribapi.grib_set_long(grib_message, 'productDefinitionTemplateNumber',
                              9)
        gribapi.grib_set_string(grib_message, 'stepRange', '10-55')
        grib_wrapper = iris.fileformats.grib.GribWrapper(grib_message)
        # Define two expected datetimes for _periodEndDateTime as
        # gribapi v1.9.16 mis-calculates this.
        # See https://software.ecmwf.int/wiki/display/GRIB/\
        # GRIB+API+version+1.9.18+released
        try:
            # gribapi v1.9.16 has no __version__ attribute.
            gribapi_ver = gribapi.__version__
        except AttributeError:
            gribapi_ver = gribapi.grib_get_api_version()
        if StrictVersion(gribapi_ver) < StrictVersion('1.9.18'):
            exp_end_date = datetime.datetime(year=2007, month=3, day=25,
                                             hour=12, minute=0, second=0)
        else:
            exp_end_date = datetime.datetime(year=2007, month=3, day=25,
                                             hour=19, minute=0, second=0)
        # Check that it captures the statistics time period info.
        # (And for now, nothing else)
        self.assertEqual(
            grib_wrapper._referenceDateTime,
            datetime.datetime(year=2007, month=3, day=23,
                              hour=12, minute=0, second=0)
        )
        self.assertEqual(
            grib_wrapper._periodStartDateTime,
            datetime.datetime(year=2007, month=3, day=23,
                              hour=22, minute=0, second=0)
        )
        self.assertEqual(grib_wrapper._periodEndDateTime, exp_end_date)
    def test_warn_unknown_pdts(self):
        # Test loading of an unrecognised GRIB Product Definition Template.
        # Get a temporary file by name (deleted afterward by context).
        with self.temp_filename() as temp_gribfile_path:
            # Write a test grib message to the temporary file.
            with open(temp_gribfile_path, 'wb') as temp_gribfile:
                grib_message = gribapi.grib_new_from_samples('GRIB2')
                # Set the PDT to something unexpected.
                gribapi.grib_set_long(
                    grib_message, 'productDefinitionTemplateNumber', 5)
                gribapi.grib_write(grib_message, temp_gribfile)
            # Load the message from the file as a cube.
            cube_generator = iris.fileformats.grib.load_cubes(
                temp_gribfile_path)
            cube = next(cube_generator)
            # Check the cube has an extra "warning" attribute.
            self.assertEqual(
                cube.attributes['GRIB_LOAD_WARNING'],
                'unsupported GRIB2 ProductDefinitionTemplate: #4.5'
            )
@tests.skip_grib
class TestGribSimple(tests.IrisTest):
    """Base class for GRIB tests that need no on-disk test data."""
    def mock_grib(self):
        """Return a mock GRIB message with plain (non-Mock) attributes."""
        defaults = {
            'startStep': 0,
            'phenomenon_points': lambda unit: 3,
            '_forecastTimeUnit': "hours",
            'productDefinitionTemplateNumber': 0,
            # level type: these two keys are effectively the same thing
            'levelType': 1,
            'typeOfFirstFixedSurface': 1,
            'typeOfSecondFixedSurface': 1,
        }
        msg = mock.Mock()
        for name, value in defaults.items():
            setattr(msg, name, value)
        return msg
    def cube_from_message(self, grib):
        """Translate a mock message into a cube via the GribWrapper path."""
        # Parameter translation uses the GribWrapper, so the Mock-based fake
        # message must first be converted to a FakeGribMessage.
        with mock.patch('iris.fileformats.grib.gribapi', _mock_gribapi):
            fake_message = FakeGribMessage(**grib.__dict__)
            wrapper = iris.fileformats.grib.GribWrapper(fake_message)
            cube, _, _ = iris.fileformats.rules._make_cube(
                wrapper, iris.fileformats.grib.load_rules.convert)
        return cube
@tests.skip_grib
class TestGrib1LoadPhenomenon(TestGribSimple):
    """Check translation of GRIB1 parameter codes into phenomenon metadata."""
    def mock_grib(self):
        msg = super(TestGrib1LoadPhenomenon, self).mock_grib()
        msg.edition = 1
        return msg
    def _load_cube(self, table2, param):
        # Build a mock GRIB1 message with the given codes and make a cube.
        msg = self.mock_grib()
        msg.table2Version = table2
        msg.indicatorOfParameter = param
        return self.cube_from_message(msg)
    def test_grib1_unknownparam(self):
        cube = self._load_cube(0, 9999)
        self.assertEqual(cube.standard_name, None)
        self.assertEqual(cube.long_name, None)
        self.assertEqual(cube.units, cf_units.Unit("???"))
    def test_grib1_unknown_local_param(self):
        cube = self._load_cube(128, 999)
        self.assertEqual(cube.standard_name, None)
        self.assertEqual(cube.long_name, 'UNKNOWN LOCAL PARAM 999.128')
        self.assertEqual(cube.units, cf_units.Unit("???"))
    def test_grib1_unknown_standard_param(self):
        cube = self._load_cube(1, 975)
        self.assertEqual(cube.standard_name, None)
        self.assertEqual(cube.long_name, 'UNKNOWN LOCAL PARAM 975.1')
        self.assertEqual(cube.units, cf_units.Unit("???"))
    def known_grib1(self, param, standard_str, units_str):
        # Shared check for a parameter with a known standard translation.
        cube = self._load_cube(1, param)
        self.assertEqual(cube.standard_name, standard_str)
        self.assertEqual(cube.long_name, None)
        self.assertEqual(cube.units, cf_units.Unit(units_str))
    def test_grib1_known_standard_params(self):
        # At present there are just a very few of these.
        self.known_grib1(11, 'air_temperature', 'kelvin')
        self.known_grib1(33, 'x_wind', 'm s-1')
        self.known_grib1(34, 'y_wind', 'm s-1')
@tests.skip_grib
class TestGrib2LoadPhenomenon(TestGribSimple):
    # Test recognition of grib phenomenon types.
    def mock_grib(self):
        # Extend the base mock with GRIB2-specific time attributes.
        grib = super(TestGrib2LoadPhenomenon, self).mock_grib()
        grib.edition = 2
        grib._forecastTimeUnit = 'hours'
        grib._forecastTime = 0.0
        grib.phenomenon_points = lambda unit: [0.0]
        return grib
    def known_grib2(self, discipline, category, param,
                    standard_name, long_name, units_str):
        # Check one (discipline, category, number) -> phenomenon translation.
        grib = self.mock_grib()
        grib.discipline = discipline
        grib.parameterCategory = category
        grib.parameterNumber = param
        cube = self.cube_from_message(grib)
        try:
            _cf_units = cf_units.Unit(units_str)
        except ValueError:
            # Unparseable unit strings are expected to map to unknown units.
            _cf_units = cf_units.Unit('???')
        self.assertEqual(cube.standard_name, standard_name)
        self.assertEqual(cube.long_name, long_name)
        self.assertEqual(cube.units, _cf_units)
    def test_grib2_unknownparam(self):
        # An unrecognised code triple yields no name and unknown units.
        grib = self.mock_grib()
        grib.discipline = 999
        grib.parameterCategory = 999
        grib.parameterNumber = 9999
        cube = self.cube_from_message(grib)
        self.assertEqual(cube.standard_name, None)
        self.assertEqual(cube.long_name, None)
        self.assertEqual(cube.units, cf_units.Unit("???"))
    def test_grib2_known_standard_params(self):
        # check we know how to translate at least these params
        # I.E. all the ones the older scheme provided.
        full_set = [
            (0, 0, 0, "air_temperature", None, "kelvin"),
            (0, 0, 2, "air_potential_temperature", None, "K"),
            (0, 1, 0, "specific_humidity", None, "kg kg-1"),
            (0, 1, 1, "relative_humidity", None, "%"),
            (0, 1, 3, None, "precipitable_water", "kg m-2"),
            (0, 1, 22, None, "cloud_mixing_ratio", "kg kg-1"),
            (0, 1, 13, "liquid_water_content_of_surface_snow", None, "kg m-2"),
            (0, 2, 1, "wind_speed", None, "m s-1"),
            (0, 2, 2, "x_wind", None, "m s-1"),
            (0, 2, 3, "y_wind", None, "m s-1"),
            (0, 2, 8, "lagrangian_tendency_of_air_pressure", None, "Pa s-1"),
            (0, 2, 10, "atmosphere_absolute_vorticity", None, "s-1"),
            (0, 3, 0, "air_pressure", None, "Pa"),
            (0, 3, 1, "air_pressure_at_sea_level", None, "Pa"),
            (0, 3, 3, None, "icao_standard_atmosphere_reference_height", "m"),
            (0, 3, 5, "geopotential_height", None, "m"),
            (0, 3, 9, "geopotential_height_anomaly", None, "m"),
            (0, 6, 1, "cloud_area_fraction", None, "%"),
            (0, 6, 6, "atmosphere_mass_content_of_cloud_liquid_water", None,
             "kg m-2"),
            (0, 7, 6,
             "atmosphere_specific_convective_available_potential_energy",
             None, "J kg-1"),
            (0, 7, 7, None, "convective_inhibition", "J kg-1"),
            (0, 7, 8, None, "storm_relative_helicity", "J kg-1"),
            (0, 14, 0, "atmosphere_mole_content_of_ozone", None, "Dobson"),
            (2, 0, 0, "land_area_fraction", None, "1"),
            (10, 2, 0, "sea_ice_area_fraction", None, "1")]
        for (discipline, category, number,
             standard_name, long_name, units) in full_set:
            self.known_grib2(discipline, category, number,
                             standard_name, long_name, units)
# Run the iris test suite when this file is executed directly.
if __name__ == "__main__":
    tests.main()
| lgpl-3.0 |
# Unidecode transliteration table for Unicode code points U+AC00..U+ACFF
# (the first 256 precomposed Hangul syllables, "ga" .. "gwags").
#
# The first line of the original generated file was corrupted by dataset
# metadata fused onto "data = (" (a syntax error), and the table spelled
# out all 256 romanizations as literals.  They follow the completely
# regular initial-'g' + vowel + final-jamo pattern, so the identical
# tuple is reconstructed programmatically here.
_VOWELS = ('a', 'ae', 'ya', 'yae', 'eo', 'e', 'yeo', 'ye', 'o', 'wa')
_FINALS = ('', 'g', 'gg', 'gs', 'n', 'nj', 'nh', 'd', 'l', 'lg', 'lm',
           'lb', 'ls', 'lt', 'lp', 'lh', 'm', 'b', 'bs', 's', 'ss', 'ng',
           'j', 'c', 'k', 't', 'p', 'h')
# 10 vowels x 28 finals = 280 combinations; this 256-codepoint block only
# reaches the first four 'wa' syllables, hence the slice.
data = tuple('g' + vowel + final
             for vowel in _VOWELS
             for final in _FINALS)[:256]
| bsd-2-clause |
kutenai/django | django/views/decorators/csrf.py | 586 | 2202 | from functools import wraps
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.utils.decorators import available_attrs, decorator_from_middleware
# Build the per-view CSRF-protection decorator directly from the middleware
# implementation, so the two can never diverge.
csrf_protect = decorator_from_middleware(CsrfViewMiddleware)
csrf_protect.__name__ = "csrf_protect"
csrf_protect.__doc__ = """
This decorator adds CSRF protection in exactly the same way as
CsrfViewMiddleware, but it can be used on a per view basis. Using both, or
using the decorator multiple times, is harmless and efficient.
"""
class _EnsureCsrfToken(CsrfViewMiddleware):
    # We need this to behave just like the CsrfViewMiddleware, but not reject
    # requests or log warnings.
    def _reject(self, request, reason):
        # Overridden to a no-op: never reject, just continue processing.
        return None
# Variant that only guarantees a CSRF token is available to the template
# context; it never rejects the request.
requires_csrf_token = decorator_from_middleware(_EnsureCsrfToken)
requires_csrf_token.__name__ = 'requires_csrf_token'
requires_csrf_token.__doc__ = """
Use this decorator on views that need a correct csrf_token available to
RequestContext, but without the CSRF protection that csrf_protect
enforces.
"""
class _EnsureCsrfCookie(CsrfViewMiddleware):
    # Like CsrfViewMiddleware, but never rejects; its only job is to make
    # sure the CSRF cookie ends up set on the response.
    def _reject(self, request, reason):
        # Overridden to a no-op: never reject the request.
        return None
    def process_view(self, request, callback, callback_args, callback_kwargs):
        retval = super(_EnsureCsrfCookie, self).process_view(request, callback, callback_args, callback_kwargs)
        # Forces process_response to send the cookie
        get_token(request)
        return retval
# Decorator guaranteeing the CSRF cookie is set, regardless of template
# tags or middleware configuration.
ensure_csrf_cookie = decorator_from_middleware(_EnsureCsrfCookie)
ensure_csrf_cookie.__name__ = 'ensure_csrf_cookie'
ensure_csrf_cookie.__doc__ = """
Use this decorator to ensure that a view sets a CSRF cookie, whether or not it
uses the csrf_token template tag, or the CsrfViewMiddleware is used.
"""
def csrf_exempt(view_func):
    """
    Marks a view function as being exempt from the CSRF view protection.
    """
    # Setting the flag on a fresh wrapper (instead of mutating view_func
    # directly) keeps the decorator free of side-effects on the original
    # function.  Note the flag is set *before* wraps() copies attributes,
    # exactly as in the original implementation.
    def _exempt_view(*args, **kwargs):
        return view_func(*args, **kwargs)
    _exempt_view.csrf_exempt = True
    decorate = wraps(view_func, assigned=available_attrs(view_func))
    return decorate(_exempt_view)
jaeilepp/mne-python | examples/realtime/plot_compute_rt_average.py | 7 | 1912 | """
========================================================
Compute real-time evoked responses using moving averages
========================================================
This example demonstrates how to connect to an MNE Real-time server
using the RtClient and use it together with RtEpochs to compute
evoked responses using moving averages.
Note: The MNE Real-time server (mne_rt_server), which is part of mne-cpp,
has to be running on the same computer.
"""
# Authors: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.realtime import RtEpochs, MockRtClient
print(__doc__)
# Fiff file to simulate the realtime client
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# select gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
# select the left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
# create the mock-client object
rt_client = MockRtClient(raw)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
decim=1, reject=dict(grad=4000e-13, eog=150e-6))
# start the acquisition
rt_epochs.start()
# send raw buffers
rt_client.send_data(rt_epochs, picks, tmin=0, tmax=150, buffer_size=1000)
for ii, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ii + 1))
ev.pick_types(meg=True, eog=False) # leave out the eog channel
if ii == 0:
evoked = ev
else:
evoked = mne.combine_evoked([evoked, ev], weights='nave')
plt.clf() # clear canvas
evoked.plot(axes=plt.gca()) # plot on current figure
plt.pause(0.05)
| bsd-3-clause |
ezequielpereira/Time-Line | libs/wx/lib/CDate.py | 6 | 3788 | # Name: CDate.py
# Purpose: Date and Calendar classes
#
# Author: Lorne White (email: lwhite1@planet.eon.net)
#
# Created:
# Version 0.2 08-Nov-1999
# Licence: wxWindows license
#----------------------------------------------------------------------------
# Updated: 01-Dec-2004
# Action: Cast the year variable to an integer under the Date Class
# Reason: When the year was compared in the isleap() function, if it was
# in a string format, then an error was raised.
import time
# Bidirectional month lookup: maps month numbers (1..12) to names AND names
# back to numbers; the None/0 entries handle the "no month" case.
Month = {2: 'February', 3: 'March', None: 0, 'July': 7, 11:
         'November', 'December': 12, 'June': 6, 'January': 1, 'September': 9,
         'August': 8, 'March': 3, 'November': 11, 'April': 4, 12: 'December',
         'May': 5, 10: 'October', 9: 'September', 8: 'August', 7: 'July', 6:
         'June', 5: 'May', 4: 'April', 'October': 10, 'February': 2, 1:
         'January', 0: None}
# Number of days per month (except for February in leap years)
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# Full and abbreviated names of weekdays (index 0 = Sunday)
day_name = [ 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
day_abbr = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', ]
# Return number of leap years in range [y1, y2)
# Assume y1 <= y2 and no funny (non-leap century) years
def leapdays(y1, y2):
    """Count leap years in the half-open range [y1, y2).

    Uses explicit floor division ('//') so the result stays an int on both
    Python 2 and Python 3 (the original '/' yields a float on Python 3).
    Century years are treated as leap years, matching the original comment.
    """
    return (y2 + 3) // 4 - (y1 + 3) // 4
# Return 1 for leap years, 0 for non-leap years
def isleap(year):
    """Return True for Gregorian leap years.

    The original used the Python-2-only '<>' operator; '!=' is semantically
    identical and also valid on Python 3.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def FillDate(val):
    """Return str(val) left-padded with a single '0' when shorter than 2."""
    text = str(val)
    return text if len(text) >= 2 else '0' + text
def julianDay(year, month, day):
    """Return this module's julian day number for the given calendar date.

    Ported from Python 2: the 'long' coercions and 'L' literals are dropped
    (Python ints are arbitrary-precision), and '/' becomes explicit floor
    division '//', which matches Python 2 integer '/' exactly (including
    for negative years).  Results are identical to the original.

    NOTE(review): the value produced is internally consistent with
    FromJulian()/dayOfWeek() in this module, but appears to be one less
    than the standard astronomical Julian Day Number -- confirm before
    mixing with external JDN sources.
    """
    # int() also accepts string input, as long() did in the original.
    year, month, day = int(year), int(month), int(day)
    # Normalise out-of-range months into the year.
    if month > 12:
        year = year + month // 12
        month = month % 12
    elif month < 1:
        month = -month
        year = year - month // 12 - 1
        month = 12 - month % 12
    if year > 0:
        yearCorr = 0
    else:
        yearCorr = 3
    if month < 3:
        year = year - 1
        month = month + 12
    b = 0
    # Gregorian correction only after the 1582-10-14 calendar switch.
    if year * 10000 + month * 100 + day > 15821014:
        b = 2 - year // 100 + year // 400
    return (1461 * year - yearCorr) // 4 + 306001 * (month + 1) // 10000 + day + 1720994 + b
def TodayDay():
    """Return the weekday name (per day_name) for the current local date."""
    year, month, day = time.localtime(time.time())[:3]
    return day_name[dayOfWeek(julianDay(year, month, day))]
def FormatDay(value):
    # NOTE(review): this function looks broken as written -- neither
    # 'FromFormat' nor 'DateCalc' is defined anywhere in this module, so
    # calling it raises NameError.  It appears to be dead code left over
    # from an older revision; confirm before relying on it.
    date = FromFormat(value)
    daywk = DateCalc.dayOfWeek(date)
    daywk = day_name[daywk]
    return(daywk)
def FromJulian(julian):
    """Invert julianDay(): return (year, month, day) for a julian number.

    Ported from Python 2: 'long' coercions and 'L' literals dropped, '/'
    replaced by floor division '//' (identical to Python 2 integer '/').
    """
    julian = int(julian)
    # 2299160 is the Julian/Gregorian calendar switch-over point.
    if (julian < 2299160):
        b = julian + 1525
    else:
        alpha = (4 * julian - 7468861) // 146097
        b = julian + 1526 + alpha - alpha // 4
    c = (20 * b - 2442) // 7305
    d = 1461 * c // 4
    e = 10000 * (b - d) // 306001
    day = int(b - d - 306001 * e // 10000)
    if e < 14:
        month = int(e - 1)
    else:
        month = int(e - 13)
    if month > 2:
        year = c - 4716
    else:
        year = c - 4715
    year = int(year)
    return year, month, day
def dayOfWeek(julian):
    """Map a julian number (as produced by julianDay above) to an index
    into day_name/day_abbr.

    Ported from Python 2: '1L'/'7L' long literals become plain ints.
    NOTE(review): paired with this module's julianDay(), real-world dates
    seem to land on the *previous* weekday name (e.g. 2000-01-01, a
    Saturday, maps to index 5 = 'Friday'); verify against callers before
    changing or relying on the convention.
    """
    return int((julian + 1) % 7)
def daysPerMonth(month, year):
    """Number of days in the given month, accounting for leap Februaries."""
    extra = 1 if (month == 2 and isleap(year)) else 0
    return mdays[month] + extra
class now:
    """Snapshot of the current local date: year, month and day attributes."""
    def __init__(self):
        self.date = time.localtime(time.time())
        # struct_time starts (year, month, mday, ...), so slice the first 3.
        self.year, self.month, self.day = self.date[:3]
class Date:
    """A calendar date with precomputed julian-day derived fields."""
    def __init__(self, year, month, day):
        # Cast early: year may arrive as a string (see the module history
        # note about isleap comparisons).
        self.year = int(year)
        self.month = month
        self.julian = julianDay(self.year, month, day)
        self.day_of_week = dayOfWeek(self.julian)
        self.days_in_month = daysPerMonth(self.month, self.year)
| gpl-3.0 |
danimajo/pineapple_pdf | PIL/ImageEnhance.py | 11 | 2760 | #
# The Python Imaging Library.
# $Id$
#
# image enhancement classes
#
# For a background, see "Image Processing By Interpolation and
# Extrapolation", Paul Haeberli and Douglas Voorhies. Available
# at http://www.sgi.com/grafica/interp/index.html
#
# History:
# 1996-03-23 fl Created
# 2009-06-16 fl Fixed mean calculation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFilter, ImageStat
class _Enhance:
    # Common base class: each subclass's __init__ sets self.image (the
    # original) and self.degenerate (the fully "de-enhanced" version);
    # enhancement is then a simple linear blend between the two.
    def enhance(self, factor):
        """
        Returns an enhanced image.

        :param factor: A floating point value controlling the enhancement.
                       Factor 1.0 always returns a copy of the original image,
                       lower factors mean less color (brightness, contrast,
                       etc), and higher values more. There are no restrictions
                       on this value.
        :rtype: :py:class:`~PIL.Image.Image`
        """
        return Image.blend(self.degenerate, self.image, factor)
class Color(_Enhance):
    """Adjust image color balance.

    This class can be used to adjust the colour balance of an image, in
    a manner similar to the controls on a colour TV set. An enhancement
    factor of 0.0 gives a black and white image. A factor of 1.0 gives
    the original image.
    """
    def __init__(self, image):
        self.image = image
        # Degenerate endpoint: the greyscale version, converted back to the
        # original mode so blending works channel-for-channel.
        self.degenerate = image.convert("L").convert(image.mode)
class Contrast(_Enhance):
    """Adjust image contrast.

    This class can be used to control the contrast of an image, similar
    to the contrast control on a TV set. An enhancement factor of 0.0
    gives a solid grey image. A factor of 1.0 gives the original image.
    """
    def __init__(self, image):
        self.image = image
        # Degenerate endpoint: a solid image at the mean grey level
        # (rounded to the nearest integer).
        mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5)
        self.degenerate = Image.new("L", image.size, mean).convert(image.mode)
class Brightness(_Enhance):
    """Adjust image brightness.

    This class can be used to control the brightness of an image. An
    enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the
    original image.
    """
    def __init__(self, image):
        self.image = image
        # Degenerate endpoint: an all-black image of the same mode/size.
        self.degenerate = Image.new(image.mode, image.size, 0)
class Sharpness(_Enhance):
    """Adjust image sharpness.

    This class can be used to adjust the sharpness of an image. An
    enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives
    the original image, and a factor of 2.0 gives a sharpened image.
    """
    def __init__(self, image):
        # The smoothed image is the fully "un-sharpened" endpoint of the
        # blend performed by _Enhance.enhance().
        self.degenerate = image.filter(ImageFilter.SMOOTH)
        self.image = image
| mit |
numenta/nupic.research | nupic/research/frameworks/sigopt/sigopt_experiment.py | 3 | 7277 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
import os
from sigopt import Connection
class SigOptExperiment:
    """
    Class used to wrap around the SigOpt API and designed to be used in any experiment
    runner. A particular experiment runner, such as DistributedTrainable, will want to
    subclass and redefine update_config_with_suggestion() to be specific to their
    config.
    """

    def __init__(self, experiment_id=None, sigopt_config=None):
        """
        Initiate a connection to the SigOpt API and optionally store the id
        and config for an existing experiment. The SigOpt API key should be
        defined in the environment variable 'SIGOPT_KEY' (with
        'SIGOPT_DEV_KEY' as a fallback).

        :param experiment_id: (int) An existing experiment id.
        :param sigopt_config: (dict) The config used to create experiment id.
        :raises AssertionError: if no API key is found in the environment.
        """
        self.experiment_id = experiment_id
        self.sigopt_config = sigopt_config
        self.conn = None
        self.training_run = None

        self.api_key = os.environ.get("SIGOPT_KEY", None)
        if self.api_key is None:
            self.api_key = os.environ.get("SIGOPT_DEV_KEY", None)
        assert self.api_key is not None, "No SigOpt API key!"

        try:
            self.conn = Connection(client_token=self.api_key)
        except Exception:
            print("Could not connect to SigOpt!")
            raise

    def create_experiment(self, sigopt_config=None):
        """
        Create a new sigopt experiment using the config.

        :param sigopt_config: dictionary containing the SigOpt experiment parameters. If
        this is None, this method does nothing and acts as a pass through.
        If sigopt_config contains the key experiment_id we reuse the corresponding
        existing experiment. If None, or this key doesn't exist, we create a brand new
        experiment using sigopt_config, and update sigopt_config with the new
        experiment_id.

        :return: the (new or reused) experiment id, or None if no config given.
        """
        if sigopt_config is None:
            return None

        self.sigopt_config = sigopt_config

        # Bug fix: honor the documented behavior. Previously an existing
        # "experiment_id" key was ignored and (worse) forwarded as a kwarg
        # to experiments().create(), instead of reusing the experiment.
        if "experiment_id" in sigopt_config:
            self.experiment_id = sigopt_config["experiment_id"]
            return self.experiment_id

        # Create a brand-new SigOpt experiment and remember its id in the
        # config so subsequent calls reuse it.
        experiment = self.conn.experiments().create(**sigopt_config)
        self.experiment_id = experiment.id
        sigopt_config["experiment_id"] = experiment.id

        print("Created experiment: https://app.sigopt.com/experiment/"
              + str(experiment.id))
        return self.experiment_id

    def get_next_suggestion(self):
        """Fetch the experiment and create a new suggestion from it."""
        experiment = self.conn.experiments(self.experiment_id).fetch()
        suggestion = self.conn.experiments(experiment.id).suggestions().create()
        return suggestion

    def update_observation(self, suggestion, values):
        """Report observed metric values for the given suggestion."""
        self.conn.experiments(self.experiment_id).observations().create(
            suggestion=suggestion.id,
            values=values,
        )

    def get_observation_count(self):
        """Return the number of observations recorded so far."""
        experiment = self.conn.experiments(self.experiment_id).fetch()
        return experiment.progress.observation_count

    def observations(self):
        """Return the list of all observations for this experiment."""
        observations = self.conn.experiments(self.experiment_id).observations().fetch()
        return observations.data

    def create_observation(self, assignments, value, task=None):
        """
        Create an observation with custom assignments.

        :param assignments: parameter assignments for this observation.
        :param value: observed metric value.
        :param task: optional task (for multitask experiments).
        """
        if task is None:
            self.conn.experiments(self.experiment_id).observations().create(
                assignments=assignments,
                value=value
            )
        else:
            self.conn.experiments(self.experiment_id).observations().create(
                assignments=assignments,
                value=value,
                task=task
            )

    def open_suggestions(self):
        """Return the list of currently open suggestions."""
        suggestions = self.conn.experiments(self.experiment_id).suggestions().fetch(
            state="open")
        return suggestions.data

    def delete_suggestion(self, suggestion):
        """Delete a single suggestion."""
        self.conn.experiments(self.experiment_id).suggestions(
            suggestion.id).delete()

    def delete_open_suggestions(self):
        """
        Delete all open suggestions.
        """
        self.conn.experiments(self.experiment_id).suggestions().delete(state="open")

    def get_best_assignments(self):
        """Return the current best assignments, or None if there are none yet."""
        a = self.conn.experiments(self.experiment_id).best_assignments().fetch().data
        # If you have not completed any observations, or you are early on a multitask
        # experiment, you could have no best assignments.
        if len(a) == 0:
            return None
        else:
            return a[0]

    def create_training_run(self, suggestion):
        """
        Create training run using this suggestion. The training run is cached
        for later creating checkpoints.
        """
        self.training_run = self.conn.experiments(
            self.experiment_id).training_runs().create(suggestion=suggestion.id)

    def create_checkpoint(self, metric_value):
        """
        Create a checkpoint for the (single) metric that is being optimized. In order to
        use this you must have specified training_monitor when creating the experiment,
        and must have called create_training_run() for this training run.
        """
        assert self.training_run is not None
        self.conn.experiments(self.experiment_id).training_runs(
            self.training_run.id).checkpoints().create(
            values=[dict(name=self.sigopt_config["metrics"][0]["name"],
                         value=metric_value)],
        )

    def get_experiment_details(self):
        """Fetch and return the full experiment object from SigOpt."""
        return self.conn.experiments(self.experiment_id).fetch()

    def update_config_with_suggestion(self, config, suggestion):
        """
        Given a SigOpt suggestion, update this config dict.
        """
        # For multi-task experiments where epoch is the task. Must have a metadata
        # field called max_epochs.
        if suggestion.task is not None and "epoch" in suggestion.task.name:
            max_epochs = self.sigopt_config["metadata"]["max_epochs"]
            epochs = int(max_epochs * suggestion.task.cost)
            print("Suggested task/cost/epochs for this multitask experiment: ",
                  suggestion.task.name, suggestion.task.cost, epochs)
            config["epochs"] = epochs

    @classmethod
    def get_execution_order(cls):
        """Declare execution order hooks for the mixin framework."""
        return dict(
            update_config_with_suggestion=[
                "SigOptExperiment.update_config_with_suggestion"
            ],
        )
| agpl-3.0 |
polyval/CNC | flask/Lib/site-packages/pip/_vendor/requests/packages/urllib3/response.py | 478 | 16459 | try:
import http.client as httplib
except ImportError:
import httplib
import zlib
import io
from socket import timeout as SocketTimeout
from ._collections import HTTPHeaderDict
from .exceptions import (
ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
)
from .packages.six import string_types as basestring, binary_type, PY3
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed
class DeflateDecoder(object):
    """Incremental decoder for 'deflate' content-encoding.

    Some servers send raw deflate streams without the zlib header. The
    first chunk is tried with a zlib-wrapped decompressor and, if that
    fails, replayed through a raw (negative wbits) decompressor.
    """
    def __init__(self):
        # True until the header-vs-raw question has been settled.
        self._first_try = True
        # Buffer of everything seen so far, kept so it can be replayed
        # through the raw decompressor on a zlib.error.
        self._data = binary_type()
        self._obj = zlib.decompressobj()
    def __getattr__(self, name):
        # Delegate any other attribute (e.g. flush()) to the zlib object.
        return getattr(self._obj, name)
    def decompress(self, data):
        """Decompress ``data``; empty input is returned unchanged."""
        if not data:
            return data
        if not self._first_try:
            return self._obj.decompress(data)
        self._data += data
        try:
            return self._obj.decompress(data)
        except zlib.error:
            # Wrapped attempt failed: switch to a raw-deflate
            # decompressor and replay the buffered bytes.
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None
class GzipDecoder(object):
    """Incremental decoder for 'gzip' content-encoding."""

    def __init__(self):
        # 16 + MAX_WBITS instructs zlib to expect a gzip header/trailer.
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def __getattr__(self, name):
        # Delegate any other attribute (e.g. flush()) to the zlib object.
        return getattr(self._obj, name)

    def decompress(self, data):
        """Decompress ``data``; empty input is returned unchanged."""
        if data:
            return self._obj.decompress(data)
        return data
def _get_decoder(mode):
    """Return the body decoder matching a content-encoding token."""
    return GzipDecoder() if mode == 'gzip' else DeflateDecoder()
class HTTPResponse(io.IOBase):
    """
    HTTP Response container.
    Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed. This
    class is also compatible with the Python standard library's :mod:`io`
    module, and can hence be treated as a readable object in the context of that
    framework.
    Extra parameters for behaviour not present in httplib.HTTPResponse:
    :param preload_content:
        If True, the response's body will be preloaded during construction.
    :param decode_content:
        If True, will attempt to decode the body based on specific
        content-encoding headers (like 'gzip' and 'deflate'); if False, the
        raw, still-encoded data is returned instead.
    :param original_response:
        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.
    """
    # Content-encodings we know how to decode (see _get_decoder).
    CONTENT_DECODERS = ['gzip', 'deflate']
    REDIRECT_STATUSES = [301, 302, 303, 307, 308]
    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None):
        if isinstance(headers, HTTPHeaderDict):
            self.headers = headers
        else:
            self.headers = HTTPHeaderDict(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content
        # Lazily created by _init_decoder() from the content-encoding header.
        self._decoder = None
        self._body = None
        self._fp = None
        self._original_response = original_response
        self._fp_bytes_read = 0
        # A string/bytes body is stored directly; a file-like body is kept
        # as the underlying file pointer and read on demand.
        if body and isinstance(body, (basestring, binary_type)):
            self._body = body
        self._pool = pool
        self._connection = connection
        if hasattr(body, 'read'):
            self._fp = body
        # Are we using the chunked-style of transfer encoding?
        self.chunked = False
        self.chunk_left = None
        tr_enc = self.headers.get('transfer-encoding', '').lower()
        # Don't incur the penalty of creating a list and then discarding it
        encodings = (enc.strip() for enc in tr_enc.split(","))
        if "chunked" in encodings:
            self.chunked = True
        # We certainly don't want to preload content when the response is chunked.
        if not self.chunked and preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)
    def get_redirect_location(self):
        """
        Should we redirect and where to?
        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in self.REDIRECT_STATUSES:
            return self.headers.get('location')
        return False
    def release_conn(self):
        """Return the underlying connection back to its pool, if any."""
        if not self._pool or not self._connection:
            return
        self._pool._put_conn(self._connection)
        self._connection = None
    @property
    def data(self):
        """The (possibly decoded) response body, read fully on first access."""
        # For backwards-compat with earlier urllib3 0.4 and earlier.
        if self._body:
            return self._body
        if self._fp:
            return self.read(cache_content=True)
    def tell(self):
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g, compressed).
        """
        return self._fp_bytes_read
    def _init_decoder(self):
        """
        Set-up the _decoder attribute if necessary.
        """
        # Note: content-encoding value should be case-insensitive, per RFC 7230
        # Section 3.2
        content_encoding = self.headers.get('content-encoding', '').lower()
        if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
            self._decoder = _get_decoder(content_encoding)
    def _decode(self, data, decode_content, flush_decoder):
        """
        Decode the data passed in and potentially flush the decoder.
        """
        try:
            if decode_content and self._decoder:
                data = self._decoder.decompress(data)
        except (IOError, zlib.error) as e:
            content_encoding = self.headers.get('content-encoding', '').lower()
            raise DecodeError(
                "Received response with content-encoding: %s, but "
                "failed to decode it." % content_encoding, e)
        if flush_decoder and decode_content and self._decoder:
            # Drain any bytes the decoder is still holding.
            buf = self._decoder.decompress(binary_type())
            data += buf + self._decoder.flush()
        return data
    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.
        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        self._init_decoder()
        if decode_content is None:
            decode_content = self.decode_content
        if self._fp is None:
            return
        flush_decoder = False
        try:
            try:
                if amt is None:
                    # cStringIO doesn't like amt=None
                    data = self._fp.read()
                    flush_decoder = True
                else:
                    cache_content = False
                    data = self._fp.read(amt)
                    if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                        # Close the connection when no data is returned
                        #
                        # This is redundant to what httplib/http.client _should_
                        # already do. However, versions of python released before
                        # December 15, 2012 (http://bugs.python.org/issue16298) do
                        # not properly close the connection in all cases. There is
                        # no harm in redundantly calling close.
                        self._fp.close()
                        flush_decoder = True
            except SocketTimeout:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if 'read operation timed out' not in str(e):  # Defensive:
                    # This shouldn't happen but just in case we're missing an edge
                    # case, let's avoid swallowing SSL errors.
                    raise
                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
            except HTTPException as e:
                # This includes IncompleteRead.
                raise ProtocolError('Connection broken: %r' % e, e)
            self._fp_bytes_read += len(data)
            data = self._decode(data, decode_content, flush_decoder)
            if cache_content:
                self._body = data
            return data
        finally:
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
    def stream(self, amt=2**16, decode_content=None):
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.
        :param amt:
            How much of the content to read. The generator will return up to
            much data per iteration, but may return less. This is particularly
            likely when using compressed data. However, the empty string will
            never be returned.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        if self.chunked:
            for line in self.read_chunked(amt, decode_content=decode_content):
                yield line
        else:
            while not is_fp_closed(self._fp):
                data = self.read(amt=amt, decode_content=decode_content)
                if data:
                    yield data
    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.
        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        headers = r.msg
        if not isinstance(headers, HTTPHeaderDict):
            if PY3:  # Python 3
                headers = HTTPHeaderDict(headers.items())
            else:  # Python 2
                headers = HTTPHeaderDict.from_httplib(headers)
        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        resp = ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)
        return resp
    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        """Return the response headers (httplib compatibility)."""
        return self.headers
    def getheader(self, name, default=None):
        """Return a single header value, or ``default`` (httplib compat)."""
        return self.headers.get(name, default)
    # Overrides from io.IOBase
    def close(self):
        """Close the underlying file pointer, if not already closed."""
        if not self.closed:
            self._fp.close()
    @property
    def closed(self):
        """True once the underlying file pointer is gone or closed."""
        if self._fp is None:
            return True
        elif hasattr(self._fp, 'closed'):
            return self._fp.closed
        elif hasattr(self._fp, 'isclosed'):  # Python 2
            return self._fp.isclosed()
        else:
            return True
    def fileno(self):
        """Return the file descriptor of the underlying file, if any."""
        if self._fp is None:
            raise IOError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise IOError("The file-like object this HTTPResponse is wrapped "
                          "around has no file descriptor")
    def flush(self):
        """Flush the underlying file, if it supports flushing."""
        if self._fp is not None and hasattr(self._fp, 'flush'):
            return self._fp.flush()
    def readable(self):
        # This method is required for `io` module compatibility.
        return True
    def readinto(self, b):
        # This method is required for `io` module compatibility.
        temp = self.read(len(b))
        if len(temp) == 0:
            return 0
        else:
            b[:len(temp)] = temp
            return len(temp)
    def _update_chunk_length(self):
        # First, we'll figure out length of a chunk and then
        # we'll try to read it from socket.
        if self.chunk_left is not None:
            return
        line = self._fp.fp.readline()
        # The chunk size is a hex number, optionally followed by ';ext'.
        line = line.split(b';', 1)[0]
        try:
            self.chunk_left = int(line, 16)
        except ValueError:
            # Invalid chunked protocol response, abort.
            self.close()
            raise httplib.IncompleteRead(line)
    def _handle_chunk(self, amt):
        """Read up to ``amt`` bytes (or the whole chunk if None) from the
        current chunk, consuming the trailing CRLF when the chunk ends."""
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)
            returned_chunk = chunk
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif amt < self.chunk_left:
            value = self._fp._safe_read(amt)
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else:  # amt > self.chunk_left
            returned_chunk = self._fp._safe_read(self.chunk_left)
            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk
    def read_chunked(self, amt=None, decode_content=None):
        """
        Similar to :meth:`HTTPResponse.read`, but with an additional
        parameter: ``decode_content``.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        self._init_decoder()
        # FIXME: Rewrite this method and make it a class with a better structured logic.
        if not self.chunked:
            raise ResponseNotChunked("Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing.")
        if self._original_response and self._original_response._method.upper() == 'HEAD':
            # Don't bother reading the body of a HEAD request.
            # FIXME: Can we do this somehow without accessing private httplib _method?
            self._original_response.close()
            return
        while True:
            self._update_chunk_length()
            if self.chunk_left == 0:
                break
            chunk = self._handle_chunk(amt)
            yield self._decode(chunk, decode_content=decode_content,
                               flush_decoder=True)
        # Chunk content ends with \r\n: discard it.
        while True:
            line = self._fp.fp.readline()
            if not line:
                # Some sites may not end with '\r\n'.
                break
            if line == b'\r\n':
                break
        # We read everything; close the "file".
        if self._original_response:
            self._original_response.close()
        self.release_conn()
| apache-2.0 |
tmm1/home-assistant | homeassistant/components/sensor/temper.py | 10 | 2267 | """
homeassistant.components.sensor.temper
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for getting temperature from TEMPer devices.
Configuration:
To use the temper sensors you will need to add something like the following to
your configuration.yaml file.
sensor:
platform: temper
"""
import logging
from homeassistant.helpers.entity import Entity
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['https://github.com/rkabadi/temper-python/archive/'
'3dbdaf2d87b8db9a3cd6e5585fc704537dd2d09b.zip'
'#temperusb==1.2.3']
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """ Find and return Temper sensors. """
    try:
        # pylint: disable=no-name-in-module, import-error
        from temperusb.temper import TemperHandler
    except ImportError:
        _LOGGER.error('Failed to import temperusb')
        return False

    temp_unit = hass.config.temperature_unit
    base_name = config.get(CONF_NAME, DEVICE_DEFAULT_NAME)

    # One entity per detected TEMPer device, suffixed with its index.
    sensors = []
    for index, device in enumerate(TemperHandler().get_devices()):
        sensors.append(TemperSensor(device, temp_unit,
                                    base_name + '_' + str(index)))
    add_devices_callback(sensors)
class TemperSensor(Entity):
    """ Temperature sensor backed by a single TEMPer USB device. """
    def __init__(self, temper_device, temp_unit, name):
        self._name = name
        self.temp_unit = temp_unit
        self.temper_device = temper_device
        # Last reading; None until the first successful update().
        self.current_value = None

    @property
    def name(self):
        """ Name assigned to this temperature sensor. """
        return self._name

    @property
    def state(self):
        """ Last temperature reading reported by the device. """
        return self.current_value

    @property
    def unit_of_measurement(self):
        """ Unit the temperature is reported in. """
        return self.temp_unit

    def update(self):
        """ Retrieve latest state. """
        try:
            self.current_value = self.temper_device.get_temperature()
        except IOError:
            _LOGGER.error('Failed to get temperature due to insufficient '
                          'permissions. Try running with "sudo"')
| mit |
fxia22/ASM_xf | PythonD/site_python/twisted/internet/tcp.py | 2 | 22689 | # -*- test-case-name: twisted.test.test_tcp -*-
# Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Various asynchronous TCP/IP classes.
End users shouldn't use this module directly - use the reactor APIs instead.
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
"""
# System Imports
import os
import stat
import types
import exceptions
import socket
import sys
import select
import operator
import warnings
try:
assert sys.platform != 'ms-dos5'
import fcntl
except:
fcntl = None
try:
from OpenSSL import SSL
except ImportError:
SSL = None
if os.name == 'nt':
# we hardcode these since windows actually wants e.g.
# WSAEALREADY rather than EALREADY. Possibly we should
# just be doing "from errno import WSAEALREADY as EALREADY".
EPERM = 10001
EINVAL = 10022
EWOULDBLOCK = 10035
EINPROGRESS = 10036
EALREADY = 10037
ECONNRESET = 10054
EISCONN = 10056
ENOTCONN = 10057
elif os.name != 'java':
from errno import EPERM
from errno import EINVAL
from errno import EWOULDBLOCK
from errno import EINPROGRESS
from errno import EALREADY
from errno import ECONNRESET
from errno import EISCONN
from errno import ENOTCONN
# Twisted Imports
from twisted.internet import protocol, defer, base
from twisted.persisted import styles
from twisted.python import log, failure, reflect
from twisted.python.runtime import platform, platformType
from twisted.internet.error import CannotListenError
# Sibling Imports
import abstract
import main
import interfaces
import error
class _TLSMixin:
    """Mixin grafted onto a Connection subclass once TLS is started.

    It adapts the plain doRead/doWrite/writeSomeData methods to OpenSSL's
    semantics, where a read may first require a write (and vice versa)
    during renegotiation, and tracks whether the SSL shutdown handshake
    has been performed.
    """
    # Set when an SSL write could not complete until more data is read.
    writeBlockedOnRead = 0
    # Set when an SSL read could not complete until more data is written.
    readBlockedOnWrite = 0
    # Non-zero once our side has sent the SSL close-notify.
    sslShutdown = 0
    def doRead(self):
        """Read from the TLS transport, honouring SSL want-read/want-write."""
        if self.writeBlockedOnRead:
            self.writeBlockedOnRead = 0
            return self.doWrite()
        try:
            return Connection.doRead(self)
        except SSL.ZeroReturnError:
            # close SSL layer, since other side has done so, if we haven't
            if not self.sslShutdown:
                try:
                    self.socket.shutdown()
                    self.sslShutdown = 1
                except SSL.Error:
                    pass
            return main.CONNECTION_DONE
        except SSL.WantReadError:
            return
        except SSL.WantWriteError:
            self.readBlockedOnWrite = 1
            self.startWriting()
            return
        except SSL.Error:
            return main.CONNECTION_LOST
    def doWrite(self):
        """Write to the TLS transport; may service a blocked read first."""
        if self.readBlockedOnWrite:
            self.readBlockedOnWrite = 0
            # XXX - This is touching internal guts bad bad bad
            if not self.dataBuffer:
                self.stopWriting()
            return self.doRead()
        return Connection.doWrite(self)
    def writeSomeData(self, data):
        """Write data through SSL; returns bytes written or an error code."""
        if not data:
            return 0
        try:
            return Connection.writeSomeData(self, data)
        except SSL.WantWriteError:
            return 0
        except SSL.WantReadError:
            self.writeBlockedOnRead = 1
        except SSL.Error:
            return main.CONNECTION_LOST
    def _closeSocket(self):
        """Shut down the raw socket underneath the SSL connection."""
        try:
            self.socket.sock_shutdown(2)
        except:
            try:
                self.socket.close()
            except:
                pass
    def _postLoseConnection(self):
        """Gets called after loseConnection(), after buffered data is sent.
        We close the SSL transport layer, and if the other side hasn't
        closed it yet we start reading, waiting for a ZeroReturnError
        which will indicate the SSL shutdown has completed.
        """
        try:
            done = self.socket.shutdown()
            self.sslShutdown = 1
        except SSL.Error:
            return main.CONNECTION_LOST
        if done:
            return main.CONNECTION_DONE
        else:
            # we wait for other side to close SSL connection -
            # this will be signaled by SSL.ZeroReturnError when reading
            # from the socket
            self.stopWriting()
            self.startReading()
            # don't close socket just yet
            return None
class Connection(abstract.FileDescriptor):
    """I am the superclass of all socket-based FileDescriptors.
    This is an abstract superclass of all objects which represent a TCP/IP
    connection based socket.
    """
    __implements__ = abstract.FileDescriptor.__implements__, interfaces.ITCPTransport
    # Becomes 1 once startTLS() has wrapped the socket.
    TLS = 0
    def __init__(self, skt, protocol, reactor=None):
        """Wrap an accepted/connected socket and bind it to a protocol."""
        abstract.FileDescriptor.__init__(self, reactor=reactor)
        self.socket = skt
        # Non-blocking mode is required by the reactor's readiness model.
        self.socket.setblocking(0)
        self.fileno = skt.fileno
        self.protocol = protocol
    if SSL:
        # Advertise TLS support only when pyOpenSSL is importable.
        __implements__ = __implements__ + (interfaces.ITLSTransport,)
        def startTLS(self, ctx):
            """Switch this connection to TLS using the given context factory."""
            assert not self.TLS
            self._startTLS()
            self.socket = SSL.Connection(ctx.getContext(), self.socket)
            self.fileno = self.socket.fileno
    def _startTLS(self):
        """Dynamically rebase this instance onto a TLS-aware subclass."""
        self.TLS = 1
        class TLSConnection(_TLSMixin, self.__class__):
            pass
        self.__class__ = TLSConnection
    def doRead(self):
        """Calls self.protocol.dataReceived with all available data.
        This reads up to self.bufferSize bytes of data from its socket, then
        calls self.dataReceived(data) to process it.  If the connection is not
        lost through an error in the physical recv(), this function will return
        the result of the dataReceived call.
        """
        try:
            data = self.socket.recv(self.bufferSize)
        except socket.error, se:
            if se.args[0] == EWOULDBLOCK:
                return
            else:
                return main.CONNECTION_LOST
        if not data:
            # Zero bytes from recv() means the peer closed the connection.
            return main.CONNECTION_LOST
        return self.protocol.dataReceived(data)
    def writeSomeData(self, data):
        """Connection.writeSomeData(data) -> #of bytes written | CONNECTION_LOST
        This writes as much data as possible to the socket and returns either
        the number of bytes read (which is positive) or a connection error code
        (which is negative)
        """
        try:
            return self.socket.send(data)
        except socket.error, se:
            if se.args[0] == EWOULDBLOCK:
                return 0
            else:
                return main.CONNECTION_LOST
    def _closeSocket(self):
        """Called to close our socket."""
        # This used to close() the socket, but that doesn't *really* close if
        # there's another reference to it in the TCP/IP stack, e.g. if it was
        # was inherited by a subprocess. And we really do want to close the
        # connection. So we use shutdown() instead.
        try:
            self.socket.shutdown(2)
        except socket.error:
            pass
    def connectionLost(self, reason):
        """See abstract.FileDescriptor.connectionLost().
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        self._closeSocket()
        protocol = self.protocol
        # Drop references so the garbage collector can reclaim the socket.
        del self.protocol
        del self.socket
        del self.fileno
        try:
            protocol.connectionLost(reason)
        except TypeError, e:
            # while this may break, it will only break on deprecated code
            # as opposed to other approaches that might've broken on
            # code that uses the new API (e.g. inspect).
            if e.args and e.args[0] == "connectionLost() takes exactly 1 argument (2 given)":
                warnings.warn("Protocol %s's connectionLost should accept a reason argument" % protocol,
                              category=DeprecationWarning, stacklevel=2)
                protocol.connectionLost()
            else:
                raise
    logstr = "Uninitialized"
    def logPrefix(self):
        """Return the prefix to log with when I own the logging thread.
        """
        return self.logstr
    def getTcpNoDelay(self):
        """Return 1 if TCP_NODELAY (Nagle disabled) is set, else 0."""
        return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))
    def setTcpNoDelay(self, enabled):
        """Enable/disable TCP_NODELAY on the underlying socket."""
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)
    def getTcpKeepAlive(self):
        """Return 1 if SO_KEEPALIVE is set on the socket, else 0."""
        return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET,
                                                     socket.SO_KEEPALIVE))
    def setTcpKeepAlive(self, enabled):
        """Enable/disable SO_KEEPALIVE on the underlying socket."""
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)
class BaseClient(Connection):
    """A base class for client TCP (and similiar) sockets.
    """
    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM
    def _finishInit(self, whenDone, skt, error, reactor):
        """Called by base classes to continue to next stage of initialization."""
        if whenDone:
            Connection.__init__(self, skt, None, reactor)
            # Until the connect completes, read/write readiness both mean
            # "continue the connection attempt".
            self.doWrite = self.doConnect
            self.doRead = self.doConnect
            reactor.callLater(0, whenDone)
        else:
            reactor.callLater(0, self.failIfNotConnected, error)
    def startTLS(self, ctx, client=1):
        """Start TLS; ``client`` selects connect vs. accept handshake state."""
        holder = Connection.startTLS(self, ctx)
        if client:
            self.socket.set_connect_state()
        else:
            self.socket.set_accept_state()
        return holder
    def stopConnecting(self):
        """Stop attempt to connect."""
        self.failIfNotConnected(error.UserError())
    def failIfNotConnected(self, err):
        """Report a failed connection attempt to the connector (idempotent)."""
        if (self.connected or
            self.disconnected or
            not (hasattr(self, "connector"))):
            return
        self.connector.connectionFailed(failure.Failure(err))
        if hasattr(self, "reactor"):
            # this doesn't happens if we failed in __init__
            self.stopReading()
            self.stopWriting()
            del self.connector
    def createInternetSocket(self):
        """(internal) Create a non-blocking socket using
        self.addressFamily, self.socketType.
        """
        s = socket.socket(self.addressFamily, self.socketType)
        s.setblocking(0)
        if fcntl and hasattr(fcntl, 'FD_CLOEXEC'):
            # Don't leak the descriptor into child processes.
            old = fcntl.fcntl(s.fileno(), fcntl.F_GETFD)
            fcntl.fcntl(s.fileno(), fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
        return s
    def resolveAddress(self):
        """Resolve the hostname (if needed) and then start connecting."""
        if abstract.isIPAddress(self.addr[0]):
            self._setRealAddress(self.addr[0])
        else:
            d = self.reactor.resolve(self.addr[0])
            d.addCallbacks(self._setRealAddress, self.failIfNotConnected)
    def _setRealAddress(self, address):
        """Record the resolved IP and begin the actual connect."""
        self.realAddress = (address, self.addr[1])
        self.doConnect()
    def doConnect(self):
        """I connect the socket.
        Then, call the protocol's makeConnection, and start waiting for data.
        """
        if not hasattr(self, "connector"):
            # this happens when connection failed but doConnect
            # was scheduled via a callLater in self._finishInit
            return
        # on windows failed connects are reported on exception
        # list, not write or read list.
        if platformType == "win32":
            r, w, e = select.select([], [], [self.fileno()], 0.0)
            if e:
                err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
                self.failIfNotConnected(error.getConnectError((err, os.strerror(err))))
                return
        try:
            connectResult = self.socket.connect_ex(self.realAddress)
        except socket.error, se:
            connectResult = se.args[0]
        if connectResult:
            if connectResult == EISCONN:
                pass
            # on Windows EINVAL means sometimes that we should keep trying:
            # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winsock/winsock/connect_2.asp
            elif ((connectResult in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or
                  (connectResult == EINVAL and platformType == "win32")):
                self.startReading()
                self.startWriting()
                return
            else:
                self.failIfNotConnected(error.getConnectError((connectResult, os.strerror(connectResult))))
                return
        # If I have reached this point without raising or returning, that means
        # that the socket is connected.
        del self.doWrite
        del self.doRead
        self.connected = 1
        # we first stop and then start, to reset any references to the old doRead
        self.stopReading()
        self.stopWriting()
        self.startReading()
        self.protocol = self.connector.buildProtocol(self.getPeer())
        self.protocol.makeConnection(self)
        self.logstr = self.protocol.__class__.__name__+",client"
    def connectionLost(self, reason):
        """Notify the connector; distinguish failed-connect from disconnect."""
        if not self.connected:
            self.failIfNotConnected(error.ConnectError())
        else:
            Connection.connectionLost(self, reason)
            self.connector.connectionLost(reason)
class Client(BaseClient):
    """A TCP client."""
    def __init__(self, host, port, bindAddress, connector, reactor=None):
        """Create a client connecting to (host, port), optionally binding
        the local end to ``bindAddress`` first."""
        # BaseClient.__init__ is invoked later
        self.connector = connector
        self.addr = (host, port)
        whenDone = self.resolveAddress
        err = None
        skt = None
        try:
            skt = self.createInternetSocket()
        except socket.error, se:
            err = error.ConnectBindError(se[0], se[1])
            whenDone = None
        if whenDone and bindAddress is not None:
            try:
                skt.bind(bindAddress)
            except socket.error, se:
                err = error.ConnectBindError(se[0], se[1])
                whenDone = None
        self._finishInit(whenDone, skt, err, reactor)
    def getHost(self):
        """Returns a tuple of ('INET', hostname, port).
        This indicates the address from which I am connecting.
        """
        return ('INET',)+self.socket.getsockname()
    def getPeer(self):
        """Returns a tuple of ('INET', hostname, port).
        This indicates the address that I am connected to.
        """
        return ('INET',)+self.addr
    def __repr__(self):
        """Return a debugging representation including the target address."""
        s = '<%s to %s at %x>' % (self.__class__, self.addr, id(self))
        return s
class Server(Connection):
    """Serverside socket-stream connection class.
    I am a serverside network connection transport; a socket which came from an
    accept() on a server.
    """
    def __init__(self, sock, protocol, client, server, sessionno):
        """Server(sock, protocol, client, server, sessionno)
        Initialize me with a socket, a protocol, a descriptor for my peer (a
        tuple of host, port describing the other end of the connection), an
        instance of Port, and a session number.
        """
        Connection.__init__(self, sock, protocol)
        self.server = server
        self.client = client
        self.sessionno = sessionno
        self.hostname = client[0]
        # Precomputed strings used for logging and repr().
        self.logstr = "%s,%s,%s" % (self.protocol.__class__.__name__, sessionno, self.hostname)
        self.repstr = "<%s #%s on %s>" % (self.protocol.__class__.__name__, self.sessionno, self.server.port)
        self.startReading()
        self.connected = 1
    def __repr__(self):
        """A string representation of this connection.
        """
        return self.repstr
    def startTLS(self, ctx, server=1):
        """Start TLS; ``server`` selects accept vs. connect handshake state."""
        holder = Connection.startTLS(self, ctx)
        if server:
            self.socket.set_accept_state()
        else:
            self.socket.set_connect_state()
        return holder
    def getHost(self):
        """Returns a tuple of ('INET', hostname, port).
        This indicates the servers address.
        """
        return ('INET',)+self.socket.getsockname()
    def getPeer(self):
        """
        Returns a tuple of ('INET', hostname, port), indicating the connected
        client's address.
        """
        return ('INET',)+self.client
class Port(base.BasePort):
    """I am a TCP server port, listening for connections.
    When a connection is accepted, I will call my factory's buildProtocol with
    the incoming connection as an argument, according to the specification
    described in twisted.internet.interfaces.IProtocolFactory.
    If you wish to change the sort of transport that will be used, my
    `transport' attribute will be called with the signature expected for
    Server.__init__, so it can be replaced.
    """
    # Class-level defaults; instances override port/factory/backlog/interface.
    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM
    transport = Server
    sessionno = 0
    interface = ''
    backlog = 5
    def __init__(self, port, factory, backlog=5, interface='', reactor=None):
        """Initialize with a numeric port to listen on.
        """
        base.BasePort.__init__(self, reactor=reactor)
        self.port = port
        self.factory = factory
        self.backlog = backlog
        self.interface = interface
    def __repr__(self):
        # Identify the port by its factory class and port number.
        return "<%s on %s>" % (self.factory.__class__, self.port)
    def createInternetSocket(self):
        """Create the listening socket, enabling address reuse on POSIX."""
        s = base.BasePort.createInternetSocket(self)
        if platformType == "posix":
            # Allow quick rebinding after a restart (avoids TIME_WAIT errors).
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s
    def startListening(self):
        """Create and bind my socket, and begin listening on it.
        This is called on unserialization, and must be called after creating a
        server to begin listening on the specified port.
        """
        log.msg("%s starting on %s"%(self.factory.__class__, self.port))
        try:
            skt = self.createInternetSocket()
            skt.bind((self.interface, self.port))
        except socket.error, le:
            raise CannotListenError, (self.interface, self.port, le)
        # Notify the factory before the socket starts accepting.
        self.factory.doStart()
        skt.listen(self.backlog)
        self.connected = 1
        self.socket = skt
        self.fileno = self.socket.fileno
        # Initial accept batch size; doRead() adapts it over time.
        self.numberAccepts = 100
        self.startReading()
    def doRead(self):
        """Called when my socket is ready for reading.
        This accepts a connection and calls self.protocol() to handle the
        wire-level protocol.
        """
        try:
            if platformType == "posix":
                numAccepts = self.numberAccepts
            else:
                # win32 event loop breaks if we do more than one accept()
                # in an iteration of the event loop.
                numAccepts = 1
            for i in range(numAccepts):
                # we need this so we can deal with a factory's buildProtocol
                # calling our loseConnection
                if self.disconnecting:
                    return
                try:
                    skt, addr = self.socket.accept()
                except socket.error, e:
                    if e.args[0] == EWOULDBLOCK:
                        # No more pending connections; shrink the batch to
                        # what we actually accepted this pass.
                        self.numberAccepts = i
                        break
                    elif e.args[0] == EPERM:
                        continue
                    raise
                protocol = self.factory.buildProtocol(addr)
                if protocol is None:
                    skt.close()
                    continue
                s = self.sessionno
                self.sessionno = s+1
                transport = self.transport(skt, protocol, addr, self, s)
                transport = self._preMakeConnection(transport)
                protocol.makeConnection(transport)
            else:
                # for/else: batch exhausted without blocking, so there may
                # be more pending connections - grow the batch for next time.
                self.numberAccepts = self.numberAccepts+20
        except:
            # Note that in TLS mode, this will possibly catch SSL.Errors
            # raised by self.socket.accept()
            #
            # There is no "except SSL.Error:" above because SSL may be
            # None if there is no SSL support. In any case, all the
            # "except SSL.Error:" suite would probably do is log.deferr()
            # and return, so handling it here works just as well.
            log.deferr()
    def _preMakeConnection(self, transport):
        # Hook for subclasses to wrap or replace the transport before
        # protocol.makeConnection() is invoked.
        return transport
    def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
        """Stop accepting connections on this port.
        This will shut down my socket and call self.connectionLost().
        """
        self.disconnecting = 1
        self.stopReading()
        if self.connected:
            # Defer the actual teardown to the next reactor iteration.
            self.reactor.callLater(0, self.connectionLost, connDone)
    stopListening = loseConnection
    def connectionLost(self, reason):
        """Cleans up my socket.
        """
        log.msg('(Port %r Closed)' % self.port)
        base.BasePort.connectionLost(self, reason)
        self.connected = 0
        self.socket.close()
        del self.socket
        del self.fileno
        self.factory.doStop()
    def logPrefix(self):
        """Returns the name of my class, to prefix log entries with.
        """
        return reflect.qual(self.factory.__class__)
    def getHost(self):
        """Returns a tuple of ('INET', hostname, port).
        This indicates the server's address.
        """
        return ('INET',)+self.socket.getsockname()
class Connector(base.BaseConnector):
    """Factory for outgoing TCP connections (creates Client transports)."""
    def __init__(self, host, port, factory, timeout, bindAddress, reactor=None):
        self.host = host
        # A string port is treated as a service name ('http', 'smtp', ...)
        # and resolved through /etc/services.
        if isinstance(port, types.StringTypes):
            try:
                port = socket.getservbyname(port, 'tcp')
            except socket.error, e:
                raise error.ServiceNameUnknownError(string=str(e))
        self.port = port
        self.bindAddress = bindAddress
        base.BaseConnector.__init__(self, factory, timeout, reactor)
    def _makeTransport(self):
        """Build the Client transport that performs the actual connect."""
        return Client(self.host, self.port, self.bindAddress, self, self.reactor)
    def getDestination(self):
        """Return ('INET', host, port) describing the connect target."""
        return ('INET', self.host, self.port)
| gpl-2.0 |
MarkMuth/xbmc | tools/EventClients/Clients/PS3 Sixaxis Controller/ps3d.py | 168 | 12019 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import traceback
import time
import struct
import threading
import os
if os.path.exists("../../lib/python"):
sys.path.append("../PS3 BD Remote")
sys.path.append("../../lib/python")
from bt.hid import HID
from bt.bt import bt_lookup_name
from xbmcclient import XBMCClient
from ps3 import sixaxis
from ps3_remote import process_keys as process_remote
try:
from ps3 import sixwatch
except Exception, e:
print "Failed to import sixwatch now disabled: " + str(e)
sixwatch = None
try:
import zeroconf
except:
zeroconf = None
ICON_PATH = "../../icons/"
else:
# fallback to system wide modules
from kodi.bt.hid import HID
from kodi.bt.bt import bt_lookup_name
from kodi.xbmcclient import XBMCClient
from kodi.ps3 import sixaxis
from kodi.ps3_remote import process_keys as process_remote
from kodi.defs import *
try:
from kodi.ps3 import sixwatch
except Exception, e:
print "Failed to import sixwatch now disabled: " + str(e)
sixwatch = None
try:
import kodi.zeroconf as zeroconf
except:
zeroconf = None
event_threads = []
def printerr():
    """Print the current exception and its traceback to stdout.

    Uses the Python 2-only ``sys.exc_type``/``sys.exc_value`` globals for
    the exception text and ``sys.exc_info()`` for the traceback.
    """
    trace = ""
    exception = ""
    exc_list = traceback.format_exception_only (sys.exc_type, sys.exc_value)
    for entry in exc_list:
        exception += entry
    tb_list = traceback.format_tb(sys.exc_info()[2])
    for entry in tb_list:
        trace += entry
    print("%s\n%s" % (exception, trace), "Script Error")
class StoppableThread ( threading.Thread ):
    """Thread base class with a stop flag, idle timeout and socket cleanup.

    Subclasses are expected to set ``self.csock``/``self.isock`` (control
    and interrupt sockets) before ``close_sockets`` is called.
    """
    def __init__(self):
        threading.Thread.__init__(self)
        self._stop = False
        self.set_timeout(0)
    def stop_thread(self):
        # Ask the run() loop to exit at its next check of stop().
        self._stop = True
    def stop(self):
        return self._stop
    def close_sockets(self):
        # Best-effort close of both HID sockets; errors are ignored.
        if self.isock:
            try:
                self.isock.close()
            except:
                pass
        self.isock = None
        if self.csock:
            try:
                self.csock.close()
            except:
                pass
        self.csock = None
        self.last_action = 0
    def set_timeout(self, seconds):
        # Idle period (seconds) after which timed_out() reports True.
        self.timeout = seconds
    def reset_timeout(self):
        self.last_action = time.time()
    def idle_time(self):
        """Seconds elapsed since the last reset_timeout() call."""
        return time.time() - self.last_action
    def timed_out(self):
        """True when the idle time has exceeded the configured timeout."""
        if (time.time() - self.last_action) > self.timeout:
            return True
        else:
            return False
class PS3SixaxisThread ( StoppableThread ):
    """Per-connection handler thread for a Sixaxis controller.

    Forwards controller events from the interrupt socket to XBMC until
    stopped, disconnected, or idle for more than 10 minutes.
    """
    def __init__(self, csock, isock, ipaddr="127.0.0.1"):
        StoppableThread.__init__(self)
        self.csock = csock
        self.isock = isock
        self.xbmc = XBMCClient(name="PS3 Sixaxis", icon_file=ICON_PATH + "/bluetooth.png", ip=ipaddr)
        # Power the controller off after 600 s without input.
        self.set_timeout(600)
    def run(self):
        six = sixaxis.sixaxis(self.xbmc, self.csock, self.isock)
        self.xbmc.connect()
        self.reset_timeout()
        try:
            while not self.stop():
                if self.timed_out():
                    raise Exception("PS3 Sixaxis powering off, timed out")
                if self.idle_time() > 50:
                    # Periodic reconnect keeps the UDP event session alive.
                    self.xbmc.connect()
                try:
                    if six.process_socket(self.isock):
                        self.reset_timeout()
                except Exception, e:
                    print e
                    break
        except Exception, e:
            printerr()
        six.close()
        self.close_sockets()
class PS3RemoteThread ( StoppableThread ):
    """Per-connection handler thread for a PS3 Blu-Ray remote.

    Forwards remote key presses to XBMC and, when zeroconf is available,
    lets special key combos cycle between the discovered XBMC instances.
    """
    def __init__(self, csock, isock, ipaddr="127.0.0.1"):
        StoppableThread.__init__(self)
        self.csock = csock
        self.isock = isock
        self.xbmc = XBMCClient(name="PS3 Blu-Ray Remote", icon_file=ICON_PATH + "/bluetooth.png", ip=ipaddr)
        # Power the remote off after 600 s without input.
        self.set_timeout(600)
        self.services = []
        self.current_xbmc = 0
    def run(self):
        self.xbmc.connect()
        try:
            # start the zeroconf thread if possible
            try:
                self.zeroconf_thread = ZeroconfThread()
                self.zeroconf_thread.add_service('_xbmc-events._udp',
                                        self.zeroconf_service_handler)
                self.zeroconf_thread.start()
            except Exception, e:
                print str(e)
            # main thread loop
            while not self.stop():
                status = process_remote(self.isock, self.xbmc)
                if status == 2: # 2 = socket read timeout
                    if self.timed_out():
                        raise Exception("PS3 Blu-Ray Remote powering off, "\
                                        "timed out")
                elif status == 3: # 3 = ps and skip +
                    self.next_xbmc()
                elif status == 4: # 4 = ps and skip -
                    self.previous_xbmc()
                elif not status: # 0 = keys are normally processed
                    self.reset_timeout()
            # process_remote() will raise an exception on read errors
        except Exception, e:
            print str(e)
        self.zeroconf_thread.stop()
        self.close_sockets()
    def next_xbmc(self):
        """
        Connect to the next XBMC instance
        """
        self.current_xbmc = (self.current_xbmc + 1) % len( self.services )
        self.reconnect()
        return
    def previous_xbmc(self):
        """
        Connect to the previous XBMC instance
        """
        self.current_xbmc -= 1
        if self.current_xbmc < 0 :
            # Wrap around to the last discovered instance.
            self.current_xbmc = len( self.services ) - 1
        self.reconnect()
        return
    def reconnect(self):
        """
        Reconnect to an XBMC instance based on self.current_xbmc
        """
        try:
            service = self.services[ self.current_xbmc ]
            print "Connecting to %s" % service['name']
            self.xbmc.connect( service['address'], service['port'] )
            self.xbmc.send_notification("PS3 Blu-Ray Remote", "New Connection", None)
        except Exception, e:
            print str(e)
    def zeroconf_service_handler(self, event, service):
        """
        Zeroconf event handler
        """
        if event == zeroconf.SERVICE_FOUND: # new xbmc service detected
            self.services.append( service )
        elif event == zeroconf.SERVICE_LOST: # xbmc service lost
            try:
                # search for the service by name, since IP+port isn't available
                for s in self.services:
                    # nuke it, if found
                    if service['name'] == s['name']:
                        self.services.remove(s)
                        break
            except:
                pass
        return
class SixWatch(threading.Thread):
    """Daemon thread that watches USB for Sixaxis pads and pairs them.

    Restarts sixwatch.main() forever if it ever raises; starts itself
    immediately from the constructor.
    """
    def __init__(self, mac):
        threading.Thread.__init__(self)
        self.mac = mac
        # Daemon thread: do not block process exit.
        self.daemon = True
        self.start()
    def run(self):
        while True:
            try:
                sixwatch.main(self.mac)
            except Exception, e:
                print "Exception caught in sixwatch, restarting: " + str(e)
class ZeroconfThread ( threading.Thread ):
    """Thread wrapping a zeroconf.Browser service-discovery event loop."""
    def __init__(self):
        threading.Thread.__init__(self)
        self._zbrowser = None
        self._services = []
    def run(self):
        # zeroconf may be None when the optional module failed to import.
        if zeroconf:
            # create zeroconf service browser
            self._zbrowser = zeroconf.Browser()
            # add the requested services
            for service in self._services:
                self._zbrowser.add_service( service[0], service[1] )
            # run the event loop
            self._zbrowser.run()
        return
    def stop(self):
        """
        Stop the zeroconf browser
        """
        try:
            self._zbrowser.stop()
        except:
            pass
        return
    def add_service(self, type, handler):
        """
        Add a new service to search for.
        NOTE: Services must be added before thread starts.
        """
        self._services.append( [ type, handler ] )
def usage():
    # Print command-line help for the daemon (Python 2 print statement;
    # the string body is emitted verbatim).
    print """
PS3 Sixaxis / Blu-Ray Remote HID Server v0.1
Usage: ps3.py [bdaddress] [XBMC host]
bdaddress => address of local bluetooth device to use (default: auto)
(e.g. aa:bb:cc:dd:ee:ff)
ip address => IP address or hostname of the XBMC instance (default: localhost)
(e.g. 192.168.1.110)
"""
def start_hidd(bdaddr=None, ipaddr="127.0.0.1"):
    """Main accept loop: listen for Bluetooth HID connections forever.

    Dispatches each incoming device, identified by its Bluetooth name,
    to the matching handler (Sixaxis pad or Blu-Ray remote).
    """
    devices = [ 'PLAYSTATION(R)3 Controller',
                'BD Remote Control' ]
    hid = HID(bdaddr)
    watch = None
    if sixwatch:
        try:
            print "Starting USB sixwatch"
            watch = SixWatch(hid.get_local_address())
        except Exception, e:
            print "Failed to initialize sixwatch" + str(e)
            pass
    while True:
        if hid.listen():
            (csock, addr) = hid.get_control_socket()
            # The Bluetooth device name decides which handler to use.
            device_name = bt_lookup_name(addr[0])
            if device_name == devices[0]:
                # handle PS3 controller
                handle_ps3_controller(hid, ipaddr)
            elif device_name == devices[1]:
                # handle the PS3 remote
                handle_ps3_remote(hid, ipaddr)
            else:
                print "Unknown Device: %s" % (device_name)
def handle_ps3_controller(hid, ipaddr):
print "Received connection from a Sixaxis PS3 Controller"
csock = hid.get_control_socket()[0]
isock = hid.get_interrupt_socket()[0]
sixaxis = PS3SixaxisThread(csock, isock, ipaddr)
add_thread(sixaxis)
sixaxis.start()
return
def handle_ps3_remote(hid, ipaddr):
    """Spawn and register a handler thread for a connected Blu-Ray remote."""
    print "Received connection from a PS3 Blu-Ray Remote"
    csock = hid.get_control_socket()[0]
    isock = hid.get_interrupt_socket()[0]
    # A 1 s read timeout lets the remote thread poll its stop flag.
    isock.settimeout(1)
    remote = PS3RemoteThread(csock, isock, ipaddr)
    add_thread(remote)
    remote.start()
    return
def add_thread(thread):
    """Remember *thread* so the shutdown code can stop and join it later."""
    # No ``global`` statement is needed: append() mutates the existing
    # module-level list instead of rebinding the name (the original
    # declaration was redundant).
    event_threads.append(thread)
def main():
    """Parse the CLI (optional bdaddr and/or XBMC host) and start the daemon.

    Any argument that looks like 'aa:bb:cc:dd:ee:ff' is taken as the
    local Bluetooth adapter address; anything else is treated as the
    XBMC host/IP.
    """
    if len(sys.argv)>3:
        return usage()
    bdaddr = ""
    ipaddr = "127.0.0.1"
    try:
        for addr in sys.argv[1:]:
            try:
                # ensure that the addr is of the format 'aa:bb:cc:dd:ee:ff'
                if "".join([ str(len(a)) for a in addr.split(":") ]) != "222222":
                    raise Exception("Invalid format")
                bdaddr = addr
                print "Connecting to Bluetooth device: %s" % bdaddr
            except Exception, e:
                # Not a Bluetooth address: fall back to treating it as host.
                try:
                    ipaddr = addr
                    print "Connecting to : %s" % ipaddr
                except:
                    print str(e)
                    return usage()
    except Exception, e:
        pass
    print "Starting HID daemon"
    start_hidd(bdaddr, ipaddr)
if __name__=="__main__":
    try:
        main()
    finally:
        # On exit (normal or via exception/Ctrl-C) stop and join every
        # handler thread that was registered through add_thread().
        for t in event_threads:
            try:
                print "Waiting for thread "+str(t)+" to terminate"
                t.stop_thread()
                if t.isAlive():
                    t.join()
                print "Thread "+str(t)+" terminated"
            except Exception, e:
                print str(e)
                pass
| gpl-2.0 |
carmark/vbox | src/VBox/GuestHost/OpenGL/packer/pack_current.py | 22 | 1811 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
# This script generates the pack_current.c file.
import sys
sys.path.append( "../glapi_parser" )
import apiutil
from pack_currenttypes import *
apiutil.CopyrightC()
print """
/* DO NOT EDIT - THIS FILE GENERATED BY THE pack_current.py SCRIPT */
#include <memory.h>
#include "packer.h"
#include "state/cr_currentpointers.h"
#include <stdio.h>
void crPackOffsetCurrentPointers( int offset )
{
CR_GET_PACKER_CONTEXT(pc);
GLnormal_p *normal = &(pc->current.c.normal);
GLcolor_p *color = &(pc->current.c.color);
GLsecondarycolor_p *secondaryColor = &(pc->current.c.secondaryColor);
GLtexcoord_p *texCoord = &(pc->current.c.texCoord);
GLindex_p *index = &(pc->current.c.index);
GLedgeflag_p *edgeFlag = &(pc->current.c.edgeFlag);
GLvertexattrib_p *vertexAttrib = &(pc->current.c.vertexAttrib);
GLfogcoord_p *fogCoord = &(pc->current.c.fogCoord);
int i;
"""
for k in current_fns.keys():
name = '%s%s' % (k[:1].lower(),k[1:])
if current_fns[k].has_key( 'array' ):
print '\tfor (i = 0 ; i < %s ; i++)' % current_fns[k]['array']
print '\t{'
for type in current_fns[k]['types']:
for size in current_fns[k]['sizes']:
indent = ""
ptr = "%s->%s%d" % (name, type, size )
if current_fns[k].has_key( 'array' ):
ptr += "[i]"
indent = "\t"
print "%s\tif ( %s )" % (indent, ptr)
print "%s\t{" % indent
print "%s\t\t%s += offset;" % (indent, ptr )
print "%s\t}" % indent
if current_fns[k].has_key( 'array' ):
print '\t}'
print """
}
void crPackNullCurrentPointers( void )
{
CR_GET_PACKER_CONTEXT(pc);
CRCurrentStateAttr *c = &(pc->current.c);
"""
print '\tmemset ( c, 0, sizeof (CRCurrentStateAttr));'
print "}"
| gpl-2.0 |
ekristen/mythboxee | mythtv/__init__.py | 1 | 1533 | #!/usr/bin/env python
# Public API of the mythtv package: names re-exported from the submodules
# pulled in by the version-dependent exec blocks below.
__all__ = ['MythStatic', \
        \
        'DictData', 'DBData', 'DBDataWrite', 'DBDataCRef', 'MythDBConn', \
        'MythBEConn', 'MythXMLConn', 'MythLog', 'MythError', \
        'StorageGroup', 'Grabber', \
        \
        'ftopen', 'FileTransfer', 'FreeSpace', 'Program', 'Record', \
        'Recorded', 'RecordedProgram', 'OldRecorded', 'Job', 'Channel', \
        'Guide', 'Video', 'VideoGrabber', 'NetVisionRSSItem', \
        'NetVisionTreeItem', 'NetVisionSite', 'NetVisionGrabber', \
        \
        'MythBE', 'Frontend', 'MythDB', 'MythVideo', 'MythXML']
# On Python >= 2.6 the submodule imports emit DeprecationWarnings, so the
# imports are wrapped in a catch_warnings block; older Pythons lack that
# context manager and use the plain variant.
import26 = """
import warnings
with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    from MythStatic import *
    from MythBase import *
    from MythData import *
    from MythFunc import *
"""
import25 = """
from MythStatic import *
from MythBase import *
from MythData import *
from MythFunc import *
"""
from sys import version_info
if version_info >= (2, 6): # 2.6 or newer
    exec(import26)
else:
    exec(import25)
# OWN_VERSION is provided by MythStatic via the star-import above.
__version__ = OWN_VERSION
#MythStatic.mysqldb = MySQLdb.__version__
if __name__ == '__main__':
    # Interactive REPL with the package namespace preloaded.
    banner = 'MythTV Python interactive shell.'
    import code
    try:
        import readline, rlcompleter
    except:
        pass
    else:
        readline.parse_and_bind("tab: complete")
        banner += ' TAB completion available.'
    namespace = globals().copy()
    namespace.update(locals())
    code.InteractiveConsole(namespace).interact(banner)
| mit |
kitsunde/ansible | contrib/inventory/ovirt.py | 65 | 9521 | #!/usr/bin/env python
# Copyright 2015 IIX Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ovirt external inventory script
=================================
Generates inventory that Ansible can understand by making API requests to
oVirt via the ovirt-engine-sdk-python library.
When run against a specific host, this script returns the following variables
based on the data obtained from the ovirt_sdk Node object:
- ovirt_uuid
- ovirt_id
- ovirt_image
- ovirt_machine_type
- ovirt_ips
- ovirt_name
- ovirt_description
- ovirt_status
- ovirt_zone
- ovirt_tags
- ovirt_stats
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- running status:
group name prefixed with 'status_' (e.g. status_up, status_down,..)
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a"
Use the ovirt inventory script to print out instance specific information
$ contrib/inventory/ovirt.py --host my_instance
Author: Josha Inglis <jinglis@iix.net> based on the gce.py by Eric Johnson <erjohnso@google.com>
Version: 0.0.1
"""
USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin"
USER_AGENT_VERSION = "v1"
import sys
import os
import argparse
import ConfigParser
from collections import defaultdict
try:
import json
except ImportError:
# noinspection PyUnresolvedReferences,PyPackageRequirements
import simplejson as json
try:
# noinspection PyUnresolvedReferences
from ovirtsdk.api import API
# noinspection PyUnresolvedReferences
from ovirtsdk.xml import params
except ImportError:
print("ovirt inventory script requires ovirt-engine-sdk-python")
sys.exit(1)
class OVirtInventory(object):
    """Ansible dynamic-inventory provider backed by the oVirt SDK.

    Constructing the object runs the whole program: parse CLI args,
    connect to oVirt, print JSON for one host or all groups, then exit.
    """
    def __init__(self):
        # Read settings and parse CLI arguments
        self.args = self.parse_cli_args()
        self.driver = self.get_ovirt_driver()
        # Just display data for specific host
        if self.args.host:
            print self.json_format_dict(
                self.node_to_dict(self.get_instance(self.args.host)),
                pretty=self.args.pretty
            )
            sys.exit(0)
        # Otherwise, assume user wants all instances grouped
        print(
            self.json_format_dict(
                data=self.group_instances(),
                pretty=self.args.pretty
            )
        )
        sys.exit(0)
    @staticmethod
    def get_ovirt_driver():
        """
        Determine the ovirt authorization settings and return a ovirt_sdk driver.
        :rtype : ovirtsdk.api.API
        """
        kwargs = {}
        ovirt_ini_default_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "ovirt.ini")
        ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path)
        # Create a ConfigParser.
        # This provides empty defaults to each key, so that environment
        # variable configuration (as opposed to INI configuration) is able
        # to work.
        config = ConfigParser.SafeConfigParser(defaults={
            'ovirt_url': '',
            'ovirt_username': '',
            'ovirt_password': '',
            'ovirt_api_secrets': '',
        })
        if 'ovirt' not in config.sections():
            config.add_section('ovirt')
        config.read(ovirt_ini_path)
        # Attempt to get ovirt params from a configuration file, if one
        # exists.
        secrets_path = config.get('ovirt', 'ovirt_api_secrets')
        secrets_found = False
        try:
            # noinspection PyUnresolvedReferences,PyPackageRequirements
            import secrets
            kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
            secrets_found = True
        except ImportError:
            pass
        if not secrets_found and secrets_path:
            if not secrets_path.endswith('secrets.py'):
                err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py"
                print(err)
                sys.exit(1)
            sys.path.append(os.path.dirname(secrets_path))
            try:
                # noinspection PyUnresolvedReferences,PyPackageRequirements
                import secrets
                kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {})
            except ImportError:
                pass
        if not secrets_found:
            kwargs = {
                'url': config.get('ovirt', 'ovirt_url'),
                'username': config.get('ovirt', 'ovirt_username'),
                'password': config.get('ovirt', 'ovirt_password'),
            }
        # If the appropriate environment variables are set, they override
        # other configuration; process those into our args and kwargs.
        # NOTE(review): os.environ.get() returns None when a variable is
        # unset, so this clobbers any INI/secrets values with None unless
        # all three env vars are set — looks like a bug; confirm intent.
        kwargs['url'] = os.environ.get('OVIRT_URL')
        kwargs['username'] = os.environ.get('OVIRT_EMAIL')
        kwargs['password'] = os.environ.get('OVIRT_PASS')
        # Retrieve and return the ovirt driver.
        return API(insecure=True, **kwargs)
    @staticmethod
    def parse_cli_args():
        """
        Command line argument processing
        :rtype : argparse.Namespace
        """
        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt')
        parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
        parser.add_argument('--host', action='store', help='Get all information about an instance')
        parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)')
        return parser.parse_args()
    def node_to_dict(self, inst):
        """
        Flatten one VM into the ovirt_* host-variable dict.
        :type inst: params.VM
        """
        if inst is None:
            return {}
        inst.get_custom_properties()
        ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \
            if inst.get_guest_info() is not None else []
        stats = {}
        for stat in inst.get_statistics().list():
            stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum()
        return {
            'ovirt_uuid': inst.get_id(),
            'ovirt_id': inst.get_id(),
            'ovirt_image': inst.get_os().get_type(),
            'ovirt_machine_type': inst.get_instance_type(),
            'ovirt_ips': ips,
            'ovirt_name': inst.get_name(),
            'ovirt_description': inst.get_description(),
            'ovirt_status': inst.get_status().get_state(),
            'ovirt_zone': inst.get_cluster().get_id(),
            'ovirt_tags': self.get_tags(inst),
            'ovirt_stats': stats,
            # Hosts don't have a public name, so we add an IP
            'ansible_ssh_host': ips[0] if len(ips) > 0 else None
        }
    @staticmethod
    def get_tags(inst):
        """
        Return the tag names attached to a VM.
        :type inst: params.VM
        """
        return [x.get_name() for x in inst.get_tags().list()]
    # noinspection PyBroadException,PyUnusedLocal
    def get_instance(self, instance_name):
        """Gets details about a specific instance """
        try:
            return self.driver.vms.get(name=instance_name)
        except Exception as e:
            # Any lookup failure is reported as "no such instance".
            return None
    def group_instances(self):
        """Group all instances"""
        groups = defaultdict(list)
        meta = {"hostvars": {}}
        for node in self.driver.vms.list():
            assert isinstance(node, params.VM)
            name = node.get_name()
            meta["hostvars"][name] = self.node_to_dict(node)
            # Group by cluster ("zone"), tag_<tag>, network_<nic> and
            # status_<state>, mirroring other Ansible inventory scripts.
            zone = node.get_cluster().get_name()
            groups[zone].append(name)
            tags = self.get_tags(node)
            for t in tags:
                tag = 'tag_%s' % t
                groups[tag].append(name)
            nets = [x.get_name() for x in node.get_nics().list()]
            for net in nets:
                net = 'network_%s' % net
                groups[net].append(name)
            status = node.get_status().get_state()
            stat = 'status_%s' % status.lower()
            if stat in groups:
                groups[stat].append(name)
            else:
                groups[stat] = [name]
        groups["_meta"] = meta
        return groups
    @staticmethod
    def json_format_dict(data, pretty=False):
        """ Converts a dict to a JSON object and dumps it as a formatted
        string """
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)
# Run the script
OVirtInventory()
| gpl-3.0 |
Coelhon/MasterRepo.repository | plugin.video.SportsDevil-2016.12.31/lib/utils/fileUtils.py | 15 | 4919 | # -*- coding: utf-8 -*-
import os
import datetime, time
import random
import hashlib
import codecs
#######################################
# File Helpers
#######################################
def fileExists(filename):
    """Return True when *filename* names an existing regular file."""
    return os.path.isfile(filename)
def getFileExtension(filename):
    """Return the extension of *filename* without the leading dot.

    Only the final path component is inspected, so a dot inside a parent
    directory name (e.g. 'dir.v2/file') no longer produces a bogus
    extension — the original searched the whole path with rfind('.').
    Returns '' when the name contains no dot.
    """
    base = os.path.basename(filename)
    ext_pos = base.rfind('.')
    if ext_pos != -1:
        return base[ext_pos + 1:]
    else:
        return ''
def get_immediate_subdirectories(directory):
    """List the names (not full paths) of *directory*'s direct subdirs."""
    entries = os.listdir(directory)
    return [entry for entry in entries
            if os.path.isdir(os.path.join(directory, entry))]
def findInSubdirectory(filename, subdirectory=''):
    """Recursively search *subdirectory* (default: cwd) for *filename*.

    Returns the full path of the first match in os.walk order.
    Raises IOError when the file is not found anywhere in the tree.
    """
    if subdirectory:
        path = subdirectory
    else:
        path = os.getcwd()
    for root, _, names in os.walk(path):
        if filename in names:
            return os.path.join(root, filename)
    # The original raised a bare string, which is illegal in modern
    # Python (TypeError: exceptions must derive from BaseException);
    # raise a real exception instead.
    raise IOError('File not found: %s' % filename)
def cleanFilename(s):
    """Strip characters that are illegal in (Windows) file names.

    Returns '' for a falsy input. Single pass over the string instead of
    the original's one full-string replace() per forbidden character
    (also drops the stray trailing semicolon).
    """
    if not s:
        return ''
    badchars = '\\/:*?\"<>|'
    return ''.join(c for c in s if c not in badchars)
def randomFilename(directory, chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', length = 8, prefix = '', suffix = '', attempts = 10000):
    """Pick a random, not-yet-existing file name inside *directory*.

    Up to *attempts* candidates of *length* characters drawn from *chars*
    (wrapped in *prefix*/*suffix*) are tried; None is returned when every
    candidate collides with an existing path.
    """
    remaining = attempts
    while remaining > 0:
        remaining -= 1
        core = ''.join(random.choice(chars) for _ in range(length))
        candidate = '%s%s%s' % (prefix, core, suffix)
        if not os.path.exists(os.path.join(directory, candidate)):
            return candidate
    return None
def getFileContent(filename):
    """Read *filename* as UTF-8 text; return '' on any failure."""
    try:
        with codecs.open(filename, 'r', 'utf-8') as handle:
            return handle.read()
    except:
        return ''
def setFileContent(filename, txt, createFolders=False):
    """Write *txt* to *filename* as UTF-8, optionally creating parents.

    Returns True on success, False on any error (matching the original
    best-effort contract).
    """
    try:
        if createFolders:
            folderPath = os.path.dirname(filename)
            if not os.path.exists(folderPath):
                # 0o777 is the modern spelling of the old 0777 literal
                # (the latter is a SyntaxError on Python 3).
                os.makedirs(folderPath, 0o777)
        # ``with`` guarantees the handle is closed even if write() fails;
        # the original leaked the handle on a write error.
        with codecs.open(filename, 'w', 'utf-8') as f:
            f.write(txt)
        return True
    except:
        return False
def appendFileContent(filename, txt):
    """Append *txt* (UTF-8) to *filename*; True on success, else False."""
    try:
        with codecs.open(filename, 'a', 'utf-8') as handle:
            handle.write(txt)
        return True
    except:
        return False
def md5(fileName, excludeLine="", includeLine=""):
    """Compute md5 hash of the specified file"""
    # NOTE: the digest depends on the exact line splitting and on the
    # utf-8 decoding done by codecs.open below; changing either would
    # silently invalidate hashes computed earlier, so the logic is kept
    # as-is and only documented here.
    m = hashlib.md5()
    try:
        # 'rb' plus an encoding makes codecs.open return decoded text;
        # unreadable files yield None (no digest) instead of raising.
        fd = codecs.open(fileName,"rb",'utf-8')
    except IOError:
        #print "Unable to open the file in readmode:", fileName
        return
    content = fd.readlines()
    fd.close()
    for eachLine in content:
        # Lines starting with excludeLine (when given) are skipped.
        if excludeLine and eachLine.startswith(excludeLine):
            continue
        m.update(eachLine)
    # includeLine is always folded into the digest, even when empty.
    m.update(includeLine)
    return m.hexdigest()
def lastModifiedAt(path):
    """Return the file's modification time as a naive UTC datetime."""
    mtime = os.path.getmtime(path)
    return datetime.datetime.utcfromtimestamp(mtime)
def setLastModifiedAt(path, date):
    """Set the file's mtime to *date* (interpreted as local time).

    The access time is preserved. Returns True on success, False on any
    error.
    """
    try:
        info = os.stat(path)
        new_mtime = int(time.mktime(date.timetuple()))
        os.utime(path, (info.st_atime, new_mtime))
        return True
    except:
        pass
    return False
def checkQuota(directory, limit=200*1024):
    """Sum file sizes under *directory* (bottom-up walk).

    Returns (total, True) while the total stays within *limit*, or
    (limit, False) as soon as the running total exceeds it.
    """
    used = 0
    for root, dirs, files in os.walk(directory, topdown=False):
        for name in files:
            used += os.path.getsize(os.path.join(root, name))
            if used > limit:
                return limit, False
    return used, True
def clearDirectory(path):
    """Delete every file below *path*; directories themselves are kept.

    Returns False when the traversal or a removal raises, True otherwise.
    """
    try:
        for root, _, files in os.walk(path, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
    except:
        return False
    return True
# http://akiscode.com/articles/sha-1directoryhash.shtml
# Copyright (c) 2009 Stephen Akiki
# MIT License (Means you can do whatever you want with this)
# See http://www.opensource.org/licenses/mit-license.php
# Error Codes:
# -1 -> Directory does not exist
# -2 -> General error (see stack traceback)
def GetHashofDirs(directory, verbose=0):
    """Compute a SHA-1 fingerprint of all file contents under *directory*.

    Walks the tree, hashes each file in 4096-char chunks and folds the
    per-chunk hex digests into one running SHA-1.

    Returns:
        -1  if *directory* does not exist
        -2  on any unexpected error during hashing
        otherwise the hex digest string.
    """
    SHAhash = hashlib.sha1()
    if not os.path.exists (directory):
        return -1
    try:
        for root, _, files in os.walk(directory):
            for names in files:
                filepath = os.path.join(root,names)
                try:
                    f1 = codecs.open(filepath, 'rb','utf-8')
                except:
                    # Bug fix: the original called f1.close() here even
                    # though f1 was never bound on an open() failure,
                    # raising NameError and aborting the whole hash with
                    # -2. Simply skip the unreadable file instead.
                    continue
                while 1:
                    # Read file in as little chunks
                    buf = f1.read(4096)
                    if not buf:
                        break
                    # Encode explicitly so this works on Python 3 as
                    # well; for ASCII content the resulting digest is
                    # identical to the original implicit-encode version.
                    chunk_digest = hashlib.sha1(buf.encode('utf-8')).hexdigest()
                    SHAhash.update(chunk_digest.encode('ascii'))
                f1.close()
    except:
        import traceback
        # Print the stack traceback
        traceback.print_exc()
        return -2
    return SHAhash.hexdigest()
| gpl-2.0 |
Drooids/odoo | addons/mail/tests/test_mail_message.py | 172 | 27426 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .common import TestMail
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestMailMail(TestMail):
    """Tests for partner resolution from raw email strings."""
    def test_00_partner_find_from_email(self):
        """ Tests designed for partner fetch based on emails. """
        cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
        # --------------------------------------------------
        # Data creation
        # --------------------------------------------------
        # 1 - Partner ARaoul
        p_a_id = self.res_partner.create(cr, uid, {'name': 'ARaoul', 'email': 'test@test.fr'})
        # --------------------------------------------------
        # CASE1: without object
        # --------------------------------------------------
        # Do: find partner with email -> first partner should be found
        partner_info = self.mail_thread.message_partner_info_from_emails(cr, uid, None, ['Maybe Raoul <test@test.fr>'], link_mail=False)[0]
        self.assertEqual(partner_info['full_name'], 'Maybe Raoul <test@test.fr>',
                         'mail_thread: message_partner_info_from_emails did not handle email')
        self.assertEqual(partner_info['partner_id'], p_a_id,
                         'mail_thread: message_partner_info_from_emails wrong partner found')
        # Data: add some data about partners
        # 2 - User BRaoul (same email as ARaoul but linked to a user, which
        #     should take precedence in the lookup below)
        p_b_id = self.res_partner.create(cr, uid, {'name': 'BRaoul', 'email': 'test@test.fr', 'user_ids': [(4, user_raoul.id)]})
        # Do: find partner with email -> first user should be found
        partner_info = self.mail_thread.message_partner_info_from_emails(cr, uid, None, ['Maybe Raoul <test@test.fr>'], link_mail=False)[0]
        self.assertEqual(partner_info['partner_id'], p_b_id,
                         'mail_thread: message_partner_info_from_emails wrong partner found')
        # --------------------------------------------------
        # CASE1: with object
        # --------------------------------------------------
        # Do: find partner in group where there is a follower with the email -> should be taken
        self.mail_group.message_subscribe(cr, uid, [group_pigs.id], [p_b_id])
        partner_info = self.mail_group.message_partner_info_from_emails(cr, uid, group_pigs.id, ['Maybe Raoul <test@test.fr>'], link_mail=False)[0]
        self.assertEqual(partner_info['partner_id'], p_b_id,
                         'mail_thread: message_partner_info_from_emails wrong partner found')
class TestMailMessage(TestMail):
    """ Tests for mail.message: generated email headers (message_id, reply_to,
    catchall/document aliases), search() access rights, check_access_rule(),
    starring, read status, voting, and a Chatter-like flow mixing access
    rights between a portal-less user (Bert), an employee (Raoul) and Admin.
    """

    def test_00_mail_message_values(self):
        """ Tests designed for testing email values based on mail.message, aliases, ... """
        cr, uid, user_raoul_id = self.cr, self.uid, self.user_raoul_id

        # Data: update + generic variables
        reply_to1 = '_reply_to1@example.com'
        reply_to2 = '_reply_to2@example.com'
        email_from1 = 'from@example.com'
        alias_domain = 'schlouby.fr'
        raoul_from = 'Raoul Grosbedon <raoul@raoul.fr>'
        raoul_from_alias = 'Raoul Grosbedon <raoul@schlouby.fr>'
        raoul_reply_alias = 'YourCompany Pigs <group+pigs@schlouby.fr>'

        # --------------------------------------------------
        # Case1: without alias_domain
        # --------------------------------------------------
        # Remove the catchall domain so no alias-based reply_to can be built
        param_ids = self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.domain')])
        self.registry('ir.config_parameter').unlink(cr, uid, param_ids)

        # Do: free message; specified values > default values
        msg_id = self.mail_message.create(cr, user_raoul_id, {'no_auto_thread': True, 'reply_to': reply_to1, 'email_from': email_from1})
        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
        # Test: message content
        self.assertIn('reply_to', msg.message_id,
                      'mail_message: message_id should be specific to a mail_message with a given reply_to')
        self.assertEqual(msg.reply_to, reply_to1,
                         'mail_message: incorrect reply_to: should come from values')
        self.assertEqual(msg.email_from, email_from1,
                         'mail_message: incorrect email_from: should come from values')

        # Do: create a mail_mail with the previous mail_message + specified reply_to
        mail_id = self.mail_mail.create(cr, user_raoul_id, {'mail_message_id': msg_id, 'state': 'cancel', 'reply_to': reply_to2})
        mail = self.mail_mail.browse(cr, user_raoul_id, mail_id)
        # Test: mail_mail content overrides the linked mail_message values
        self.assertEqual(mail.reply_to, reply_to2,
                         'mail_mail: incorrect reply_to: should come from values')
        self.assertEqual(mail.email_from, email_from1,
                         'mail_mail: incorrect email_from: should come from mail.message')

        # Do: mail_message attached to a document
        msg_id = self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_pigs_id})
        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
        # Test: message content; without catchall domain reply_to falls back on the author
        self.assertIn('mail.group', msg.message_id,
                      'mail_message: message_id should contain model')
        self.assertIn('%s' % self.group_pigs_id, msg.message_id,
                      'mail_message: message_id should contain res_id')
        self.assertEqual(msg.reply_to, raoul_from,
                         'mail_message: incorrect reply_to: should be Raoul')
        self.assertEqual(msg.email_from, raoul_from,
                         'mail_message: incorrect email_from: should be Raoul')

        # --------------------------------------------------
        # Case2: with alias_domain, without catchall alias
        # --------------------------------------------------
        self.registry('ir.config_parameter').set_param(cr, uid, 'mail.catchall.domain', alias_domain)
        self.registry('ir.config_parameter').unlink(cr, uid, self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.alias')]))

        # Update message: document message -> reply_to uses the document alias
        msg_id = self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_pigs_id})
        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
        # Test: generated reply_to
        self.assertEqual(msg.reply_to, raoul_reply_alias,
                         'mail_mail: incorrect reply_to: should be Pigs alias')

        # Update message: test alias on email_from
        msg_id = self.mail_message.create(cr, user_raoul_id, {})
        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
        # Test: generated reply_to
        self.assertEqual(msg.reply_to, raoul_from_alias,
                         'mail_mail: incorrect reply_to: should be message email_from using Raoul alias')

        # --------------------------------------------------
        # Case3: with alias_domain and catchall alias
        # --------------------------------------------------
        self.registry('ir.config_parameter').set_param(cr, uid, 'mail.catchall.alias', 'gateway')

        # Update message: free message -> reply_to is the catchall alias
        msg_id = self.mail_message.create(cr, user_raoul_id, {})
        msg = self.mail_message.browse(cr, user_raoul_id, msg_id)
        # Test: generated reply_to
        self.assertEqual(msg.reply_to, 'YourCompany <gateway@schlouby.fr>',
                         'mail_mail: reply_to should equal the catchall email alias')

        # Do: create a mail_mail with an explicit reply_to
        mail_id = self.mail_mail.create(cr, uid, {'state': 'cancel', 'reply_to': 'someone@example.com'})
        mail = self.mail_mail.browse(cr, uid, mail_id)
        # Test: mail_mail content
        self.assertEqual(mail.reply_to, 'someone@example.com',
                         'mail_mail: reply_to should equal the reply_to given to create')

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_10_mail_message_search_access_rights(self):
        """ Testing mail_message.search() using specific _search implementation """
        cr, uid, group_pigs_id = self.cr, self.uid, self.group_pigs_id
        # Data: comment subtype for mail.message creation
        ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'mail', 'mt_comment')
        subtype_id = ref and ref[1] or False

        # Data: Birds group, private
        group_birds_id = self.mail_group.create(self.cr, self.uid, {'name': 'Birds', 'public': 'private'})
        # Data: Raoul is member of Pigs
        self.mail_group.message_subscribe(cr, uid, [group_pigs_id], [self.partner_raoul_id])

        # Data: various author_ids, partner_ids, documents
        msg_id1 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A', 'subtype_id': subtype_id})
        msg_id2 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B', 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})
        msg_id3 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'subtype_id': subtype_id})
        msg_id4 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})
        msg_id5 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+R Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})
        msg_id6 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Birds', 'model': 'mail.group', 'res_id': group_birds_id, 'subtype_id': subtype_id})
        msg_id7 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B', 'subtype_id': subtype_id})
        msg_id8 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B+R', 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})

        # Test: Bert: 2 messages that have Bert in partner_ids
        msg_ids = self.mail_message.search(cr, self.user_bert_id, [('subject', 'like', '_Test')])
        self.assertEqual(set([msg_id2, msg_id4]), set(msg_ids), 'mail_message search failed')
        # Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group)
        msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test'), ('body', 'like', 'A')])
        self.assertEqual(set([msg_id3, msg_id4, msg_id5]), set(msg_ids), 'mail_message search failed')
        # Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group) + 2 messages as author
        msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test')])
        self.assertEqual(set([msg_id3, msg_id4, msg_id5, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')
        # Test: Admin: all messages
        msg_ids = self.mail_message.search(cr, uid, [('subject', 'like', '_Test')])
        self.assertEqual(set([msg_id1, msg_id2, msg_id3, msg_id4, msg_id5, msg_id6, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_15_mail_message_check_access_rule(self):
        """ Testing mail_message.check_access_rule() """
        cr, uid = self.cr, self.uid
        partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id
        user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id

        # Prepare groups: Pigs (employee), Jobs (public)
        pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message')
        priv_msg_id = self.mail_group.message_post(cr, uid, self.group_priv_id, body='Message')

        # prepare an attachment
        attachment_id = self.ir_attachment.create(cr, uid, {'datas': 'My attachment'.encode('base64'), 'name': 'doc.txt', 'datas_fname': 'doc.txt'})

        # ----------------------------------------
        # CASE1: read
        # ----------------------------------------
        # Do: create a new mail.message
        message_id = self.mail_message.create(cr, uid, {'body': 'My Body', 'attachment_ids': [(4, attachment_id)]})

        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
        with self.assertRaises(except_orm):
            self.mail_message.read(cr, user_bert_id, message_id)
        # Do: message is pushed to Bert
        notif_id = self.mail_notification.create(cr, uid, {'message_id': message_id, 'partner_id': partner_bert_id})
        # Test: Bert reads the message, ok because notification pushed
        self.mail_message.read(cr, user_bert_id, message_id)
        # Test: Bert downloads attachment, ok because he can read message
        self.mail_message.download_attachment(cr, user_bert_id, message_id, attachment_id)
        # Do: remove notification
        self.mail_notification.unlink(cr, uid, notif_id)
        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
        with self.assertRaises(except_orm):
            self.mail_message.read(cr, self.user_bert_id, message_id)
        # Test: Bert downloads attachment, crash because he can't read message
        with self.assertRaises(except_orm):
            self.mail_message.download_attachment(cr, user_bert_id, message_id, attachment_id)
        # Do: Bert is now the author
        self.mail_message.write(cr, uid, [message_id], {'author_id': partner_bert_id})
        # Test: Bert reads the message, ok because Bert is the author
        self.mail_message.read(cr, user_bert_id, message_id)
        # Do: Bert is not the author anymore
        self.mail_message.write(cr, uid, [message_id], {'author_id': partner_raoul_id})
        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
        with self.assertRaises(except_orm):
            self.mail_message.read(cr, user_bert_id, message_id)
        # Do: message is attached to a document Bert can read, Jobs
        self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_jobs_id})
        # Test: Bert reads the message, ok because linked to a doc he is allowed to read
        self.mail_message.read(cr, user_bert_id, message_id)
        # Do: message is attached to a document Bert cannot read, Pigs
        self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_pigs_id})
        # Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
        with self.assertRaises(except_orm):
            self.mail_message.read(cr, user_bert_id, message_id)

        # ----------------------------------------
        # CASE2: create
        # ----------------------------------------
        # Do: Bert creates a message on Pigs -> ko, no creation rights
        with self.assertRaises(AccessError):
            self.mail_message.create(cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_pigs_id, 'body': 'Test'})
        # Do: Bert create a message on Jobs -> ko, no creation rights
        with self.assertRaises(AccessError):
            self.mail_message.create(cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})
        # Do: Bert create a private message -> ko, no creation rights
        with self.assertRaises(AccessError):
            self.mail_message.create(cr, user_bert_id, {'body': 'Test'})

        # Do: Raoul creates a message on Jobs -> ok, write access to the related document
        self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})
        # Do: Raoul creates a message on Priv -> ko, no write access to the related document
        with self.assertRaises(except_orm):
            self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test'})
        # Do: Raoul creates a private message -> ok
        self.mail_message.create(cr, user_raoul_id, {'body': 'Test'})
        # Do: Raoul creates a reply to a message on Priv -> ko
        with self.assertRaises(except_orm):
            self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})
        # Do: Raoul creates a reply to a message on Priv-> ok if has received parent
        self.mail_notification.create(cr, uid, {'message_id': priv_msg_id, 'partner_id': self.partner_raoul_id})
        self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})

    def test_20_message_set_star(self):
        """ Tests for starring messages and its related access rights """
        cr, uid = self.cr, self.uid
        # Data: post a message on Pigs
        msg_id = self.group_pigs.message_post(body='My Body', subject='1')
        msg = self.mail_message.browse(cr, uid, msg_id)
        msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)

        # Do: Admin stars msg
        self.mail_message.set_message_starred(cr, uid, [msg.id], True)
        msg.refresh()
        # Test: notification exists
        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])
        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')
        # Test: notification starred
        notif = self.mail_notification.browse(cr, uid, notif_ids[0])
        self.assertTrue(notif.starred, 'mail_notification starred failed')
        self.assertTrue(msg.starred, 'mail_message starred failed')

        # Do: Raoul stars msg
        self.mail_message.set_message_starred(cr, self.user_raoul_id, [msg.id], True)
        msg_raoul.refresh()
        # Test: notification exists
        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])
        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')
        # Test: notification starred
        notif = self.mail_notification.browse(cr, uid, notif_ids[0])
        self.assertTrue(notif.starred, 'mail_notification starred failed')
        self.assertTrue(msg_raoul.starred, 'mail_message starred failed')

        # Do: Admin unstars msg
        self.mail_message.set_message_starred(cr, uid, [msg.id], False)
        msg.refresh()
        msg_raoul.refresh()
        # Test: msg unstarred for Admin, starred for Raoul
        self.assertFalse(msg.starred, 'mail_message starred failed')
        self.assertTrue(msg_raoul.starred, 'mail_message starred failed')

    def test_30_message_set_read(self):
        """ Tests for reading messages and its related access rights """
        cr, uid = self.cr, self.uid
        # Data: post a message on Pigs
        msg_id = self.group_pigs.message_post(body='My Body', subject='1')
        msg = self.mail_message.browse(cr, uid, msg_id)
        msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)

        # Do: Admin reads msg
        self.mail_message.set_message_read(cr, uid, [msg.id], True)
        msg.refresh()
        # Test: notification exists
        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])
        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')
        # Test: notification read
        notif = self.mail_notification.browse(cr, uid, notif_ids[0])
        self.assertTrue(notif['is_read'], 'mail_notification read failed')
        self.assertFalse(msg.to_read, 'mail_message read failed')

        # Do: Raoul reads msg
        self.mail_message.set_message_read(cr, self.user_raoul_id, [msg.id], True)
        msg_raoul.refresh()
        # Test: notification exists
        notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])
        self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')
        # Test: notification read
        notif = self.mail_notification.browse(cr, uid, notif_ids[0])
        self.assertTrue(notif['is_read'], 'mail_notification starred failed')
        self.assertFalse(msg_raoul.to_read, 'mail_message starred failed')

        # Do: Admin unreads msg
        self.mail_message.set_message_read(cr, uid, [msg.id], False)
        msg.refresh()
        msg_raoul.refresh()
        # Test: msg unread for Admin, read for Raoul
        self.assertTrue(msg.to_read, 'mail_message read failed')
        self.assertFalse(msg_raoul.to_read, 'mail_message read failed')

    def test_40_message_vote(self):
        """ Test designed for the vote/unvote feature. """
        cr, uid = self.cr, self.uid
        # Data: post a message on Pigs
        msg_id = self.group_pigs.message_post(body='My Body', subject='1')
        msg = self.mail_message.browse(cr, uid, msg_id)
        msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)

        # Do: Admin votes for msg
        self.mail_message.vote_toggle(cr, uid, [msg.id])
        msg.refresh()
        # Test: msg has Admin as voter
        self.assertEqual(set(msg.vote_user_ids), set([self.user_admin]), 'mail_message vote: after voting, Admin should be in the voter')
        # Do: Raoul votes for msg
        self.mail_message.vote_toggle(cr, self.user_raoul_id, [msg.id])
        msg_raoul.refresh()
        # Test: msg has Admin and Raoul as voters
        self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_admin, self.user_raoul]), 'mail_message vote: after voting, Admin and Raoul should be in the voters')
        # Do: Admin unvotes for msg
        self.mail_message.vote_toggle(cr, uid, [msg.id])
        msg.refresh()
        msg_raoul.refresh()
        # Test: msg has Raoul as voter
        self.assertEqual(set(msg.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Raoul should be in the voter')
        self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Raoul should be in the voter')

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_50_mail_flow_access_rights(self):
        """ Test a Chatter-looks alike flow to test access rights """
        cr, uid = self.cr, self.uid
        mail_compose = self.registry('mail.compose.message')
        partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id
        user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id

        # Prepare groups: Pigs (employee), Jobs (public)
        pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message', partner_ids=[self.partner_admin_id])
        jobs_msg_id = self.mail_group.message_post(cr, uid, self.group_jobs_id, body='Message', partner_ids=[self.partner_admin_id])

        # ----------------------------------------
        # CASE1: Bert, without groups
        # ----------------------------------------
        # Do: Bert reads Jobs basic fields, ok because public = read access on the group
        self.mail_group.read(cr, user_bert_id, [self.group_jobs_id], ['name', 'description'])
        # Do: Bert reads Jobs messages, ok because read access on the group => read access on its messages
        jobs_message_ids = self.mail_group.read(cr, user_bert_id, [self.group_jobs_id], ['message_ids'])[0]['message_ids']
        self.mail_message.read(cr, user_bert_id, jobs_message_ids)
        # Do: Bert browses Jobs, ok (no direct browse of partners), ok for messages, ko for followers (accessible to employees or partner manager)
        bert_jobs = self.mail_group.browse(cr, user_bert_id, self.group_jobs_id)
        trigger_read = bert_jobs.name
        for message in bert_jobs.message_ids:
            trigger_read = message.subject
        for partner in bert_jobs.message_follower_ids:
            with self.assertRaises(AccessError):
                trigger_read = partner.name
        # Do: Bert comments Jobs, ko because no creation right
        with self.assertRaises(AccessError):
            self.mail_group.message_post(cr, user_bert_id, self.group_jobs_id, body='I love Pigs')
        # Do: Bert writes on its own profile, ko because no message create access
        with self.assertRaises(AccessError):
            self.res_users.message_post(cr, user_bert_id, user_bert_id, body='I love Bert')
            self.res_partner.message_post(cr, user_bert_id, partner_bert_id, body='I love Bert')

        # ----------------------------------------
        # CASE2: Raoul, employee
        # ----------------------------------------
        # Do: Raoul browses Jobs -> ok, ok for message_ids, of for message_follower_ids
        raoul_jobs = self.mail_group.browse(cr, user_raoul_id, self.group_jobs_id)
        trigger_read = raoul_jobs.name
        for message in raoul_jobs.message_ids:
            trigger_read = message.subject
        for partner in raoul_jobs.message_follower_ids:
            trigger_read = partner.name

        # Do: Raoul comments Jobs, ok
        self.mail_group.message_post(cr, user_raoul_id, self.group_jobs_id, body='I love Pigs')
        # Do: Raoul create a mail.compose.message record on Jobs, because he uses the wizard
        compose_id = mail_compose.create(cr, user_raoul_id,
                                         {'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},
                                         {'default_composition_mode': 'comment', 'default_model': 'mail.group', 'default_res_id': self.group_jobs_id})
        mail_compose.send_mail(cr, user_raoul_id, [compose_id])
        # Do: Raoul replies to a Jobs message using the composer
        compose_id = mail_compose.create(cr, user_raoul_id,
                                         {'subject': 'Subject', 'body': 'Body text'},
                                         {'default_composition_mode': 'comment', 'default_parent_id': pigs_msg_id})
        mail_compose.send_mail(cr, user_raoul_id, [compose_id])
| agpl-3.0 |
notsambeck/siftsite | siftsite/sift/sift_app.py | 1 | 8941 | # GPU info from desktop
# Hardware Class: graphics card
# Model: "nVidia GF119 [GeForce GT 620 OEM]"
# Vendor: pci 0x10de "nVidia Corporation"
# Device: pci 0x1049 "GF119 [GeForce GT 620 OEM]"
# SubVendor: pci 0x10de "nVidia Corporation"
import numpy as np
import os
import datetime
import time
import pickle
# dataset is a sift module that imports CIFAR and provides
# image transform functions and access to saved datasets/etc.
import dataset
import sift_keras
from sift_keras import model
# Twitter/Google-Vision integration is optional: if any piece of the setup
# fails, twitter_mode is flipped off and the sifter runs save-to-disk only.
twitter_mode = True

if twitter_mode:
    from google.cloud import vision
    vision_client = vision.Client()

    import tweepy
    from secret import consumerSecret, consumerKey
    from secret import accessToken, accessTokenSecret
    # secret.py is in .gitignore, stores twitter login keys as str

    auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
    auth.set_access_token(accessToken, accessTokenSecret)
    try:
        api = tweepy.API(auth)
        print('twitter connected')
        # print(api.me())
    except Exception:
        # was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; narrow to Exception so Ctrl+C still works here
        print('twitter connect failed')
        twitter_mode = False
# optional functions for network visualization, debug
'''
import import_batch
import matplotlib.pyplot as plt
'''
# do you want to train the network? load a dataset:
# import pickle
# x, xt, y, yt = dataset.loadDataset('data/full_cifar_plus_161026.pkl')
# Restore the trained classifier weights before sifting.
model.load_weights(sift_keras.savefile)

batch_size = 1000  # over 2000 kills desktop
scale = 127.5  # scale factor for +/- 1

# Google Vision label descriptions considered too generic to be useful;
# labels matching these are dropped before building hashtags.
bad_wd = ['computer wallpaper',
          'pattern',
          'texture',
          'font',
          'text',
          'line',
          'atmosphere',
          'close up',
          'closeup',
          'atmosphere of earth',
          'grass family',
          'black',
          'blue',
          'purple',
          'green',
          'material',
          'phenomenon',
          'grass']

# Hashtags that mark an image as "boring" (not worth tweeting).
# NOTE(review): '#tree' appears twice in this list — presumably one of the
# duplicates was meant to be another tag; harmless for membership tests.
boring = ['#green', '#blue', '#black', '#grass',
          '#purple', '#pink', '#light', '#sky',
          '#white', '#phenomenon', '#tree', '#water',
          '#plant', '#tree', '#macrophotography',
          '#cloud', '#plantstem', '#leaf', '#skin',
          '#flora', '#photography', '#mouth']

# Extra hashtags randomly appended to ordinary tweets.
bonus = ['#art', '#contemporaryart', '#painting', '#notart',
         '#abstract', '#abstractart', '#contemporaryphotography',
         '#conceptualartist', '#snapchat', '#sift']
def image_generator(increment, counter):
    """Generate one batch of candidate images from the transform counter.

    Steps the (32, 32, 3) counter by `increment` (mod dataset.quantization)
    once per image, inverse-DCTs each transform, and rescales the batch
    from [0, 255] to [-1, 1]. Returns (batch, updated counter).
    """
    batch = np.empty((batch_size, 32, 32, 3), 'float32')
    for idx in range(batch_size):
        transform = dataset.get_transform(counter)
        batch[idx] = dataset.idct(transform)  # ycc format
        counter = (counter + increment) % dataset.quantization
    # shift/scale into the +/- 1 range the network was trained on
    batch = (batch - scale) / scale
    return batch, counter
def Sift(increment=11999, restart=False):
    '''Non-visualized Sift program: endlessly generate image batches, run the
    classifier over them, and save (and optionally tweet) any candidates.

    Args:
        increment: step applied to the transform counter between images.
        restart: if True, ignore save.file and start counting from zero
            (warning: progress will be overwritten on the next save).

    Candidates are saved to found_images/<today>_increment-<increment>/.
    Progress (counter, counts) is checkpointed to save.file every 100 batches.
    '''
    last = 0  # timestamp of the last tweet; throttles tweeting to 1 per 30s
    if not restart:
        print('Loading saved state...')
        try:
            # `with` ensures the handle is closed even if a load fails
            with open('save.file', 'rb') as f:
                counter = pickle.load(f)
                images_found = pickle.load(f)
                processed = pickle.load(f)
                tweeted = pickle.load(f)
            print('{} images found of {} processed; tweeted {}.'
                  .format(images_found, processed*batch_size, tweeted))
        except FileNotFoundError:
            print('save.file does not exist. RESTARTING')
            counter = np.zeros((32, 32, 3), dtype='float32')
            images_found = 0
            processed = 0
            tweeted = 0
    else:
        print('Warning: Restarting, will save over progress')
        counter = np.zeros((32, 32, 3), dtype='float32')
        images_found = 0
        processed = 0
        tweeted = 0

    # make dir found_images
    if not os.path.exists('found_images'):
        os.makedirs('found_images')
    directory = "".join(['found_images/', str(datetime.date.today()),
                         '_increment-', str(increment)])
    if not os.path.exists(directory):
        os.makedirs(directory)
    print('saving to', directory)

    # MAIN LOOP
    # for rep in range(1):
    while True:
        if processed % 10 == 0:
            print('processed {} batches of {}'.format(processed, batch_size))
        processed += 1
        data, counter = image_generator(increment, counter)
        ps = model.predict_on_batch(data)
        for i in range(batch_size):
            # column 1 = "is an image" score; keep when it beats column 0
            if ps[i, 1] > ps[i, 0]:
                images_found += 1
                now = time.time()
                print('Image found: no.', images_found, ' at ', now)
                # s = Image.fromarray(dataset.orderPIL(images[im]))
                s = dataset.net2pil(data[i])
                f = ''.join([str(images_found), '_', str(ps[i, 1]), '.png'])
                s.save(os.path.join(directory, f))
                if now - last > 30:  # only tweet after > 30 seconds
                    # s.resize((512, 512)).save('twitter.png')
                    arr = dataset.make_arr(s)
                    x = dataset.expand(arr)
                    xim = dataset.make_pil(x, input_format='RGB',
                                           output_format='RGB')
                    xim.resize((512, 512)).save('twitter.png')
                    # twitter module
                    if twitter_mode:
                        with open(os.path.join(directory, f), 'rb') as tw:
                            content = tw.read()
                        try:
                            goog = vision_client.image(content=content)
                            labels = goog.detect_labels()
                            labels = [label for label in labels
                                      if label.description not in bad_wd]
                            # num = labels[0].score
                            # word = labels[0].description
                            # print(word, num)
                            ds = ['#'+label.description.replace(' ', '')
                                  for label in labels]
                        except Exception:
                            # narrowed from bare `except:` so Ctrl+C is not
                            # swallowed while waiting on the network call
                            print('Google api failed, not tweeting')
                            continue  # or tweet without this continue
                        # skip boring images
                        if all([d in boring for d in ds]) or \
                           (ds[0] in boring and labels[0].score < .98):
                            print('boring image, not tweeting it.')
                            print('_'.join(ds))
                            continue
                        # different kinds of tweets, chosen by batch index
                        # NOTE(review): `bot = i % 100` keys off the position
                        # within the batch, not images_found — looks like it
                        # was meant to be images_found % 100; left unchanged.
                        bot = i % 100
                        if bot <= 5:
                            ds.append('@pixelsorter')
                        elif 5 < bot <= 10:
                            ds.append('@WordPadBot')
                        elif bot == 99:
                            # spam mode: @-mention a random follower-of-follower
                            my_fs = api.followers()
                            u = my_fs[np.random.randint(0, len(my_fs))]
                            u_fs = api.followers(u.screen_name)
                            usr = u_fs[np.random.randint(0, len(u_fs))]
                            at = usr.screen_name
                            ds = ['@{} IS THIS YOUR IMAGE?'
                                  .format(at)] + ds
                        else:
                            for _ in range(3):
                                r = np.random.randint(0, len(bonus))
                                ds.append(bonus[r])
                        # make tweet, cap length
                        # (a dead '#DEFINITELY #FOUND...' assignment that was
                        # always overwritten here has been removed)
                        tweet = '''IMAGE FOUND. #{}
{}'''.format(str(images_found), ' '.join(ds))
                        if len(tweet) > 130:
                            tweet = tweet[:110]
                        try:
                            print('tweeting:', tweet)
                            api.update_with_media('twitter.png', tweet)
                            last = now
                            tweeted += 1
                        except Exception:
                            # narrowed from bare `except:`; tweeting is
                            # best-effort, failures are logged and skipped
                            print('Tweet failed')

        # save progress
        if processed % 100 == 0:
            print('saving progress to save.file')
            with open('save.file', 'wb') as f:
                pickle.dump(counter, f)
                pickle.dump(images_found, f)
                pickle.dump(processed, f)
                pickle.dump(tweeted, f)
if __name__ == '__main__':
    # Announce the mode (twitter on/off) and enter the endless sift loop.
    banner = ('SIFTnonvisual loaded. Twitter={}. For visuals, run sift.py.'
              .format(twitter_mode))
    print()
    print(banner)
    print()
    Sift()
| mit |
c0ff3m4kr/python-uci | unittest/test_originalreader.py | 2 | 4020 | #encoding: UTF-8
'''
Created on Nov 24, 2015
@author: coffeemakr
'''
import unittest
import re
import itertools
import random
import string
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], "..")))
from uci.backend.originalreader import OriginalReader
class TestRegularExpressions(unittest.TestCase):
    """Unit tests for OriginalReader's UCI-file parsing helpers:
    unclosed-quote detection and section header (type/name) extraction.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testReName(self):
        # Placeholder: the name regex is not covered yet.
        pass

    def test_has_unclosed_quotes(self):
        """has_quotes_left_open() must flag dangling quotes and accept
        properly closed, escaped, or commented-out quotes."""
        unclosed = ['some text "',
                    'some text \'',
                    'some \'text',
                    'some "text',
                    '"some text',
                    '\'some text',
                    'some "proper" \'unproper',
                    'some "proper" "unproper',
                    'some \'proper\' \'unproper',
                    'some \'proper\' "unproper',
                    'some "unproper\" \'proper',
                    r'some "unproper\\\' \"proper',
                    r'some \'unproper\\" \\\'proper',
                    'some muliline " unquote text \n the new line',
                    'some muliline " unquote text \n\r the new line',
                    'some muliline " unquote text \r\n the new line',
                    'some muliline " unquote text \n the \n new line',
                    ]
        closed = ['"some" "quotes"',
                  '\'some\' \'quotes\'',
                  '\'some\' "quotes"',
                  '"some" \'quotes\'',
                  ' some "some" "quotes" quotes',
                  ' multiline: " properly closed \n on the new line"',
                  ' Properly close because the # quote " ist commented ',
                  'Properly closed because the " # comment is ignored is the quotes" ']
        for value in unclosed:
            result = OriginalReader.has_quotes_left_open(value + '\n')
            self.assertTrue(result, "Unclosed quotes not detected: %s" % value)
        for value in closed:
            result = OriginalReader.has_quotes_left_open(value + '\n')
            self.assertFalse(result, "Closed quotes faulty detected: %s" % value)

    def test_get_section_name_and_type(self):
        # Usually you do not need to enclose identifiers or values in quotes.
        # Quotes are only required if the enclosed value contains spaces or tabs.
        # Also it's legal to use double- instead of single-quotes when typing configuration options.
        values = [
            'a',
            'abc',
            'abcdef',
            'abcdefghijk',
            'A',
            'ABC',
            'ABCDEFGHIJK',
            'Abcefg',
            'aBCEFG',
            '0',
            '1',
            '12',
            '_abcdef',
            'ab_cdef',
            'abcedef_',
            '_',
            '5pe¢îalVälũe',
            '%',
            '=',
            '=+/()=*+'
        ]
        # Simple values: quoting is optional ('' covers the unquoted form).
        # Renamed locals: `input` and `type` shadowed builtins in the original.
        for value in values:
            for type_escape in ('"', "'", ''):
                for name_escape in ('"', "'", ''):
                    line = 'config ' + type_escape + value + type_escape + " " + name_escape + value + name_escape
                    sec_name, sec_type = OriginalReader.get_section_name_and_type(line)
                    self.assertEqual(value, sec_name)
                    self.assertEqual(value, sec_type)

        values = """abcdef
abcdef
a b c d e f
a b cdef
abcdef
\tabcdef
abcdef\t
abcd\tef
\ta b c d\te\t\t\t\tf d """
        values = values.splitlines()
        # Values containing spaces/tabs: quoting is mandatory, so the
        # unquoted ('') variant is excluded here.
        for value in values:
            for type_escape in ('"', "'"):
                for name_escape in ('"', "'"):
                    line = 'config ' + type_escape + value + type_escape + " " + name_escape + value + name_escape
                    sec_name, sec_type = OriginalReader.get_section_name_and_type(line)
                    self.assertEqual(value, sec_name)
                    self.assertEqual(value, sec_type)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
richardfergie/googleads-python-lib | examples/dfp/v201411/user_service/get_all_roles.py | 4 | 1339 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all roles.
This sample can be used to determine which role id is needed when getting and
creating users."""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
user_service = client.GetService('UserService', version='v201411')
# Get all roles.
roles = user_service.getAllRoles()
# Display results.
for role in roles:
print ('Role with id \'%s\' and name \'%s\' was found.'
% (role['id'], role['name']))
print '\nNumber of results found: %s' % len(roles)
if __name__ == '__main__':
    # Initialize client object.
    # Credentials are loaded from the googleads.yaml storage file.
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client)
| apache-2.0 |
zetacoin/zetacoin | qa/rpc-tests/invalidtxrequest.py | 108 | 2576 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
'''
In this test we connect to one node over p2p, and test tx requests.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidTxRequestTest(ComparisonTestFramework):

    ''' Can either run this test as 1 node with expected answers, or two and compare them.
        Change the "outcome" variable from each TestInstance object to only do the comparison. '''

    def __init__(self):
        # Single node: the comparison tool drives it over p2p.
        super().__init__()
        self.num_nodes = 1

    def run_test(self):
        # TestManager replays the TestInstances yielded by get_tests()
        # against the node and verifies each accept/reject outcome.
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        self.tip = None
        self.block_time = None
        NetworkThread().start() # Start up network handling in another thread
        test.run()

    def get_tests(self):
        # Generator of TestInstances consumed by TestManager.
        if self.tip is None:
            self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
        self.block_time = int(time.time())+1

        '''
        Create a new block with an anyone-can-spend coinbase
        '''
        height = 1
        block = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        height += 1
        yield TestInstance([[block, True]])

        '''
        Now we need that block to mature so we can spend the coinbase.
        '''
        # 100 confirmations are required before a coinbase is spendable.
        test = TestInstance(sync_every_block=False)
        for i in range(100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            self.tip = block.sha256
            self.block_time += 1
            test.blocks_and_transactions.append([block, True])
            height += 1
        yield test

        # b'\x64' is OP_NOTIF
        # Transaction will be rejected with code 16 (REJECT_INVALID)
        tx1 = create_transaction(self.block1.vtx[0], 0, b'\x64', 50 * COIN - 12000)
        yield TestInstance([[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]])

        # TODO: test further transactions...
if __name__ == '__main__':
    # Entry point: the framework's main() parses options and runs run_test().
    InvalidTxRequestTest().main()
| mit |
bkrukowski/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py | 122 | 44901 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Standalone WebSocket server.
Use this file to launch pywebsocket without Apache HTTP Server.
BASIC USAGE
Go to the src directory and run
$ python mod_pywebsocket/standalone.py [-p <ws_port>]
[-w <websock_handlers>]
[-d <document_root>]
<ws_port> is the port number to use for ws:// connection.
<document_root> is the path to the root directory of HTML files.
<websock_handlers> is the path to the root directory of WebSocket handlers.
If not specified, <document_root> will be used. See __init__.py (or
run $ pydoc mod_pywebsocket) for how to write WebSocket handlers.
For more detail and other options, run
$ python mod_pywebsocket/standalone.py --help
or see _build_option_parser method below.
For trouble shooting, adding "--log_level debug" might help you.
TRY DEMO
Go to the src directory and run
$ python standalone.py -d example
to launch pywebsocket with the sample handler and html on port 80. Open
http://localhost/console.html, click the connect button, type something into
the text box next to the send button and click the send button. If everything
is working, you'll see the message you typed echoed by the server.
SUPPORTING TLS
To support TLS, run standalone.py with -t, -k, and -c options.
Note that when ssl module is used and the key/cert location is incorrect,
TLS connection silently fails while pyOpenSSL fails on startup.
SUPPORTING CLIENT AUTHENTICATION
To support client authentication with TLS, run standalone.py with -t, -k, -c,
and --tls-client-auth, and --tls-client-ca options.
E.g., $./standalone.py -d ../example -p 10443 -t -c ../test/cert/cert.pem -k
../test/cert/key.pem --tls-client-auth --tls-client-ca=../test/cert/cacert.pem
CONFIGURATION FILE
You can also write a configuration file and use it by specifying the path to
the configuration file by --config option. Please write a configuration file
following the documentation of the Python ConfigParser library. Name of each
entry must be the long version argument name. E.g. to set log level to debug,
add the following line:
log_level=debug
For options which doesn't take value, please add some fake value. E.g. for
--tls option, add the following line:
tls=True
Note that tls will be enabled even if you write tls=False as the value part is
fake.
When both a command line argument and a configuration file entry are set for
the same configuration item, the command line value will override one in the
configuration file.
THREADING
This server is derived from SocketServer.ThreadingMixIn. Hence a thread is
used for each request.
SECURITY WARNING
This uses CGIHTTPServer and CGIHTTPServer is not secure.
It may execute arbitrary Python code or external programs. It should not be
used outside a firewall.
"""
import BaseHTTPServer
import CGIHTTPServer
import SimpleHTTPServer
import SocketServer
import ConfigParser
import base64
import httplib
import logging
import logging.handlers
import optparse
import os
import re
import select
import socket
import sys
import threading
import time
from mod_pywebsocket import common
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
from mod_pywebsocket import http_header_util
from mod_pywebsocket import memorizingfile
from mod_pywebsocket import util
# Rotating-log defaults consumed by _configure_logging / _build_option_parser.
_DEFAULT_LOG_MAX_BYTES = 1024 * 256
_DEFAULT_LOG_BACKUP_COUNT = 5

# Default backlog passed to socket.listen().
_DEFAULT_REQUEST_QUEUE_SIZE = 128

# 1024 is practically large enough to contain WebSocket handshake lines.
_MAX_MEMORIZED_LINES = 1024

# Constants for the --tls_module flag.
_TLS_BY_STANDARD_MODULE = 'ssl'
_TLS_BY_PYOPENSSL = 'pyopenssl'
class _StandaloneConnection(object):
    """Adapter that presents the mod_python mp_conn interface on top of a
    WebSocketRequestHandler, so handler code written against mod_python
    runs unchanged under the standalone server."""

    def __init__(self, request_handler):
        """Wrap request_handler (a WebSocketRequestHandler instance)."""
        self._request_handler = request_handler

    def get_local_addr(self):
        """Return the serving (host, port) pair (mp_conn.local_addr)."""
        handler_server = self._request_handler.server
        return (handler_server.server_name, handler_server.server_port)
    local_addr = property(get_local_addr)

    def get_remote_addr(self):
        """Return the client address (mp_conn.remote_addr).

        This cannot be captured in __init__ because the request handler
        is not fully initialized at that point."""
        return self._request_handler.client_address
    remote_addr = property(get_remote_addr)

    def write(self, data):
        """Send raw data to the client (mp_conn.write())."""
        return self._request_handler.wfile.write(data)

    def read(self, length):
        """Read up to length bytes from the client (mp_conn.read())."""
        return self._request_handler.rfile.read(length)

    def get_memorized_lines(self):
        """Return the handshake lines recorded by the memorizing rfile."""
        return self._request_handler.rfile.get_memorized_lines()
class _StandaloneRequest(object):
    """Adapter that presents the mod_python request interface on top of a
    WebSocketRequestHandler."""

    def __init__(self, request_handler, use_tls):
        """Wrap request_handler (a WebSocketRequestHandler instance).

        use_tls tells is_https() whether the connection is encrypted."""
        self._logger = util.get_class_logger(self)
        self._request_handler = request_handler
        self.connection = _StandaloneConnection(request_handler)
        self._use_tls = use_tls
        self.headers_in = request_handler.headers

    def get_uri(self):
        """Return the raw Request-URI (request.uri).

        mod_python's request.uri holds only the parsed path portion;
        returning the raw Request-Line value here is kept for
        compatibility with existing handlers."""
        return self._request_handler.path
    uri = property(get_uri)

    def get_unparsed_uri(self):
        """Return the raw Request-URI (request.unparsed_uri)."""
        return self._request_handler.path
    unparsed_uri = property(get_unparsed_uri)

    def get_method(self):
        """Return the HTTP method, e.g. 'GET' (request.method)."""
        return self._request_handler.command
    method = property(get_method)

    def get_protocol(self):
        """Return the HTTP version string (request.protocol)."""
        return self._request_handler.request_version
    protocol = property(get_protocol)

    def is_https(self):
        """Return True when the connection uses TLS (request.is_https())."""
        return self._use_tls
def _import_ssl():
    """Try to import the standard ssl module, binding it as a module global.

    Returns:
        True when the module is available, False otherwise.
    """
    global ssl
    try:
        import ssl
    except ImportError:
        return False
    return True
def _import_pyopenssl():
    """Try to import pyOpenSSL, binding OpenSSL as a module global.

    Returns:
        True when the package is available, False otherwise.
    """
    global OpenSSL
    try:
        import OpenSSL.SSL
    except ImportError:
        return False
    return True
class _StandaloneSSLConnection(object):
    """A wrapper class for OpenSSL.SSL.Connection to
    - provide makefile method which is not supported by the class
    - tweak shutdown method since OpenSSL.SSL.Connection.shutdown doesn't
      accept the "how" argument.
    - convert SysCallError exceptions that its recv method may raise into a
      return value of '', meaning EOF. We cannot overwrite the recv method on
      self._connection since it's immutable.
    """

    # Attributes handled by this wrapper itself; everything else is
    # forwarded to the wrapped connection by __getattribute__/__setattr__.
    _OVERRIDDEN_ATTRIBUTES = ['_connection', 'makefile', 'shutdown', 'recv']

    def __init__(self, connection):
        # connection: an OpenSSL.SSL.Connection instance to wrap.
        self._connection = connection

    def __getattribute__(self, name):
        # Serve overridden names from this object; delegate the rest.
        if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
            return object.__getattribute__(self, name)
        return self._connection.__getattribute__(name)

    def __setattr__(self, name, value):
        # Mirror of __getattribute__ for attribute assignment.
        if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
            return object.__setattr__(self, name, value)
        return self._connection.__setattr__(name, value)

    def makefile(self, mode='r', bufsize=-1):
        # socket._fileobject only needs recv/send, so it works on this
        # wrapper even though the underlying object is not a real socket.
        return socket._fileobject(self, mode, bufsize)

    def shutdown(self, unused_how):
        # OpenSSL's shutdown takes no "how" argument; drop it.
        self._connection.shutdown()

    def recv(self, bufsize, flags=0):
        if flags != 0:
            raise ValueError('Non-zero flags not allowed')

        try:
            return self._connection.recv(bufsize)
        except OpenSSL.SSL.SysCallError, (err, message):
            if err == -1:
                # Suppress "unexpected EOF" exception. See the OpenSSL document
                # for SSL_get_error.
                return ''
            raise
def _alias_handlers(dispatcher, websock_handlers_map_file):
"""Set aliases specified in websock_handler_map_file in dispatcher.
Args:
dispatcher: dispatch.Dispatcher instance
websock_handler_map_file: alias map file
"""
fp = open(websock_handlers_map_file)
try:
for line in fp:
if line[0] == '#' or line.isspace():
continue
m = re.match('(\S+)\s+(\S+)', line)
if not m:
logging.warning('Wrong format in map file:' + line)
continue
try:
dispatcher.add_resource_path_alias(
m.group(1), m.group(2))
except dispatch.DispatchException, e:
logging.error(str(e))
finally:
fp.close()
class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
    """HTTPServer specialized for WebSocket."""

    # Overrides SocketServer.ThreadingMixIn.daemon_threads
    daemon_threads = True
    # Overrides BaseHTTPServer.HTTPServer.allow_reuse_address
    allow_reuse_address = True

    def __init__(self, options):
        """Override SocketServer.TCPServer.__init__ to set SSL enabled
        socket object to self.socket before server_bind and server_activate,
        if necessary.
        """

        # Share a Dispatcher among request handlers to save time for
        # instantiation. Dispatcher can be shared because it is thread-safe.
        options.dispatcher = dispatch.Dispatcher(
            options.websock_handlers,
            options.scan_dir,
            options.allow_handlers_outside_root_dir)
        if options.websock_handlers_map_file:
            _alias_handlers(options.dispatcher,
                            options.websock_handlers_map_file)
        warnings = options.dispatcher.source_warnings()
        if warnings:
            for warning in warnings:
                logging.warning('Warning in source loading: %s' % warning)

        self._logger = util.get_class_logger(self)

        self.request_queue_size = options.request_queue_size
        self.__ws_is_shut_down = threading.Event()
        self.__ws_serving = False

        # BaseServer.__init__ (not TCPServer) so no single socket is
        # created here; the multi-socket setup below replaces it.
        SocketServer.BaseServer.__init__(
            self, (options.server_host, options.port), WebSocketRequestHandler)

        # Expose the options object to allow handler objects access it. We name
        # it with websocket_ prefix to avoid conflict.
        self.websocket_server_options = options

        self._create_sockets()
        self.server_bind()
        self.server_activate()

    def _create_sockets(self):
        # Create (but do not yet bind) one socket per candidate address
        # family, wrapping each with TLS when the ssl module is selected.
        self.server_name, self.server_port = self.server_address
        self._sockets = []
        if not self.server_name:
            # On platforms that doesn't support IPv6, the first bind fails.
            # On platforms that supports IPv6
            # - If it binds both IPv4 and IPv6 on call with AF_INET6, the
            #   first bind succeeds and the second fails (we'll see 'Address
            #   already in use' error).
            # - If it binds only IPv6 on call with AF_INET6, both call are
            #   expected to succeed to listen both protocol.
            addrinfo_array = [
                (socket.AF_INET6, socket.SOCK_STREAM, '', '', ''),
                (socket.AF_INET, socket.SOCK_STREAM, '', '', '')]
        else:
            addrinfo_array = socket.getaddrinfo(self.server_name,
                                                self.server_port,
                                                socket.AF_UNSPEC,
                                                socket.SOCK_STREAM,
                                                socket.IPPROTO_TCP)
        for addrinfo in addrinfo_array:
            self._logger.info('Create socket on: %r', addrinfo)
            family, socktype, proto, canonname, sockaddr = addrinfo
            try:
                socket_ = socket.socket(family, socktype)
            except Exception, e:
                self._logger.info('Skip by failure: %r', e)
                continue
            server_options = self.websocket_server_options
            if server_options.use_tls:
                # For the case of _HAS_OPEN_SSL, we do wrapper setup after
                # accept.
                if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
                    if server_options.tls_client_auth:
                        if server_options.tls_client_cert_optional:
                            client_cert_ = ssl.CERT_OPTIONAL
                        else:
                            client_cert_ = ssl.CERT_REQUIRED
                    else:
                        client_cert_ = ssl.CERT_NONE
                    # Handshake is deferred to get_request so that a failing
                    # client cannot stall accept processing here.
                    socket_ = ssl.wrap_socket(socket_,
                        keyfile=server_options.private_key,
                        certfile=server_options.certificate,
                        ssl_version=ssl.PROTOCOL_SSLv23,
                        ca_certs=server_options.tls_client_ca,
                        cert_reqs=client_cert_,
                        do_handshake_on_connect=False)
            self._sockets.append((socket_, addrinfo))

    def server_bind(self):
        """Override SocketServer.TCPServer.server_bind to enable multiple
        sockets bind.
        """

        failed_sockets = []

        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            self._logger.info('Bind on: %r', addrinfo)
            if self.allow_reuse_address:
                socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                socket_.bind(self.server_address)
            except Exception, e:
                self._logger.info('Skip by failure: %r', e)
                socket_.close()
                failed_sockets.append(socketinfo)
            if self.server_address[1] == 0:
                # The operating system assigns the actual port number for port
                # number 0. This case, the second and later sockets should use
                # the same port number. Also self.server_port is rewritten
                # because it is exported, and will be used by external code.
                self.server_address = (
                    self.server_name, socket_.getsockname()[1])
                self.server_port = self.server_address[1]
                self._logger.info('Port %r is assigned', self.server_port)

        for socketinfo in failed_sockets:
            self._sockets.remove(socketinfo)

    def server_activate(self):
        """Override SocketServer.TCPServer.server_activate to enable multiple
        sockets listen.
        """

        failed_sockets = []

        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            self._logger.info('Listen on: %r', addrinfo)
            try:
                socket_.listen(self.request_queue_size)
            except Exception, e:
                self._logger.info('Skip by failure: %r', e)
                socket_.close()
                failed_sockets.append(socketinfo)

        for socketinfo in failed_sockets:
            self._sockets.remove(socketinfo)

        if len(self._sockets) == 0:
            self._logger.critical(
                'No sockets activated. Use info log level to see the reason.')

    def server_close(self):
        """Override SocketServer.TCPServer.server_close to enable multiple
        sockets close.
        """

        for socketinfo in self._sockets:
            socket_, addrinfo = socketinfo
            self._logger.info('Close on: %r', addrinfo)
            socket_.close()

    def fileno(self):
        """Override SocketServer.TCPServer.fileno."""

        # Only a single file descriptor can be returned here; with several
        # listening sockets this interface cannot be supported faithfully.
        self._logger.critical('Not supported: fileno')
        return self._sockets[0][0].fileno()

    def handle_error(self, request, client_address):
        """Override SocketServer.handle_error."""

        self._logger.error(
            'Exception in processing request from: %r\n%s',
            client_address,
            util.get_stack_trace())
        # Note: client_address is a tuple.

    def get_request(self):
        """Override TCPServer.get_request to wrap OpenSSL.SSL.Connection
        object with _StandaloneSSLConnection to provide makefile method. We
        cannot substitute OpenSSL.SSL.Connection.makefile since it's readonly
        attribute.
        """

        accepted_socket, client_address = self.socket.accept()

        server_options = self.websocket_server_options
        if server_options.use_tls:
            if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
                try:
                    accepted_socket.do_handshake()
                except ssl.SSLError, e:
                    self._logger.debug('%r', e)
                    raise

                # Print cipher in use. Handshake is done on accept.
                self._logger.debug('Cipher: %s', accepted_socket.cipher())
                self._logger.debug('Client cert: %r',
                                   accepted_socket.getpeercert())
            elif server_options.tls_module == _TLS_BY_PYOPENSSL:
                # We cannot print the cipher in use. pyOpenSSL doesn't provide
                # any method to fetch that.

                ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
                ctx.use_privatekey_file(server_options.private_key)
                ctx.use_certificate_file(server_options.certificate)

                def default_callback(conn, cert, errnum, errdepth, ok):
                    return ok == 1

                # See the OpenSSL document for SSL_CTX_set_verify.
                if server_options.tls_client_auth:
                    verify_mode = OpenSSL.SSL.VERIFY_PEER
                    if not server_options.tls_client_cert_optional:
                        verify_mode |= OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT
                    ctx.set_verify(verify_mode, default_callback)
                    ctx.load_verify_locations(server_options.tls_client_ca,
                                              None)
                else:
                    ctx.set_verify(OpenSSL.SSL.VERIFY_NONE, default_callback)

                accepted_socket = OpenSSL.SSL.Connection(ctx, accepted_socket)
                accepted_socket.set_accept_state()

                # Convert SSL related error into socket.error so that
                # SocketServer ignores them and keeps running.
                #
                # TODO(tyoshino): Convert all kinds of errors.
                try:
                    accepted_socket.do_handshake()
                except OpenSSL.SSL.Error, e:
                    # Set errno part to 1 (SSL_ERROR_SSL) like the ssl module
                    # does.
                    self._logger.debug('%r', e)
                    raise socket.error(1, '%r' % e)
                cert = accepted_socket.get_peer_certificate()
                self._logger.debug('Client cert subject: %r',
                                   cert.get_subject().get_components())
                accepted_socket = _StandaloneSSLConnection(accepted_socket)
            else:
                raise ValueError('No TLS support module is available')

        return accepted_socket, client_address

    def serve_forever(self, poll_interval=0.5):
        """Override SocketServer.BaseServer.serve_forever."""

        self.__ws_serving = True
        self.__ws_is_shut_down.clear()
        handle_request = self.handle_request
        if hasattr(self, '_handle_request_noblock'):
            handle_request = self._handle_request_noblock
        else:
            self._logger.warning('Fallback to blocking request handler')
        try:
            while self.__ws_serving:
                # Multiplex over every listening socket; handle_request
                # reads self.socket, so it is pointed at each ready socket
                # in turn before dispatching.
                r, w, e = select.select(
                    [socket_[0] for socket_ in self._sockets],
                    [], [], poll_interval)
                for socket_ in r:
                    self.socket = socket_
                    handle_request()
                self.socket = None
        finally:
            # Let shutdown() return once the serve loop has fully exited.
            self.__ws_is_shut_down.set()

    def shutdown(self):
        """Override SocketServer.BaseServer.shutdown."""

        self.__ws_serving = False
        self.__ws_is_shut_down.wait()
class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
    """CGIHTTPRequestHandler specialized for WebSocket."""

    # Use httplib.HTTPMessage instead of mimetools.Message.
    MessageClass = httplib.HTTPMessage

    def setup(self):
        """Override SocketServer.StreamRequestHandler.setup to wrap rfile
        with MemorizingFile.

        This method will be called by BaseRequestHandler's constructor
        before calling BaseHTTPRequestHandler.handle.
        BaseHTTPRequestHandler.handle will call
        BaseHTTPRequestHandler.handle_one_request and it will call
        WebSocketRequestHandler.parse_request.
        """

        # Call superclass's setup to prepare rfile, wfile, etc. See setup
        # definition on the root class SocketServer.StreamRequestHandler to
        # understand what this does.
        CGIHTTPServer.CGIHTTPRequestHandler.setup(self)

        # Memorize handshake lines so they can be replayed to the
        # WebSocket handshake code later.
        self.rfile = memorizingfile.MemorizingFile(
            self.rfile,
            max_memorized_lines=_MAX_MEMORIZED_LINES)

    def __init__(self, request, client_address, server):
        self._logger = util.get_class_logger(self)

        self._options = server.websocket_server_options

        # Overrides CGIHTTPServerRequestHandler.cgi_directories.
        self.cgi_directories = self._options.cgi_directories
        # Replace CGIHTTPRequestHandler.is_executable method.
        if self._options.is_executable_method is not None:
            self.is_executable = self._options.is_executable_method

        # This actually calls BaseRequestHandler.__init__.
        CGIHTTPServer.CGIHTTPRequestHandler.__init__(
            self, request, client_address, server)

    def parse_request(self):
        """Override BaseHTTPServer.BaseHTTPRequestHandler.parse_request.

        Return True to continue processing for HTTP(S), False otherwise.

        See BaseHTTPRequestHandler.handle_one_request method which calls
        this method to understand how the return value will be handled.
        """

        # We hook parse_request method, but also call the original
        # CGIHTTPRequestHandler.parse_request since when we return False,
        # CGIHTTPRequestHandler.handle_one_request continues processing and
        # it needs variables set by CGIHTTPRequestHandler.parse_request.
        #
        # Variables set by this method will be also used by WebSocket request
        # handling (self.path, self.command, self.requestline, etc. See also
        # how _StandaloneRequest's members are implemented using these
        # attributes).
        if not CGIHTTPServer.CGIHTTPRequestHandler.parse_request(self):
            return False

        if self._options.use_basic_auth:
            auth = self.headers.getheader('Authorization')
            if auth != self._options.basic_auth_credential:
                self.send_response(401)
                self.send_header('WWW-Authenticate',
                                 'Basic realm="Pywebsocket"')
                self.end_headers()
                self._logger.info('Request basic authentication')
                return True

        host, port, resource = http_header_util.parse_uri(self.path)
        if resource is None:
            self._logger.info('Invalid URI: %r', self.path)
            self._logger.info('Fallback to CGIHTTPRequestHandler')
            return True
        server_options = self.server.websocket_server_options
        if host is not None:
            validation_host = server_options.validation_host
            if validation_host is not None and host != validation_host:
                self._logger.info('Invalid host: %r (expected: %r)',
                                  host,
                                  validation_host)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        if port is not None:
            validation_port = server_options.validation_port
            if validation_port is not None and port != validation_port:
                self._logger.info('Invalid port: %r (expected: %r)',
                                  port,
                                  validation_port)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        # From here on only the path portion of the Request-URI is used.
        self.path = resource

        request = _StandaloneRequest(self, self._options.use_tls)

        try:
            # Fallback to default http handler for request paths for which
            # we don't have request handlers.
            if not self._options.dispatcher.get_handler_suite(self.path):
                self._logger.info('No handler for resource: %r',
                                  self.path)
                self._logger.info('Fallback to CGIHTTPRequestHandler')
                return True
        except dispatch.DispatchException, e:
            self._logger.info('Dispatch failed for error: %s', e)
            self.send_error(e.status)
            return False

        # If any Exceptions without except clause setup (including
        # DispatchException) is raised below this point, it will be caught
        # and logged by WebSocketServer.
        try:
            try:
                handshake.do_handshake(
                    request,
                    self._options.dispatcher,
                    allowDraft75=self._options.allow_draft75,
                    strict=self._options.strict)
            except handshake.VersionException, e:
                self._logger.info('Handshake failed for version error: %s', e)
                self.send_response(common.HTTP_STATUS_BAD_REQUEST)
                self.send_header(common.SEC_WEBSOCKET_VERSION_HEADER,
                                 e.supported_versions)
                self.end_headers()
                return False
            except handshake.HandshakeException, e:
                # Handshake for ws(s) failed.
                self._logger.info('Handshake failed for error: %s', e)
                self.send_error(e.status)
                return False

            request._dispatcher = self._options.dispatcher
            self._options.dispatcher.transfer_data(request)
        except handshake.AbortedByUserException, e:
            self._logger.info('Aborted: %s', e)
        # The request was (or was meant to be) a WebSocket session; never
        # continue regular HTTP processing.
        return False

    def log_request(self, code='-', size='-'):
        """Override BaseHTTPServer.log_request."""

        self._logger.info('"%s" %s %s',
                          self.requestline, str(code), str(size))

    def log_error(self, *args):
        """Override BaseHTTPServer.log_error."""

        # Despite the name, this method is for warnings than for errors.
        # For example, HTTP status code is logged by this method.
        self._logger.warning('%s - %s',
                             self.address_string(),
                             args[0] % args[1:])

    def is_cgi(self):
        """Test whether self.path corresponds to a CGI script.

        Add extra check that self.path doesn't contains ..
        Also check if the file is a executable file or not.
        If the file is not executable, it is handled as static file or dir
        rather than a CGI script.
        """

        if CGIHTTPServer.CGIHTTPRequestHandler.is_cgi(self):
            if '..' in self.path:
                return False
            # strip query parameter from request path
            resource_name = self.path.split('?', 2)[0]
            # convert resource_name into real path name in filesystem.
            scriptfile = self.translate_path(resource_name)
            if not os.path.isfile(scriptfile):
                return False
            if not self.is_executable(scriptfile):
                return False
            return True
        return False
def _get_logger_from_class(c):
    """Return the logger named '<module>.<class>' for class c."""
    qualified_name = '%s.%s' % (c.__module__, c.__name__)
    return logging.getLogger(qualified_name)
def _configure_logging(options):
    """Configure the root logger and per-class loggers from options.

    Args:
        options: parsed option object providing log_level, log_file,
            log_max, log_count and deflate_log_level.
    """
    # Register the custom FINE level (more verbose than DEBUG).
    logging.addLevelName(common.LOGLEVEL_FINE, 'FINE')

    logger = logging.getLogger()
    logger.setLevel(logging.getLevelName(options.log_level.upper()))
    # Rotate on-disk logs when --log-file is given; stderr otherwise.
    if options.log_file:
        handler = logging.handlers.RotatingFileHandler(
            options.log_file, 'a', options.log_max, options.log_count)
    else:
        handler = logging.StreamHandler()
    formatter = logging.Formatter(
        '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    # The compression helpers are chatty; they get an independent level.
    deflate_log_level_name = logging.getLevelName(
        options.deflate_log_level.upper())
    _get_logger_from_class(util._Deflater).setLevel(
        deflate_log_level_name)
    _get_logger_from_class(util._Inflater).setLevel(
        deflate_log_level_name)
def _build_option_parser():
    """Build the optparse parser describing every command line flag.

    The same parser is reused when merging entries from a configuration
    file (see _parse_args_and_config).

    Returns:
        An optparse.OptionParser instance.
    """
    parser = optparse.OptionParser()

    parser.add_option('--config', dest='config_file', type='string',
                      default=None,
                      help=('Path to configuration file. See the file comment '
                            'at the top of this file for the configuration '
                            'file format'))
    parser.add_option('-H', '--server-host', '--server_host',
                      dest='server_host',
                      default='',
                      help='server hostname to listen to')
    parser.add_option('-V', '--validation-host', '--validation_host',
                      dest='validation_host',
                      default=None,
                      help='server hostname to validate in absolute path.')
    parser.add_option('-p', '--port', dest='port', type='int',
                      default=common.DEFAULT_WEB_SOCKET_PORT,
                      help='port to listen to')
    parser.add_option('-P', '--validation-port', '--validation_port',
                      dest='validation_port', type='int',
                      default=None,
                      help='server port to validate in absolute path.')
    parser.add_option('-w', '--websock-handlers', '--websock_handlers',
                      dest='websock_handlers',
                      default='.',
                      help=('The root directory of WebSocket handler files. '
                            'If the path is relative, --document-root is used '
                            'as the base.'))
    parser.add_option('-m', '--websock-handlers-map-file',
                      '--websock_handlers_map_file',
                      dest='websock_handlers_map_file',
                      default=None,
                      help=('WebSocket handlers map file. '
                            'Each line consists of alias_resource_path and '
                            'existing_resource_path, separated by spaces.'))
    parser.add_option('-s', '--scan-dir', '--scan_dir', dest='scan_dir',
                      default=None,
                      help=('Must be a directory under --websock-handlers. '
                            'Only handlers under this directory are scanned '
                            'and registered to the server. '
                            'Useful for saving scan time when the handler '
                            'root directory contains lots of files that are '
                            'not handler file or are handler files but you '
                            'don\'t want them to be registered. '))
    parser.add_option('--allow-handlers-outside-root-dir',
                      '--allow_handlers_outside_root_dir',
                      dest='allow_handlers_outside_root_dir',
                      action='store_true',
                      default=False,
                      help=('Scans WebSocket handlers even if their canonical '
                            'path is not under --websock-handlers.'))
    parser.add_option('-d', '--document-root', '--document_root',
                      dest='document_root', default='.',
                      help='Document root directory.')
    parser.add_option('-x', '--cgi-paths', '--cgi_paths', dest='cgi_paths',
                      default=None,
                      help=('CGI paths relative to document_root.'
                            'Comma-separated. (e.g -x /cgi,/htbin) '
                            'Files under document_root/cgi_path are handled '
                            'as CGI programs. Must be executable.'))
    parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
                      default=False, help='use TLS (wss://)')
    parser.add_option('--tls-module', '--tls_module', dest='tls_module',
                      type='choice',
                      choices = [_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL],
                      help='Use ssl module if "%s" is specified. '
                      'Use pyOpenSSL module if "%s" is specified' %
                      (_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL))
    parser.add_option('-k', '--private-key', '--private_key',
                      dest='private_key',
                      default='', help='TLS private key file.')
    parser.add_option('-c', '--certificate', dest='certificate',
                      default='', help='TLS certificate file.')
    parser.add_option('--tls-client-auth', dest='tls_client_auth',
                      action='store_true', default=False,
                      help='Requests TLS client auth on every connection.')
    parser.add_option('--tls-client-cert-optional',
                      dest='tls_client_cert_optional',
                      action='store_true', default=False,
                      help=('Makes client certificate optional even though '
                            'TLS client auth is enabled.'))
    parser.add_option('--tls-client-ca', dest='tls_client_ca', default='',
                      help=('Specifies a pem file which contains a set of '
                            'concatenated CA certificates which are used to '
                            'validate certificates passed from clients'))
    parser.add_option('--basic-auth', dest='use_basic_auth',
                      action='store_true', default=False,
                      help='Requires Basic authentication.')
    parser.add_option('--basic-auth-credential',
                      dest='basic_auth_credential', default='test:test',
                      help='Specifies the credential of basic authentication '
                      'by username:password pair (e.g. test:test).')
    parser.add_option('-l', '--log-file', '--log_file', dest='log_file',
                      default='', help='Log file.')
    # Custom log level:
    # - FINE: Prints status of each frame processing step
    parser.add_option('--log-level', '--log_level', type='choice',
                      dest='log_level', default='warn',
                      choices=['fine',
                               'debug', 'info', 'warning', 'warn', 'error',
                               'critical'],
                      help='Log level.')
    parser.add_option('--deflate-log-level', '--deflate_log_level',
                      type='choice',
                      dest='deflate_log_level', default='warn',
                      choices=['debug', 'info', 'warning', 'warn', 'error',
                               'critical'],
                      help='Log level for _Deflater and _Inflater.')
    parser.add_option('--thread-monitor-interval-in-sec',
                      '--thread_monitor_interval_in_sec',
                      dest='thread_monitor_interval_in_sec',
                      type='int', default=-1,
                      help=('If positive integer is specified, run a thread '
                            'monitor to show the status of server threads '
                            'periodically in the specified inteval in '
                            'second. If non-positive integer is specified, '
                            'disable the thread monitor.'))
    parser.add_option('--log-max', '--log_max', dest='log_max', type='int',
                      default=_DEFAULT_LOG_MAX_BYTES,
                      help='Log maximum bytes')
    parser.add_option('--log-count', '--log_count', dest='log_count',
                      type='int', default=_DEFAULT_LOG_BACKUP_COUNT,
                      help='Log backup count')
    parser.add_option('--allow-draft75', dest='allow_draft75',
                      action='store_true', default=False,
                      help='Obsolete option. Ignored.')
    parser.add_option('--strict', dest='strict', action='store_true',
                      default=False, help='Obsolete option. Ignored.')
    parser.add_option('-q', '--queue', dest='request_queue_size', type='int',
                      default=_DEFAULT_REQUEST_QUEUE_SIZE,
                      help='request queue size')

    return parser
class ThreadMonitor(threading.Thread):
    """Daemon thread that periodically logs the names of all live threads."""

    daemon = True

    def __init__(self, interval_in_sec):
        """interval_in_sec: seconds to sleep between status reports."""
        threading.Thread.__init__(self, name='ThreadMonitor')

        self._logger = util.get_class_logger(self)

        self._interval_in_sec = interval_in_sec

    def run(self):
        # Loops forever; the daemon flag lets the process exit anyway.
        while True:
            thread_name_list = [
                live_thread.name for live_thread in threading.enumerate()]
            self._logger.info(
                "%d active threads: %s",
                threading.active_count(),
                ', '.join(thread_name_list))
            time.sleep(self._interval_in_sec)
def _parse_args_and_config(args):
    # Parse command-line options, optionally merging in options read from
    # the configuration file named by --config-file. Returns the
    # (options, args) pair produced by optparse.
    parser = _build_option_parser()
    # First, parse options without configuration file.
    temporary_options, temporary_args = parser.parse_args(args=args)
    if temporary_args:
        # This server takes no positional arguments; treat any as fatal.
        logging.critical(
            'Unrecognized positional arguments: %r', temporary_args)
        sys.exit(1)
    if temporary_options.config_file:
        try:
            config_fp = open(temporary_options.config_file, 'r')
        except IOError, e:
            logging.critical(
                'Failed to open configuration file %r: %r',
                temporary_options.config_file,
                e)
            sys.exit(1)
        config_parser = ConfigParser.SafeConfigParser()
        config_parser.readfp(config_fp)
        config_fp.close()
        # Convert each (name, value) pair in the [pywebsocket] section into
        # a '--name value' argument pair.
        args_from_config = []
        for name, value in config_parser.items('pywebsocket'):
            args_from_config.append('--' + name)
            args_from_config.append(value)
        # Config-file options go first so explicit command-line arguments
        # take precedence over them.
        if args is None:
            args = args_from_config
        else:
            args = args_from_config + args
        # Re-parse with the merged argument list.
        return parser.parse_args(args=args)
    else:
        return temporary_options, temporary_args
def _main(args=None):
    """You can call this function from your own program, but please note that
    this function has some side-effects that might affect your program. For
    example, util.wrap_popen3_for_win use in this method replaces implementation
    of os.popen3.
    """
    options, args = _parse_args_and_config(args=args)
    # Side effect: the process working directory becomes the document root.
    os.chdir(options.document_root)
    _configure_logging(options)
    # Warn about (but otherwise ignore) obsolete options kept for
    # command-line compatibility.
    if options.allow_draft75:
        logging.warning('--allow_draft75 option is obsolete.')
    if options.strict:
        logging.warning('--strict option is obsolete.')
    # TODO(tyoshino): Clean up initialization of CGI related values. Move some
    # of code here to WebSocketRequestHandler class if it's better.
    options.cgi_directories = []
    options.is_executable_method = None
    if options.cgi_paths:
        options.cgi_directories = options.cgi_paths.split(',')
        if sys.platform in ('cygwin', 'win32'):
            cygwin_path = None
            # For Win32 Python, it is expected that CYGWIN_PATH
            # is set to a directory of cygwin binaries.
            # For example, websocket_server.py in Chromium sets CYGWIN_PATH to
            # full path of third_party/cygwin/bin.
            if 'CYGWIN_PATH' in os.environ:
                cygwin_path = os.environ['CYGWIN_PATH']
            util.wrap_popen3_for_win(cygwin_path)
            def __check_script(scriptpath):
                return util.get_script_interp(scriptpath, cygwin_path)
            options.is_executable_method = __check_script
    if options.use_tls:
        # Resolve which TLS implementation to use. When --tls-module is not
        # given, prefer the standard ssl module and fall back to pyOpenSSL.
        if options.tls_module is None:
            if _import_ssl():
                options.tls_module = _TLS_BY_STANDARD_MODULE
                logging.debug('Using ssl module')
            elif _import_pyopenssl():
                options.tls_module = _TLS_BY_PYOPENSSL
                logging.debug('Using pyOpenSSL module')
            else:
                logging.critical(
                        'TLS support requires ssl or pyOpenSSL module.')
                sys.exit(1)
        elif options.tls_module == _TLS_BY_STANDARD_MODULE:
            if not _import_ssl():
                logging.critical('ssl module is not available')
                sys.exit(1)
        elif options.tls_module == _TLS_BY_PYOPENSSL:
            if not _import_pyopenssl():
                logging.critical('pyOpenSSL module is not available')
                sys.exit(1)
        else:
            logging.critical('Invalid --tls-module option: %r',
                             options.tls_module)
            sys.exit(1)
        # A key and certificate are mandatory for any TLS mode.
        if not options.private_key or not options.certificate:
            logging.critical(
                    'To use TLS, specify private_key and certificate.')
            sys.exit(1)
        if (options.tls_client_cert_optional and
            not options.tls_client_auth):
            logging.critical('Client authentication must be enabled to '
                             'specify tls_client_cert_optional')
            sys.exit(1)
    else:
        # Reject TLS-only options when TLS itself is disabled, so that a
        # misconfiguration fails loudly instead of silently doing nothing.
        if options.tls_module is not None:
            logging.critical('Use --tls-module option only together with '
                             '--use-tls option.')
            sys.exit(1)
        if options.tls_client_auth:
            logging.critical('TLS must be enabled for client authentication.')
            sys.exit(1)
        if options.tls_client_cert_optional:
            logging.critical('TLS must be enabled for client authentication.')
            sys.exit(1)
    if not options.scan_dir:
        options.scan_dir = options.websock_handlers
    if options.use_basic_auth:
        # Pre-compute the expected Authorization header value.
        options.basic_auth_credential = 'Basic ' + base64.b64encode(
            options.basic_auth_credential)
    try:
        if options.thread_monitor_interval_in_sec > 0:
            # Run a thread monitor to show the status of server threads for
            # debugging.
            ThreadMonitor(options.thread_monitor_interval_in_sec).start()
        server = WebSocketServer(options)
        server.serve_forever()
    except Exception, e:
        logging.critical('mod_pywebsocket: %s' % e)
        logging.critical('mod_pywebsocket: %s' % util.get_stack_trace())
        sys.exit(1)
# Script entry point: pass only the real arguments, not the program name.
if __name__ == '__main__':
    _main(sys.argv[1:])
# vi:sts=4 sw=4 et
| bsd-3-clause |
asimshankar/tensorflow | tensorflow/contrib/summary/summary_ops_graph_test.py | 15 | 15532 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import time
import six
from tensorflow.contrib.summary import summary_test_util
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as summary_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
get_all = summary_test_util.get_all  # shorthand for querying rows from the summary DB in tests
class GraphFileTest(test_util.TensorFlowTestCase):
  """Graph-mode tests for the file-backed summary writer ops.

  Note: summary ops are registered on the default graph as they are
  created, so the order of op creation relative to sess.run calls is
  significant throughout these tests.
  """

  def testSummaryOps(self):
    logdir = self.get_temp_dir()
    writer = summary_ops.create_file_writer(logdir, max_queue=0)
    with writer.as_default(), summary_ops.always_record_summaries():
      summary_ops.generic('tensor', 1, step=1)
      summary_ops.scalar('scalar', 2.0, step=1)
      summary_ops.histogram('histogram', [1.0], step=1)
      summary_ops.image('image', [[[[1.0]]]], step=1)
      summary_ops.audio('audio', [[1.0]], 1.0, 1, step=1)
    with self.cached_session() as sess:
      sess.run(summary_ops.summary_writer_initializer_op())
      sess.run(summary_ops.all_summary_ops())
    # The working condition of the ops is tested in the C++ test so we just
    # test here that we're calling them correctly.
    self.assertTrue(gfile.Exists(logdir))

  def testSummaryName(self):
    logdir = self.get_temp_dir()
    writer = summary_ops.create_file_writer(logdir, max_queue=0)
    with writer.as_default(), summary_ops.always_record_summaries():
      summary_ops.scalar('scalar', 2.0, step=1)
    with self.cached_session() as sess:
      sess.run(summary_ops.summary_writer_initializer_op())
      sess.run(summary_ops.all_summary_ops())
    events = summary_test_util.events_from_logdir(logdir)
    # First event is always the file_version header; second is ours.
    self.assertEqual(2, len(events))
    self.assertEqual('scalar', events[1].summary.value[0].tag)

  def testSummaryNameScope(self):
    logdir = self.get_temp_dir()
    writer = summary_ops.create_file_writer(logdir, max_queue=0)
    with writer.as_default(), summary_ops.always_record_summaries():
      with ops.name_scope('scope'):
        summary_ops.scalar('scalar', 2.0, step=1)
    with self.cached_session() as sess:
      sess.run(summary_ops.summary_writer_initializer_op())
      sess.run(summary_ops.all_summary_ops())
    events = summary_test_util.events_from_logdir(logdir)
    self.assertEqual(2, len(events))
    # The enclosing name scope becomes part of the summary tag.
    self.assertEqual('scope/scalar', events[1].summary.value[0].tag)

  def testSummaryGlobalStep(self):
    training_util.get_or_create_global_step()
    logdir = self.get_temp_dir()
    writer = summary_ops.create_file_writer(logdir, max_queue=0)
    with writer.as_default(), summary_ops.always_record_summaries():
      # No explicit step: the op reads the global step variable.
      summary_ops.scalar('scalar', 2.0)
    with self.cached_session() as sess:
      sess.run(variables.global_variables_initializer())
      sess.run(summary_ops.summary_writer_initializer_op())
      step, _ = sess.run(
          [training_util.get_global_step(), summary_ops.all_summary_ops()])
    events = summary_test_util.events_from_logdir(logdir)
    self.assertEqual(2, len(events))
    self.assertEqual(step, events[1].step)

  def testMaxQueue(self):
    logdir = self.get_temp_dir()
    writer = summary_ops.create_file_writer(
        logdir, max_queue=1, flush_millis=999999)
    with writer.as_default(), summary_ops.always_record_summaries():
      summary_ops.scalar('scalar', 2.0, step=1)
    with self.cached_session() as sess:
      sess.run(summary_ops.summary_writer_initializer_op())
      get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
      # Note: First tf.Event is always file_version.
      self.assertEqual(1, get_total())
      sess.run(summary_ops.all_summary_ops())
      self.assertEqual(1, get_total())
      # Should flush after second summary since max_queue = 1
      sess.run(summary_ops.all_summary_ops())
      self.assertEqual(3, get_total())

  def testFlushFunction(self):
    logdir = self.get_temp_dir()
    # Huge queue/flush interval so nothing is written until we flush.
    writer = summary_ops.create_file_writer(
        logdir, max_queue=999999, flush_millis=999999)
    with writer.as_default(), summary_ops.always_record_summaries():
      summary_ops.scalar('scalar', 2.0, step=1)
      flush_op = summary_ops.flush()
    with self.cached_session() as sess:
      sess.run(summary_ops.summary_writer_initializer_op())
      get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
      # Note: First tf.Event is always file_version.
      self.assertEqual(1, get_total())
      sess.run(summary_ops.all_summary_ops())
      self.assertEqual(1, get_total())
      sess.run(flush_op)
      self.assertEqual(2, get_total())
      # Test "writer" parameter
      sess.run(summary_ops.all_summary_ops())
      sess.run(summary_ops.flush(writer=writer))
      self.assertEqual(3, get_total())
      sess.run(summary_ops.all_summary_ops())
      sess.run(summary_ops.flush(writer=writer._resource))  # pylint:disable=protected-access
      self.assertEqual(4, get_total())

  def testSharedName(self):
    logdir = self.get_temp_dir()
    with summary_ops.always_record_summaries():
      # Create with default shared name (should match logdir)
      writer1 = summary_ops.create_file_writer(logdir)
      with writer1.as_default():
        summary_ops.scalar('one', 1.0, step=1)
      # Create with explicit logdir shared name (should be same resource/file)
      shared_name = 'logdir:' + logdir
      writer2 = summary_ops.create_file_writer(logdir, name=shared_name)
      with writer2.as_default():
        summary_ops.scalar('two', 2.0, step=2)
      # Create with different shared name (should be separate resource/file)
      writer3 = summary_ops.create_file_writer(logdir, name='other')
      with writer3.as_default():
        summary_ops.scalar('three', 3.0, step=3)
    with self.cached_session() as sess:
      # Run init ops across writers sequentially to avoid race condition.
      # TODO(nickfelt): fix race condition in resource manager lookup or create
      sess.run(writer1.init())
      sess.run(writer2.init())
      time.sleep(1.1)  # Ensure filename has a different timestamp
      sess.run(writer3.init())
      sess.run(summary_ops.all_summary_ops())
      sess.run([writer1.flush(), writer2.flush(), writer3.flush()])
    event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))))
    # First file has tags "one" and "two"
    events = summary_test_util.events_from_file(next(event_files))
    self.assertEqual('brain.Event:2', events[0].file_version)
    tags = [e.summary.value[0].tag for e in events[1:]]
    self.assertItemsEqual(['one', 'two'], tags)
    # Second file has tag "three"
    events = summary_test_util.events_from_file(next(event_files))
    self.assertEqual('brain.Event:2', events[0].file_version)
    tags = [e.summary.value[0].tag for e in events[1:]]
    self.assertItemsEqual(['three'], tags)
    # No more files
    self.assertRaises(StopIteration, lambda: next(event_files))

  def testWriterInitAndClose(self):
    logdir = self.get_temp_dir()
    with summary_ops.always_record_summaries():
      writer = summary_ops.create_file_writer(
          logdir, max_queue=100, flush_millis=1000000)
      with writer.as_default():
        summary_ops.scalar('one', 1.0, step=1)
    with self.cached_session() as sess:
      sess.run(summary_ops.summary_writer_initializer_op())
      get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
      self.assertEqual(1, get_total())  # file_version Event
      # Running init() again while writer is open has no effect
      sess.run(writer.init())
      self.assertEqual(1, get_total())
      sess.run(summary_ops.all_summary_ops())
      self.assertEqual(1, get_total())
      # Running close() should do an implicit flush
      sess.run(writer.close())
      self.assertEqual(2, get_total())
      # Running init() on a closed writer should start a new file
      time.sleep(1.1)  # Ensure filename has a different timestamp
      sess.run(writer.init())
      sess.run(summary_ops.all_summary_ops())
      sess.run(writer.close())
      files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))
      self.assertEqual(2, len(files))
      self.assertEqual(2, len(summary_test_util.events_from_file(files[1])))

  def testWriterFlush(self):
    logdir = self.get_temp_dir()
    with summary_ops.always_record_summaries():
      writer = summary_ops.create_file_writer(
          logdir, max_queue=100, flush_millis=1000000)
      with writer.as_default():
        summary_ops.scalar('one', 1.0, step=1)
    with self.cached_session() as sess:
      sess.run(summary_ops.summary_writer_initializer_op())
      get_total = lambda: len(summary_test_util.events_from_logdir(logdir))
      self.assertEqual(1, get_total())  # file_version Event
      sess.run(summary_ops.all_summary_ops())
      self.assertEqual(1, get_total())
      sess.run(writer.flush())
      self.assertEqual(2, get_total())
class GraphDbTest(summary_test_util.SummaryDbTest):
  """Graph-mode tests for the database-backed summary writer and for
  conditional recording (record_summaries_every_n_global_steps)."""

  def testGraphPassedToGraph_isForbiddenForThineOwnSafety(self):
    # summary_ops.graph() only accepts a serialized GraphDef, not a
    # tf.Graph object or an arbitrary string.
    with self.assertRaises(TypeError):
      summary_ops.graph(ops.Graph())
    with self.assertRaises(TypeError):
      summary_ops.graph('')

  def testGraphSummary(self):
    training_util.get_or_create_global_step()
    name = 'hi'
    graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),))
    with self.cached_session():
      with self.create_db_writer().as_default():
        summary_ops.initialize(graph=graph)
    # The node from the GraphDef should have been recorded in the DB.
    six.assertCountEqual(self, [name],
                         get_all(self.db, 'SELECT node_name FROM Nodes'))

  def testScalarSummary(self):
    """Test record_summaries_every_n_global_steps and all_summaries()."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      global_step = training_util.get_or_create_global_step()
      global_step.initializer.run()
      with ops.device('/cpu:0'):
        step_increment = state_ops.assign_add(global_step, 1)
      sess.run(step_increment)  # Increment global step from 0 to 1
      logdir = tempfile.mkdtemp()
      with summary_ops.create_file_writer(logdir, max_queue=0,
                                          name='t2').as_default():
        with summary_ops.record_summaries_every_n_global_steps(2):
          summary_ops.initialize()
          summary_op = summary_ops.scalar('my_scalar', 2.0)
          # Neither of these should produce a summary because
          # global_step is 1 and "1 % 2 != 0"
          sess.run(summary_ops.all_summary_ops())
          sess.run(summary_op)
          events = summary_test_util.events_from_logdir(logdir)
          self.assertEqual(len(events), 1)
          # Increment global step from 1 to 2 and check that the summary
          # is now written
          sess.run(step_increment)
          sess.run(summary_ops.all_summary_ops())
          events = summary_test_util.events_from_logdir(logdir)
          self.assertEqual(len(events), 2)
          self.assertEqual(events[1].summary.value[0].tag, 'my_scalar')

  def testScalarSummaryNameScope(self):
    """Test record_summaries_every_n_global_steps and all_summaries()."""
    with ops.Graph().as_default(), self.cached_session() as sess:
      global_step = training_util.get_or_create_global_step()
      global_step.initializer.run()
      with ops.device('/cpu:0'):
        step_increment = state_ops.assign_add(global_step, 1)
      sess.run(step_increment)  # Increment global step from 0 to 1
      logdir = tempfile.mkdtemp()
      with summary_ops.create_file_writer(logdir, max_queue=0,
                                          name='t2').as_default():
        with summary_ops.record_summaries_every_n_global_steps(2):
          summary_ops.initialize()
          with ops.name_scope('scope'):
            summary_op = summary_ops.scalar('my_scalar', 2.0)
          # Neither of these should produce a summary because
          # global_step is 1 and "1 % 2 != 0"
          sess.run(summary_ops.all_summary_ops())
          sess.run(summary_op)
          events = summary_test_util.events_from_logdir(logdir)
          self.assertEqual(len(events), 1)
          # Increment global step from 1 to 2 and check that the summary
          # is now written
          sess.run(step_increment)
          sess.run(summary_ops.all_summary_ops())
          events = summary_test_util.events_from_logdir(logdir)
          self.assertEqual(len(events), 2)
          self.assertEqual(events[1].summary.value[0].tag, 'scope/my_scalar')

  def testSummaryGraphModeCond(self):
    # Summaries created inside a tf.cond branch get the cond's name scope.
    with ops.Graph().as_default(), self.cached_session():
      training_util.get_or_create_global_step()
      logdir = tempfile.mkdtemp()
      with summary_ops.create_file_writer(
          logdir, max_queue=0,
          name='t2').as_default(), summary_ops.always_record_summaries():
        summary_ops.initialize()
        training_util.get_or_create_global_step().initializer.run()
        def f():
          summary_ops.scalar('scalar', 2.0)
          return constant_op.constant(True)
        pred = array_ops.placeholder(dtypes.bool)
        x = control_flow_ops.cond(pred, f,
                                  lambda: constant_op.constant(False))
        x.eval(feed_dict={pred: True})
      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].tag, 'cond/scalar')

  def testSummaryGraphModeWhile(self):
    # Summaries created inside a tf.while_loop body get the loop's scope.
    with ops.Graph().as_default(), self.cached_session():
      training_util.get_or_create_global_step()
      logdir = tempfile.mkdtemp()
      with summary_ops.create_file_writer(
          logdir, max_queue=0,
          name='t2').as_default(), summary_ops.always_record_summaries():
        summary_ops.initialize()
        training_util.get_or_create_global_step().initializer.run()
        def body(unused_pred):
          summary_ops.scalar('scalar', 2.0)
          return constant_op.constant(False)
        def cond(pred):
          return pred
        pred = array_ops.placeholder(dtypes.bool)
        x = control_flow_ops.while_loop(cond, body, [pred])
        x.eval(feed_dict={pred: True})
      events = summary_test_util.events_from_logdir(logdir)
      self.assertEqual(len(events), 2)
      self.assertEqual(events[1].summary.value[0].tag, 'while/scalar')
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
kanarelo/reportlab | tests/test_lib_utils.py | 13 | 5924 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""Tests for reportlab.lib.utils
"""
__version__=''' $Id$ '''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, printLocation
setOutDir(__name__)
import os, time, sys
import reportlab
import unittest
from reportlab.lib import colors
from reportlab.lib.utils import recursiveImport, recursiveGetAttr, recursiveSetAttr, rl_isfile, \
isCompactDistro, isPy3
def _rel_open_and_read(fn):
    """Read *fn* with open_and_read, resolving it relative to the tests
    folder, and restore the previous working directory afterwards."""
    from reportlab.lib.utils import open_and_read
    from reportlab.lib.testutils import testsFolder
    previous_dir = os.getcwd()
    os.chdir(testsFolder)
    try:
        return open_and_read(fn)
    finally:
        # Always restore the cwd, even if the read raises.
        os.chdir(previous_dir)
class ImporterTestCase(unittest.TestCase):
    "Test import utilities"

    # Class-level counter so each test gets a uniquely named temp module.
    count = 0

    def setUp(self):
        # Create a throwaway temp dir containing an importable dummy module
        # named test_module_<timestamp><count>.py.
        from reportlab.lib.utils import get_rl_tempdir
        s = repr(int(time.time())) + repr(self.count)
        self.__class__.count += 1
        self._tempdir = get_rl_tempdir('reportlab_test','tmp_%s' % s)
        if not os.path.isdir(self._tempdir):
            os.makedirs(self._tempdir,0o700)
        _testmodulename = os.path.join(self._tempdir,'test_module_%s.py' % s)
        # Use a context manager so the handle is closed even if write fails.
        with open(_testmodulename,'w') as f:
            f.write('__all__=[]\n')
        if sys.platform=='darwin' and isPy3:
            time.sleep(0.3)
        self._testmodulename = os.path.splitext(os.path.basename(_testmodulename))[0]

    def tearDown(self):
        from shutil import rmtree
        rmtree(self._tempdir,1)

    def test1(self):
        "try stuff known to be in the path"
        m1 = recursiveImport('reportlab.pdfgen.canvas')
        import reportlab.pdfgen.canvas
        assert m1 == reportlab.pdfgen.canvas

    def test2(self):
        "try under a well known directory NOT on the path"
        from reportlab.lib.testutils import testsFolder
        D = os.path.join(testsFolder,'..','tools','pythonpoint')
        fn = os.path.join(D,'stdparser.py')
        if rl_isfile(fn) or rl_isfile(fn+'c') or rl_isfile(fn+'o'):
            m1 = recursiveImport('stdparser', baseDir=D)

    def test3(self):
        "ensure CWD is on the path"
        try:
            cwd = os.getcwd()
            os.chdir(self._tempdir)
            m1 = recursiveImport(self._testmodulename)
        finally:
            os.chdir(cwd)

    def test4(self):
        "ensure noCWD removes current dir from path"
        try:
            cwd = os.getcwd()
            os.chdir(self._tempdir)
            import sys
            try:
                # Forget any cached import from a previous test run.
                del sys.modules[self._testmodulename]
            except KeyError:
                pass
            self.assertRaises(ImportError,
                              recursiveImport,
                              self._testmodulename,
                              noCWD=1)
        finally:
            os.chdir(cwd)

    def test5(self):
        "recursive attribute setting/getting on modules"
        import reportlab.lib.units
        inch = recursiveGetAttr(reportlab, 'lib.units.inch')
        assert inch == 72
        recursiveSetAttr(reportlab, 'lib.units.cubit', 18*inch)
        cubit = recursiveGetAttr(reportlab, 'lib.units.cubit')
        assert cubit == 18*inch

    def test6(self):
        "recursive attribute setting/getting on drawings"
        from reportlab.graphics.charts.barcharts import sampleH1
        drawing = sampleH1()
        recursiveSetAttr(drawing, 'barchart.valueAxis.valueMax', 72)
        theMax = recursiveGetAttr(drawing, 'barchart.valueAxis.valueMax')
        assert theMax == 72

    def test7(self):
        "test open and read of a simple relative file"
        b = _rel_open_and_read('../docs/images/Edit_Prefs.gif')

    def test8(self):
        "test open and read of a relative file: URL"
        b = _rel_open_and_read('file:../docs/images/Edit_Prefs.gif')

    def test9(self):
        "test open and read of an http: URL"
        # NOTE: requires network access; will fail when offline.
        from reportlab.lib.utils import open_and_read
        b = open_and_read('http://www.reportlab.com/rsrc/encryption.gif')

    def test10(self):
        "test open and read of a simple relative file"
        from reportlab.lib.utils import open_and_read, getBytesIO
        b = getBytesIO(_rel_open_and_read('../docs/images/Edit_Prefs.gif'))
        b = open_and_read(b)

    def test11(self):
        "test open and read of an RFC 2397 data URI with base64 encoding"
        result = _rel_open_and_read('data:image/gif;base64,R0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs=')
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(result,b'GIF87a\x01\x00\x01\x00\x80\x00\x00\xff\xff\xff\xff\xff\xff,\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')

    def test12(self):
        "test open and read of an RFC 2397 data URI without an encoding"
        result = _rel_open_and_read('data:text/plain;,Hello%20World')
        self.assertEqual(result,b'Hello World')

    def testRecursiveImportErrors(self):
        "check we get useful error messages"
        try:
            m1 = recursiveImport('reportlab.pdfgen.brush')
            self.fail("Imported a nonexistent module")
        except ImportError as e:
            self.assertIn('reportlab.pdfgen.brush',str(e))
        try:
            m1 = recursiveImport('totally.non.existent')
            self.fail("Imported a nonexistent module")
        except ImportError as e:
            self.assertIn('totally',str(e))
        try:
            #import a module in the 'tests' directory with a bug
            m1 = recursiveImport('unimportable')
            self.fail("Imported a buggy module")
        except Exception as e:
            self.assertIn(reportlab.isPy3 and 'division by zero' or 'integer division or modulo by zero',str(e))
def makeSuite():
    """Build the unittest suite for this module."""
    suite = makeSuiteForClasses(ImporterTestCase)
    return suite
# Run the suite directly and report where output files were written.
if __name__ == "__main__": #noruntests
    unittest.TextTestRunner().run(makeSuite())
    printLocation()
| bsd-3-clause |
mvdriel/ansible-modules-core | cloud/rackspace/rax_network.py | 157 | 4110 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_network
short_description: create / delete an isolated network in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud isolated network.
version_added: "1.4"
options:
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
label:
description:
- Label (name) to give the network
default: null
cidr:
description:
- cidr of the network being created
default: null
author:
- "Christopher H. Laco (@claco)"
- "Jesse Keating (@j2sol)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build an Isolated Network
gather_facts: False
tasks:
- name: Network create request
local_action:
module: rax_network
credentials: ~/.raxpub
label: my-net
cidr: 192.168.3.0/24
state: present
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_network(module, state, label, cidr):
    """Create or delete a Rackspace isolated network.

    Ends the module run via module.exit_json() with the resulting network
    facts, or module.fail_json() on error, so this function never returns
    normally.
    """
    changed = False
    network = None
    networks = []

    if not pyrax.cloud_networks:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if state == 'present':
        if not cidr:
            module.fail_json(msg='missing required arguments: cidr')

        try:
            network = pyrax.cloud_networks.find_network_by_label(label)
        except pyrax.exceptions.NetworkNotFound:
            # Network does not exist yet; create it.
            # `except X as e` replaces the Python-2-only `except X, e`
            # syntax (valid on Python 2.6+).
            try:
                network = pyrax.cloud_networks.create(label, cidr=cidr)
                changed = True
            except Exception as e:
                # NOTE(review): e.message is Python 2 only; use str(e) if
                # this module is ever ported to Python 3.
                module.fail_json(msg='%s' % e.message)
        except Exception as e:
            module.fail_json(msg='%s' % e.message)
    elif state == 'absent':
        try:
            network = pyrax.cloud_networks.find_network_by_label(label)
            network.delete()
            changed = True
        except pyrax.exceptions.NetworkNotFound:
            # Already absent: idempotent delete, not an error.
            pass
        except Exception as e:
            module.fail_json(msg='%s' % e.message)

    if network:
        instance = dict(id=network.id,
                        label=network.label,
                        cidr=network.cidr)
        networks.append(instance)

    module.exit_json(changed=changed, networks=networks)
def main():
    """Module entry point: build the argument spec, validate prerequisites
    and dispatch to cloud_network()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present',
                   choices=['present', 'absent']),
        label=dict(required=True),
        cidr=dict(),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    setup_rax_module(module, pyrax)

    params = module.params
    cloud_network(module,
                  params.get('state'),
                  params.get('label'),
                  params.get('cidr'))
# import module snippets
# Old-style Ansible module boilerplate: the star imports provide
# AnsibleModule and the rax_* helper functions used above.
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *

### invoke the module
# Executed unconditionally at import time, as required by the legacy
# Ansible module convention (no __main__ guard).
main()
| gpl-3.0 |
hogarthj/ansible | lib/ansible/modules/network/system/net_system.py | 104 | 3068 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_system
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage the system attributes on network devices
description:
- This module provides declarative management of node system attributes
on network devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
lookup_source:
description:
- Provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) must be a valid interface configured
on the device.
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
lookups. This argument accepts either a list of DNS servers See
examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
# Usage examples rendered by ansible-doc. Option names must match the
# options declared in DOCUMENTATION (the original example used the invalid
# hyphenated name "domain-search" instead of "domain_search").
EXAMPLES = """
- name: configure hostname and domain name
  net_system:
    hostname: ios01
    domain_name: test.example.com
    domain_search:
      - ansible.com
      - redhat.com
      - cisco.com

- name: remove configuration
  net_system:
    state: absent

- name: configure DNS lookup sources
  net_system:
    lookup_source: MgmtEth0/0/CPU0/0

- name: configure name servers
  net_system:
    name_servers:
      - 8.8.8.8
      - 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- hostname ios01
- ip domain name test.example.com
"""
| gpl-3.0 |
scriptotek/lokar | almar/authorities.py | 2 | 2520 | # coding=utf-8
from __future__ import unicode_literals
import requests
import logging
import json
from colorama import Fore, Style
from .util import ANY_VALUE, pick, pick_one
log = logging.getLogger(__name__)
class Authorities(object):
    """Authorizes concepts (subject headings) against a set of vocabularies,
    filling in the record identifier ($$0) on success."""

    def __init__(self, vocabularies):
        # Mapping from vocabulary code (MARC $$2 value) to Vocabulary.
        self.vocabularies = vocabularies

    def authorize_concept(self, concept):
        """Look up *concept* in its vocabulary and update its $$0 subfield.

        Raises ValueError when the concept carries no vocabulary code ($$2).
        Logs and returns without modification when the vocabulary is unknown
        or the lookup yields no identifier.
        """
        if '2' not in concept.sf:
            raise ValueError('No vocabulary code (2) given!')

        vocab = self.vocabularies.get(concept.sf['2'])
        if vocab is None:
            log.info(Fore.RED + '✘' + Style.RESET_ALL + ' Could not authorize: %s', concept)
            return

        response = vocab.authorize_term(concept.term, concept.tag)
        identifier = response.get('id')
        if identifier is None:
            log.info(Fore.RED + '✘' + Style.RESET_ALL + ' Could not authorize: %s', concept)
            return

        current = concept.sf.get('0')
        # An existing ANY_VALUE placeholder is simply overwritten; a concrete
        # conflicting value triggers an interactive choice.
        if current and current != ANY_VALUE and current != identifier:
            identifier = pick_one(
                'The $$0 value does not match the authority record id. Please select which to use',
                [current, identifier]
            )
        concept.sf['0'] = identifier
        log.info(Fore.GREEN + '✔' + Style.RESET_ALL + ' Authorized: %s', concept)
class Vocabulary(object):
    """A vocabulary identified by its MARC code, with an optional HTTP
    id-lookup service used to authorize terms."""

    marc_code = ''
    skosmos_code = ''

    def __init__(self, marc_code, id_service_url=None):
        self.marc_code = marc_code
        # URL template with {vocabulary}, {term} and {tag} placeholders.
        self.id_service_url = id_service_url

    def authorize_term(self, term, tag):
        """Query the id service for *term* and return the parsed JSON dict.

        Returns {} when the term is empty, the HTTP request fails, or the
        response body is empty or not valid JSON.
        """
        if term == '':
            return {}

        lookup_url = self.id_service_url.format(
            vocabulary=self.marc_code, term=term, tag=tag)
        http_response = requests.get(lookup_url)
        log.debug('Authority service response: %s', http_response.text)

        body = http_response.text
        if http_response.status_code != 200 or body == '':
            return {}

        try:
            data = json.loads(body)
        except ValueError:
            log.error('ID lookup service returned: %s', body)
            return {}

        # 'info:srw/diagnostic/1/61' means "record not found"; any other
        # error diagnostic is worth a warning, but the payload is still
        # returned to the caller either way.
        if 'error' in data and data.get('uri') != 'info:srw/diagnostic/1/61':
            log.warning('ID lookup service returned: %s', data['error'])
        return data
| agpl-3.0 |
kfox1111/horizon | openstack_dashboard/dashboards/project/loadbalancers/urls.py | 64 | 2331 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.loadbalancers import views
# URL routes for the project's Load Balancer (LBaaS v1) panel: index plus
# add/update/detail views for pools, VIPs, members, monitors, and
# pool<->monitor associations.  Object ids are captured as named groups.
urlpatterns = patterns(
    'openstack_dashboard.dashboards.project.loadbalancers.views',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^addpool$', views.AddPoolView.as_view(), name='addpool'),
    url(r'^updatepool/(?P<pool_id>[^/]+)/$',
        views.UpdatePoolView.as_view(), name='updatepool'),
    url(r'^addvip/(?P<pool_id>[^/]+)/$',
        views.AddVipView.as_view(), name='addvip'),
    url(r'^updatevip/(?P<vip_id>[^/]+)/$',
        views.UpdateVipView.as_view(), name='updatevip'),
    url(r'^addmember$', views.AddMemberView.as_view(), name='addmember'),
    url(r'^updatemember/(?P<member_id>[^/]+)/$',
        views.UpdateMemberView.as_view(), name='updatemember'),
    url(r'^addmonitor$', views.AddMonitorView.as_view(), name='addmonitor'),
    url(r'^updatemonitor/(?P<monitor_id>[^/]+)/$',
        views.UpdateMonitorView.as_view(), name='updatemonitor'),
    url(r'^association/add/(?P<pool_id>[^/]+)/$',
        views.AddPMAssociationView.as_view(), name='addassociation'),
    url(r'^association/delete/(?P<pool_id>[^/]+)/$',
        views.DeletePMAssociationView.as_view(), name='deleteassociation'),
    url(r'^pool/(?P<pool_id>[^/]+)/$',
        views.PoolDetailsView.as_view(), name='pooldetails'),
    url(r'^vip/(?P<vip_id>[^/]+)/$',
        views.VipDetailsView.as_view(), name='vipdetails'),
    url(r'^member/(?P<member_id>[^/]+)/$',
        views.MemberDetailsView.as_view(), name='memberdetails'),
    url(r'^monitor/(?P<monitor_id>[^/]+)/$',
        views.MonitorDetailsView.as_view(), name='monitordetails'))
| apache-2.0 |
TheSimoms/Felleshoelet | spotifyconnector/venv/lib/python3.6/site-packages/pip/_internal/network/auth.py | 10 | 11119 | """Network Authentication Helpers
Contains interface (MultiDomainBasicAuth) and associated glue code for
providing credentials in the context of network requests.
"""
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
import logging
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.utils import get_netrc_auth
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._internal.utils.misc import (
ask,
ask_input,
ask_password,
remove_auth_from_url,
split_auth_netloc_from_url,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Dict, Optional, Tuple
from pip._internal.vcs.versioncontrol import AuthInfo
Credentials = Tuple[str, str, str]
logger = logging.getLogger(__name__)
# ``keyring`` is an optional dependency: when it is missing or its import
# fails for any reason, set it to None so callers can cheaply test
# ``if keyring`` before using it.
try:
    import keyring  # noqa
except ImportError:
    keyring = None
except Exception as exc:
    # A broken keyring backend must not break pip itself; log and disable.
    logger.warning(
        "Keyring is skipped due to an exception: %s", str(exc),
    )
    keyring = None
def get_keyring_auth(url, username):
    """Return the tuple auth for a given url from keyring."""
    if not url or not keyring:
        return None
    try:
        # Prefer the modern get_credential API when the installed keyring
        # provides it; otherwise fall back to the password-only lookup.
        getter = getattr(keyring, "get_credential", None)
        if getter is not None:
            logger.debug("Getting credentials from keyring for %s", url)
            cred = getter(url, username)
            if cred is None:
                return None
            return cred.username, cred.password

        if username:
            logger.debug("Getting password from keyring for %s", url)
            secret = keyring.get_password(url, username)
            if secret:
                return username, secret
    except Exception as exc:
        # A failing keyring backend is non-fatal; behave as if no
        # credentials were stored.
        logger.warning(
            "Keyring is skipped due to an exception: %s", str(exc),
        )
class MultiDomainBasicAuth(AuthBase):
    """requests auth helper managing credentials for multiple index hosts.

    Credentials are discovered from (in order): the request URL itself, a
    matching configured index URL, netrc, and keyring.  On a 401 the user
    may be prompted interactively and the entered credentials optionally
    saved back to keyring.
    """
    def __init__(self, prompting=True, index_urls=None):
        # type: (bool, Optional[Values]) -> None
        self.prompting = prompting
        self.index_urls = index_urls
        self.passwords = {}  # type: Dict[str, AuthInfo]
        # When the user is prompted to enter credentials and keyring is
        # available, we will offer to save them. If the user accepts,
        # this value is set to the credentials they entered. After the
        # request authenticates, the caller should call
        # ``save_credentials`` to save these.
        self._credentials_to_save = None  # type: Optional[Credentials]
    def _get_index_url(self, url):
        """Return the original index URL matching the requested URL.
        Cached or dynamically generated credentials may work against
        the original index URL rather than just the netloc.
        The provided url should have had its username and password
        removed already. If the original index url had credentials then
        they will be included in the return value.
        Returns None if no matching index was found, or if --no-index
        was specified by the user.
        """
        if not url or not self.index_urls:
            return None
        for u in self.index_urls:
            prefix = remove_auth_from_url(u).rstrip("/") + "/"
            if url.startswith(prefix):
                return u
    def _get_new_credentials(self, original_url, allow_netrc=True,
                             allow_keyring=True):
        """Find and return credentials for the specified URL."""
        # Split the credentials and netloc from the url.
        url, netloc, url_user_password = split_auth_netloc_from_url(
            original_url,
        )
        # Start with the credentials embedded in the url
        username, password = url_user_password
        if username is not None and password is not None:
            logger.debug("Found credentials in url for %s", netloc)
            return url_user_password
        # Find a matching index url for this request
        index_url = self._get_index_url(url)
        if index_url:
            # Split the credentials from the url.
            index_info = split_auth_netloc_from_url(index_url)
            if index_info:
                index_url, _, index_url_user_password = index_info
                logger.debug("Found index url %s", index_url)
        # If an index URL was found, try its embedded credentials
        if index_url and index_url_user_password[0] is not None:
            username, password = index_url_user_password
            if username is not None and password is not None:
                logger.debug("Found credentials in index url for %s", netloc)
                return index_url_user_password
        # Get creds from netrc if we still don't have them
        if allow_netrc:
            netrc_auth = get_netrc_auth(original_url)
            if netrc_auth:
                logger.debug("Found credentials in netrc for %s", netloc)
                return netrc_auth
        # If we don't have a password and keyring is available, use it.
        if allow_keyring:
            # The index url is more specific than the netloc, so try it first
            kr_auth = (
                get_keyring_auth(index_url, username) or
                get_keyring_auth(netloc, username)
            )
            if kr_auth:
                logger.debug("Found credentials in keyring for %s", netloc)
                return kr_auth
        return username, password
    def _get_url_and_credentials(self, original_url):
        """Return the credentials to use for the provided URL.
        If allowed, netrc and keyring may be used to obtain the
        correct credentials.
        Returns (url_without_credentials, username, password). Note
        that even if the original URL contains credentials, this
        function may return a different username and password.
        """
        url, netloc, _ = split_auth_netloc_from_url(original_url)
        # Use any stored credentials that we have for this netloc
        username, password = self.passwords.get(netloc, (None, None))
        if username is None and password is None:
            # No stored credentials. Acquire new credentials without prompting
            # the user. (e.g. from netrc, keyring, or the URL itself)
            username, password = self._get_new_credentials(original_url)
        if username is not None or password is not None:
            # Convert the username and password if they're None, so that
            # this netloc will show up as "cached" in the conditional above.
            # Further, HTTPBasicAuth doesn't accept None, so it makes sense to
            # cache the value that is going to be used.
            username = username or ""
            password = password or ""
            # Store any acquired credentials.
            self.passwords[netloc] = (username, password)
        assert (
            # Credentials were found
            (username is not None and password is not None) or
            # Credentials were not found
            (username is None and password is None)
        ), "Could not load credentials from url: {}".format(original_url)
        return url, username, password
    def __call__(self, req):
        """Attach any discovered credentials to ``req`` and hook 401 handling."""
        # Get credentials for this request
        url, username, password = self._get_url_and_credentials(req.url)
        # Set the url of the request to the url without any credentials
        req.url = url
        if username is not None and password is not None:
            # Send the basic auth with this request
            req = HTTPBasicAuth(username, password)(req)
        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)
        return req
    # Factored out to allow for easy patching in tests
    def _prompt_for_password(self, netloc):
        """Ask the user for credentials.

        Always returns a 3-tuple ``(username, password, save_to_keyring)``.
        """
        username = ask_input("User for %s: " % netloc)
        if not username:
            # BUGFIX: previously returned a 2-tuple (None, None) here, which
            # made the 3-value unpack in handle_401 raise ValueError whenever
            # the user entered an empty username.
            return None, None, False
        auth = get_keyring_auth(netloc, username)
        if auth:
            return auth[0], auth[1], False
        password = ask_password("Password: ")
        return username, password, True
    # Factored out to allow for easy patching in tests
    def _should_save_password_to_keyring(self):
        """Return True if keyring is usable and the user wants to save."""
        if not keyring:
            return False
        return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y"
    def handle_401(self, resp, **kwargs):
        """Response hook: on a 401, prompt for credentials and retry once."""
        # We only care about 401 responses, anything else we want to just
        # pass through the actual response
        if resp.status_code != 401:
            return resp
        # We are not able to prompt the user so simply return the response
        if not self.prompting:
            return resp
        parsed = urllib_parse.urlparse(resp.url)
        # Prompt the user for a new username and password
        username, password, save = self._prompt_for_password(parsed.netloc)
        # Store the new username and password to use for future requests
        self._credentials_to_save = None
        if username is not None and password is not None:
            self.passwords[parsed.netloc] = (username, password)
            # Prompt to save the password to keyring
            if save and self._should_save_password_to_keyring():
                self._credentials_to_save = (parsed.netloc, username, password)
        # Consume content and release the original connection to allow our new
        # request to reuse the same one.
        resp.content
        resp.raw.release_conn()
        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)
        req.register_hook("response", self.warn_on_401)
        # On successful request, save the credentials that were used to
        # keyring. (Note that if the user responded "no" above, this member
        # is not set and nothing will be saved.)
        if self._credentials_to_save:
            req.register_hook("response", self.save_credentials)
        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)
        return new_resp
    def warn_on_401(self, resp, **kwargs):
        """Response callback to warn about incorrect credentials."""
        if resp.status_code == 401:
            logger.warning(
                '401 Error, Credentials not correct for %s', resp.request.url,
            )
    def save_credentials(self, resp, **kwargs):
        """Response callback to save credentials on success."""
        assert keyring is not None, "should never reach here without keyring"
        if not keyring:
            return
        creds = self._credentials_to_save
        self._credentials_to_save = None
        if creds and resp.status_code < 400:
            try:
                logger.info('Saving credentials to keyring')
                keyring.set_password(*creds)
            except Exception:
                logger.exception('Failed to save credentials')
| gpl-2.0 |
malept/youtube-dl | youtube_dl/extractor/espn.py | 22 | 5256 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
int_or_none,
unified_timestamp,
)
class ESPNIE(InfoExtractor):
    """Extractor for single ESPN video clips (espn.com / espn.go.com)."""
    _VALID_URL = r'https?://(?:espn\.go|(?:www\.)?espn)\.com/video/clip(?:\?.*?\bid=|/_/id/)(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://espn.go.com/video/clip?id=10365079',
        'info_dict': {
            'id': '10365079',
            'ext': 'mp4',
            'title': '30 for 30 Shorts: Judging Jewell',
            'description': 'md5:39370c2e016cb4ecf498ffe75bef7f0f',
            'timestamp': 1390936111,
            'upload_date': '20140128',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # intl video, from http://www.espnfc.us/video/mls-highlights/150/video/2743663/must-see-moments-best-of-the-mls-season
        'url': 'http://espn.go.com/video/clip?id=2743663',
        'info_dict': {
            'id': '2743663',
            'ext': 'mp4',
            'title': 'Must-See Moments: Best of the MLS season',
            'description': 'md5:4c2d7232beaea572632bec41004f0aeb',
            'timestamp': 1449446454,
            'upload_date': '20151207',
        },
        'params': {
            'skip_download': True,
        },
        'expected_warnings': ['Unable to download f4m manifest'],
    }, {
        'url': 'http://www.espn.com/video/clip?id=10365079',
        'only_matching': True,
    }, {
        'url': 'http://www.espn.com/video/clip/_/id/17989860',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        """Fetch clip metadata from the ESPN clips API and build an info dict."""
        video_id = self._match_id(url)
        clip = self._download_json(
            'http://api-app.espn.com/v1/video/clips/%s' % video_id,
            video_id)['videos'][0]
        title = clip['headline']
        # Remember URLs already handled so duplicate sources are skipped.
        format_urls = set()
        formats = []
        def traverse_source(source, base_source_id=None):
            # Recursively walk the nested 'source' dict: leaf string values
            # are media URLs; nested dicts extend the format-id path
            # (e.g. 'full-hd' from {'full': {'hd': url}}).
            for source_id, source in source.items():
                if isinstance(source, compat_str):
                    extract_source(source, base_source_id)
                elif isinstance(source, dict):
                    traverse_source(
                        source,
                        '%s-%s' % (base_source_id, source_id)
                        if base_source_id else source_id)
        def extract_source(source_url, source_id=None):
            # Dispatch on the URL's extension to the matching manifest
            # parser; anything unrecognized becomes a direct-URL format.
            if source_url in format_urls:
                return
            format_urls.add(source_url)
            ext = determine_ext(source_url)
            if ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    source_url, video_id, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    source_url, video_id, f4m_id=source_id, fatal=False))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id=source_id, fatal=False))
            else:
                formats.append({
                    'url': source_url,
                    'format_id': source_id,
                })
        traverse_source(clip['links']['source'])
        self._sort_formats(formats)
        # All remaining metadata fields are optional in the API response.
        description = clip.get('caption') or clip.get('description')
        thumbnail = clip.get('thumbnail')
        duration = int_or_none(clip.get('duration'))
        timestamp = unified_timestamp(clip.get('originalPublishDate'))
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'formats': formats,
        }
class ESPNArticleIE(InfoExtractor):
    """Extractor for ESPN article/recap pages that embed a video clip.

    Finds the embedded clip id in the page and delegates to ESPNIE.
    """
    _VALID_URL = r'https?://(?:espn\.go|(?:www\.)?espn)\.com/(?:[^/]+/)*(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://espn.go.com/video/iframe/twitter/?cms=espn&id=10365079',
        'only_matching': True,
    }, {
        'url': 'http://espn.go.com/nba/recap?gameId=400793786',
        'only_matching': True,
    }, {
        'url': 'http://espn.go.com/blog/golden-state-warriors/post/_/id/593/how-warriors-rapidly-regained-a-winning-edge',
        'only_matching': True,
    }, {
        'url': 'http://espn.go.com/sports/endurance/story/_/id/12893522/dzhokhar-tsarnaev-sentenced-role-boston-marathon-bombings',
        'only_matching': True,
    }, {
        'url': 'http://espn.go.com/nba/playoffs/2015/story/_/id/12887571/john-wall-washington-wizards-no-swelling-left-hand-wrist-game-5-return',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        # Defer to ESPNIE for direct clip URLs; this extractor's pattern is
        # broad and would otherwise shadow it.
        return False if ESPNIE.suitable(url) else super(ESPNArticleIE, cls).suitable(url)
    def _real_extract(self, url):
        """Scrape the embedded clip id from the page and hand off to ESPNIE."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_id = self._search_regex(
            r'class=(["\']).*?video-play-button.*?\1[^>]+data-id=["\'](?P<id>\d+)',
            webpage, 'video id', group='id')
        return self.url_result(
            'http://espn.go.com/video/clip?id=%s' % video_id, ESPNIE.ie_key())
| unlicense |
jdinuncio/ansible-modules-extras | cloud/amazon/cloudformation_facts.py | 23 | 11217 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cloudformation_facts
short_description: Obtain facts about an AWS CloudFormation stack
description:
- Gets information about an AWS CloudFormation stack
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.2"
author: Justin Menga (@jmenga)
options:
stack_name:
description:
- The name or id of the CloudFormation stack
required: true
all_facts:
description:
- Get all stack information for the stack
required: false
default: false
stack_events:
description:
- Get stack events for the stack
required: false
default: false
stack_template:
description:
- Get stack template body for the stack
required: false
default: false
stack_resources:
description:
- Get stack resources for the stack
required: false
default: false
stack_policy:
description:
- Get stack policy for the stack
required: false
default: false
extends_documentation_fragment:
- aws
- ec2
'''
# Usage examples (YAML inside a string, shown by ansible-doc).
# FIX: the debug msg example nested single quotes inside a single-quoted YAML
# scalar, which is invalid YAML; it now uses double quotes.
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Get summary information about a stack
- cloudformation_facts:
    stack_name: my-cloudformation-stack
# Facts are published in ansible_facts['cloudformation'][<stack_name>]
- debug:
    msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}"
# Get all stack information about a stack
- cloudformation_facts:
    stack_name: my-cloudformation-stack
    all_facts: true
# Get stack resource and stack policy information about a stack
- cloudformation_facts:
    stack_name: my-cloudformation-stack
    stack_resources: true
    stack_policy: true
# Example dictionary outputs for stack_outputs, stack_parameters and stack_resources:
"stack_outputs": {
    "ApplicationDatabaseName": "dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com",
    ...
},
"stack_parameters": {
    "DatabaseEngine": "mysql",
    "DatabasePassword": "****",
    ...
},
"stack_resources": {
    "AutoscalingGroup": "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7",
    "AutoscalingSecurityGroup": "sg-abcd1234",
    "ApplicationDatabase": "dazvlpr01xj55a",
    "EcsTaskDefinition": "arn:aws:ecs:ap-southeast-2:123456789:task-definition/dev-someapp-EcsTaskDefinition-1F2VM9QB0I7K9:1"
    ...
}
'''
# Return-value documentation (YAML inside a string, shown by ansible-doc).
# FIX: "stack_resourses" typo corrected to "stack_resources" (twice) so the
# docs name the real module option.
RETURN = '''
stack_description:
    description: Summary facts about the stack
    returned: always
    type: dict
stack_outputs:
    description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each output 'OutputValue' parameter
    returned: always
    type: dict
stack_parameters:
    description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of each parameter 'ParameterValue' parameter
    returned: always
    type: dict
stack_events:
    description: All stack events for the stack
    returned: only if all_facts or stack_events is true
    type: list of events
stack_policy:
    description: Describes the stack policy for the stack
    returned: only if all_facts or stack_policy is true
    type: dict
stack_template:
    description: Describes the stack template for the stack
    returned: only if all_facts or stack_template is true
    type: dict
stack_resource_list:
    description: Describes stack resources for the stack
    returned: only if all_facts or stack_resources is true
    type: list of resources
stack_resources:
    description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each resource 'PhysicalResourceId' parameter
    returned: only if all_facts or stack_resources is true
    type: dict
'''
# boto3 is an optional import; record availability so main() can fail with a
# clear message instead of crashing with ImportError at module load time.
try:
    import boto3
    import botocore
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec
from ansible.module_utils.basic import AnsibleModule
from functools import partial
import json
import traceback
class CloudFormationServiceManager:
    """Handles CloudFormation Services

    Thin wrapper around the boto3 CloudFormation client that converts API
    errors into module failures.
    """
    def __init__(self, module):
        self.module = module
        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            self.client = boto3_conn(module, conn_type='client',
                                     resource='cloudformation', region=region,
                                     endpoint=ec2_url, **aws_connect_kwargs)
        except botocore.exceptions.NoRegionError:
            self.module.fail_json(msg="Region must be specified as a parameter, in AWS_DEFAULT_REGION environment variable or in boto configuration file")
        except Exception as e:
            # FIX: traceback.format_exc() takes no exception argument (its
            # only parameter is `limit`); passing `e` was wrong and raises
            # TypeError on Python 3.  Applied to every handler below too.
            self.module.fail_json(msg="Can't establish connection - " + str(e), exception=traceback.format_exc())
    def describe_stack(self, stack_name):
        """Return the description dict of a single stack, or fail the module."""
        try:
            func = partial(self.client.describe_stacks, StackName=stack_name)
            response = self.paginated_response(func, 'Stacks')
            if response:
                return response[0]
            self.module.fail_json(msg="Error describing stack - an empty response was returned")
        except Exception as e:
            self.module.fail_json(msg="Error describing stack - " + str(e), exception=traceback.format_exc())
    def list_stack_resources(self, stack_name):
        """Return all resource summaries of the stack."""
        try:
            func = partial(self.client.list_stack_resources, StackName=stack_name)
            return self.paginated_response(func, 'StackResourceSummaries')
        except Exception as e:
            self.module.fail_json(msg="Error listing stack resources - " + str(e), exception=traceback.format_exc())
    def describe_stack_events(self, stack_name):
        """Return all events recorded for the stack."""
        try:
            func = partial(self.client.describe_stack_events, StackName=stack_name)
            return self.paginated_response(func, 'StackEvents')
        except Exception as e:
            self.module.fail_json(msg="Error describing stack events - " + str(e), exception=traceback.format_exc())
    def get_stack_policy(self, stack_name):
        """Return the stack policy as a dict (empty dict when none is set)."""
        try:
            response = self.client.get_stack_policy(StackName=stack_name)
            stack_policy = response.get('StackPolicyBody')
            if stack_policy:
                return json.loads(stack_policy)
            return dict()
        except Exception as e:
            self.module.fail_json(msg="Error getting stack policy - " + str(e), exception=traceback.format_exc())
    def get_template(self, stack_name):
        """Return the stack's template body."""
        try:
            response = self.client.get_template(StackName=stack_name)
            return response.get('TemplateBody')
        except Exception as e:
            self.module.fail_json(msg="Error getting stack template - " + str(e), exception=traceback.format_exc())
    def paginated_response(self, func, result_key, next_token=None):
        '''
        Returns expanded response for paginated operations.
        The 'result_key' is used to define the concatenated results that are combined from each paginated response.
        '''
        args = dict()
        if next_token:
            args['NextToken'] = next_token
        response = func(**args)
        result = response.get(result_key)
        next_token = response.get('NextToken')
        if not next_token:
            return result
        # Recurse until the service stops returning a NextToken.
        return result + self.paginated_response(func, result_key, next_token)
def to_dict(items, key, value):
    ''' Transforms a list of items to a Key/Value dictionary '''
    # None or an empty list maps to an empty dict.
    if not items:
        return dict()
    return dict((entry[key], entry[value]) for entry in items)
def main():
    """Module entry point: collect the requested CloudFormation stack facts
    and publish them under ansible_facts['cloudformation'][<stack_name>]."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        stack_name=dict(required=True, type='str' ),
        all_facts=dict(required=False, default=False, type='bool'),
        stack_policy=dict(required=False, default=False, type='bool'),
        stack_events=dict(required=False, default=False, type='bool'),
        stack_resources=dict(required=False, default=False, type='bool'),
        stack_template=dict(required=False, default=False, type='bool'),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')
    # Describe the stack
    service_mgr = CloudFormationServiceManager(module)
    stack_name = module.params.get('stack_name')
    result = {
        'ansible_facts': { 'cloudformation': { stack_name:{} } }
    }
    facts = result['ansible_facts']['cloudformation'][stack_name]
    facts['stack_description'] = service_mgr.describe_stack(stack_name)
    # Create stack output and stack parameter dictionaries
    if facts['stack_description']:
        facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
        facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), 'ParameterKey', 'ParameterValue')
    # normalize stack description API output
    # NOTE(review): camel_dict_to_snake_dict comes from the wildcard
    # `ansible.module_utils.ec2` import at the bottom of the file.
    facts['stack_description'] = camel_dict_to_snake_dict(facts['stack_description'])
    # camel2snake doesn't handle NotificationARNs properly, so let's fix that
    facts['stack_description']['notification_arns'] = facts['stack_description'].pop('notification_ar_ns', [])
    # Create optional stack outputs
    all_facts = module.params.get('all_facts')
    if all_facts or module.params.get('stack_resources'):
        facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
        facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), 'LogicalResourceId', 'PhysicalResourceId')
    if all_facts or module.params.get('stack_template'):
        facts['stack_template'] = service_mgr.get_template(stack_name)
    if all_facts or module.params.get('stack_policy'):
        facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
    if all_facts or module.params.get('stack_events'):
        facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
    result['changed'] = False
    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
eeshangarg/oh-mainline | vendor/packages/twisted/twisted/protocols/dict.py | 60 | 10727 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Dict client protocol implementation.
@author: Pavel Pergamenshchik
"""
from twisted.protocols import basic
from twisted.internet import defer, protocol
from twisted.python import log
from StringIO import StringIO
def parseParam(line):
    """Chew one dqstring or atom from beginning of line and return (param, remaningline)

    mode 1 parses an unquoted atom terminated by a space; mode 2 parses a
    double-quoted string.  Backslash escapes the next character in both
    modes.  Returns (None, line) on a malformed/truncated parameter.
    """
    if line == '':
        return (None, '')
    elif line[0] != '"': # atom
        mode = 1
    else: # dqstring
        mode = 2
    res = ""
    io = StringIO(line)
    if mode == 2: # skip the opening quote
        io.read(1)
    while 1:
        a = io.read(1)
        if a == '"':
            # NOTE(review): in atom mode a quote also terminates the token
            # here — presumably intentional for this protocol; confirm.
            if mode == 2:
                io.read(1) # skip the separating space
            return (res, io.read())
        elif a == '\\':
            # Escape: read the next char; it falls through to `res += a`.
            a = io.read(1)
            if a == '':
                return (None, line) # unexpected end of string
        elif a == '':
            if mode == 1:
                return (res, io.read())
            else:
                return (None, line) # unexpected end of string
        elif a == ' ':
            # A space ends an atom; inside a dqstring it is literal.
            if mode == 1:
                return (res, io.read())
        res += a
def makeAtom(line):
    """Munch a string into an 'atom'

    Strips control characters and space (ord < 33) plus '"' (34), "'" (39)
    and '\\' (92).
    """
    # FIXME: proper quoting
    # FIX: the old lambda rebuilt map(chr, range(33)+[...]) for EVERY
    # character (O(n*m) plus a list allocation per char); build the
    # forbidden set once instead.
    forbidden = frozenset(chr(i) for i in list(range(33)) + [34, 39, 92])
    return ''.join(c for c in line if c not in forbidden)
def makeWord(s):
    """Backslash-escape control chars, space, '"', "'" and '\\' in s."""
    # FIX: membership tests against a plain list were O(m) per character and
    # `range(33)+[...]` is Python-2-only; use a frozenset built once.
    mustquote = frozenset(list(range(33)) + [34, 39, 92])
    result = []
    for c in s:
        if ord(c) in mustquote:
            result.append("\\")
        result.append(c)
    return "".join(result)
def parseText(line):
    """Decode one line of a dict textual response.

    A lone '.' terminates the body (returns None); a leading '..' is the
    dot-stuffed escape and is collapsed to a single '.'.
    """
    if line == '.':
        return None
    if line.startswith('..'):
        line = line[1:]
    return line
class Definition:
    """A word definition"""
    def __init__(self, name, db, dbdesc, text):
        self.name = name  # the defined word
        self.db = db  # identifier of the database the definition came from
        self.dbdesc = dbdesc  # human-readable description of that database
        self.text = text # list of strings not terminated by newline
class DictClient(basic.LineReceiver):
    """dict (RFC2229) client

    Server replies are dispatched to ``dictCode_<code>_<state>`` methods;
    multiline text bodies are accumulated while ``mode`` is "text".
    """
    data = None # multiline data
    MAX_LENGTH = 1024
    state = None
    mode = None
    result = None
    factory = None
    def __init__(self):
        self.data = None
        self.result = None
    def connectionMade(self):
        self.state = "conn"
        self.mode = "command"
    def sendLine(self, line):
        """Throw up if the line is longer than 1022 characters"""
        if len(line) > self.MAX_LENGTH - 2:
            raise ValueError("DictClient tried to send a too long line")
        basic.LineReceiver.sendLine(self, line)
    def lineReceived(self, line):
        try:
            line = line.decode("UTF-8")
        except UnicodeError: # garbage received, skip
            return
        if self.mode == "text": # we are receiving textual data
            code = "text"
        else:
            if len(line) < 4:
                log.msg("DictClient got invalid line from server -- %s" % line)
                self.protocolError("Invalid line from server")
                # FIX: was self.transport.LoseConnection() — no such method
                # on Twisted transports (AttributeError at runtime).
                self.transport.loseConnection()
                return
            code = int(line[:3])
            line = line[4:]
        method = getattr(self, 'dictCode_%s_%s' % (code, self.state), self.dictCode_default)
        method(line)
    def dictCode_default(self, line):
        """Unknown message"""
        log.msg("DictClient got unexpected message from server -- %s" % line)
        self.protocolError("Unexpected server message")
        self.transport.loseConnection()
    def dictCode_221_ready(self, line):
        """We are about to get kicked off, do nothing"""
        pass
    def dictCode_220_conn(self, line):
        """Greeting message"""
        self.state = "ready"
        self.dictConnected()
    # FIX: the three handlers below were defined without the `line`
    # parameter, but the dispatcher in lineReceived always calls
    # method(line), so they raised TypeError when triggered.
    def dictCode_530_conn(self, line):
        self.protocolError("Access denied")
        self.transport.loseConnection()
    def dictCode_420_conn(self, line):
        self.protocolError("Server temporarily unavailable")
        self.transport.loseConnection()
    def dictCode_421_conn(self, line):
        self.protocolError("Server shutting down at operator request")
        self.transport.loseConnection()
    def sendDefine(self, database, word):
        """Send a dict DEFINE command"""
        assert self.state == "ready", "DictClient.sendDefine called when not in ready state"
        self.result = None  # these two are just in case. In "ready" state, result and data
        self.data = None  # should be None
        self.state = "define"
        command = "DEFINE %s %s" % (makeAtom(database.encode("UTF-8")), makeWord(word.encode("UTF-8")))
        self.sendLine(command)
    def sendMatch(self, database, strategy, word):
        """Send a dict MATCH command"""
        assert self.state == "ready", "DictClient.sendMatch called when not in ready state"
        self.result = None
        self.data = None
        self.state = "match"
        command = "MATCH %s %s %s" % (makeAtom(database), makeAtom(strategy), makeAtom(word))
        self.sendLine(command.encode("UTF-8"))
    def dictCode_550_define(self, line):
        """Invalid database"""
        self.mode = "ready"
        self.defineFailed("Invalid database")
    def dictCode_550_match(self, line):
        """Invalid database"""
        self.mode = "ready"
        self.matchFailed("Invalid database")
    def dictCode_551_match(self, line):
        """Invalid strategy"""
        self.mode = "ready"
        self.matchFailed("Invalid strategy")
    def dictCode_552_define(self, line):
        """No match"""
        self.mode = "ready"
        self.defineFailed("No match")
    def dictCode_552_match(self, line):
        """No match"""
        self.mode = "ready"
        self.matchFailed("No match")
    def dictCode_150_define(self, line):
        """n definitions retrieved"""
        self.result = []
    def dictCode_151_define(self, line):
        """Definition text follows"""
        self.mode = "text"
        (word, line) = parseParam(line)
        (db, line) = parseParam(line)
        (dbdesc, line) = parseParam(line)
        if not (word and db and dbdesc):
            self.protocolError("Invalid server response")
            self.transport.loseConnection()
        else:
            self.result.append(Definition(word, db, dbdesc, []))
            self.data = []
    def dictCode_152_match(self, line):
        """n matches found, text follows"""
        self.mode = "text"
        self.result = []
        self.data = []
    def dictCode_text_define(self, line):
        """A line of definition text received"""
        res = parseText(line)
        if res == None:
            # End of body: attach accumulated lines to the latest Definition.
            self.mode = "command"
            self.result[-1].text = self.data
            self.data = None
        else:
            self.data.append(line)
    def dictCode_text_match(self, line):
        """One line of match text received"""
        def l(s):
            p1, t = parseParam(s)
            p2, t = parseParam(t)
            return (p1, p2)
        res = parseText(line)
        if res == None:
            # End of body: each buffered line is a "<db> <word>" pair.
            self.mode = "command"
            self.result = map(l, self.data)
            self.data = None
        else:
            self.data.append(line)
    def dictCode_250_define(self, line):
        """ok"""
        t = self.result
        self.result = None
        self.state = "ready"
        self.defineDone(t)
    def dictCode_250_match(self, line):
        """ok"""
        t = self.result
        self.result = None
        self.state = "ready"
        self.matchDone(t)
    def protocolError(self, reason):
        """override to catch unexpected dict protocol conditions"""
        pass
    def dictConnected(self):
        """override to be notified when the server is ready to accept commands"""
        pass
    def defineFailed(self, reason):
        """override to catch reasonable failure responses to DEFINE"""
        pass
    def defineDone(self, result):
        """override to catch succesful DEFINE"""
        pass
    def matchFailed(self, reason):
        """override to catch resonable failure responses to MATCH"""
        pass
    def matchDone(self, result):
        """override to catch succesful MATCH"""
        pass
class InvalidResponse(Exception):
    """Raised (via a Deferred errback) on a protocol-violating dict response."""
    pass
class DictLookup(DictClient):
    """Utility class for a single dict transaction. To be used with DictLookupFactory"""
    def protocolError(self, reason):
        if not self.factory.done:
            self.factory.d.errback(InvalidResponse(reason))
            self.factory.clientDone()
    def dictConnected(self):
        # Fire the query the factory was configured with as soon as the
        # server greets us.
        if self.factory.queryType == "define":
            self.sendDefine(*self.factory.param)
        elif self.factory.queryType == "match":
            self.sendMatch(*self.factory.param)
    def _deliver(self, value):
        # Hand the outcome to the factory's Deferred and tear down the
        # connection; shared by all four completion callbacks below.
        self.factory.d.callback(value)
        self.factory.clientDone()
        self.transport.loseConnection()
    def defineFailed(self, reason):
        self._deliver([])
    def defineDone(self, result):
        self._deliver(result)
    def matchFailed(self, reason):
        self._deliver([])
    def matchDone(self, result):
        self._deliver(result)
class DictLookupFactory(protocol.ClientFactory):
    """Utility factory for a single dict transaction"""
    protocol = DictLookup
    done = None

    def __init__(self, queryType, param, d):
        # queryType is either "define" or "match"; param holds the
        # positional arguments for the matching send* call; d is the
        # deferred fired with the transaction outcome.
        self.queryType = queryType
        self.param = param
        self.d = d
        self.done = 0

    def clientDone(self):
        """Called by client when done."""
        self.done = 1
        # Drop the deferred so it cannot be fired a second time.
        del self.d

    def clientConnectionFailed(self, connector, error):
        self.d.errback(error)

    def clientConnectionLost(self, connector, error):
        # Only report a lost connection as an error when the transaction
        # has not already completed normally.
        if not self.done:
            self.d.errback(error)

    def buildProtocol(self, addr):
        p = self.protocol()
        p.factory = self
        return p
def define(host, port, database, word):
    """Look up a word using a dict server"""
    # Returns a Deferred that fires with the DEFINE result (an empty
    # list on a reasonable failure).
    d = defer.Deferred()
    factory = DictLookupFactory("define", (database, word), d)

    # Imported here to avoid installing/importing the reactor at module
    # import time.
    from twisted.internet import reactor
    reactor.connectTCP(host, port, factory)
    return d
def match(host, port, database, strategy, word):
    """Match a word using a dict server"""
    # Returns a Deferred that fires with the MATCH result (an empty
    # list on a reasonable failure).
    d = defer.Deferred()
    factory = DictLookupFactory("match", (database, strategy, word), d)

    # Imported here to avoid installing/importing the reactor at module
    # import time.
    from twisted.internet import reactor
    reactor.connectTCP(host, port, factory)
    return d
| agpl-3.0 |
notriddle/servo | components/script/dom/bindings/codegen/parser/tests/test_replaceable.py | 138 | 1833 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
def should_throw(parser, harness, message, code):
    """Assert that parsing *code* raises, reporting the outcome via harness.ok().

    The parser is reset first so each snippet is parsed from a clean state.
    """
    parser = parser.reset()
    threw = False
    try:
        parser.parse(code)
        parser.finish()
    except Exception:
        # Any parser error counts as the expected failure. A bare
        # ``except:`` would also swallow KeyboardInterrupt/SystemExit,
        # so we deliberately narrow to Exception.
        threw = True

    harness.ok(threw, "Should have thrown: %s" % message)
def WebIDLTest(parser, harness):
    """Verify that every invalid use of [Replaceable] is rejected by the parser."""
    # The [Replaceable] extended attribute MUST take no arguments.
    should_throw(parser, harness, "no arguments", """
        interface I {
          [Replaceable=X] readonly attribute long A;
        };
    """)

    # An attribute with the [Replaceable] extended attribute MUST NOT also be
    # declared with the [PutForwards] extended attribute.
    should_throw(parser, harness, "PutForwards", """
        interface I {
          [PutForwards=B, Replaceable] readonly attribute J A;
        };
        interface J {
          attribute long B;
        };
    """)

    # The [Replaceable] extended attribute MUST NOT be used on an attribute
    # that is not read only.
    should_throw(parser, harness, "writable attribute", """
        interface I {
          [Replaceable] attribute long A;
        };
    """)

    # The [Replaceable] extended attribute MUST NOT be used on a static
    # attribute.
    should_throw(parser, harness, "static attribute", """
        interface I {
          [Replaceable] static readonly attribute long A;
        };
    """)

    # The [Replaceable] extended attribute MUST NOT be used on an attribute
    # declared on a callback interface.
    should_throw(parser, harness, "callback interface", """
        callback interface I {
          [Replaceable] readonly attribute long A;
        };
    """)
| mpl-2.0 |
pombredanne/tlsh | tlsh_bh_tool/tlsh_bh_tool.py | 2 | 8200 | __name__ = "tlsh_bh_tool"
__version__ = 1.0
__author__ = "Jayson Pryde"
import os, sys, tlsh, argparse, threading, pefile, requests
import logging, logging.config, hashlib, ConfigParser, simplejson
from extended_file_properties import extended_file_properties as efp
from os.path import getsize
class TlshStruct:
    # Module-wide shared state used by the scanning threads.
    files = []                # queue of file paths still to be processed
    threads = []              # worker Thread objects
    flag = True               # workers keep looping while True
    out = None                # open handle of the CSV output file (or None)
    lock = None               # guards access to the shared state above
    logger = None             # configured via logging.config in _init()
    outname = None            # path of the CSV output file
    counter = int(0)          # workers that have seen the queue empty
    thread_count = int(0)     # number of scanning threads to spawn
    restrict = None           # 1 == send only sha256/tlsh, 0 == all properties
    query_url = None          # web-service endpoint for match queries
    file_basic_details = {}   # apikey/user plus per-file tlsh/sha256
    file_prop_details = {}    # extended file properties (optional)
    file_cert_details = {}    # certificate details (optional)
def main(options):
    """Entry point: scan a single file, or every file under a directory."""
    if _init(options):
        if os.path.isfile(options.file):
            # Single-file mode runs synchronously in this thread.
            _processFile1(options.file)
        elif os.path.isdir(options.file):
            # Directory mode: queue every file, then scan with workers.
            _enumerateFiles(options.file)
            _initOut()
            _initScanningThreads()
            _startScanningThreads()
            _stopScanningThreads()
            _deinitOut()
def _enumerateFiles(folder_path):
    """Recursively queue every file below *folder_path* for scanning."""
    for dirpath, _dirnames, filenames in os.walk(folder_path):
        for filename in filenames:
            TlshStruct.files.append(os.path.join(dirpath, filename))
def _init(options):
    """Load logging/credentials configuration; return True on success."""
    ret = True
    try:
        TlshStruct.thread_count = int(options.thread_count)
        # Logger layout comes from the .conf file; credentials and the
        # web-service endpoint come from the .cfg file alongside it.
        logging.config.fileConfig('tlsh_bh_tool.conf')
        TlshStruct.logger = logging.getLogger('tlshbh')
        cfg = ConfigParser.ConfigParser()
        cfg.readfp(open("./tlsh_bh_tool.cfg"))
        TlshStruct.file_basic_details["apikey"] = cfg.get('Credentials', 'apikey')
        TlshStruct.file_basic_details["user"] = cfg.get('Credentials', 'user')
        TlshStruct.query_url = cfg.get('Webservice', 'query_url')
        TlshStruct.restrict = int(options.restrict)
        TlshStruct.outname = options.out
    except Exception, ex:
        print "ERROR: Problem during initialization : %s" % ex
        ret = False
    finally:
        return ret
def _initOut():
    """Open the CSV output file for writing (handle kept in TlshStruct.out)."""
    try:
        TlshStruct.out = open(TlshStruct.outname, "w")
    except Exception, ex:
        print "ERROR: Problem initializing output : %s" % ex
def _deinitOut():
    """Close the CSV output file if one was successfully opened."""
    if TlshStruct.out is not None: TlshStruct.out.close()
def _initScanningThreads():
    """Create the lock and the (not yet started) worker threads."""
    try:
        TlshStruct.lock = threading.Lock()
        for i in range(0, TlshStruct.thread_count): TlshStruct.threads.append(threading.Thread(target=_processFile2))
    except Exception, ex:
        print "ERROR: Problem in initializing scanning threads : %s" % ex
def _startScanningThreads():
    """Start every worker thread created by _initScanningThreads()."""
    for thr in TlshStruct.threads: thr.start()
def _stopScanningThreads():
    """Block until every scanning thread has finished."""
    for thr in TlshStruct.threads:
        # is_alive() replaces the camelCase alias isAlive(), which was
        # removed in Python 3.9; is_alive() exists on Python 2.6+ too.
        if thr.is_alive(): thr.join()
def _getSha256(filename):
h = None
sha256 = "NULL"
try:
h = hashlib.sha256()
with open(filename, "rb") as f:
while True:
block = f.read(2**12)
if not block: break
h.update(block)
sha256 = str(h.hexdigest())
except Exception, ex:
TlshStruct.logger.error("Problem in getting SHA1 of %s : %s" % (filename, ex))
sha256 = "ERROR"
finally:
return sha256
def _sendQuery():
    """Send the collected file details to the web service; return parsed JSON.

    Returns an empty dict on any failure or non-200 response.
    """
    result = {}
    # Merge basic, property and certificate details into one parameter set.
    params = dict(TlshStruct.file_basic_details.items() + TlshStruct.file_prop_details.items() + TlshStruct.file_cert_details.items())
    try:
        # NOTE(review): verify=False disables TLS certificate checking —
        # presumably for a self-signed demo endpoint; confirm intended.
        response = requests.get(TlshStruct.query_url, params=params, verify=False)
        if response.status_code == 200: result = simplejson.loads(response.content)
    except Exception, ex:
        TlshStruct.logger.error("Problem in sending query : %s" % ex)
    finally:
        return result
def _resetFileDetails():
    """Clear per-file detail fields before the next file is processed."""
    basic = TlshStruct.file_basic_details
    basic["tlsh"] = ""
    basic["sha256"] = ""
    TlshStruct.file_prop_details = {}
    TlshStruct.file_cert_details = {}
def _recordResults(result):
    """Append any service matches for the current file to the CSV output."""
    if result is None: return

    if result["status"] == "ok":
        if len(result["matches"]) != 0:
            if TlshStruct.out is None:
                # Single-file mode: the shared handle was never opened,
                # so open/close the output file for this one write.
                try:
                    f = open(TlshStruct.outname, "w")
                    for r in result["matches"]:
                        f.write("%s,%s,%s,%s,%s\n" % (TlshStruct.file_basic_details["sha256"], TlshStruct.file_basic_details["tlsh"],
                                                      r["id"], r["tag"], r["distance_score"]))
                    f.close()
                except Exception, ex:
                    TlshStruct.logger.error("Cannot write file %s : %s" % (TlshStruct.outname, ex))
            else:
                # Directory mode: append to the already-open shared handle.
                for r in result["matches"]:
                    TlshStruct.out.write("%s,%s,%s,%s,%s\n" % (TlshStruct.file_basic_details["sha256"], TlshStruct.file_basic_details["tlsh"],
                                                               r["id"], r["tag"], r["distance_score"]))
def _processFile1(filename):
print "Processing %s..." % filename
_resetFileDetails()
if getsize(filename) <= 512: TlshStruct.logger.error("File %s too small to compute tlsh value")
else:
result = None
try:
TlshStruct.file_basic_details["filename"] = filename
TlshStruct.file_basic_details["tlsh"] = tlsh.hash(open(filename, "rb").read())
TlshStruct.file_basic_details["sha256"] = _getSha256(filename)
if not TlshStruct.restrict:
prop_details = efp.getBasicFileProperties(filename)
cert_details = efp.getCertificateDetails(filename)
TlshStruct.file_prop_details = prop_details if prop_details is not None else {}
TlshStruct.file_cert_details = cert_details if cert_details is not None else {}
result = _sendQuery()
except Exception, ex:
print "ERROR: Problem in getting tlsh value of %s : %s" % (filename, ex)
tlsh_val = "error"
finally:
_recordResults(result)
def _processFile2():
    """Worker-thread loop: pop queued files and process them one at a time."""
    while TlshStruct.flag:
        TlshStruct.lock.acquire()
        try:
            # pop() raises IndexError once the queue is empty, which is
            # used below as the termination signal.
            filename = TlshStruct.files.pop()
            _processFile1(filename)
        except Exception, ex:
            # NOTE(review): once every worker has hit the empty queue,
            # flag is cleared so all loops exit; counter tracks how many
            # workers have reached this point — confirm the off-by-one
            # against thread_count is intended.
            if TlshStruct.counter == TlshStruct.thread_count:
                TlshStruct.flag = False
                TlshStruct.counter = 0
            else: TlshStruct.counter+=1
        finally:
            TlshStruct.lock.release()
def _showBanner():
    """Clear the terminal and print the tool banner."""
    if os.name == "nt": os.system("cls")
    elif os.name == "posix": os.system("clear")
    print "********************************"
    print "* TLSH BlackHat Tool v1.0 *"
    print "* Demo Version *"
    print "********************************"
# The module explicitly sets __name__ = "tlsh_bh_tool" at the top, so this
# guard fires when the script is executed directly (instead of the usual
# "__main__" comparison).
if __name__ == "tlsh_bh_tool":
    _showBanner()
    parser = argparse.ArgumentParser()
    parser.add_argument("file", help="directory containing files | file")
    parser.add_argument("-out", default="matches.csv", help="CSV file containing query results. Default is matches.csv")
    parser.add_argument("-restrict", dest="restrict", default=0, help="0 == send all file properties | 1 == send only basic information (i.e. sha256 and tlsh).")
    parser.add_argument("-tc", dest="thread_count", default=3, help="scanning thread count. Default is 3")
    options = parser.parse_args()
    main(options)
| apache-2.0 |
tedelhourani/ansible | lib/ansible/modules/network/radware/vdirect_file.py | 22 | 8840 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Radware LTD.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
module: vdirect_file
author: Evgeny Fedoruk @ Radware LTD (@evgenyfedoruk)
short_description: Uploads a new or updates an existing runnable file into Radware vDirect server
description:
- Uploads a new or updates an existing configuration template or workflow template into the Radware vDirect server.
All parameters may be set as environment variables.
notes:
- Requires the Radware vdirect-client Python package on the host. This is as easy as
C(pip install vdirect-client)
version_added: "2.4"
options:
vdirect_ip:
description:
- Primary vDirect server IP address, may be set as VDIRECT_IP environment variable.
required: true
vdirect_user:
description:
- vDirect server username, may be set as VDIRECT_USER environment variable.
required: true
default: None
vdirect_password:
description:
- vDirect server password, may be set as VDIRECT_PASSWORD environment variable.
required: true
default: None
vdirect_secondary_ip:
description:
- Secondary vDirect server IP address, may be set as VDIRECT_SECONDARY_IP environment variable.
required: false
default: None
vdirect_wait:
description:
- Wait for async operation to complete, may be set as VDIRECT_WAIT environment variable.
required: false
type: bool
default: 'yes'
vdirect_https_port:
description:
- vDirect server HTTPS port number, may be set as VDIRECT_HTTPS_PORT environment variable.
required: false
default: 2189
vdirect_http_port:
description:
- vDirect server HTTP port number, may be set as VDIRECT_HTTP_PORT environment variable.
required: false
default: 2188
vdirect_timeout:
description:
- Amount of time to wait for async operation completion [seconds],
- may be set as VDIRECT_TIMEOUT environment variable.
required: false
default: 60
vdirect_use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection,
- may be set as VDIRECT_HTTPS or VDIRECT_USE_SSL environment variable.
required: false
type: bool
default: 'yes'
vdirect_validate_certs:
description:
- If C(no), SSL certificates will not be validated,
- may be set as VDIRECT_VALIDATE_CERTS or VDIRECT_VERIFY environment variable.
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
required: false
type: bool
default: 'yes'
file_name:
description:
- vDirect runnable file name to be uploaded.
- May be velocity configuration template (.vm) or workflow template zip file (.zip).
required: true
requirements:
- "vdirect-client >= 4.1.1"
'''
EXAMPLES = '''
- name: vdirect_file
vdirect_file:
vdirect_primary_ip: 10.10.10.10
vdirect_user: vDirect
vdirect_password: radware
file_name: /tmp/get_vlans.vm
'''
RETURN = '''
result:
description: Message detailing upload result
returned: success
type: string
sample: "Workflow template created"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
import os
import os.path
try:
from vdirect_client import rest_client
HAS_REST_CLIENT = True
except ImportError:
HAS_REST_CLIENT = False
TEMPLATE_EXTENSION = '.vm'
WORKFLOW_EXTENSION = '.zip'
WRONG_EXTENSION_ERROR = 'The file_name parameter must have ' \
'velocity script (.vm) extension or ZIP archive (.zip) extension'
CONFIGURATION_TEMPLATE_CREATED_SUCCESS = 'Configuration template created'
CONFIGURATION_TEMPLATE_UPDATED_SUCCESS = 'Configuration template updated'
WORKFLOW_TEMPLATE_CREATED_SUCCESS = 'Workflow template created'
WORKFLOW_TEMPLATE_UPDATED_SUCCESS = 'Workflow template updated'
meta_args = dict(
vdirect_ip=dict(
required=True, fallback=(env_fallback, ['VDIRECT_IP']),
default=None),
vdirect_user=dict(
required=True, fallback=(env_fallback, ['VDIRECT_USER']),
default=None),
vdirect_password=dict(
required=True, fallback=(env_fallback, ['VDIRECT_PASSWORD']),
default=None, no_log=True, type='str'),
vdirect_secondary_ip=dict(
required=False, fallback=(env_fallback, ['VDIRECT_SECONDARY_IP']),
default=None),
vdirect_use_ssl=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTPS', 'VDIRECT_USE_SSL']),
default=True, type='bool'),
vdirect_wait=dict(
required=False, fallback=(env_fallback, ['VDIRECT_WAIT']),
default=True, type='bool'),
vdirect_timeout=dict(
required=False, fallback=(env_fallback, ['VDIRECT_TIMEOUT']),
default=60, type='int'),
vdirect_validate_certs=dict(
required=False, fallback=(env_fallback, ['VDIRECT_VERIFY', 'VDIRECT_VALIDATE_CERTS']),
default=True, type='bool'),
vdirect_https_port=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTPS_PORT']),
default=2189, type='int'),
vdirect_http_port=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTP_PORT']),
default=2188, type='int'),
file_name=dict(required=True, default=None)
)
class VdirectFile(object):
    """Uploads configuration (.vm) or workflow (.zip) templates to vDirect."""

    def __init__(self, params):
        # All connection parameters come from the Ansible module arguments.
        self.client = rest_client.RestClient(params['vdirect_ip'],
                                             params['vdirect_user'],
                                             params['vdirect_password'],
                                             wait=params['vdirect_wait'],
                                             secondary_vdirect_ip=params['vdirect_secondary_ip'],
                                             https_port=params['vdirect_https_port'],
                                             http_port=params['vdirect_http_port'],
                                             timeout=params['vdirect_timeout'],
                                             https=params['vdirect_use_ssl'],
                                             verify=params['vdirect_validate_certs'])

    def upload(self, fqn):
        """Create or update the runnable file *fqn* on the server.

        Returns one of the module's *_SUCCESS messages, or
        WRONG_EXTENSION_ERROR for an unsupported file extension.
        """
        if fqn.endswith(TEMPLATE_EXTENSION):
            template_name = os.path.basename(fqn)
            template = rest_client.Template(self.client)
            # 'with' closes the handle promptly; the original leaked it.
            with open(fqn, 'r') as runnable_file:
                file_content = runnable_file.read()
            result = template.create_from_source(file_content, template_name, fail_if_invalid=True)
            if result[rest_client.RESP_STATUS] == 409:
                # 409 Conflict: the template already exists, so update it.
                template.upload_source(file_content, template_name, fail_if_invalid=True)
                result = CONFIGURATION_TEMPLATE_UPDATED_SUCCESS
            else:
                result = CONFIGURATION_TEMPLATE_CREATED_SUCCESS
        elif fqn.endswith(WORKFLOW_EXTENSION):
            workflow = rest_client.WorkflowTemplate(self.client)
            with open(fqn, 'rb') as runnable_file:
                file_content = runnable_file.read()
            result = workflow.create_template_from_archive(file_content, fail_if_invalid=True)
            if result[rest_client.RESP_STATUS] == 409:
                # 409 Conflict: the workflow already exists, so update it.
                workflow.update_archive(file_content, os.path.splitext(os.path.basename(fqn))[0])
                result = WORKFLOW_TEMPLATE_UPDATED_SUCCESS
            else:
                result = WORKFLOW_TEMPLATE_CREATED_SUCCESS
        else:
            result = WRONG_EXTENSION_ERROR
        return result
def main():
    """Ansible module entry point: upload the given file and report results."""
    if not HAS_REST_CLIENT:
        raise ImportError("The python vdirect-client module is required")

    module = AnsibleModule(argument_spec=meta_args)

    try:
        vdirect_file = VdirectFile(module.params)
        result = vdirect_file.upload(module.params['file_name'])
        result = dict(result=result)
        module.exit_json(**result)
    except Exception as e:
        # Surface any failure through Ansible's standard failure path.
        module.fail_json(msg=str(e))

if __name__ == '__main__':
    main()
| gpl-3.0 |
sunqm/pyscf | examples/grad/10-excited_state_cisd_grad.py | 2 | 1556 | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Analytical nuclear gradients of CISD excited state.
'''
from pyscf import gto
from pyscf import scf
from pyscf import ci
# Water molecule in a 6-31G basis (coordinates in Angstrom by default).
mol = gto.M(
    atom = [
        ["O" , (0. , 0.     , 0.)],
        [1   , (0. , -0.757 , 0.587)],
        [1   , (0. , 0.757  , 0.587)]],
    basis = '631g'
)

# RHF reference followed by CISD solving for 4 roots
# (the ground state plus 3 excited states).
mf = scf.RHF(mol).run()
myci = ci.CISD(mf)
myci.nroots = 4
myci.run()
# PySCF-1.6.1 and newer supports the .Gradients method to create a grad
# object after grad module was imported. It is equivalent to call the
# .nuc_grad_method method.
from pyscf import grad
g = myci.Gradients().kernel(state=3)
print('Gradients of the 3rd excited state')
print(g)
# An equivalent way to specify the exicited state is to directly input the
# excited state wavefunction
g = myci.nuc_grad_method().kernel(myci.ci[3])
print('Gradients of the 3rd excited state')
print(g)
#
# Use gradients scanner.
#
# Note the returned gradients are based on atomic unit.
#
g_scanner = myci.nuc_grad_method().as_scanner(state=3)
e, g = g_scanner(mol)
print('Gradients of the 3rd excited state')
print(g)
#
# Specify state ID for the gradients of another state.
#
# Unless explicitly specified as an input argument of set_geom_ function,
# set_geom_ function will use the same unit as the one specified in mol.unit.
mol.set_geom_('''O 0. 0. 0.1
H 0. -0.757 0.587
H 0. 0.757 0.587''')
e, g = g_scanner(mol, state=2)
print('Gradients of the 2nd excited state')
print(g)
| apache-2.0 |
ioam/lancet | lancet/core.py | 1 | 39453 | #
# Lancet core
#
import os, itertools, copy
import re, glob, string
import json
import param
try:
import numpy as np
np_ftypes = np.sctypes['float']
except:
np, np_ftypes = None, []
try: from pandas import DataFrame
except: DataFrame = None # pyflakes:ignore (try/except import)
try: from holoviews import Table
except: Table = None # pyflakes:ignore (try/except import)
from collections import defaultdict, OrderedDict
float_types = [float] + np_ftypes
def identityfn(x): return x
def fp_repr(x): return str(x) if (type(x) in float_types) else repr(x)
def set_fp_precision(value):
    """
    Function to set the floating precision across lancet.
    """
    # Changes the class-level default, affecting all subsequently created
    # Arguments instances.
    Arguments.set_default('fp_precision', value)
def to_table(args, vdims=[]):
    """Convert an Args object to a HoloViews Table.

    NOTE(review): the mutable default ``vdims=[]`` is only safe because
    it is never mutated here.
    """
    if not Table:
        return "HoloViews Table not available"
    # Key dimensions are every constant/varying key not claimed as a
    # value dimension.
    kdims = [dim for dim in args.constant_keys + args.varying_keys
             if dim not in vdims]
    items = [tuple([spec[k] for k in kdims+vdims])
             for spec in args.specs]
    return Table(items, kdims=kdims, vdims=vdims)
#=====================#
# Argument Specifiers #
#=====================#
class PrettyPrinted(object):
    """
    A mixin class for generating pretty-printed representations.
    """

    def pprint_args(self, pos_args, keyword_args, infix_operator=None, extra_params={}):
        """
        Method to define the positional arguments and keyword order
        for pretty printing.
        """
        # Infix rendering (e.g. "a * b") only makes sense for exactly two
        # positional operands and no keywords.
        if infix_operator and not (len(pos_args)==2 and keyword_args==[]):
            raise Exception('Infix format requires exactly two'
                            ' positional arguments and no keywords')
        (kwargs,_,_,_) = self._pprint_args
        self._pprint_args = (keyword_args + kwargs, pos_args, infix_operator, extra_params)

    def _pprint(self, cycle=False, flat=False, annotate=False, onlychanged=True, level=1, tab = ' '):
        """
        Pretty printer that prints only the modified keywords and
        generates flat representations (for repr) and optionally
        annotates the top of the repr with a comment.
        """
        (kwargs, pos_args, infix_operator, extra_params) = self._pprint_args
        # In flat mode there are no newlines or indentation.
        (br, indent) = ('' if flat else '\n', '' if flat else tab * level)
        # Nested PrettyPrinted values recurse one indentation level deeper.
        prettify = lambda x: isinstance(x, PrettyPrinted) and not flat
        pretty = lambda x: x._pprint(flat=flat, level=level+1) if prettify(x) else repr(x)

        params = dict(self.get_param_values())
        show_lexsort = getattr(self, '_lexorder', None) is not None
        modified = [k for (k,v) in self.get_param_values(onlychanged=onlychanged)]
        pkwargs = [(k, params[k]) for k in kwargs if (k in modified)] + list(extra_params.items())
        arg_list = [(k,params[k]) for k in pos_args] + pkwargs

        lines = []
        if annotate:  # Optional annotating comment
            len_ckeys, len_vkeys = len(self.constant_keys), len(self.varying_keys)
            info_triple = (len(self),
                           ', %d constant key(s)' % len_ckeys if len_ckeys else '',
                           ', %d varying key(s)' % len_vkeys if len_vkeys else '')
            annotation = '# == %d items%s%s ==\n' % info_triple
            lines = [annotation]

        if show_lexsort: lines.append('(')
        if cycle:
            # Recursive reference detected by the pretty-print machinery.
            lines.append('%s(...)' % self.__class__.__name__)
        elif infix_operator:
            level = level - 1
            triple = (pretty(params[pos_args[0]]), infix_operator, pretty(params[pos_args[1]]))
            lines.append('%s %s %s' % triple)
        else:
            lines.append('%s(' % self.__class__.__name__)
            for (k,v) in arg_list:
                lines.append('%s%s=%s' % (br+indent, k, pretty(v)))
                lines.append(',')
            lines = lines[:-1] +[br+(tab*(level-1))+')'] # Remove trailing comma

        if show_lexsort:
            lines.append(').lexsort(%s)' % ', '.join(repr(el) for el in self._lexorder))

        return ''.join(lines)

    def __repr__(self):
        return self._pprint(flat=True, onlychanged=False)

    def __str__(self):
        return self._pprint()
class Arguments(PrettyPrinted, param.Parameterized):
    """
    The abstract, base class that defines the core interface and
    methods for all members of the Arguments family of classes,
    including either the simple, static members of Args below, or the
    sophisticated parameter exploration algorithms subclassing from
    DynamicArgs defined in dynamic.py.

    The Args subclass may be used directly and forms the root of one
    family of classes that have statically defined or precomputed
    argument sets (defined below). The second subfamily are the
    DynamicArgs, designed to allow more sophisticated, online
    parameter space exploration techniques such as hill climbing,
    bisection search, genetic algorithms and so on.
    """

    fp_precision = param.Integer(default=4, constant=True, doc='''
        The floating point precision to use for floating point values.
        Unlike other basic Python types, floats need care with their
        representation as you only want to display up to the precision
        actually specified. A floating point precision of 0 casts
        number to integers before representing them.''')

    def __init__(self, **params):
        self._pprint_args = ([],[],None,{})
        self.pprint_args([],['fp_precision', 'dynamic'])
        super(Arguments,self).__init__(**params)
        # Some types cannot be sorted easily (e.g. numpy arrays)
        self.unsortable_keys = []

    def __iter__(self): return self

    def __contains__(self, value):
        # A key is "in" the specifier if it is either constant or varying.
        return value in (self.constant_keys + self.varying_keys)

    @classmethod
    def spec_formatter(cls, spec):
        " Formats the elements of an argument set appropriately"
        # Preserves the mapping type of spec while stringifying values.
        return type(spec)((k, str(v)) for (k,v) in spec.items())

    @property
    def constant_keys(self):
        """
        Returns the list of parameter names whose values are constant
        as the argument specifier is iterated. Note that the union of
        constant and varying_keys should partition the entire set of
        keys in the case where there are no unsortable keys.
        """
        raise NotImplementedError

    @property
    def constant_items(self):
        """
        Returns the set of constant items as a list of tuples. This
        allows easy conversion to dictionary format. Note, the items
        should be supplied in the same key ordering as for
        constant_keys for consistency.
        """
        raise NotImplementedError

    @property
    def varying_keys(self):
        """
        Returns the list of parameters whose values vary as the
        argument specifier is iterated. Whenever it is possible, keys
        should be sorted from those slowest to faster varying and
        sorted alphanumerically within groups that vary at the same
        rate.
        """
        raise NotImplementedError

    def round_floats(self, specs, fp_precision):
        # Round every float in every spec to fp_precision digits; a
        # precision of 0 converts floats to ints instead. NumPy floats
        # are routed through np.round (np_ftypes is empty without numpy).
        _round_float = lambda v, fp: np.round(v, fp) if (type(v) in np_ftypes) else round(v, fp)
        _round = (lambda v, fp: int(v)) if fp_precision==0 else _round_float
        return (dict((k, _round(v, fp_precision) if (type(v) in float_types) else v)
                     for (k,v) in spec.items()) for spec in specs)

    def __next__(self):
        """
        Called to get a list of specifications: dictionaries with
        parameter name keys and string values.
        """
        raise StopIteration

    next = __next__  # Python 2 iterator protocol alias

    def copy(self):
        """
        Convenience method to avoid using the specifier without
        exhausting it.
        """
        return copy.copy(self)

    def _collect_by_key(self,specs):
        """
        Returns a dictionary like object with the lists of values
        collapsed by their respective key. Useful to find varying vs
        constant keys and to find how fast keys vary.
        """
        # Collect (key, value) tuples as list of lists, flatten with chain
        allkeys = itertools.chain.from_iterable(
            [[(k, run[k]) for k in run] for run in specs])
        collection = defaultdict(list)
        for (k,v) in allkeys: collection[k].append(v)
        return collection

    def _operator(self, operator, other):
        # Implements the algebra with Identity: Identity op x == x,
        # x op Identity == x, Identity op Identity == Identity.
        identities = [isinstance(el, Identity) for el in [self, other]]
        if not any(identities): return operator(self,other)
        if all(identities): return Identity()
        elif identities[1]: return self
        else: return other

    def __add__(self, other):
        """
        Concatenates two argument specifiers.
        """
        return self._operator(Concatenate, other)

    def __mul__(self, other):
        """
        Takes the Cartesian product of two argument specifiers.
        """
        return self._operator(CartesianProduct, other)

    def _cartesian_product(self, first_specs, second_specs):
        """
        Takes the Cartesian product of the specifications. Result will
        contain N specifications where N = len(first_specs) *
        len(second_specs) and keys are merged.
        Example: [{'a':1},{'b':2}] * [{'c':3},{'d':4}] =
        [{'a':1,'c':3},{'a':1,'d':4},{'b':2,'c':3},{'b':2,'d':4}]
        """
        return [ dict(zip(
                     list(s1.keys()) + list(s2.keys()),
                     list(s1.values()) + list(s2.values())
                 ))
                 for s1 in first_specs for s2 in second_specs ]

    def summary(self):
        """
        A succinct summary of the argument specifier. Unlike the repr,
        a summary does not have to be complete but must supply the
        most relevant information about the object to the user.
        """
        print("Items: %s" % len(self))
        varying_keys = ', '.join('%r' % k for k in self.varying_keys)
        print("Varying Keys: %s" % varying_keys)
        items = ', '.join(['%s=%r' % (k,v)
                           for (k,v) in self.constant_items])
        if self.constant_items:
            print("Constant Items: %s" % items)
class Identity(Arguments):
    """
    The identity element for every Arguments object under both the *
    operator (CartesianProduct) and the + operator (Concatenate):

    args is (Identity() * args)
    args is (args * Identity())
    args is (Identity() + args)
    args is (args + Identity())

    The empty Args() object can also act as the identity for addition.
    Identity never holds arguments itself, so its truth value is
    deliberately undefined.
    """

    fp_precision = param.Integer(default=None, allow_None=True,
                                 precedence=(-1), constant=True, doc='''
        fp_precision is disabled as Identity() never contains any
        arguments.''')

    def __eq__(self, other):
        # All Identity instances are interchangeable.
        return isinstance(other, Identity)

    def __repr__(self):
        return "Identity()"

    def __str__(self):
        return repr(self)

    def __nonzero__(self):
        # Python 2 truth-value hook.
        raise ValueError("The boolean value of Identity is undefined")

    def __bool__(self):
        # Python 3 truth-value hook.
        raise ValueError("The boolean value of Identity is undefined")
class Args(Arguments):
"""
An Arguments class that supports statically specified or
precomputed argument sets. It may be used directly to specify
argument values but also forms the base class for a family of more
specific static Argument classes. Each subclass is less flexible
and general but allows arguments to be easily and succinctly
specified. For instance, the Range subclass allows parameter
ranges to be easily declared.
The constructor of Args accepts argument definitions in two
different formats. The keyword format allows constant arguments to
be specified directly and easily. For instance:
>>> v1 = Args(a=2, b=3)
>>> v1
Args(fp_precision=4,a=2,b=3)
The alternative input format takes an explicit list of the
argument specifications:
>>> v2 = Args([{'a':2, 'b':3}]) # Equivalent behaviour to above
>>> v1.specs == v2.specs
True
This latter format is completely flexible and general, allowing
any arbitrary list of arguments to be specified as desired. This
is not generally recommended however as the structure of a
parameter space is often expressed more clearly by composing
together simpler, more succinct Args objects with the
CartesianProduct (*) or Concatenation (+) operators.
"""
specs = param.List(default=[], constant=True, doc='''
The static list of specifications (ie. dictionaries) to be
returned by the specifier. Float values are rounded according
to fp_precision.''')
def __init__(self, specs=None, fp_precision=None, **params):
if fp_precision is None: fp_precision = Arguments.fp_precision
raw_specs, params, explicit = self._build_specs(specs, params, fp_precision)
super(Args, self).__init__(fp_precision=fp_precision, specs=raw_specs, **params)
self._lexorder = None
if explicit:
self.pprint_args(['specs'],[])
else: # Present in kwarg format
self.pprint_args([], self.constant_keys, None,
OrderedDict(sorted(self.constant_items)))
def _build_specs(self, specs, kwargs, fp_precision):
"""
Returns the specs, the remaining kwargs and whether or not the
constructor was called with kwarg or explicit specs.
"""
if specs is None:
overrides = param.ParamOverrides(self, kwargs,
allow_extra_keywords=True)
extra_kwargs = overrides.extra_keywords()
kwargs = dict([(k,v) for (k,v) in kwargs.items()
if k not in extra_kwargs])
rounded_specs = list(self.round_floats([extra_kwargs],
fp_precision))
if extra_kwargs=={}: return [], kwargs, True
else: return rounded_specs, kwargs, False
return list(self.round_floats(specs, fp_precision)), kwargs, True
def __iter__(self):
self._exhausted = False
return self
def __next__(self):
if self._exhausted:
raise StopIteration
else:
self._exhausted=True
return self.specs
next = __next__
def _unique(self, sequence, idfun=repr):
"""
Note: repr() must be implemented properly on all objects. This
is implicitly assumed by Lancet when Python objects need to be
formatted to string representation.
"""
seen = {}
return [seen.setdefault(idfun(e),e) for e in sequence
if idfun(e) not in seen]
def show(self, exclude=[]):
"""
Convenience method to inspect the available argument values in
human-readable format. The ordering of keys is determined by
how quickly they vary.
The exclude list allows specific keys to be excluded for
readability (e.g. to hide long, absolute filenames).
"""
ordering = self.constant_keys + self.varying_keys
spec_lines = [', '.join(['%s=%s' % (k, s[k]) for k in ordering
if (k in s) and (k not in exclude)])
for s in self.specs]
print('\n'.join(['%d: %s' % (i,l) for (i,l) in enumerate(spec_lines)]))
def lexsort(self, *order):
"""
The lexical sort order is specified by a list of string
arguments. Each string is a key name prefixed by '+' or '-'
for ascending and descending sort respectively. If the key is
not found in the operand's set of varying keys, it is ignored.
"""
if order == []:
raise Exception("Please specify the keys for sorting, use"
"'+' prefix for ascending,"
"'-' for descending.)")
if not set(el[1:] for el in order).issubset(set(self.varying_keys)):
raise Exception("Key(s) specified not in the set of varying keys.")
sorted_args = copy.deepcopy(self)
specs_param = sorted_args.params('specs')
specs_param.constant = False
sorted_args.specs = self._lexsorted_specs(order)
specs_param.constant = True
sorted_args._lexorder = order
return sorted_args
def _lexsorted_specs(self, order):
"""
A lexsort is specified using normal key string prefixed by '+'
(for ascending) or '-' for (for descending).
Note that in Python 2, if a key is missing, None is returned
(smallest Python value). In Python 3, an Exception will be
raised regarding comparison of heterogenous types.
"""
specs = self.specs[:]
if not all(el[0] in ['+', '-'] for el in order):
raise Exception("Please specify the keys for sorting, use"
"'+' prefix for ascending,"
"'-' for descending.)")
sort_cycles = [(el[1:], True if el[0]=='+' else False)
for el in reversed(order)
if el[1:] in self.varying_keys]
for (key, ascending) in sort_cycles:
specs = sorted(specs, key=lambda s: s.get(key, None),
reverse=(not ascending))
return specs
@property
def constant_keys(self):
collection = self._collect_by_key(self.specs)
return [k for k in sorted(collection) if
(len(self._unique(collection[k])) == 1)]
@property
def constant_items(self):
collection = self._collect_by_key(self.specs)
return [(k,collection[k][0]) for k in self.constant_keys]
    @property
    def varying_keys(self):
        """
        Keys whose values differ across the specs, ordered from slowest-
        to fastest-varying (ties broken alphanumerically), with any
        unsortable keys appended, sorted, at the end.
        """
        collection = self._collect_by_key(self.specs)
        constant_set = set(self.constant_keys)
        unordered_varying = set(collection.keys()).difference(constant_set)
        # Finding out how fast keys are varying: count the runs of equal
        # consecutive values per key (more runs == varies faster).
        grouplens = [(len([len(list(y)) for (_,y)
                           in itertools.groupby(collection[k])]),k)
                     for k in collection
                     if (k not in self.unsortable_keys)]
        # Keep only genuinely varying keys, ordered by ascending run count.
        varying_counts = [(n,k) for (n,k) in sorted(grouplens) if (k in unordered_varying)]
        # Grouping keys with common frequency alphanumerically (desired behaviour).
        ddict = defaultdict(list)
        for (n,k) in varying_counts: ddict[n].append(k)
        alphagroups = [sorted(ddict[k]) for k in sorted(ddict)]
        return [el for group in alphagroups for el in group] + sorted(self.unsortable_keys)
@property
def dframe(self):
return DataFrame(self.specs) if DataFrame else "Pandas not available"
    @property
    def table(self):
        # Render this specifier as a table via the module-level to_table helper.
        return to_table(self)
def __len__(self): return len(self.specs)
class Concatenate(Args):
    """
    The sequential composition of two specifiers: the specifier created
    by (first + second) generates the arguments in first followed by
    the arguments in second.
    """
    first = param.ClassSelector(default=None, class_=Args, allow_None=True, constant=True, doc='''
       The first specifier in the concatenation.''')
    second = param.ClassSelector(default=None, class_=Args, allow_None=True, constant=True, doc='''
       The second specifier in the concatenation.''')
    def __init__(self, first, second):
        # The combined specifier adopts the finer of the two precisions.
        precision = max(first.fp_precision, second.fp_precision)
        combined = first.specs + second.specs
        super(Concatenate, self).__init__(combined, fp_precision=precision,
                                          first=first, second=second)
        self.pprint_args(['first', 'second'],[], infix_operator='+')
class CartesianProduct(Args):
    """
    The Cartesian product of two specifiers: (first * second) generates
    every combination of an argument set from first with one from
    second, so len(first * second) == len(first) * len(second).
    """
    first = param.ClassSelector(default=None, class_=Args, allow_None=True,
             constant=True, doc='''The first specifier in the Cartesian product.''')
    second = param.ClassSelector(default=None, class_=Args, allow_None=True,
             constant=True, doc='''The second specifier in the Cartesian product.''')
    def __init__(self, first, second):
        precision = max(first.fp_precision, second.fp_precision)
        combined = self._cartesian_product(first.specs, second.specs)
        # The two operands may not share any keys, constant or varying.
        overlap = (set(first.varying_keys + first.constant_keys)
                   &  set(second.varying_keys + second.constant_keys))
        assert overlap == set(), ('Sets of keys cannot overlap'
                                  'between argument specifiers'
                                  'in cartesian product.')
        super(CartesianProduct, self).__init__(combined, fp_precision=precision,
                                               first=first, second=second)
        self.pprint_args(['first', 'second'],[], infix_operator='*')
class Range(Args):
    """
    Range generates an argument from a numerically interpolated range
    which is linear by default. An optional function can be specified
    to sample a numeric range with regular intervals.
    """
    key = param.String(default='', constant=True, doc='''
         The key assigned to the values computed over the numeric range.''')
    start_value = param.Number(default=None, allow_None=True, constant=True, doc='''
         The starting numeric value of the range.''')
    end_value = param.Number(default=None, allow_None=True, constant=True, doc='''
         The ending numeric value of the range (inclusive).''')
    steps = param.Integer(default=2, constant=True, bounds=(1,None), doc='''
         The number of steps to interpolate over. Default is 2 which
         returns the start and end values without interpolation.''')
    mapfn = param.Callable(default=identityfn, constant=True, doc='''
         The function to be mapped across the linear range. The
         identity function is used by by default''')
    def __init__(self, key, start_value, end_value, steps=2, mapfn=identityfn, **params):
        samples = self.linspace(start_value, end_value, steps)
        specs = [{key: mapfn(sample)} for sample in samples]
        super(Range, self).__init__(specs, key=key, start_value=start_value,
                                    end_value=end_value, steps=steps,
                                    mapfn=mapfn, **params)
        self.pprint_args(['key', 'start_value'], ['end_value', 'steps'])
    def linspace(self, start, stop, n):
        """ Simple replacement for numpy linspace"""
        if n == 1:
            return [start]
        nm1 = n - 1
        nm1inv = 1.0 / nm1
        # Interpolate inclusively from start to stop in n evenly-spaced steps.
        return [nm1inv * (start*(nm1 - i) + stop*i) for i in range(n)]
class List(Args):
    """
    An argument specifier that takes its values from a given list.
    """
    values = param.List(default=[], constant=True, doc='''
         The list values that are to be returned by the specifier''')
    key = param.String(default='default', constant=True, doc='''
         The key assigned to the elements of the supplied list.''')
    def __init__(self, key, values, **params):
        # One spec per list element, all under the same key.
        specs = [{key: el} for el in values]
        super(List, self).__init__(specs, key=key, values=values, **params)
        self.pprint_args(['key', 'values'], [])
class Log(Args):
    """
    Specifier that loads arguments from a log file in task id (tid)
    order. This wrapper class allows a concise representation of file
    logs with the option of adding the task id to the loaded
    specifications.
    For full control over the arguments, you can use this class to
    create a fully specified Args object as follows:
    Args(Log.extract_log(<log_file>).values()),
    """
    log_path = param.String(default=None, allow_None=True, constant=True, doc='''
       The relative or absolute path to the log file. If a relative
       path is given, the absolute path is computed relative to
       os.getcwd().''')
    tid_key = param.String(default='tid', constant=True, allow_None=True, doc='''
       If not None, the key given to the tid values included in the
       loaded specifications. If None, the tid number is ignored.''')
    @staticmethod
    def extract_log(log_path, dict_type=dict):
        """
        Parses the log file generated by a launcher and returns
        dictionary with tid keys and specification values.
        Ordering can be maintained by setting dict_type to the
        appropriate constructor (i.e. OrderedDict). Keys are converted
        from unicode to strings for kwarg use.
        """
        log_path = (log_path if os.path.isfile(log_path)
                    else os.path.join(os.getcwd(), log_path))
        with open(log_path,'r') as log:
            splits = (line.split() for line in log)
            # Each line has the form '<tid> <json-encoded spec>'.
            uzipped = ((int(split[0]), json.loads(" ".join(split[1:]))) for split in splits)
            szipped = [(i, dict((str(k),v) for (k,v) in d.items())) for (i,d) in uzipped]
        return dict_type(szipped)
    @staticmethod
    def write_log(log_path, data, allow_append=True):
        """
        Writes the supplied specifications to the log path. The data
        may be supplied as either as a an Args or as a list of
        dictionaries.
        By default, specifications will be appropriately appended to
        an existing log file. This can be disabled by setting
        allow_append to False.
        """
        append = os.path.isfile(log_path)
        islist = isinstance(data, list)
        if append and not allow_append:
            raise Exception('Appending has been disabled'
                            ' and file %s exists' % log_path)
        if not (islist or isinstance(data, Args)):
            raise Exception('Can only write Args objects or dictionary'
                            ' lists to log file.')
        specs = data if islist else data.specs
        if not all(isinstance(el,dict) for el in specs):
            raise Exception('List elements must be dictionaries.')
        # BUG FIX: use a context manager so the file handle is closed even
        # when formatting or writing raises (was a bare open()/close() pair).
        with open(log_path, 'r+' if append else 'w') as log_file:
            # Continue tid numbering from the last entry already on disk.
            start = int(log_file.readlines()[-1].split()[0])+1 if append else 0
            ascending_indices = range(start, start+len(data))
            log_str = '\n'.join(['%d %s' % (tid, json.dumps(el))
                                 for (tid, el) in zip(ascending_indices,specs)])
            log_file.write("\n"+log_str if append else log_str)
    def __init__(self, log_path, tid_key='tid', **params):
        log_items = sorted(Log.extract_log(log_path).items())
        if tid_key is None:
            log_specs = [spec for (_, spec) in log_items]
        else:
            # Embed each entry's tid into its spec under tid_key.
            log_specs = [dict(list(spec.items())+[(tid_key,idx)])
                         for (idx, spec) in log_items]
        super(Log, self).__init__(log_specs,
                                  log_path=log_path,
                                  tid_key=tid_key,
                                  **params)
        self.pprint_args(['log_path'], ['tid_key'])
class FilePattern(Args):
    """
    A FilePattern specifier allows files to be matched and information
    encoded in filenames to be extracted via an extended form of
    globbing. This object may be used to specify filename arguments to
    CommandTemplates when launching jobs but it is also very useful for
    collating files for analysis.
    For instance, you can find the absolute filenames of all npz files
    in a 'data' subdirectory (relative to the root) that start with
    'timeseries' using the pattern 'data/timeseries*.npz'.
    In addition to globbing supported by the glob module, patterns can
    extract metadata encoded in filenames using a subset of the Python
    format specification syntax. To illustrate, you can use
    'data/timeseries-{date}.npz' to record the date strings associated
    with matched files. Note that a particular named field can only
    be used in a particular pattern once.
    By default metadata is extracted as strings but format types are
    supported in the usual manner
    eg. 'data/timeseries-{day:d}-{month:d}.npz' will extract the day
    and month from the filename as integer values. Only field names
    and types are recognised with other format specification syntax
    ignored. Type codes supported: 'd', 'b', 'o', 'x', 'e','E','f',
    'F','g', 'G', 'n' (if omitted, result is a string by default).
    Note that ordering is determined via ascending alphanumeric sort
    and that actual filenames should not include any globbing
    characters, namely: '?','*','[' and ']' (general good practice for
    filenames anyway).
    """
    key = param.String(default=None, allow_None=True, constant=True, doc='''
       The key name given to the matched file path strings.''')
    pattern = param.String(default=None, allow_None=True, constant=True, doc='''
       The pattern files are to be searched against.''')
    root = param.String(default=None, allow_None=True, constant=True, doc='''
       The root directory from which patterns are to be loaded.  The
       root is set relative to os.getcwd().''')
    @classmethod
    def directory(cls, directory, root=None, extension=None, **kwargs):
        """
        Load all the files in a given directory selecting only files
        with the given extension if specified. The given kwargs are
        passed through to the normal constructor.
        """
        root = os.getcwd() if root is None else root
        suffix = '' if extension is None else '.' + extension.rsplit('.')[-1]
        pattern = directory + os.sep + '*' + suffix
        key = os.path.join(root, directory,'*').rsplit(os.sep)[-2]
        format_parse = list(string.Formatter().parse(key))
        # BUG FIX: zip(...) is not subscriptable on Python 3; inspect the
        # field-name element (index 1) of each 4-tuple from Formatter.parse.
        if not all(el[1] is None for el in format_parse):
            raise Exception('Directory cannot contain format field specifications')
        return cls(key, pattern, root, **kwargs)
    def __init__(self, key, pattern, root=None, **params):
        root = os.getcwd() if root is None else root
        specs = self._load_expansion(key, root, pattern)
        # Convenience list of just the matched absolute file paths.
        self.files = [s[key] for s in specs]
        super(FilePattern, self).__init__(specs, key=key, pattern=pattern,
                                          root=root, **params)
        self.pprint_args(['key', 'pattern'], ['root'])
    def fields(self):
        """
        Return the fields specified in the pattern using Python's
        formatting mini-language.
        """
        parse = list(string.Formatter().parse(self.pattern))
        # BUG FIX: avoid subscripting zip() (an iterator on Python 3).
        return [el[1] for el in parse if el[1] is not None]
    def _load_expansion(self, key, root, pattern):
        """
        Loads the files that match the given pattern, returning one spec
        per file (extracted tags plus the absolute path under 'key'),
        sorted by path.
        """
        path_pattern = os.path.join(root, pattern)
        expanded_paths = self._expand_pattern(path_pattern)
        specs=[]
        for (path, tags) in expanded_paths:
            # A matched directory contributes every file it contains.
            filelist = [os.path.join(path,f) for f in os.listdir(path)] if os.path.isdir(path) else [path]
            for filepath in filelist:
                specs.append(dict(tags,**{key:os.path.abspath(filepath)}))
        return sorted(specs, key=lambda s: s[key])
    def _expand_pattern(self, pattern):
        """
        From the pattern decomposition, finds the absolute paths
        matching the pattern, each paired with its extracted tag dict.
        """
        (globpattern, regexp, fields, types) = self._decompose_pattern(pattern)
        filelist = glob.glob(globpattern)
        expansion = []
        for fname in filelist:
            if fields == []:
                expansion.append((fname, {}))
                continue
            match = re.match(regexp, fname)
            if match is None: continue
            match_items = match.groupdict().items()
            # Apply each field's declared type constructor (default str).
            tags = dict((k,types.get(k, str)(v)) for (k,v) in match_items)
            expansion.append((fname, tags))
        return expansion
    def _decompose_pattern(self, pattern):
        """
        Given a path pattern with format declaration, generates a
        four-tuple (glob_pattern, regexp pattern, fields, type map)
        """
        sep = '~lancet~sep~'
        float_codes = ['e','E','f', 'F','g', 'G', 'n']
        # NOTE(review): bin/oct/hex expect ints, so the 'b'/'o'/'x' codes
        # would fail when applied to matched strings -- verify intended usage.
        typecodes = dict([(k,float) for k in float_codes]
                         + [('b',bin), ('d',int), ('o',oct), ('x',hex)])
        parse = list(string.Formatter().parse(pattern))
        text, fields, codes, _ = zip(*parse)
        # Finding the field types from format string
        types = []
        for (field, code) in zip(fields, codes):
            if code in ['', None]: continue
            constructor = typecodes.get(code[-1], None)
            if constructor: types += [(field, constructor)]
        # Each field becomes '*' in the glob and a named group in the regexp.
        stars = ['' if not f else '*' for f in fields]
        globpat = ''.join(lit+star for (lit,star) in zip(text,stars))
        refields = ['' if not f else sep+('(?P<%s>.*?)'% f)+sep for f in fields]
        parts = ''.join(lit+group for (lit,group) in zip(text, refields)).split(sep)
        # Escape only the literal fragments; globbing '*' becomes regex '.*'.
        for i in range(0, len(parts), 2): parts[i] = re.escape(parts[i])
        regexp_pattern = ''.join(parts).replace('\\*','.*')
        fields = list(f for f in fields if f)
        return globpat, regexp_pattern , fields, dict(types)
    @property
    def table(self):
        # Tabular view keyed on the matched file-path column.
        return to_table(self, [self.key])
# Importing from filetypes requires PrettyPrinted to be defined first
from lancet.filetypes import FileType
class FileInfo(Args):
    """
    Loads metadata from a set of filenames. For instance, you can load
    metadata associated with a series of image files given by a
    FilePattern. Unlike other explicit instances of Args, this object
    extends the values of an existing Args object. Once you have
    loaded the metadata, FileInfo allows you to load the file data
    into a pandas DataFrame or a HoloViews Table.
    """
    source = param.ClassSelector(class_ = Args, doc='''
       The argument specifier that supplies the file paths.''')
    filetype = param.ClassSelector(constant=True, class_= FileType, doc='''
       A FileType object to be applied to each file path.''')
    key = param.String(constant=True, doc='''
       The key used to find the file paths for inspection.''')
    ignore = param.List(default=[], constant=True, doc='''
       Metadata keys that are to be explicitly ignored. ''')
    def __init__(self, source, key, filetype, ignore = [], **params):
        # Extend the source specs with metadata read from each file.
        specs = self._info(source, key, filetype, ignore)
        super(FileInfo, self).__init__(specs,
                                       source = source,
                                       filetype = filetype,
                                       key = key,
                                       ignore=ignore,
                                       **params)
        self.pprint_args(['source', 'key', 'filetype'], ['ignore'])
    @classmethod
    def from_pattern(cls, pattern, filetype=None, key='filename', root=None, ignore=[]):
        """
        Convenience method to directly chain a pattern processed by
        FilePattern into a FileInfo instance.
        Note that if a default filetype has been set on FileInfo, the
        filetype argument may be omitted.
        """
        filepattern = FilePattern(key, pattern, root=root)
        if FileInfo.filetype and filetype is None:
            filetype = FileInfo.filetype
        elif filetype is None:
            raise Exception("The filetype argument must be supplied unless "
                            "an appropriate default has been specified as "
                            "FileInfo.filetype")
        return FileInfo(filepattern, key, filetype, ignore=ignore)
    @property
    def table(self):
        # Tabular view keyed on the file-path column.
        return to_table(self, [self.key])
    def load(self, val, **kwargs):
        """
        Load the file contents into the supplied pandas dataframe or
        HoloViews Table. This allows a selection to be made over the
        metadata before loading the file contents (may be slow).
        """
        # Table/DataFrame are None when the optional dependency is absent.
        if Table and isinstance(val, Table):
            return self.load_table(val, **kwargs)
        elif DataFrame and isinstance(val, DataFrame):
            return self.load_dframe(val, **kwargs)
        else:
            raise Exception("Type %s not a DataFrame or Table." % type(val))
    def load_table(self, table):
        """
        Load the file contents into the supplied Table using the
        specified key and filetype. The input table should have the
        filenames as values which will be replaced by the loaded
        data.
        """
        items, data_keys = [], None
        for key, filename in table.items():
            data_dict = self.filetype.data(filename[0])
            # Sort keys so value ordering is consistent across rows.
            current_keys = tuple(sorted(data_dict.keys()))
            values = [data_dict[k] for k in current_keys]
            if data_keys is None:
                data_keys = current_keys
            elif data_keys != current_keys:
                raise Exception("Data keys are inconsistent")
            items.append((key, values))
        return Table(items, kdims=table.kdims, vdims=data_keys)
    def load_dframe(self, dframe):
        """
        Load the file contents into the supplied dataframe using the
        specified key and filetype.
        """
        filename_series = dframe[self.key]
        loaded_data = filename_series.map(self.filetype.data)
        keys = [list(el.keys()) for el in loaded_data.values]
        for key in set().union(*keys):
            key_exists = key in dframe.columns
            if key_exists:
                # BUG FIX: trailing space added so the concatenated message no
                # longer reads '...to avoidoverwriting existing...'.
                self.warning("Appending '_data' suffix to data key %r to avoid "
                             "overwriting existing metadata with the same name." % key)
            suffix = '_data' if key_exists else ''
            dframe[key+suffix] = loaded_data.map(lambda x: x.get(key, np.nan))
        return dframe
    def _info(self, source, key, filetype, ignore):
        """
        Generates the union of the source.specs and the metadata
        dictionary loaded by the filetype object.
        """
        specs, mdata = [], {}
        mdata_clashes = set()
        for spec in source.specs:
            if key not in spec:
                raise Exception("Key %r not available in 'source'." % key)
            mdata = dict((k,v) for (k,v) in filetype.metadata(spec[key]).items()
                         if k not in ignore)
            mdata_spec = {}
            mdata_spec.update(spec)
            mdata_spec.update(mdata)
            specs.append(mdata_spec)
            mdata_clashes = mdata_clashes | (set(spec.keys()) & set(mdata.keys()))
        # Metadata clashes can be avoided by using the ignore list.
        if mdata_clashes:
            self.warning("Loaded metadata keys overriding source keys.")
        return specs
| bsd-3-clause |
gnowxilef/plexpy | lib/requests/adapters.py | 205 | 16799 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3 import Retry
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError)
from .auth import _basic_auth_str
# Defaults for urllib3 connection pooling and retry behaviour.
DEFAULT_POOLBLOCK = False  # do not block when a pool has no free connection
DEFAULT_POOLSIZE = 10      # pools to cache / connections per pool
DEFAULT_RETRIES = 0        # no retries unless the caller opts in
class BaseAdapter(object):
    """The Base Transport Adapter"""
    def __init__(self):
        super(BaseAdapter, self).__init__()
    def send(self):
        """Dispatch a request; must be overridden by concrete adapters."""
        raise NotImplementedError
    def close(self):
        """Release held resources; must be overridden by concrete adapters."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
    """The built-in HTTP Adapter for urllib3.
    Provides a general-case interface for Requests sessions to contact HTTP and
    HTTPS urls by implementing the Transport Adapter interface. This class will
    usually be created by the :class:`Session <Session>` class under the
    covers.
    :param pool_connections: The number of urllib3 connection pools to cache.
    :param pool_maxsize: The maximum number of connections to save in the pool.
    :param int max_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed DNS lookups, socket
        connections and connection timeouts, never to requests where data has
        made it to the server. By default, Requests does not retry failed
        connections. If you need granular control over the conditions under
        which we retry a request, import urllib3's ``Retry`` class and pass
        that instead.
    :param pool_block: Whether the connection pool should block for connections.
    Usage::
      >>> import requests
      >>> s = requests.Session()
      >>> a = requests.adapters.HTTPAdapter(max_retries=3)
      >>> s.mount('http://', a)
    """
    # Attributes that participate in pickling (see __getstate__/__setstate__).
    __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
                 '_pool_block']
    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        if max_retries == DEFAULT_RETRIES:
            # total=0 with read=False; presumably this forbids retrying
            # requests whose data may already have reached the server --
            # TODO confirm against urllib3 Retry semantics.
            self.max_retries = Retry(0, read=False)
        else:
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}          # unused at runtime; kept for pickling via __attrs__
        self.proxy_manager = {}   # proxy URL -> urllib3 ProxyManager cache
        super(HTTPAdapter, self).__init__()
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
    def __getstate__(self):
        # Missing attributes pickle as None rather than raising.
        return dict((attr, getattr(self, attr, None)) for attr in
                    self.__attrs__)
    def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}
        for attr, value in state.items():
            setattr(self, attr, value)
        # Rebuild the (unpicklable) pool manager from the restored settings.
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)
    def proxy_manager_for(self, proxy, **proxy_kwargs):
        """Return urllib3 ProxyManager for the given proxy.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The proxy to return a urllib3 ProxyManager for.
        :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
        :returns: ProxyManager
        """
        # Lazily build and cache one ProxyManager per proxy URL.
        if not proxy in self.proxy_manager:
            proxy_headers = self.proxy_headers(proxy)
            self.proxy_manager[proxy] = proxy_from_url(
                proxy,
                proxy_headers=proxy_headers,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs)
        return self.proxy_manager[proxy]
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Whether we should actually verify the certificate.
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith('https') and verify:
            cert_loc = None
            # Allow self-specified cert location.
            if verify is not True:
                cert_loc = verify
            if not cert_loc:
                cert_loc = DEFAULT_CA_BUNDLE_PATH
            if not cert_loc:
                raise Exception("Could not find a suitable SSL CA certificate bundle.")
            conn.cert_reqs = 'CERT_REQUIRED'
            conn.ca_certs = cert_loc
        else:
            # Plain HTTP or verification disabled: skip certificate checks.
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None
        if cert:
            # cert may be a (cert_file, key_file) pair or a single combined file.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        """
        response = Response()
        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)
        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason
        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url
        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)
        # Give the Response some context.
        response.request = req
        response.connection = self
        return response
    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        """
        proxies = proxies or {}
        # Proxies are selected by the URL's scheme ('http' / 'https').
        proxy = proxies.get(urlparse(url.lower()).scheme)
        if proxy:
            proxy = prepend_scheme_if_needed(proxy, 'http')
            proxy_manager = self.proxy_manager_for(proxy)
            conn = proxy_manager.connection_from_url(url)
        else:
            # Only scheme should be lower case
            parsed = urlparse(url)
            url = parsed.geturl()
            conn = self.poolmanager.connection_from_url(url)
        return conn
    def close(self):
        """Disposes of any internal state.
        Currently, this just closes the PoolManager, which closes pooled
        connections.
        """
        self.poolmanager.clear()
    def request_url(self, request, proxies):
        """Obtain the url to use when making the final request.
        If the message is being sent through a HTTP proxy, the full URL has to
        be used. Otherwise, we should only use the path portion of the URL.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param proxies: A dictionary of schemes to proxy URLs.
        """
        proxies = proxies or {}
        scheme = urlparse(request.url).scheme
        # HTTPS-over-proxy uses CONNECT tunnelling, so the path form suffices.
        proxy = proxies.get(scheme)
        if proxy and scheme != 'https':
            url = urldefragauth(request.url)
        else:
            url = request.path_url
        return url
    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        pass
    def proxy_headers(self, proxy):
        """Returns a dictionary of the headers to add to any request sent
        through a proxy. This works with urllib3 magic to ensure that they are
        correctly sent to the proxy, rather than in a tunnelled request if
        CONNECT is being used.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxies: The url of the proxy being used for this request.
        :param kwargs: Optional additional keyword arguments.
        """
        headers = {}
        # Credentials embedded in the proxy URL become Proxy-Authorization.
        username, password = get_auth_from_url(proxy)
        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                             password)
        return headers
    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a (`connect timeout, read
            timeout <user/advanced.html#timeouts>`_) tuple.
        :type timeout: float or tuple
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        conn = self.get_connection(request.url, proxies)
        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)
        # A body without a Content-Length must be sent chunked.
        chunked = not (request.body is None or 'Content-Length' in request.headers)
        if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
            except ValueError as e:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        else:
            timeout = TimeoutSauce(connect=timeout, read=timeout)
        try:
            if not chunked:
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )
            # Send the request.
            else:
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool
                # Drive the low-level http connection directly so the body
                # can be written with chunked transfer-encoding framing.
                low_conn = conn._get_conn(timeout=timeout)
                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)
                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)
                    low_conn.endheaders()
                    for i in request.body:
                        # Each chunk: hex length, CRLF, data, CRLF.
                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
                        low_conn.send(b'\r\n')
                        low_conn.send(i)
                        low_conn.send(b'\r\n')
                    # Zero-length terminating chunk.
                    low_conn.send(b'0\r\n\r\n')
                    r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
                else:
                    # All is well, return the connection to the pool.
                    conn._put_conn(low_conn)
        # Translate urllib3 exceptions into their requests equivalents.
        except (ProtocolError, socket.error) as err:
            raise ConnectionError(err, request=request)
        except MaxRetryError as e:
            if isinstance(e.reason, ConnectTimeoutError):
                raise ConnectTimeout(e, request=request)
            if isinstance(e.reason, ResponseError):
                raise RetryError(e, request=request)
            raise ConnectionError(e, request=request)
        except _ProxyError as e:
            raise ProxyError(e)
        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                raise SSLError(e, request=request)
            elif isinstance(e, ReadTimeoutError):
                raise ReadTimeout(e, request=request)
            else:
                raise
        return self.build_response(request, resp)
| gpl-3.0 |
Denwey/TShock | scripts/create_release.py | 6 | 3912 | # Hey there, this is used to compile TShock on the build server.
# Don't change it. Thanks!
import os
import shutil
import subprocess
import urllib2
import zipfile
cur_wd = os.getcwd()
release_dir = os.path.join(cur_wd, "releases")
terraria_bin_name = "TerrariaServer.exe"
sql_bins_names = ["Mono.Data.Sqlite.dll", "MySql.Data.dll"]
sqlite_dep = "sqlite3.dll"
json_bin_name = "Newtonsoft.Json.dll"
http_bin_name = "HttpServer.dll"
tshock_bin_name = "TShockAPI.dll"
tshock_symbols = "TShockAPI.dll.mdb"
bcrypt_bin_name = "BCrypt.Net.dll"
terraria_release_bin = os.path.join(cur_wd, "TerrariaServerAPI", "bin", "Release", terraria_bin_name)
terraria_debug_bin = os.path.join(cur_wd, "TerrariaServerAPI", "bin", "Debug", terraria_bin_name)
sql_dep = os.path.join(cur_wd, "prebuilts")
http_bin = os.path.join(cur_wd, "prebuilts", http_bin_name)
json_bin = os.path.join(cur_wd, "prebuilts", json_bin_name)
bcrypt_bin = os.path.join(cur_wd, "prebuilts", bcrypt_bin_name)
release_bin = os.path.join(cur_wd, "TShockAPI", "bin", "Release", tshock_bin_name)
debug_folder = os.path.join(cur_wd, "TShockAPI", "bin", "Debug")
def create_release_folder():
    """Create the staging directory that all release artifacts are copied into."""
    os.mkdir(release_dir)
def copy_dependencies():
    """Copy the prebuilt dependency binaries into the release staging directory."""
    shutil.copy(http_bin, release_dir)
    shutil.copy(json_bin, release_dir)
    shutil.copy(bcrypt_bin, release_dir)
    shutil.copy(os.path.join(sql_dep, sqlite_dep), release_dir)
    # SQL provider assemblies (SQLite and MySQL) also live in the prebuilts folder.
    for f in sql_bins_names:
        shutil.copy(os.path.join(sql_dep, f), release_dir)
def copy_debug_files():
    """Copy the Debug-configuration server binary, the TShock assembly and its
    Mono debug symbols into the release staging directory."""
    shutil.copy(terraria_debug_bin, release_dir)
    shutil.copy(os.path.join(debug_folder, tshock_bin_name), release_dir)
    shutil.copy(os.path.join(debug_folder, tshock_symbols), release_dir)
def copy_release_files():
    """Copy the Release-configuration server and TShock binaries into the
    release staging directory."""
    shutil.copy(terraria_release_bin, release_dir)
    # Bug fix: the original copied release_bin twice in a row; the second,
    # identical copy was redundant and has been removed.
    shutil.copy(release_bin, release_dir)
def create_base_zip(name):
    """Create archive *name* inside the release directory, pre-populated with
    the server binary and shared dependencies, and return the still-open
    zipfile.ZipFile so the caller can add configuration-specific files.

    Note: changes the process working directory to release_dir; callers are
    responsible for chdir-ing back (they do, via os.chdir(cur_wd)).
    """
    os.chdir(release_dir)
    # Named 'archive' rather than 'zip' so the builtin zip() is not shadowed.
    archive = zipfile.ZipFile(name, "w")
    archive.write(terraria_bin_name)
    archive.write(sqlite_dep)
    archive.write(http_bin_name, os.path.join("ServerPlugins", http_bin_name))
    # Newtonsoft.Json stays at the archive root, next to the server exe.
    archive.write(json_bin_name, json_bin_name)
    archive.write(bcrypt_bin_name, os.path.join("ServerPlugins", bcrypt_bin_name))
    for f in sql_bins_names:
        archive.write(f, os.path.join("ServerPlugins", f))
    return archive
def package_release():
    """Build tshock_release.zip from the Release-configuration binaries."""
    copy_release_files()
    zip = create_base_zip("tshock_release.zip")
    zip.write(tshock_bin_name, os.path.join("ServerPlugins", tshock_bin_name))
    zip.close()
    # Remove the loose copies now that they are inside the archive.
    os.remove(tshock_bin_name)
    os.remove(terraria_bin_name)
    os.chdir(cur_wd)
def package_debug():
    """Build tshock_debug.zip from the Debug-configuration binaries,
    including the Mono debug symbol file."""
    copy_debug_files()
    zip = create_base_zip("tshock_debug.zip")
    zip.write(tshock_bin_name, os.path.join("ServerPlugins", tshock_bin_name))
    zip.write(tshock_symbols, os.path.join("ServerPlugins", tshock_symbols))
    zip.close()
    # Remove the loose copies now that they are inside the archive.
    os.remove(tshock_bin_name)
    os.remove(tshock_symbols)
    os.remove(terraria_bin_name)
    os.chdir(cur_wd)
def delete_files():
    """Remove the shared dependency binaries left loose in the release dir
    after both archives have been built."""
    os.chdir(release_dir)
    # Same removal order as before: SQL assemblies first, then the rest.
    leftovers = list(sql_bins_names)
    leftovers.extend([sqlite_dep, json_bin_name, bcrypt_bin_name, http_bin_name])
    for leftover in leftovers:
        os.remove(leftover)
    os.chdir(cur_wd)
def update_terraria_source():
    """Initialise and update git submodules (pulls the Terraria server source)."""
    subprocess.check_call(['/usr/bin/git', 'submodule', 'init'])
    subprocess.check_call(['/usr/bin/git', 'submodule', 'update'])
def build_software():
    """Build the Release and Debug configurations of TShockAPI concurrently
    with xbuild.

    Raises subprocess.CalledProcessError if either build exits non-zero.
    """
    release_cmd = ['/usr/local/bin/xbuild', './TShockAPI/TShockAPI.csproj', '/p:Configuration=Release']
    debug_cmd = ['/usr/local/bin/xbuild', './TShockAPI/TShockAPI.csproj', '/p:Configuration=Debug']
    release_proc = subprocess.Popen(release_cmd)
    debug_proc = subprocess.Popen(debug_cmd)
    release_proc.wait()
    debug_proc.wait()
    # Bug fix: CalledProcessError was referenced as a bare name (a NameError
    # at runtime) and called with the wrong arguments; it lives in the
    # subprocess module and its constructor takes (returncode, cmd).
    if release_proc.returncode != 0:
        raise subprocess.CalledProcessError(release_proc.returncode, release_cmd)
    if debug_proc.returncode != 0:
        raise subprocess.CalledProcessError(debug_proc.returncode, debug_cmd)
if __name__ == '__main__':
    # Full release pipeline: stage the output directory, fetch sources,
    # build both configurations, package each archive, then remove the
    # shared dependency copies left loose in the staging directory.
    create_release_folder()
    update_terraria_source()
    copy_dependencies()
    build_software()
    package_release()
    package_debug()
    delete_files()
| gpl-3.0 |
elysium001/zamboni | mkt/webpay/tests/test_webpay_jwt.py | 17 | 10477 | # -*- coding: utf-8 -*-
import json
import urlparse
from urllib import urlencode
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
import jwt
import mock
from mozpay.verify import verify_claims, verify_keys
from nose.tools import eq_, ok_, raises
from mkt.constants.payments import PROVIDER_REFERENCE
from mkt.developers.models import AddonPaymentAccount, PaymentAccount
from mkt.purchase.models import Contribution
from mkt.purchase.tests.utils import InAppPurchaseTest, PurchaseTest
from mkt.site.helpers import absolutify
from mkt.webpay.webpay_jwt import (get_product_jwt, InAppProduct,
SimulatedInAppProduct, WebAppProduct)
class TestPurchaseJWT(PurchaseTest):
    """Tests for the JWT produced by get_product_jwt() for web-app purchases."""
    def setUp(self):
        super(TestPurchaseJWT, self).setUp()
        self.product = WebAppProduct(self.addon)
        self.contribution = Contribution.objects.create(
            user=self.user,
            addon=self.addon,
        )
    def decode_token(self):
        # verify=False: these tests inspect the payload, not the signature.
        token = get_product_jwt(self.product, self.contribution)
        return jwt.decode(str(token['webpayJWT']), verify=False)
    def test_claims(self):
        verify_claims(self.decode_token())
    def test_keys(self):
        verify_keys(self.decode_token(),
                    ('iss',
                     'typ',
                     'aud',
                     'iat',
                     'exp',
                     'request.name',
                     'request.description',
                     'request.pricePoint',
                     'request.postbackURL',
                     'request.chargebackURL',
                     'request.productData'))
    def test_valid_jwt(self):
        """The token carries the configured issuer/type/audience plus the
        product's own metadata in its request payload."""
        token_data = self.decode_token()
        eq_(token_data['iss'], settings.APP_PURCHASE_KEY)
        eq_(token_data['typ'], settings.APP_PURCHASE_TYP)
        eq_(token_data['aud'], settings.APP_PURCHASE_AUD)
        request = token_data['request']
        eq_(request['id'], self.product.external_id())
        eq_(request['name'], self.product.name())
        eq_(request['defaultLocale'], self.product.default_locale())
        eq_(request['icons'], self.product.icons())
        eq_(request['description'], self.product.description())
        eq_(request['pricePoint'], self.product.price().name)
        eq_(request['postbackURL'], absolutify(reverse('webpay.postback')))
        eq_(request['chargebackURL'], absolutify(reverse('webpay.chargeback')))
        product = urlparse.parse_qs(request['productData'])
        expected = urlparse.parse_qs(
            urlencode(self.product.product_data(self.contribution)))
        eq_(product['buyer_email'], [self.user.email])
        eq_(product, expected)
    @raises(ValueError)
    def test_empty_public_id(self):
        self.addon.update(solitude_public_id=None)
        self.decode_token()
    def test_no_user(self):
        # Anonymous contributions must not leak a buyer_email claim.
        self.contribution.update(user=None)
        token_data = self.decode_token()
        request = token_data['request']
        product = urlparse.parse_qs(request['productData'])
        ok_('buyer_email' not in product)
    def test_locales(self):
        with mock.patch.object(self.product, 'localized_properties') as props:
            loc_data = {
                'es': {
                    'name': 'El Mocoso',
                    'description': u'descripción de la aplicación',
                }
            }
            props.return_value = loc_data
            token_data = self.decode_token()
            # Make sure the JWT passes through localized_properties() data.
            eq_(token_data['request']['locales'], loc_data)
class BaseTestWebAppProduct(PurchaseTest):
    """Shared setUp for WebAppProduct tests: builds the product under test
    and a Contribution representing the purchase."""
    def setUp(self):
        super(BaseTestWebAppProduct, self).setUp()
        self.product = WebAppProduct(self.addon)
        self.contribution = Contribution.objects.create(
            user=self.user,
            addon=self.addon,
        )
        # Re-fetch so the instance reflects exactly what is stored in the DB.
        self.contribution = Contribution.objects.get()
class TestWebAppProduct(BaseTestWebAppProduct):
    """Tests for WebAppProduct's accessors, external ids and product data."""
    def test_external_id_with_no_domain(self):
        with self.settings(DOMAIN=None):
            eq_(self.product.external_id(),
                'marketplace-dev:{0}'.format(self.addon.pk))
    def test_external_id_with_domain(self):
        with self.settings(DOMAIN='marketplace.allizom.org'):
            eq_(self.product.external_id(),
                'marketplace:{0}'.format(self.addon.pk))
    def test_webapp_product(self):
        # Each accessor must mirror the corresponding addon attribute.
        eq_(self.product.id(), self.addon.pk)
        eq_(self.product.name(), unicode(self.addon.name))
        eq_(self.product.addon(), self.addon)
        eq_(self.product.default_locale(), self.addon.default_locale)
        eq_(self.product.price(), self.addon.premium.price)
        eq_(self.product.icons()['64'],
            absolutify(self.addon.get_icon_url(64)))
        eq_(self.product.description(), self.addon.description)
        eq_(self.product.application_size(),
            self.addon.current_version.all_files[0].size)
        eq_(self.product.simulation(), None)
        product_data = self.product.product_data(self.contribution)
        eq_(product_data['contrib_uuid'], self.contribution.uuid)
        eq_(product_data['public_id'], self.public_id)
        eq_(product_data['addon_id'], self.product.addon().pk)
        eq_(product_data['application_size'], self.product.application_size())
    @override_settings(AMO_LANGUAGES=('en-US', 'es', 'fr'))
    def test_localized_properties(self):
        # Translated name/description pairs must come back keyed by locale,
        # with en-US falling back to the original values.
        en_name = unicode(self.addon.name)
        en_desc = unicode(self.addon.description)
        loc_names = {
            'fr': 'Le Vaurien',
            'es': 'El Mocoso',
        }
        loc_desc = {
            'fr': u"ceci est une description d'application",
            'es': u'se trata de una descripción de la aplicación',
        }
        self.addon.name = loc_names
        self.addon.description = loc_desc
        self.addon.save()
        names = self.product.localized_properties()
        eq_(names['es']['name'], loc_names['es'])
        eq_(names['es']['description'], loc_desc['es'])
        eq_(names['fr']['name'], loc_names['fr'])
        eq_(names['fr']['description'], loc_desc['fr'])
        eq_(names['en-US']['name'], en_name)
        eq_(names['en-US']['description'], en_desc)
@override_settings(PAYMENT_PROVIDERS=['bango', 'reference'])
class TestWebAppProductMultipleProviders(BaseTestWebAppProduct):
    """product_data() must stay correct when the app has accounts with more
    than one payment provider configured."""
    def setUp(self):
        super(TestWebAppProductMultipleProviders, self).setUp()
        # Add a second (reference-provider) payment account for the addon.
        account = PaymentAccount.objects.create(
            user=self.user, uri='foo', name='test', inactive=False,
            solitude_seller=self.seller, account_id=321, seller_uri='abc',
            provider=PROVIDER_REFERENCE)
        AddonPaymentAccount.objects.create(
            addon=self.addon, account_uri='foo',
            payment_account=account, product_uri='newuri')
    def test_webapp_product_multiple_providers(self):
        product_data = self.product.product_data(self.contribution)
        eq_(product_data['contrib_uuid'], self.contribution.uuid)
        eq_(product_data['public_id'], self.public_id)
        eq_(product_data['addon_id'], self.product.addon().pk)
        eq_(product_data['application_size'],
            self.product.application_size())
class TestInAppProduct(InAppPurchaseTest):
    """Tests for InAppProduct's accessors, external ids and product data."""
    def setUp(self):
        super(TestInAppProduct, self).setUp()
        self.contribution = Contribution.objects.create(
            user=self.user,
            addon=self.addon,
        )
        self.product = InAppProduct(self.inapp)
    def test_external_id_with_no_domain(self):
        with self.settings(DOMAIN=None):
            eq_(self.product.external_id(),
                'inapp.marketplace-dev:{0}'.format(self.inapp.pk))
    def test_external_id_with_domain(self):
        with self.settings(DOMAIN='marketplace.allizom.org'):
            eq_(self.product.external_id(),
                'inapp.marketplace:{0}'.format(self.inapp.pk))
    def test_inapp_product(self):
        # Accessors mirror the in-app product and its parent webapp.
        eq_(self.product.id(), self.inapp.pk)
        eq_(self.product.name(), unicode(self.inapp.name))
        eq_(self.product.addon(), self.inapp.webapp)
        eq_(self.product.price(), self.inapp.price)
        eq_(self.product.icons()[64], absolutify(self.inapp.logo_url))
        eq_(self.product.description(), self.inapp.webapp.description)
        eq_(self.product.application_size(), None)
        eq_(self.product.simulation(), None)
        product_data = self.product.product_data(self.contribution)
        eq_(product_data['contrib_uuid'], self.contribution.uuid)
        eq_(product_data['addon_id'], self.product.addon().pk)
        eq_(product_data['inapp_id'], self.product.id())
        eq_(product_data['application_size'], self.product.application_size())
        eq_(product_data['public_id'], self.public_id)
    def test_no_url(self):
        # Without a logo_url the product falls back to the default rocket icon.
        self.inapp.logo_url = None
        with self.settings(MEDIA_URL='/media/'):
            eq_(self.product.icons()[64],
                'http://testserver/media/img/mkt/icons/rocket-64.png')
    def test_no_user(self):
        product_data = self.product.product_data(self.contribution)
        ok_('buyer_email' not in product_data)
class TestSimulatedInAppProduct(InAppPurchaseTest):
    """Tests for SimulatedInAppProduct: a stub product with no parent webapp
    used to simulate payment results."""
    def setUp(self):
        super(TestSimulatedInAppProduct, self).setUp()
        self.contribution = Contribution.objects.create()
        # Make the in-app product a webapp-less stub that simulates a postback.
        self.inapp.webapp = None
        self.inapp.simulate = json.dumps({'result': 'postback'})
        self.inapp.stub = True
        self.inapp.save()
        self.product = SimulatedInAppProduct(self.inapp)
    def test_inapp_product(self):
        eq_(self.product.id(), self.inapp.pk)
        eq_(self.product.name(), unicode(self.inapp.name))
        eq_(self.product.addon(), None)
        eq_(self.product.price(), self.inapp.price)
        eq_(self.product.icons()[64], absolutify(self.inapp.logo_url))
        eq_(self.product.application_size(), None)
        eq_(self.product.description(),
            'This is a stub product for testing only')
        eq_(self.product.simulation(), {'result': 'postback'})
        product_data = self.product.product_data(self.contribution)
        eq_(product_data['contrib_uuid'], self.contribution.uuid)
        eq_(product_data['inapp_id'], self.product.id())
        eq_(product_data['application_size'], self.product.application_size())
| bsd-3-clause |
kaarolch/ansible | lib/ansible/modules/monitoring/zabbix_group.py | 48 | 7590 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Metadata consumed by Ansible tooling to report module maturity and support.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: zabbix_group
short_description: Zabbix host groups creates/deletes
description:
- Create host groups if they do not exist.
- Delete existing host groups if they exist.
version_added: "1.8"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
C(url) is an alias for C(server_url).
required: true
aliases: [ "url" ]
login_user:
description:
- Zabbix user name.
required: true
login_password:
description:
- Zabbix user password.
required: true
http_login_user:
description:
- Basic Auth login
required: false
default: None
version_added: "2.1"
http_login_password:
description:
- Basic Auth password
required: false
default: None
version_added: "2.1"
state:
description:
- Create or delete host group.
required: false
default: "present"
choices: [ "present", "absent" ]
timeout:
description:
- The timeout of API request(seconds).
default: 10
host_groups:
description:
- List of host groups to create or delete.
required: true
aliases: [ "host_group" ]
notes:
- Too many concurrent updates to the same group may cause Zabbix to return errors, see examples for a workaround if needed.
'''
EXAMPLES = '''
# Base create host groups example
- name: Create host groups
local_action:
module: zabbix_group
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
host_groups:
- Example group1
- Example group2
# Limit the Zabbix group creations to one host since Zabbix can return an error when doing concurent updates
- name: Create host groups
local_action:
module: zabbix_group
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
host_groups:
- Example group1
- Example group2
when: inventory_hostname==groups['group_name'][0]
'''
# zabbix-api is an optional third-party dependency; defer the failure to
# main(), which emits a clear installation hint when it is missing.
try:
    from zabbix_api import ZabbixAPI, ZabbixAPISubClass
    from zabbix_api import Already_Exists
    HAS_ZABBIX_API = True
except ImportError:
    HAS_ZABBIX_API = False
class HostGroup(object):
    """Thin wrapper around the Zabbix API for host group operations."""
    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx
    # create host group(s) if not exists
    def create_host_group(self, group_names):
        """Create every group in group_names that does not already exist.

        Returns the list of group names actually created.  In check mode the
        module exits as soon as a missing group is found.
        """
        try:
            group_add_list = []
            for group_name in group_names:
                result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
                if not result:
                    try:
                        if self._module.check_mode:
                            self._module.exit_json(changed=True)
                        self._zapi.hostgroup.create({'name': group_name})
                        group_add_list.append(group_name)
                    except Already_Exists:
                        # Bug fix: a group created concurrently by another
                        # task used to abort the whole loop via an early
                        # return, leaving later groups uncreated.  Skip just
                        # this group and keep going.
                        continue
            return group_add_list
        except Exception as e:
            self._module.fail_json(msg="Failed to create host group(s): %s" % e)
    # delete host group(s)
    def delete_host_group(self, group_ids):
        """Delete the host groups identified by group_ids."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.hostgroup.delete(group_ids)
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host group(s), Exception: %s" % e)
    # get group ids by name
    def get_group_ids(self, host_groups):
        """Return ([group ids], [full group dicts]) for the named groups."""
        group_ids = []
        group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': host_groups}})
        for group in group_list:
            group_id = group['groupid']
            group_ids.append(group_id)
        return group_ids, group_list
def main():
    """Entry point: parse module arguments, connect to Zabbix and create or
    delete the requested host groups."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            host_groups=dict(type='list', required=True, aliases=['host_group']),
            state=dict(default="present", choices=['present', 'absent']),
            timeout=dict(type='int', default=10)
        ),
        supports_check_mode=True
    )
    if not HAS_ZABBIX_API:
        # Bug fix: corrected the "requried" typo in this user-facing message.
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    host_groups = module.params['host_groups']
    state = module.params['state']
    timeout = module.params['timeout']
    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
    hostGroup = HostGroup(module, zbx)
    group_ids = []
    group_list = []
    if host_groups:
        group_ids, group_list = hostGroup.get_group_ids(host_groups)
    if state == "absent":
        # delete host groups
        if group_ids:
            delete_group_names = []
            hostGroup.delete_host_group(group_ids)
            for group in group_list:
                delete_group_names.append(group['name'])
            module.exit_json(changed=True,
                             result="Successfully deleted host group(s): %s." % ",".join(delete_group_names))
        else:
            module.exit_json(changed=False, result="No host group(s) to delete.")
    else:
        # create host groups
        group_add_list = hostGroup.create_host_group(host_groups)
        if len(group_add_list) > 0:
            module.exit_json(changed=True, result="Successfully created host group(s): %s" % group_add_list)
        else:
            module.exit_json(changed=False)
from ansible.module_utils.basic import *
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Datera/cinder | cinder/api/versions.py | 1 | 4172 | # Copyright 2010 OpenStack Foundation
# Copyright 2015 Clinton Knight
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_config import cfg
from six.moves import http_client
from cinder.api import extensions
from cinder.api import openstack
from cinder.api.openstack import api_version_request
from cinder.api.openstack import wsgi
from cinder.api.views import versions as views_versions
# Global oslo.config handle; enable_v2_api/enable_v3_api are read in all().
CONF = cfg.CONF
# Link block advertised with every version entry.
_LINKS = [{
    "rel": "describedby",
    "type": "text/html",
    "href": "https://docs.openstack.org/",
}]
# Static catalogue of every API version this service knows about; the
# controllers below deep-copy this dict and prune entries per request/config.
_KNOWN_VERSIONS = {
    "v2.0": {
        "id": "v2.0",
        "status": "DEPRECATED",
        "version": "",
        "min_version": "",
        "updated": "2017-02-25T12:00:00Z",
        "links": _LINKS,
        "media-types": [{
            "base": "application/json",
            "type": "application/vnd.openstack.volume+json;version=2",
        }]
    },
    "v3.0": {
        "id": "v3.0",
        "status": "CURRENT",
        "version": api_version_request._MAX_API_VERSION,
        "min_version": api_version_request._MIN_API_VERSION,
        "updated": api_version_request.UPDATED,
        "links": _LINKS,
        "media-types": [{
            "base": "application/json",
            "type": "application/vnd.openstack.volume+json;version=3",
        }]
    },
}
class Versions(openstack.APIRouter):
    """Route versions requests."""
    ExtensionManager = extensions.ExtensionManager
    def _setup_routes(self, mapper, ext_mgr):
        # '/' serves the full version list; anything else redirects there.
        self.resources['versions'] = create_resource()
        mapper.connect('versions', '/',
                       controller=self.resources['versions'],
                       action='all')
        mapper.redirect('', '/')
    def _setup_ext_routes(self, mapper, ext_mgr):
        # NOTE(mriedem): The version router doesn't care about extensions.
        pass
    # NOTE (jose-castro-leon): Avoid to register extensions
    # on the versions router, the versions router does not offer
    # resources to be extended.
    def _setup_extensions(self, ext_mgr):
        pass
class VersionsController(wsgi.Controller):
    """Serve API version information, dispatching by requested microversion."""
    def __init__(self):
        # No action map is needed; pass None to the wsgi.Controller base.
        super(VersionsController, self).__init__(None)
    @wsgi.Controller.api_version('2.0')
    def index(self, req):  # pylint: disable=E0102
        """Return versions supported prior to the microversions epoch."""
        builder = views_versions.get_view_builder(req)
        known_versions = copy.deepcopy(_KNOWN_VERSIONS)
        known_versions.pop('v3.0')
        return builder.build_versions(known_versions)
    # The redefinition of index() below is intentional: the api_version
    # decorator registers both variants and wsgi dispatches by microversion.
    @index.api_version('3.0')
    def index(self, req):  # pylint: disable=E0102
        """Return versions supported after the start of microversions."""
        builder = views_versions.get_view_builder(req)
        known_versions = copy.deepcopy(_KNOWN_VERSIONS)
        known_versions.pop('v2.0')
        return builder.build_versions(known_versions)
    # NOTE (cknight): Calling the versions API without
    # /v2 or /v3 in the URL will lead to this unversioned
    # method, which should always return info about all
    # available versions.
    @wsgi.response(http_client.MULTIPLE_CHOICES)
    def all(self, req):
        """Return all known and enabled versions."""
        builder = views_versions.get_view_builder(req)
        known_versions = copy.deepcopy(_KNOWN_VERSIONS)
        if not CONF.enable_v2_api:
            known_versions.pop('v2.0')
        if not CONF.enable_v3_api:
            known_versions.pop('v3.0')
        return builder.build_versions(known_versions)
def create_resource():
    """Build the WSGI resource wrapping a fresh VersionsController."""
    controller = VersionsController()
    return wsgi.Resource(controller)
| apache-2.0 |
commonwealth-of-puerto-rico/libre | libre/apps/data_drivers/migrations/0032_auto__add_sourcepythonscript__del_field_sourcews_endpoint__del_field_s.py | 2 | 17916 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the data_drivers_sourcepythonscript child table and
        drop the endpoint/parameters columns from SourceWS."""
        # Adding model 'SourcePythonScript'
        db.create_table(u'data_drivers_sourcepythonscript', (
            (u'source_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['data_drivers.Source'], unique=True, primary_key=True)),
        ))
        db.send_create_signal(u'data_drivers', ['SourcePythonScript'])
        # Deleting field 'SourceWS.endpoint'
        db.delete_column(u'data_drivers_sourcews', 'endpoint')
        # Deleting field 'SourceWS.parameters'
        db.delete_column(u'data_drivers_sourcews', 'parameters')
    def backwards(self, orm):
        """Unapply: drop the SourcePythonScript table, then abort —
        the deleted SourceWS column values cannot be restored, so the
        migration is deliberately irreversible (code after the raise is
        kept only as a template for writing a manual reversal)."""
        # Deleting model 'SourcePythonScript'
        db.delete_table(u'data_drivers_sourcepythonscript')
        # User chose to not deal with backwards NULL issues for 'SourceWS.endpoint'
        raise RuntimeError("Cannot reverse this migration. 'SourceWS.endpoint' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration # Adding field 'SourceWS.endpoint'
        db.add_column(u'data_drivers_sourcews', 'endpoint',
                      self.gf('django.db.models.fields.CharField')(max_length=64),
                      keep_default=False)
        # Adding field 'SourceWS.parameters'
        db.add_column(u'data_drivers_sourcews', 'parameters',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'data_drivers.csvcolumn': {
'Meta': {'object_name': 'CSVColumn'},
'data_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_column': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'import_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'skip_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'columns'", 'to': u"orm['data_drivers.SourceCSV']"})
},
u'data_drivers.databaseresultcolumn': {
'Meta': {'object_name': 'DatabaseResultColumn'},
'data_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_column': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'columns'", 'to': u"orm['data_drivers.SourceDatabase']"})
},
u'data_drivers.fixedwidthcolumn': {
'Meta': {'object_name': 'FixedWidthColumn'},
'data_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_column': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'import_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'size': ('django.db.models.fields.PositiveIntegerField', [], {}),
'skip_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'columns'", 'to': u"orm['data_drivers.SourceFixedWidth']"})
},
u'data_drivers.leafletmarker': {
'Meta': {'ordering': "['label', 'slug']", 'object_name': 'LeafletMarker'},
'icon': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'leafletmarker-icon'", 'to': u"orm['icons.Icon']"}),
'icon_anchor_x': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'icon_anchor_y': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '48', 'blank': 'True'}),
'popup_anchor_x': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'popup_anchor_y': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shadow': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'leafletmarker-shadow'", 'null': 'True', 'to': u"orm['icons.Icon']"}),
'shadow_anchor_x': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shadow_anchor_y': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'blank': 'True'})
},
u'data_drivers.restresultcolumn': {
'Meta': {'object_name': 'RESTResultColumn'},
'data_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_column': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'import_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'skip_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'columns'", 'to': u"orm['data_drivers.SourceRESTAPI']"})
},
u'data_drivers.shapefilecolumn': {
'Meta': {'object_name': 'ShapefileColumn'},
'data_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_column': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'import_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'skip_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'columns'", 'to': u"orm['data_drivers.SourceShape']"})
},
u'data_drivers.source': {
'Meta': {'ordering': "['name', 'slug']", 'object_name': 'Source'},
'allowed_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'origin': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['origins.Origin']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '48', 'blank': 'True'})
},
u'data_drivers.sourcecsv': {
'Meta': {'ordering': "['name', 'slug']", 'object_name': 'SourceCSV', '_ormbases': [u'data_drivers.Source']},
'delimiter': ('django.db.models.fields.CharField', [], {'default': "','", 'max_length': '1', 'blank': 'True'}),
'quote_character': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
u'source_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['data_drivers.Source']", 'unique': 'True', 'primary_key': 'True'})
},
u'data_drivers.sourcedata': {
'Meta': {'object_name': 'SourceData'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'row': ('picklefield.fields.PickledObjectField', [], {}),
'row_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'source_data_version': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': u"orm['data_drivers.SourceDataVersion']"})
},
u'data_drivers.sourcedatabase': {
'Meta': {'ordering': "['name', 'slug']", 'object_name': 'SourceDatabase', '_ormbases': [u'data_drivers.Source']},
u'source_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['data_drivers.Source']", 'unique': 'True', 'primary_key': 'True'})
},
u'data_drivers.sourcedataversion': {
'Meta': {'unique_together': "(('source', 'datetime'), ('source', 'timestamp'), ('source', 'checksum'))", 'object_name': 'SourceDataVersion'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 17, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata': ('picklefield.fields.PickledObjectField', [], {'blank': 'True'}),
'ready': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': u"orm['data_drivers.Source']"}),
'timestamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'})
},
u'data_drivers.sourcefixedwidth': {
'Meta': {'ordering': "['name', 'slug']", 'object_name': 'SourceFixedWidth', '_ormbases': [u'data_drivers.Source']},
u'source_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['data_drivers.Source']", 'unique': 'True', 'primary_key': 'True'})
},
u'data_drivers.sourcepythonscript': {
'Meta': {'ordering': "['name', 'slug']", 'object_name': 'SourcePythonScript', '_ormbases': [u'data_drivers.Source']},
u'source_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['data_drivers.Source']", 'unique': 'True', 'primary_key': 'True'})
},
u'data_drivers.sourcerestapi': {
'Meta': {'object_name': 'SourceRESTAPI'},
u'source_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['data_drivers.Source']", 'unique': 'True', 'primary_key': 'True'})
},
u'data_drivers.sourceshape': {
'Meta': {'ordering': "['name', 'slug']", 'object_name': 'SourceShape', '_ormbases': [u'data_drivers.Source']},
'marker_template': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'markers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['data_drivers.LeafletMarker']", 'null': 'True', 'blank': 'True'}),
'new_projection': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'popup_template': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'source_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['data_drivers.Source']", 'unique': 'True', 'primary_key': 'True'}),
'template_header': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'data_drivers.sourcespreadsheet': {
'Meta': {'ordering': "['name', 'slug']", 'object_name': 'SourceSpreadsheet', '_ormbases': [u'data_drivers.Source']},
'sheet': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '32'}),
u'source_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['data_drivers.Source']", 'unique': 'True', 'primary_key': 'True'})
},
u'data_drivers.sourcews': {
'Meta': {'object_name': 'SourceWS'},
u'source_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['data_drivers.Source']", 'unique': 'True', 'primary_key': 'True'})
},
u'data_drivers.spreadsheetcolumn': {
'Meta': {'object_name': 'SpreadsheetColumn'},
'default': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_column': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'import_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'skip_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'columns'", 'to': u"orm['data_drivers.SourceSpreadsheet']"})
},
u'data_drivers.webservicecolumn': {
'Meta': {'object_name': 'WebServiceColumn'},
'data_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_column': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'import_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'skip_regex': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'columns'", 'to': u"orm['data_drivers.SourceWS']"})
},
u'icons.icon': {
'Meta': {'ordering': "['label', 'name']", 'object_name': 'Icon'},
'icon_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '48', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '48'})
},
u'origins.origin': {
'Meta': {'ordering': "('label',)", 'object_name': 'Origin'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128'})
}
}
complete_apps = ['data_drivers'] | gpl-3.0 |
f3at/feat | src/feat/test/test_common_serialization_json.py | 1 | 10860 | # Headers in this file shall remain intact.
# -*- coding: utf-8 -*-
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
import itertools
import types
from zope.interface import Interface
from zope.interface.interface import InterfaceClass
from feat.common import reflect, serialization, formatable
from feat.common.serialization import base, json
from feat.interface.serialization import *
from feat.test import common
from . import common_serialization
@serialization.register
class DummyClass(serialization.Serializable):
    """Minimal serializable fixture class.

    Tests freeze references to ``dummy_method`` (bound and unbound); the
    method body itself is never exercised.
    """

    def dummy_method(self):
        # Intentionally empty: only the method reference is serialized.
        pass
def dummy_function():
    """Placeholder function; tests freeze a reference to it, never call it."""
    pass
class DummyInterface(Interface):
    """Empty zope interface used to test serialization of InterfaceClass."""
    pass
class JSONConvertersTest(common_serialization.ConverterTest):
    """Round-trip tests for the feat JSON serializer/unserializer pair.

    The actual test methods live in the base ConverterTest; this subclass
    only wires up the JSON converters and supplies the expected-value table
    via convertion_table().
    """

    def setUp(self):
        common_serialization.ConverterTest.setUp(self)
        # Both converters share the externalizer created by the base class
        # so external references resolve symmetrically in both directions.
        ext = self.externalizer
        self.serializer = json.Serializer(externalizer=ext)
        self.unserializer = json.Unserializer(externalizer=ext)

    def convertion_table(self, capabilities, freezing):
        """Yield (value type(s), [values], result type, [expected JSON
        encodings], should-round-trip) tuples consumed by the base class.

        When several expected encodings are listed, any one of them is an
        acceptable output (needed where dict/set ordering is unpredictable).
        """
        ### Basic immutable types ###

        yield str, [""], str, ['[".enc", "UTF8", ""]',
                               '[".bytes", ""]'], False
        yield str, ["dummy"], str, ['[".enc", "UTF8", "dummy"]',
                                    '[".bytes", "ZHVtbXk="]'], False
        # Non-UTF8-decodable byte strings fall back to base64 ".bytes".
        yield str, ["\xFF"], str, ['[".bytes", "/w=="]'], False
        yield unicode, [u""], str, ['""'], False
        yield unicode, [u"dummy"], str, ['"dummy"'], False
        yield unicode, [u"áéí"], str, ['"\\u00e1\\u00e9\\u00ed"'], False
        yield [int, long], [0], str, ["0"], False
        yield [int, long], [42], str, ["42"], False
        yield [int, long], [-42], str, ["-42"], False
        yield [int, long], [0L], str, ["0"], False
        yield long, [2**72], str, ["4722366482869645213696"], False
        yield long, [-2**72], str, ["-4722366482869645213696"], False
        yield float, [0.0], str, ["0.0"], False
        yield float, [3.141], str, ["3.141"], False
        yield float, [-3.141], str, ["-3.141"], False
        yield float, [1e20], str, ["1e+20"], False
        yield float, [1e-22], str, ["1e-22"], False
        yield bool, [True], str, ["true"], False
        yield bool, [False], str, ["false"], False
        yield type(None), [None], str, ["null"], False

        ### Types ###
        from datetime import datetime
        yield type, [int], str, ['[".type", "__builtin__.int"]'], False
        yield (type, [datetime],
               str, ['[".type", "datetime.datetime"]'], False)
        yield (type, [common_serialization.SerializableDummy],
               str, ['[".type", "feat.test.common_serialization.'
                     'SerializableDummy"]'], False)
        yield (InterfaceClass, [DummyInterface],
               str, ['[".type", "feat.test.test_common_serialization_json.'
                     'DummyInterface"]'], False)

        ### Enums ###

        DummyEnum = common_serialization.DummyEnum

        yield (DummyEnum, [DummyEnum.a],
               str, ['[".enum", "feat.test.common_serialization.'
                     'DummyEnum.a"]'], False)
        yield (DummyEnum, [DummyEnum.c],
               str, ['[".enum", "feat.test.common_serialization.'
                     'DummyEnum.c"]'], False)

        ### External References ###

        # Frozen externals serialize as an identifier only; full
        # serialization wraps the identifier in an ".ext" marker instead.
        if freezing:
            name = '[".enc", "UTF8", "%s"]' % self.ext_val.type_name
            identifier = '[".tuple", %s, %d]' % (name, id(self.ext_val))
            yield (type(self.ext_val), [self.ext_val],
                   str, [identifier], False)
            yield (type(self.ext_snap_val), [self.ext_snap_val],
                   str, [str(id(self.ext_snap_val))], False)
        else:
            name = '[".enc", "UTF8", "%s"]' % self.ext_val.type_name
            identifier = '[".tuple", %s, %d]' % (name, id(self.ext_val))
            yield (common_serialization.SerializableDummy, [self.ext_val],
                   str, ['[".ext", %s]' % identifier], False)

        ### Freezing-Only Types ###

        # Functions and methods freeze to their canonical dotted name;
        # they are not unserializable, hence only yielded when freezing.
        if freezing:
            mod_name = "feat.test.test_common_serialization_json"
            fun_name = '"%s.dummy_function"' % mod_name
            meth_name = '"%s.DummyClass.dummy_method"' % mod_name

            yield types.FunctionType, [dummy_function], str, [fun_name], True
            yield (types.FunctionType, [DummyClass.dummy_method],
                   str, [meth_name], True)
            o = DummyClass()
            yield types.FunctionType, [o.dummy_method], str, [meth_name], True

        #### Basic mutable types plus tuples ###

        # Exception for empty tuple singleton
        yield tuple, [()], str, ['[".tuple"]'], False
        yield tuple, [(1, 2, 3)], str, ['[".tuple", 1, 2, 3]'], True
        yield list, [[]], str, ['[]'], True
        yield list, [[1, 2, 3]], str, ['[1, 2, 3]'], True
        yield set, [set([])], str, ['[".set"]'], True
        yield set, [set([1, 3])], str, ['[".set", 1, 3]'], True
        yield dict, [{}], str, ['{}'], True
        yield dict, [{"1": 2, "3": 4}], str, ['{"1": 2, "3": 4}'], True

        # Container with different types
        yield (tuple, [(0.11, "a", u"z", False, None,
                        (1, ), [2], set([3]), {"4": 5})],
               str, ['[".tuple", 0.11, [".enc", "UTF8", "a"], "z", false, '
                     'null, [".tuple", 1], [2], [".set", 3], {"4": 5}]'], True)
        yield (list, [[0.11, "a", u"z", False, None,
                       (1, ), [2], set([3]), {"4": 5}]],
               str, ['[0.11, [".enc", "UTF8", "a"], "z", false, null, '
                     '[".tuple", 1], [2], [".set", 3], {"4": 5}]'], True)

        ### References and Dereferences ###

        # Simple reference in list
        a = []
        b = [a, a]
        yield list, [b], str, ['[[".ref", 1, []], [".deref", 1]]'], True

        # Simple reference in tuple
        a = ()
        b = (a, a)
        yield tuple, [b], str, ['[".tuple", [".ref", 1, [".tuple"]], '
                                '[".deref", 1]]'], True

        # Simple dereference in dict value.
        a = []
        b = [a, {"1": a}]
        yield list, [b], str, ['[[".ref", 1, []], {"1": [".deref", 1]}]'], True

        # Simple reference in dict value.
        a = []
        b = [{"1": a}, a]
        yield list, [b], str, ['[{"1": [".ref", 1, []]}, [".deref", 1]]'], True

        # Multiple reference in dictionary values, because dictionary order
        # is not predictable all possibilities have to be tested
        a = {}
        b = {"1": a, "2": a, "3": a}
        yield (dict, [b], str,
               ['{"1": [".ref", 1, {}], "2": [".deref", 1], "3": [".deref", 1]}',
                '{"1": [".ref", 1, {}], "3": [".deref", 1], "2": [".deref", 1]}',
                '{"2": [".ref", 1, {}], "1": [".deref", 1], "3": [".deref", 1]}',
                '{"2": [".ref", 1, {}], "3": [".deref", 1], "1": [".deref", 1]}',
                '{"3": [".ref", 1, {}], "1": [".deref", 1], "2": [".deref", 1]}',
                '{"3": [".ref", 1, {}], "2": [".deref", 1], "1": [".deref", 1]}'],
               True)

        # Simple dereference in set.
        a = ()
        b = [a, set([a])]
        yield list, [b], str, ['[[".ref", 1, [".tuple"]], '
                               '[".set", [".deref", 1]]]'], True

        # Simple reference in set.
        a = ()
        b = [set([a]), a]
        yield list, [b], str, ['[[".set", [".ref", 1, [".tuple"]]], '
                               '[".deref", 1]]'], True

        # Multiple reference in set, because set values order
        # is not predictable all possibilities have to be tested
        a = ()
        b = set([(1, a), (2, a)])
        yield (set, [b], str,
               ['[".set", [".tuple", 1, [".ref", 1, [".tuple"]]], '
                '[".tuple", 2, [".deref", 1]]]',
                '[".set", [".tuple", 2, [".ref", 1, [".tuple"]]], '
                '[".tuple", 1, [".deref", 1]]]'], True)

        # List self-reference
        a = []
        a.append(a)
        yield list, [a], str, ['[".ref", 1, [[".deref", 1]]]'], True

        # Dict self-reference
        a = {}
        a["1"] = a
        yield dict, [a], str, ['[".ref", 1, {"1": [".deref", 1]}]'], True

        # Multiple references
        a = []
        b = [a]
        c = [a, b]
        d = [a, b, c]
        yield list, [d], str, ['[[".ref", 1, []], '
                               '[".ref", 2, [[".deref", 1]]], '
                               '[[".deref", 1], [".deref", 2]]]'], True

        # Default instance
        o = DummyClass()
        o.value = 42

        # Frozen instances drop the ".type" tag; full serialization keeps
        # it, and either key order is acceptable.
        if freezing:
            yield (DummyClass, [o], str, ['{"value": 42}'], True)
        else:
            name = reflect.canonical_name(o)
            yield (DummyClass, [o], str,
                   ['{".type": "%s", "value": 42}' % name,
                    '{"value": 42, ".type": "%s"}' % name], True)

        # Cyclic instance graphs: a<->b mutual cycle and c self-cycle.
        Klass = DummyClass
        name = reflect.canonical_name(Klass)
        a = Klass()
        b = Klass()
        c = Klass()
        a.ref = b
        b.ref = a
        c.ref = c

        if freezing:
            yield (Klass, [a], str,
                   ['[".ref", 1, {"ref": {"ref": [".deref", 1]}}]'], True)
            yield (Klass, [b], str,
                   ['[".ref", 1, {"ref": {"ref": [".deref", 1]}}]'], True)
            yield (Klass, [c], str,
                   ['[".ref", 1, {"ref": [".deref", 1]}]'], True)
        else:
            yield (Klass, [a], str,
                   [('[".ref", 1, {".type": "%s", "ref": {".type": "%s", '
                     '"ref": [".deref", 1]}}]') % (name, name)], True)
            yield (Klass, [b], str,
                   [('[".ref", 1, {".type": "%s", "ref": {".type": "%s", '
                     '"ref": [".deref", 1]}}]') % (name, name)], True)
            yield (Klass, [c], str, [('[".ref", 1, {".type": "%s", "ref": '
                                      '[".deref", 1]}]') % (name, )], True)
| gpl-2.0 |
ArduPilot/ardupilot | Tools/scripts/runcoptertest.py | 18 | 1770 | #!/usr/bin/env python
import pexpect, time, sys
from pymavlink import mavutil
def wait_heartbeat(mav, timeout=10):
    """Block until a HEARTBEAT message arrives on *mav*.

    Polls in 0.5 s slices so the deadline is honoured even while blocking;
    raises Exception if no heartbeat is seen within *timeout* seconds.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        msg = mav.recv_match(type='HEARTBEAT', blocking=True, timeout=0.5)
        if msg is not None:
            return
    raise Exception("Failed to get heartbeat")
def wait_mode(mav, modes, timeout=10):
    """Wait until mav.flightmode is one of *modes*.

    Prints the flight mode whenever it changes so progress is visible in
    the log; on timeout the whole test script is aborted via sys.exit(1).
    """
    deadline = time.time() + timeout
    previous = None
    while time.time() < deadline:
        wait_heartbeat(mav, timeout=10)
        if mav.flightmode != previous:
            print("Flightmode %s" % mav.flightmode)
            previous = mav.flightmode
        if mav.flightmode in modes:
            return
    print("Failed to get mode from %s" % modes)
    sys.exit(1)
def wait_time(mav, simtime):
    """Consume RAW_IMU messages until *simtime* seconds of simulation
    time have elapsed (measured from the first IMU timestamp seen)."""
    first = mav.recv_match(type='RAW_IMU', blocking=True)
    start = first.time_usec * 1.0e-6
    while True:
        sample = mav.recv_match(type='RAW_IMU', blocking=True)
        elapsed = sample.time_usec * 1.0e-6 - start
        if elapsed > simtime:
            return
# Launch the SITL simulator (which also starts MAVProxy) for the copter.
cmd = '../Tools/autotest/sim_vehicle.py -D'
mavproxy = pexpect.spawn(cmd, logfile=sys.stdout, timeout=30)
mavproxy.expect("Frame")
# Attach a second MAVLink listener to the simulator's UDP output stream.
mav = mavutil.mavlink_connection('127.0.0.1:14550')
wait_mode(mav, ['STABILIZE'])
# Speed the sim up 40x, then wait for four 'using GPS' notices before
# arming (presumably one per EKF/AHRS consumer -- TODO confirm).
mavproxy.send('speedup 40\n')
mavproxy.expect('using GPS')
mavproxy.expect('using GPS')
mavproxy.expect('using GPS')
mavproxy.expect('using GPS')
mavproxy.send('arm throttle\n')
mavproxy.expect('Arming')
mavproxy.send('mode loiter\n')
wait_mode(mav, ['LOITER'])
# Climb at full throttle (RC channel 3) for 20 simulated seconds,
# then return the stick to mid position.
mavproxy.send('rc 3 2000\n')
wait_time(mav, 20)
mavproxy.send('rc 3 1500\n')
# Finally fly a circle for 90 simulated seconds.
mavproxy.send('mode CIRCLE\n')
wait_time(mav, 90)
| gpl-3.0 |
noironetworks/heat | heat/engine/resources/openstack/sahara/job_binary.py | 2 | 4294 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_utils import uuidutils
from heat.common import exception
from heat.common.i18n import _
from heat.engine import properties
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import support
class JobBinary(resource.Resource):
    """A resource for creating sahara job binary.

    A job binary stores an URL to a single script or Jar file and any
    credentials needed to retrieve the file.
    """

    support_status = support.SupportStatus(version='5.0.0')

    # Template-visible property names.
    PROPERTIES = (
        NAME, URL, DESCRIPTION, CREDENTIALS
    ) = (
        'name', 'url', 'description', 'credentials'
    )

    # Keys accepted inside the ``credentials`` map.
    _CREDENTIAL_KEYS = (
        USER, PASSWORD
    ) = (
        'user', 'password'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the job binary.'),
            update_allowed=True
        ),
        URL: properties.Schema(
            properties.Schema.STRING,
            _('URL for the job binary. Must be in the format '
              'swift://<container>/<path> or internal-db://<uuid>.'),
            required=True,
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the job binary.'),
            default='',
            update_allowed=True
        ),
        CREDENTIALS: properties.Schema(
            properties.Schema.MAP,
            _('Credentials used for swift. Not required if sahara is '
              'configured to use proxy users and delegated trusts for '
              'access.'),
            schema={
                USER: properties.Schema(
                    properties.Schema.STRING,
                    _('Username for accessing the job binary URL.'),
                    required=True
                ),
                PASSWORD: properties.Schema(
                    properties.Schema.STRING,
                    _('Password for accessing the job binary URL.'),
                    required=True
                ),
            },
            update_allowed=True
        )
    }

    default_client_name = 'sahara'

    entity = 'job_binaries'

    def _job_binary_name(self):
        # Fall back to the Heat-generated physical resource name when the
        # user did not supply an explicit name.
        return self.properties[self.NAME] or self.physical_resource_name()

    def _prepare_properties(self):
        """Collect the keyword arguments passed to the sahara client."""
        credentials = self.properties[self.CREDENTIALS] or {}
        return {
            'name': self._job_binary_name(),
            'description': self.properties[self.DESCRIPTION],
            'url': self.properties[self.URL],
            'extra': credentials
        }

    def validate(self):
        """Reject URLs that are neither swift:// nor internal-db://<uuid>.

        :raises exception.StackValidationFailed: when the URL has an
            unsupported scheme or a non-UUID internal-db identifier.
        """
        super(JobBinary, self).validate()
        url = self.properties[self.URL]
        if not (url.startswith('swift://') or (url.startswith('internal-db://')
                and uuidutils.is_uuid_like(url[len("internal-db://"):]))):
            msg = _("%s is not a valid job location.") % url
            raise exception.StackValidationFailed(
                path=[self.stack.t.RESOURCES, self.name,
                      self.stack.t.get_section_name(rsrc_defn.PROPERTIES)],
                message=msg)

    def handle_create(self):
        # Create the job binary and remember its id as the resource id.
        args = self._prepare_properties()
        job_binary = self.client().job_binaries.create(**args)
        self.resource_id_set(job_binary.id)

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # All properties are update_allowed, so any diff is pushed to
        # sahara in place (no replacement needed).
        if prop_diff:
            self.properties = json_snippet.properties(
                self.properties_schema,
                self.context)
            data = self._prepare_properties()
            self.client().job_binaries.update(self.resource_id, data)
def resource_mapping():
    """Expose the Heat resource type implemented by this module."""
    mapping = {
        'OS::Sahara::JobBinary': JobBinary,
    }
    return mapping
| apache-2.0 |
tlodge/dreamplug_kernel | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Bail out with usage information unless exactly one argument (the file
# to check) was supplied.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# Allow overriding the readelf binary via the READELF environment variable.
readelf = os.getenv("READELF", "readelf")

# Matches the header readelf -u prints per function: "<name>: [0xA-0xB]".
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches the per-region length annotations ("rlen=N") that follow it.
rlen_pattern = re.compile(".*rlen=([0-9]+)")

def check_func (func, slots, rlen_sum):
    # A function's unwind regions must together cover all of its
    # instruction slots; report a mismatch and bump the error counter.
    # NOTE(review): the unnamed-function fallback reads the globals
    # 'start'/'end' last set by the parsing loop below.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return

num_funcs = 0
num_errors = 0

# Parser state for the function currently being accumulated.
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: validate the previous function, then reset.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64 packs 3 instruction slots into each 16-byte bundle.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Validate the last function, which has no following header to flush it.
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
sdeepanshu02/microblog | flask/Lib/site-packages/sqlparse/compat.py | 20 | 1140 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
"""Python 2/3 compatibility.
This module only exists to avoid a dependency on six
for very trivial stuff. We only need to take care of
string types, buffers and metaclasses.
Parts of the code is copied directly from six:
https://bitbucket.org/gutworth/six
"""
import sys
from io import TextIOBase
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

if PY3:
    def unicode_compatible(cls):
        # str already is unicode on Python 3; the decorator is a no-op.
        return cls

    bytes_type = bytes
    text_type = str
    string_types = (str,)
    from io import StringIO
    file_types = (StringIO, TextIOBase)

elif PY2:
    def unicode_compatible(cls):
        # Route __str__ through __unicode__, encoding to UTF-8 bytes, so a
        # class only has to define __str__ returning text.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls

    bytes_type = str
    text_type = unicode
    string_types = (str, unicode,)
    from StringIO import StringIO
    file_types = (file, StringIO, TextIOBase)
    # NOTE(review): duplicate of the import two lines up -- looks
    # redundant; confirm against upstream before removing.
    from StringIO import StringIO
| bsd-3-clause |
frc1418/2014 | robot/robot/tests/docstring_test.py | 1 | 4367 | '''
The purpose of this test is to ensure that all of your robot code
has docstrings. Properly using docstrings will make your code
more maintainable and look more professional.
'''
import inspect
import os
import re
import sys
# easy way to disable the test in a hurry
disable_test = True
# if you want to be really pedantic, enforce sphinx docstrings. Ha.
pedantic_docstrings = False
# regex to use to detect the sphinx docstrings
param_re = re.compile("^:param (\S+?):\s*(.+)$")
def ignore_object(o, robot_path):
    """Return True when *o* lives outside the robot code tree and should
    be skipped by the docstring checks (builtins, sourceless objects, and
    anything whose source file is not under *robot_path*)."""
    if inspect.isbuiltin(o):
        return True
    try:
        source_file = inspect.getsourcefile(o)
    except TypeError:
        return True
    if source_file is None:
        return True
    return not os.path.abspath(source_file).startswith(robot_path)
def print_fn_err(msg, parent, fn, errors):
    """Format, print, and record an error message about function *fn*.

    When *parent* is a class, the reported name is qualified with the
    class name; the text also points at *fn*'s source file and line so
    the developer can jump straight to the offending definition.
    """
    if inspect.isclass(parent):
        qualified = '%s.%s' % (parent.__name__, fn.__name__)
    else:
        qualified = '%s' % fn.__name__

    location = "%s:%s" % (inspect.getsourcefile(fn),
                          inspect.getsourcelines(fn)[1])
    err = "ERROR: %s '%s()'\n-> See %s" % (msg, qualified, location)
    print(err)
    errors.append(err)
def check_function(parent, fn, errors):
    """Verify that *fn* has a docstring.

    In pedantic mode (module flag ``pedantic_docstrings``) additionally
    checks that the sphinx ``:param name:`` entries in the docstring match
    the real argument list in name, completeness, and order. Each failure
    is printed and appended to *errors* via print_fn_err().
    """
    doc = inspect.getdoc(fn)
    if doc is None:
        print_fn_err('No docstring for', parent, fn, errors)
    elif pedantic_docstrings:
        # find the list of parameters
        args, varargs, keywords, defaults = inspect.getargspec(fn)
        # 'self' is never documented; *args/**kwargs are documentable.
        if len(args) > 0 and args[0] == 'self':
            del args[0]
        if varargs is not None:
            args.append(varargs)
        if keywords is not None:
            args.append(keywords)
        # Collect the :param: names found in the docstring, flagging any
        # that do not correspond to a real argument.
        params = []
        for line in doc.splitlines():
            match = param_re.match(line)
            if not match:
                continue
            arg = match.group(1)
            if arg not in args:
                print_fn_err("Param '%s' is documented but isn't a parameter for" % arg, parent, fn, errors)
            params.append(arg)
        if len(params) != len(args):
            # Some arguments are missing from the docstring.
            # NOTE(review): set iteration order makes the multi-param
            # message's name ordering nondeterministic.
            diff = set(args).difference(params)
            if len(diff) == 1:
                print_fn_err("Param '%s' is not documented in docstring for" % diff.pop(), parent, fn, errors)
            elif len(diff) > 1:
                print_fn_err("Params '%s' are not documented in docstring for" % "','".join(diff), parent, fn, errors)
        else:
            # Same count: enforce that documentation order matches the
            # actual parameter order.
            for param, arg in zip(params, args):
                if param != arg:
                    print_fn_err("Param '%s' is out of order, does not match param '%s' in docstring for" % (param, arg), parent, fn, errors)
def check_object(o, robot_path, errors):
    """Check a module or class for docstring violations.

    Reports a missing class docstring, then recurses (via check_thing)
    into every member that belongs to the robot code; members filtered
    out by ignore_object() are skipped.
    """
    if inspect.isclass(o) and inspect.getdoc(o) is None:
        err = "ERROR: Class '%s' has no docstring!\n-> See %s:%s" % (o.__name__,
                                                                     inspect.getsourcefile(o),
                                                                     inspect.getsourcelines(o)[1])
        print(err)
        errors.append(err)
    for name, value in inspect.getmembers(o):
        if ignore_object(value, robot_path):
            continue
        check_thing(o, value, robot_path, errors)
def check_thing(parent, thing, robot_path, errors):
    """Dispatch *thing* to the appropriate checker: classes recurse via
    check_object(), plain functions go to check_function(); anything
    else is ignored."""
    if inspect.isclass(thing):
        check_object(thing, robot_path, errors)
        return
    if inspect.isfunction(thing):
        check_function(parent, thing, errors)
# Only define the pytest entry point when the check is enabled via the
# module-level 'disable_test' switch.
if not disable_test:
    def test_docstrings(robot, robot_path):
        '''If you get an error saying that robot_path is an invalid fixture
        then you should upgrade your version of pyfrc'''
        # this allows abspath() to work correctly
        os.chdir(robot_path)
        errors = []
        # Walk every currently loaded module that lives under the robot
        # tree and accumulate all docstring violations before asserting.
        for module in sys.modules.values():
            if ignore_object(module, robot_path):
                continue
            check_object(module, robot_path, errors)
        # if you get an error here, look at stdout for the error message
        assert len(errors) == 0
| bsd-3-clause |
jbowes/ansible-modules-extras | packaging/os/openbsd_pkg.py | 59 | 14137 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Patrik Lundin <patrik@sigterm.se>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
import shlex
import syslog
DOCUMENTATION = '''
---
module: openbsd_pkg
author: "Patrik Lundin (@eest)"
version_added: "1.1"
short_description: Manage packages on OpenBSD.
description:
- Manage packages on OpenBSD using the pkg tools.
options:
name:
required: true
description:
- Name of the package.
state:
required: true
choices: [ present, latest, absent ]
description:
- C(present) will make sure the package is installed.
C(latest) will make sure the latest version of the package is installed.
C(absent) will make sure the specified package is not installed.
'''
EXAMPLES = '''
# Make sure nmap is installed
- openbsd_pkg: name=nmap state=present
# Make sure nmap is the latest version
- openbsd_pkg: name=nmap state=latest
# Make sure nmap is not installed
- openbsd_pkg: name=nmap state=absent
# Specify a pkg flavour with '--'
- openbsd_pkg: name=vim--nox11 state=present
# Specify the default flavour to avoid ambiguity errors
- openbsd_pkg: name=vim-- state=present
# Update all packages on the system
- openbsd_pkg: name=* state=latest
'''
# Control if we write debug information to syslog.
debug = False
# Function used for executing commands.
def execute_command(cmd, module):
    """Run *cmd* via the Ansible module helper.

    Returns the (rc, stdout, stderr) tuple produced by run_command().
    The module-level ``debug`` flag controls syslog tracing.
    """
    if debug:
        syslog.syslog("execute_command(): cmd = %s" % cmd)
    # Break command line into arguments.
    # This makes run_command() use shell=False which we need to not cause shell
    # expansion of special characters like '*'.
    cmd_args = shlex.split(cmd)
    return module.run_command(cmd_args)
# Function used for getting the name of a currently installed package.
def get_current_name(name, pkg_spec, module):
    """Return the full installed name (stem-version[-flavor]) of *name*.

    Scans ``pkg_info`` output line by line for the requested package.

    NOTE(review): the return type is inconsistent -- on pkg_info failure
    the (rc, stdout, stderr) tuple is returned, on a match the package
    name string, and None implicitly when nothing matches; callers appear
    to expect only the string case. Verify before relying on it.
    """
    info_cmd = 'pkg_info'
    (rc, stdout, stderr) = execute_command("%s" % (info_cmd), module)
    if rc != 0:
        return (rc, stdout, stderr)
    # Anchor at line start: an explicit version makes the whole name the
    # prefix; otherwise match on the stem (and flavor when supplied).
    if pkg_spec['version']:
        pattern = "^%s" % name
    elif pkg_spec['flavor']:
        pattern = "^%s-.*-%s\s" % (pkg_spec['stem'], pkg_spec['flavor'])
    else:
        pattern = "^%s-" % pkg_spec['stem']
    if debug:
        syslog.syslog("get_current_name(): pattern = %s" % pattern)
    for line in stdout.splitlines():
        if debug:
            syslog.syslog("get_current_name: line = %s" % line)
        match = re.search(pattern, line)
        if match:
            # The first whitespace-separated field is the package name.
            current_name = line.split()[0]
            return current_name
# Function used to find out if a package is currently installed.
def get_package_state(name, pkg_spec, module):
    """Return True when the requested package is installed, else False.

    Builds a ``pkg_info -e`` query from the parsed package spec; a zero
    exit status means installed. Any stderr output from pkg_info is
    treated as a hard failure and aborts the module.
    """
    base_cmd = 'pkg_info -e'

    if pkg_spec['version']:
        query = "%s %s" % (base_cmd, name)
    elif pkg_spec['flavor']:
        query = "%s %s-*-%s" % (base_cmd, pkg_spec['stem'], pkg_spec['flavor'])
    else:
        query = "%s %s-*" % (base_cmd, pkg_spec['stem'])

    rc, stdout, stderr = execute_command(query, module)

    if stderr:
        module.fail_json(msg="failed in get_package_state(): " + stderr)

    return rc == 0
# Function used to make sure a package is present.
def package_present(name, installed_state, pkg_spec, module):
    """Install *name* if it is not installed.

    Returns an (rc, stdout, stderr, changed) tuple. In check mode the
    'n' flag makes pkg_add a dry run, and module.exit_json() is called
    directly on a would-be change.
    """
    if module.check_mode:
        install_cmd = 'pkg_add -Imn'
    else:
        install_cmd = 'pkg_add -Im'
    if installed_state is False:
        # Attempt to install the package
        (rc, stdout, stderr) = execute_command("%s %s" % (install_cmd, name), module)
        # The behaviour of pkg_add is a bit different depending on if a
        # specific version is supplied or not.
        #
        # When a specific version is supplied the return code will be 0 when
        # a package is found and 1 when it is not, if a version is not
        # supplied the tool will exit 0 in both cases:
        if pkg_spec['version']:
            # Depend on the return code.
            if debug:
                syslog.syslog("package_present(): depending on return code")
            if rc:
                changed=False
        else:
            # Depend on stderr instead.
            if debug:
                syslog.syslog("package_present(): depending on stderr")
            if stderr:
                # There is a corner case where having an empty directory in
                # installpath prior to the right location will result in a
                # "file:/local/package/directory/ is empty" message on stderr
                # while still installing the package, so we need to look for
                # for a message like "packagename-1.0: ok" just in case.
                match = re.search("\W%s-[^:]+: ok\W" % name, stdout)
                if match:
                    # It turns out we were able to install the package.
                    if debug:
                        syslog.syslog("package_present(): we were able to install package")
                    pass
                else:
                    # We really did fail, fake the return code.
                    if debug:
                        syslog.syslog("package_present(): we really did fail")
                    rc = 1
                    changed=False
            else:
                if debug:
                    syslog.syslog("package_present(): stderr was not set")
        # A (possibly faked) zero rc means the install succeeded.
        if rc == 0:
            if module.check_mode:
                module.exit_json(changed=True)
            changed=True
    else:
        # Already installed: report success with no change.
        rc = 0
        stdout = ''
        stderr = ''
        changed=False
    return (rc, stdout, stderr, changed)
# Function used to make sure a package is the latest available version.
def package_latest(name, installed_state, pkg_spec, module):
    """Upgrade *name* to the latest available version.

    Returns an (rc, stdout, stderr, changed) tuple. Falls back to
    package_present() when the package is not installed at all. Change
    detection parses pkg_add's "old->new: ok" output line.
    """
    if module.check_mode:
        upgrade_cmd = 'pkg_add -umn'
    else:
        upgrade_cmd = 'pkg_add -um'
    pre_upgrade_name = ''
    if installed_state is True:
        # Fetch name of currently installed package.
        pre_upgrade_name = get_current_name(name, pkg_spec, module)
        if debug:
            syslog.syslog("package_latest(): pre_upgrade_name = %s" % pre_upgrade_name)
        # Attempt to upgrade the package.
        (rc, stdout, stderr) = execute_command("%s %s" % (upgrade_cmd, name), module)
        # Look for output looking something like "nmap-6.01->6.25: ok" to see if
        # something changed (or would have changed). Use \W to delimit the match
        # from progress meter output.
        match = re.search("\W%s->.+: ok\W" % pre_upgrade_name, stdout)
        if match:
            if module.check_mode:
                module.exit_json(changed=True)
            changed = True
        else:
            changed = False
        # FIXME: This part is problematic. Based on the issues mentioned (and
        # handled) in package_present() it is not safe to blindly trust stderr
        # as an indicator that the command failed, and in the case with
        # empty installpath directories this will break.
        #
        # For now keep this safeguard here, but ignore it if we managed to
        # parse out a successful update above. This way we will report a
        # successful run when we actually modify something but fail
        # otherwise.
        if changed != True:
            if stderr:
                rc=1
        return (rc, stdout, stderr, changed)
    else:
        # If package was not installed at all just make it present.
        if debug:
            syslog.syslog("package_latest(): package is not installed, calling package_present()")
        return package_present(name, installed_state, pkg_spec, module)
# Function used to make sure a package is not installed.
def package_absent(name, installed_state, module):
    """Remove *name* when it is installed.

    Returns an (rc, stdout, stderr, changed) tuple. In check mode the
    'n' flag makes pkg_delete a dry run, and module.exit_json() is
    called directly on a would-be removal.
    """
    if module.check_mode:
        remove_cmd = 'pkg_delete -In'
    else:
        remove_cmd = 'pkg_delete -I'

    if installed_state is not True:
        # Not installed: nothing to do, report an unchanged success.
        return (0, '', '', False)

    # Attempt to remove the package.
    rc, stdout, stderr = execute_command("%s %s" % (remove_cmd, name), module)
    changed = False
    if rc == 0:
        if module.check_mode:
            module.exit_json(changed=True)
        changed = True
    return (rc, stdout, stderr, changed)
# Function used to parse the package name based on packages-specs(7).
# The general name structure is "stem-version[-flavors]".
def parse_package_name(name, pkg_spec, module):
    """Split `name` into stem/version/flavor parts, filling `pkg_spec` in place.

    Calls module.fail_json() when the name cannot be parsed.
    """
    # Do some initial matches so we can base the more advanced regex on that.
    has_version = re.search("-[0-9]", name)
    is_versionless = re.search("--", name)

    # Stop if someone is giving us a name that both has a version and is
    # version-less at the same time.
    if has_version and is_versionless:
        module.fail_json(msg="Package name both has a version and is version-less: " + name)

    if has_version:
        # Name carries an explicit version ("stem-1.2[-flavor]").
        parsed = re.search("^(?P<stem>.*)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?$", name)
        if parsed:
            pkg_spec['stem'] = parsed.group('stem')
            pkg_spec['version_separator'] = '-'
            pkg_spec['version'] = parsed.group('version')
            pkg_spec['flavor_separator'] = parsed.group('flavor_separator')
            pkg_spec['flavor'] = parsed.group('flavor')
        else:
            module.fail_json(msg="Unable to parse package name at version_match: " + name)
    elif is_versionless:
        # Version-less name ("stem--[flavor]").
        parsed = re.search("^(?P<stem>.*)--(?P<flavor>[a-z].*)?$", name)
        if parsed:
            pkg_spec['stem'] = parsed.group('stem')
            pkg_spec['version_separator'] = '-'
            pkg_spec['version'] = None
            pkg_spec['flavor_separator'] = '-'
            pkg_spec['flavor'] = parsed.group('flavor')
        else:
            module.fail_json(msg="Unable to parse package name at versionless_match: " + name)
    else:
        # Plain stem: no version and not version-less.
        parsed = re.search("^(?P<stem>.*)$", name)
        if parsed:
            pkg_spec['stem'] = parsed.group('stem')
            pkg_spec['version_separator'] = None
            pkg_spec['version'] = None
            pkg_spec['flavor_separator'] = None
            pkg_spec['flavor'] = None
        else:
            module.fail_json(msg="Unable to parse package name at else: " + name)

    # Sanity check that there are no trailing dashes in flavor.
    # Try to stop strange stuff early so we can be strict later.
    if pkg_spec['flavor'] and re.search("-$", pkg_spec['flavor']):
        module.fail_json(msg="Trailing dash in flavor: " + pkg_spec['flavor'])
# Function used for upgrading all installed packages.
def upgrade_packages(module):
    """Upgrade every installed package via pkg_add -u.

    Returns a (rc, stdout, stderr, changed) tuple.
    """
    # -n turns the run into a dry run for check mode.
    upgrade_cmd = 'pkg_add -Imnu' if module.check_mode else 'pkg_add -Imu'

    # Attempt to upgrade all packages.
    rc, stdout, stderr = execute_command("%s" % upgrade_cmd, module)

    # Try to find any occurance of a package changing version like:
    # "bzip2-1.0.6->1.0.6p0: ok".
    if re.search("\W\w.+->.+: ok\W", stdout):
        if module.check_mode:
            module.exit_json(changed=True)
        changed = True
    else:
        changed = False

    # It seems we can not trust the return value, so depend on the presence of
    # stderr to know if something failed.
    rc = 1 if stderr else 0

    return (rc, stdout, stderr, changed)
# ===========================================
# Main control flow.
def main():
    # Entry point: declare the Ansible module interface, then dispatch to
    # the per-state handler functions defined above.
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']),
        ),
        supports_check_mode = True
    )

    name = module.params['name']
    state = module.params['state']

    # Defaults returned when no handler runs; handlers overwrite these.
    rc = 0
    stdout = ''
    stderr = ''
    result = {}
    result['name'] = name
    result['state'] = state

    # The special name '*' means "upgrade all installed packages" and is
    # only meaningful together with state=latest.
    if name == '*':
        if state != 'latest':
            module.fail_json(msg="the package name '*' is only valid when using state=latest")
        else:
            # Perform an upgrade of all installed packages.
            (rc, stdout, stderr, changed) = upgrade_packages(module)
    else:
        # Parse package name and put results in the pkg_spec dictionary.
        pkg_spec = {}
        parse_package_name(name, pkg_spec, module)

        # Get package state.
        installed_state = get_package_state(name, pkg_spec, module)

        # Perform requested action.
        if state in ['installed', 'present']:
            (rc, stdout, stderr, changed) = package_present(name, installed_state, pkg_spec, module)
        elif state in ['absent', 'removed']:
            (rc, stdout, stderr, changed) = package_absent(name, installed_state, module)
        elif state == 'latest':
            (rc, stdout, stderr, changed) = package_latest(name, installed_state, pkg_spec, module)

    if rc != 0:
        # Prefer stderr for the failure message, fall back to stdout.
        if stderr:
            module.fail_json(msg=stderr)
        else:
            module.fail_json(msg=stdout)

    result['changed'] = changed

    module.exit_json(**result)
# Import module snippets.
# AnsibleModule and its helpers are provided by the common module boilerplate.
from ansible.module_utils.basic import *

main()
| gpl-3.0 |
jdavidagudelo/django-social-auth-corrected | social_auth/backends/contrib/angel.py | 1 | 2120 | """
settings.py should include the following:
ANGEL_CLIENT_ID = '...'
ANGEL_CLIENT_SECRET = '...'
Optional scope to include 'email' and/or 'messages' separated by space:
ANGEL_AUTH_EXTRA_ARGUMENTS = {'scope': 'email messages'}
More information on scope can be found at https://angel.co/api/oauth/faq
"""
from urllib import urlencode
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
from django.utils import simplejson
from social_auth.backends import BaseOAuth2, OAuthBackend
from social_auth.utils import dsa_urlopen
ANGEL_SERVER = 'angel.co'
ANGEL_AUTHORIZATION_URL = 'https://angel.co/api/oauth/authorize/'
ANGEL_ACCESS_TOKEN_URL = 'https://angel.co/api/oauth/token/'
ANGEL_CHECK_AUTH = 'https://api.angel.co/1/me/'
class AngelBackend(OAuthBackend):
    """Backend mapping Angel (AngelList) profile data onto social-auth users."""
    name = 'angel'

    def get_user_id(self, details, response):
        """Use the Angel account id as the unique user id."""
        return response['id']

    def get_user_details(self, response):
        """Return user details from Angel account"""
        # Username is the last path component of the AngelList profile URL;
        # first/last names come from the first and last word of the full name.
        name_parts = response['name'].split(' ')
        return {
            'username': response['angellist_url'].split('/')[-1],
            'first_name': name_parts[0],
            'last_name': name_parts[-1],
            'email': response['email'],
        }
class AngelAuth(BaseOAuth2):
    """Angel OAuth mechanism"""
    AUTHORIZATION_URL = ANGEL_AUTHORIZATION_URL
    ACCESS_TOKEN_URL = ANGEL_ACCESS_TOKEN_URL
    AUTH_BACKEND = AngelBackend
    # Django settings names holding the OAuth client credentials.
    SETTINGS_KEY_NAME = 'ANGEL_CLIENT_ID'
    SETTINGS_SECRET_NAME = 'ANGEL_CLIENT_SECRET'
    # No redirect state / state parameter is sent to Angel's OAuth endpoint.
    REDIRECT_STATE = False
    STATE_PARAMETER = False

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        params = {'access_token': access_token}
        url = ANGEL_CHECK_AUTH + '?' + urlencode(params)
        try:
            return simplejson.load(dsa_urlopen(url))
        except ValueError:
            # Non-JSON / malformed response: report "no data" to the caller.
            return None
# Backend definition
BACKENDS = {
'angel': AngelAuth,
}
| bsd-3-clause |
nttks/edx-platform | common/test/acceptance/tests/video/test_video_handout.py | 1 | 6986 | # -*- coding: utf-8 -*-
"""
Acceptance tests for CMS Video Handout.
"""
from nose.plugins.attrib import attr
from unittest import skip
from .test_studio_video_module import CMSVideoBaseTest
@attr('shard_5')
class VideoHandoutTest(CMSVideoBaseTest):
    """
    CMS Video Handout Test Class
    """
    # NOTE(review): every scenario in this class is currently disabled via
    # @skip, all pointing at issue #2601 — confirm whether they can be
    # re-enabled.

    def setUp(self):
        super(VideoHandoutTest, self).setUp()

    def _create_course_unit_with_handout(self, handout_filename, save_settings=True):
        """
        Create a course with unit and also upload handout

        Arguments:
            handout_filename (str): handout file name to be uploaded
            save_settings (bool): save settings or not
        """
        self.navigate_to_course_unit()
        self.edit_component()
        self.open_advanced_tab()
        self.video.upload_handout(handout_filename)
        if save_settings:
            self.save_unit_settings()

    @skip("This doesn't work. #2601")
    def test_handout_uploads_correctly(self):
        """
        Scenario: Handout uploading works correctly
        Given I have created a Video component with handout file "textbook.pdf"
        Then I can see video button "handout"
        And I can download handout file with mime type "application/pdf"
        """
        self._create_course_unit_with_handout('textbook.pdf')
        self.assertTrue(self.video.is_handout_button_visible)
        self.assertEqual(self.video.download_handout('application/pdf'), (True, True))

    @skip("This doesn't work. #2601")
    def test_handout_download_works_with_save(self):
        """
        Scenario: Handout downloading works correctly w/ preliminary saving
        Given I have created a Video component with handout file "textbook.pdf"
        And I save changes
        And I edit the component
        And I open tab "Advanced"
        And I can download handout file in editor with mime type "application/pdf"
        """
        self._create_course_unit_with_handout('textbook.pdf')
        self.edit_component()
        self.open_advanced_tab()
        self.assertEqual(self.video.download_handout('application/pdf', is_editor=True), (True, True))

    @skip("This doesn't work. #2601")
    def test_handout_download_works_wo_save(self):
        """
        Scenario: Handout downloading works correctly w/o preliminary saving
        Given I have created a Video component with handout file "textbook.pdf"
        And I can download handout file in editor with mime type "application/pdf"
        """
        self._create_course_unit_with_handout('textbook.pdf', save_settings=False)
        self.assertEqual(self.video.download_handout('application/pdf', is_editor=True), (True, True))

    @skip("This doesn't work. #2601")
    def test_handout_clearing_works_w_save(self):
        """
        Scenario: Handout clearing works correctly w/ preliminary saving
        Given I have created a Video component with handout file "textbook.pdf"
        And I save changes
        And I can download handout file with mime type "application/pdf"
        And I edit the component
        And I open tab "Advanced"
        And I clear handout
        And I save changes
        Then I do not see video button "handout"
        """
        self._create_course_unit_with_handout('textbook.pdf')
        self.assertEqual(self.video.download_handout('application/pdf'), (True, True))
        self.edit_component()
        self.open_advanced_tab()
        self.video.clear_handout()
        self.save_unit_settings()
        self.assertFalse(self.video.is_handout_button_visible)

    @skip("This doesn't work. #2601")
    def test_handout_clearing_works_wo_save(self):
        """
        Scenario: Handout clearing works correctly w/o preliminary saving
        Given I have created a Video component with handout file "asset.html"
        And I clear handout
        And I save changes
        Then I do not see video button "handout"
        """
        self._create_course_unit_with_handout('asset.html', save_settings=False)
        self.video.clear_handout()
        self.save_unit_settings()
        self.assertFalse(self.video.is_handout_button_visible)

    @skip("This doesn't work. #2601")
    def test_handout_replace_w_save(self):
        """
        Scenario: User can easy replace the handout by another one w/ preliminary saving
        Given I have created a Video component with handout file "asset.html"
        And I save changes
        Then I can see video button "handout"
        And I can download handout file with mime type "text/html"
        And I edit the component
        And I open tab "Advanced"
        And I replace handout file by "textbook.pdf"
        And I save changes
        Then I can see video button "handout"
        And I can download handout file with mime type "application/pdf"
        """
        self._create_course_unit_with_handout('asset.html')
        self.assertTrue(self.video.is_handout_button_visible)
        self.assertEqual(self.video.download_handout('text/html'), (True, True))
        self.edit_component()
        self.open_advanced_tab()
        self.video.upload_handout('textbook.pdf')
        self.save_unit_settings()
        self.assertTrue(self.video.is_handout_button_visible)
        self.assertEqual(self.video.download_handout('application/pdf'), (True, True))

    @skip("This doesn't work. #2601")
    def test_handout_replace_wo_save(self):
        """
        Scenario: User can easy replace the handout by another one w/o preliminary saving
        Given I have created a Video component with handout file "asset.html"
        And I replace handout file by "textbook.pdf"
        And I save changes
        Then I can see video button "handout"
        And I can download handout file with mime type "application/pdf"
        """
        self._create_course_unit_with_handout('asset.html', save_settings=False)
        self.video.upload_handout('textbook.pdf')
        self.save_unit_settings()
        self.assertTrue(self.video.is_handout_button_visible)
        self.assertEqual(self.video.download_handout('application/pdf'), (True, True))

    @skip("This doesn't work. #2601")
    def test_handout_upload_and_clear_works(self):
        """
        Scenario: Upload file "A" -> Remove it -> Upload file "B"
        Given I have created a Video component with handout file "asset.html"
        And I clear handout
        And I upload handout file "textbook.pdf"
        And I save changes
        Then I can see video button "handout"
        And I can download handout file with mime type "application/pdf"
        """
        self._create_course_unit_with_handout('asset.html', save_settings=False)
        self.video.clear_handout()
        self.video.upload_handout('textbook.pdf')
        self.save_unit_settings()
        self.assertTrue(self.video.is_handout_button_visible)
        self.assertEqual(self.video.download_handout('application/pdf'), (True, True))
| agpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/io/harwell_boeing/hb.py | 5 | 18422 | """
Implementation of Harwell-Boeing read/write.
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
from __future__ import division, print_function, absolute_import
# TODO:
# - Add more support (symmetric/complex matrices, non-assembled matrices ?)
# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
# takes a lot of memory. Being faster would require compiled code.
# write is not efficient. Although not a terribly exciting task,
# having reusable facilities to efficiently read/write fortran-formatted files
# would be useful outside this module.
import warnings
import numpy as np
from scipy.sparse import csc_matrix
from scipy.io.harwell_boeing._fortran_format_parser import \
FortranFormatParser, IntFormat, ExpFormat
from scipy._lib.six import string_types
__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
"HBMatrixType"]
class MalformedHeader(Exception):
    """Raised when a Harwell-Boeing header does not follow the expected layout."""
    pass
class LineOverflow(Warning):
    """Warning issued when a value overflows its fixed-width header field
    (e.g. a key longer than 8 characters)."""
    pass
def _nbytes_full(fmt, nlines):
"""Return the number of bytes to read to get every full lines for the
given parsed fortran format."""
return (fmt.repeat * fmt.width + 1) * (nlines - 1)
class HBInfo(object):
    # Container for all Harwell-Boeing header metadata: title/key, line
    # counts of each data section, matrix type/shape/nnz, and the fortran
    # formats used for the pointer, indices and values arrays.

    @classmethod
    def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
        """Create a HBInfo instance from an existing sparse matrix.

        Parameters
        ----------
        m : sparse matrix
            the HBInfo instance will derive its parameters from m
        title : str
            Title to put in the HB header
        key : str
            Key
        mxtype : HBMatrixType
            type of the input matrix
        fmt : dict
            not implemented

        Returns
        -------
        hb_info : HBInfo instance
        """
        pointer = m.indptr
        indices = m.indices
        values = m.data

        nrows, ncols = m.shape
        nnon_zeros = m.nnz

        if fmt is None:
            # +1 because HB use one-based indexing (Fortran), and we will write
            # the indices /pointer as such
            pointer_fmt = IntFormat.from_number(np.max(pointer+1))
            indices_fmt = IntFormat.from_number(np.max(indices+1))

            # Negative argument forces the format to reserve room for a sign.
            if values.dtype.kind in np.typecodes["AllFloat"]:
                values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
            elif values.dtype.kind in np.typecodes["AllInteger"]:
                values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
            else:
                raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
        else:
            raise NotImplementedError("fmt argument not supported yet.")

        if mxtype is None:
            if not np.isrealobj(values):
                raise ValueError("Complex values not supported yet")
            if values.dtype.kind in np.typecodes["AllInteger"]:
                tp = "integer"
            elif values.dtype.kind in np.typecodes["AllFloat"]:
                tp = "real"
            else:
                raise NotImplementedError("type %s for values not implemented"
                                          % values.dtype)
            mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
        else:
            raise ValueError("mxtype argument not handled yet.")

        def _nlines(fmt, size):
            # Number of lines needed to hold `size` items at fmt.repeat
            # items per line, rounding up for a trailing partial line.
            nlines = size // fmt.repeat
            if nlines * fmt.repeat != size:
                nlines += 1
            return nlines

        pointer_nlines = _nlines(pointer_fmt, pointer.size)
        indices_nlines = _nlines(indices_fmt, indices.size)
        values_nlines = _nlines(values_fmt, values.size)

        total_nlines = pointer_nlines + indices_nlines + values_nlines

        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   pointer_fmt.fortran_format, indices_fmt.fortran_format,
                   values_fmt.fortran_format)

    @classmethod
    def from_file(cls, fid):
        """Create a HBInfo instance from a file object containing a matrix in the
        HB format.

        Parameters
        ----------
        fid : file-like matrix
            File or file-like object containing a matrix in the HB format.

        Returns
        -------
        hb_info : HBInfo instance
        """
        # First line: 72-character title followed by the key.
        line = fid.readline().strip("\n")
        if not len(line) > 72:
            raise ValueError("Expected at least 72 characters for first line, "
                             "got: \n%s" % line)
        title = line[:72]
        key = line[72:]

        # Second line: line counts in fixed 14-character columns.
        line = fid.readline().strip("\n")
        if not len(line.rstrip()) >= 56:
            raise ValueError("Expected at least 56 characters for second line, "
                             "got: \n%s" % line)
        total_nlines = _expect_int(line[:14])
        pointer_nlines = _expect_int(line[14:28])
        indices_nlines = _expect_int(line[28:42])
        values_nlines = _expect_int(line[42:56])

        # The right-hand-side line count column may be blank (meaning 0).
        rhs_nlines = line[56:72].strip()
        if rhs_nlines == '':
            rhs_nlines = 0
        else:
            rhs_nlines = _expect_int(rhs_nlines)
        if not rhs_nlines == 0:
            raise ValueError("Only files without right hand side supported for "
                             "now.")

        # Third line: matrix type code and dimensions.
        line = fid.readline().strip("\n")
        if not len(line) >= 70:
            # NOTE(review): the check requires 70 characters but the message
            # says 72 — one of the two looks wrong.
            raise ValueError("Expected at least 72 character for third line, got:\n"
                             "%s" % line)

        mxtype_s = line[:3].upper()
        if not len(mxtype_s) == 3:
            raise ValueError("mxtype expected to be 3 characters long")

        mxtype = HBMatrixType.from_fortran(mxtype_s)
        if mxtype.value_type not in ["real", "integer"]:
            raise ValueError("Only real or integer matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.structure == "unsymmetric":
            raise ValueError("Only unsymmetric matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.storage == "assembled":
            raise ValueError("Only assembled matrices supported for now")

        # The 11 characters after the type code must be blank padding.
        if not line[3:14] == " " * 11:
            raise ValueError("Malformed data for third line: %s" % line)

        nrows = _expect_int(line[14:28])
        ncols = _expect_int(line[28:42])
        nnon_zeros = _expect_int(line[42:56])
        nelementals = _expect_int(line[56:70])
        if not nelementals == 0:
            raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
                             % nelementals)

        # Fourth line: whitespace-separated pointer/indices/values formats.
        line = fid.readline().strip("\n")

        ct = line.split()
        if not len(ct) == 3:
            raise ValueError("Expected 3 formats, got %s" % ct)

        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   ct[0], ct[1], ct[2],
                   rhs_nlines, nelementals)

    def __init__(self, title, key,
                 total_nlines, pointer_nlines, indices_nlines, values_nlines,
                 mxtype, nrows, ncols, nnon_zeros,
                 pointer_format_str, indices_format_str, values_format_str,
                 right_hand_sides_nlines=0, nelementals=0):
        """Do not use this directly, but the class ctrs (from_* functions)."""
        self.title = title
        self.key = key
        # NOTE(review): the fallbacks below only rebind the *local* names;
        # self.title/self.key above keep the caller's values, so a None
        # title/key is still stored and would break dump() — looks like a
        # latent bug, confirm intended behavior.
        if title is None:
            title = "No Title"
        if len(title) > 72:
            raise ValueError("title cannot be > 72 characters")

        if key is None:
            key = "|No Key"
        if len(key) > 8:
            warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)

        self.total_nlines = total_nlines
        self.pointer_nlines = pointer_nlines
        self.indices_nlines = indices_nlines
        self.values_nlines = values_nlines

        parser = FortranFormatParser()
        # Pointer and indices must be integer formats.
        pointer_format = parser.parse(pointer_format_str)
        if not isinstance(pointer_format, IntFormat):
            raise ValueError("Expected int format for pointer format, got %s"
                             % pointer_format)

        indices_format = parser.parse(indices_format_str)
        if not isinstance(indices_format, IntFormat):
            raise ValueError("Expected int format for indices format, got %s" %
                             indices_format)

        # Values format fixes the values dtype and must agree with mxtype.
        values_format = parser.parse(values_format_str)
        if isinstance(values_format, ExpFormat):
            if mxtype.value_type not in ["real", "complex"]:
                raise ValueError("Inconsistency between matrix type %s and "
                                 "value type %s" % (mxtype, values_format))
            values_dtype = np.float64
        elif isinstance(values_format, IntFormat):
            if mxtype.value_type not in ["integer"]:
                raise ValueError("Inconsistency between matrix type %s and "
                                 "value type %s" % (mxtype, values_format))
            # XXX: fortran int -> dtype association ?
            values_dtype = int
        else:
            raise ValueError("Unsupported format for values %r" % (values_format,))

        self.pointer_format = pointer_format
        self.indices_format = indices_format
        self.values_format = values_format

        self.pointer_dtype = np.int32
        self.indices_dtype = np.int32
        self.values_dtype = values_dtype

        # Precompute how many bytes cover all the *full* lines per section.
        self.pointer_nlines = pointer_nlines
        self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)

        self.indices_nlines = indices_nlines
        self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)

        self.values_nlines = values_nlines
        self.values_nbytes_full = _nbytes_full(values_format, values_nlines)

        self.nrows = nrows
        self.ncols = ncols
        self.nnon_zeros = nnon_zeros
        self.nelementals = nelementals
        self.mxtype = mxtype

    def dump(self):
        """Gives the header corresponding to this instance as a string."""
        header = [self.title.ljust(72) + self.key.ljust(8)]

        header.append("%14d%14d%14d%14d" %
                      (self.total_nlines, self.pointer_nlines,
                       self.indices_nlines, self.values_nlines))
        header.append("%14s%14d%14d%14d%14d" %
                      (self.mxtype.fortran_format.ljust(14), self.nrows,
                       self.ncols, self.nnon_zeros, 0))

        pffmt = self.pointer_format.fortran_format
        iffmt = self.indices_format.fortran_format
        vffmt = self.values_format.fortran_format
        header.append("%16s%16s%20s" %
                      (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
        return "\n".join(header)
def _expect_int(value, msg=None):
try:
return int(value)
except ValueError:
if msg is None:
msg = "Expected an int, got %s"
raise ValueError(msg % value)
def _read_hb_data(content, header):
# XXX: look at a way to reduce memory here (big string creation)
ptr_string = "".join([content.read(header.pointer_nbytes_full),
content.readline()])
ptr = np.fromstring(ptr_string,
dtype=int, sep=' ')
ind_string = "".join([content.read(header.indices_nbytes_full),
content.readline()])
ind = np.fromstring(ind_string,
dtype=int, sep=' ')
val_string = "".join([content.read(header.values_nbytes_full),
content.readline()])
val = np.fromstring(val_string,
dtype=header.values_dtype, sep=' ')
try:
return csc_matrix((val, ind-1, ptr-1),
shape=(header.nrows, header.ncols))
except ValueError as e:
raise e
def _write_data(m, fid, header):
    # Write header plus the pointer/indices/values arrays of the CSC matrix
    # `m` to `fid`, using the fortran formats recorded in `header`.
    def write_array(f, ar, nlines, fmt):
        # ar_nlines is the number of full lines, n is the number of items per
        # line, ffmt the fortran format
        pyfmt = fmt.python_format
        pyfmt_full = pyfmt * fmt.repeat

        # for each array to write, we first write the full lines, and special
        # case for partial line
        full = ar[:(nlines - 1) * fmt.repeat]
        for row in full.reshape((nlines - 1, fmt.repeat)):
            f.write(pyfmt_full % tuple(row) + "\n")
        nremain = ar.size - full.size
        if nremain > 0:
            # Trailing partial line uses the single-item format repeated.
            f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")

    fid.write(header.dump())
    fid.write("\n")
    # +1 is for fortran one-based indexing
    write_array(fid, m.indptr + 1, header.pointer_nlines,
                header.pointer_format)
    write_array(fid, m.indices + 1, header.indices_nlines,
                header.indices_format)
    write_array(fid, m.data, header.values_nlines,
                header.values_format)
class HBMatrixType(object):
    """Class to hold the matrix type."""
    # q2f* translates qualified names to fortran character
    _q2f_type = {
        "real": "R",
        "complex": "C",
        "pattern": "P",
        "integer": "I",
    }
    _q2f_structure = {
        "symmetric": "S",
        "unsymmetric": "U",
        "hermitian": "H",
        "skewsymmetric": "Z",
        "rectangular": "R"
    }
    _q2f_storage = {
        "assembled": "A",
        "elemental": "E",
    }

    # f2q* reverse maps: fortran character back to qualified name.
    _f2q_type = {v: k for k, v in _q2f_type.items()}
    _f2q_structure = {v: k for k, v in _q2f_structure.items()}
    _f2q_storage = {v: k for k, v in _q2f_storage.items()}

    @classmethod
    def from_fortran(cls, fmt):
        """Build a HBMatrixType from its 3-character fortran code (e.g. 'RUA')."""
        if not len(fmt) == 3:
            raise ValueError("Fortran format for matrix type should be 3 "
                             "characters long")
        try:
            return cls(cls._f2q_type[fmt[0]],
                       cls._f2q_structure[fmt[1]],
                       cls._f2q_storage[fmt[2]])
        except KeyError:
            raise ValueError("Unrecognized format %s" % fmt)

    def __init__(self, value_type, structure, storage="assembled"):
        self.value_type = value_type
        self.structure = structure
        self.storage = storage

        # Validate each component against the known vocabularies.
        if value_type not in self._q2f_type:
            raise ValueError("Unrecognized type %s" % value_type)
        if structure not in self._q2f_structure:
            raise ValueError("Unrecognized structure %s" % structure)
        if storage not in self._q2f_storage:
            raise ValueError("Unrecognized storage %s" % storage)

    @property
    def fortran_format(self):
        """The 3-character fortran code for this matrix type."""
        return "".join([self._q2f_type[self.value_type],
                        self._q2f_structure[self.structure],
                        self._q2f_storage[self.storage]])

    def __repr__(self):
        return "HBMatrixType(%s, %s, %s)" % (self.value_type,
                                             self.structure,
                                             self.storage)
class HBFile(object):
    def __init__(self, file, hb_info=None):
        """Create a HBFile instance.

        Parameters
        ----------
        file : file-object
            StringIO work as well
        hb_info : HBInfo, optional
            Should be given as an argument for writing, in which case the file
            should be writable.
        """
        self._fid = file
        if hb_info is None:
            # Read mode: the header is parsed from the file itself.
            self._hb_info = HBInfo.from_file(file)
        else:
            # Write mode: the caller provides the header metadata.
            # NOTE(review): no writability check is performed; only this
            # commented-out fragment of one remains.
            #raise IOError("file %s is not writable, and hb_info "
            #              "was given." % file)
            self._hb_info = hb_info

    # Convenience read-only views over the parsed header.
    @property
    def title(self):
        return self._hb_info.title

    @property
    def key(self):
        return self._hb_info.key

    @property
    def type(self):
        return self._hb_info.mxtype.value_type

    @property
    def structure(self):
        return self._hb_info.mxtype.structure

    @property
    def storage(self):
        return self._hb_info.mxtype.storage

    def read_matrix(self):
        # Assumes the file position is just past the header (true right
        # after __init__ in read mode).
        return _read_hb_data(self._fid, self._hb_info)

    def write_matrix(self, m):
        return _write_data(m, self._fid, self._hb_info)
def hb_read(path_or_open_file):
    """Read HB-format file.

    Parameters
    ----------
    path_or_open_file : path-like or file-like
        If a file-like object, it is used as-is. Otherwise it is opened
        before reading.

    Returns
    -------
    data : scipy.sparse.csc_matrix instance
        The data read from the HB file as a sparse matrix.

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    # File-like objects are consumed directly; paths are opened (and closed)
    # here.
    if hasattr(path_or_open_file, 'read'):
        return HBFile(path_or_open_file).read_matrix()
    with open(path_or_open_file) as f:
        return HBFile(f).read_matrix()
def hb_write(path_or_open_file, m, hb_info=None):
    """Write HB-format file.

    Parameters
    ----------
    path_or_open_file : path-like or file-like
        If a file-like object, it is used as-is. Otherwise it is opened
        before writing.
    m : sparse-matrix
        the sparse matrix to write
    hb_info : HBInfo
        contains the meta-data for write

    Returns
    -------
    None

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    # Derive the header from the matrix itself when not supplied.
    hb_info = HBInfo.from_data(m) if hb_info is None else hb_info

    # File-like objects are written directly; paths are opened (and closed)
    # here.
    if hasattr(path_or_open_file, 'write'):
        return HBFile(path_or_open_file, hb_info).write_matrix(m)
    with open(path_or_open_file, 'w') as f:
        return HBFile(f, hb_info).write_matrix(m)
| gpl-3.0 |
goffersoft/common-utils-python | experimental/tests/testLogInit.py | 1 | 1256 | import logging
from unittest import TestCase
from testfixtures import LogCapture
from com.goffersoft.logging.logconf import init_logging
class TestLogInit(TestCase):
    # Exercises init_logging() end to end by emitting one record per level
    # through the root logger and capturing what actually gets logged.
    def test_log_message(self):
        # Configure logging from the JSON template shipped with the tests.
        init_logging('./conf/logconf_template.json')
        with LogCapture() as l:
            logger = logging.getLogger()
            logger.info('a info message')
            logger.error('a error message')
            logger.debug('a debug message')
            logger.critical('a critical message')
            logger.warn('a warning message')
            exc_msg = 'the if statement throws an exception'\
                'in the python3 world(as it should)'\
                'but gets executed in the python2 world'
            # 'hello' > 3 raises TypeError on Python 3 (logging from the
            # except branch) but evaluates truthy on Python 2 (logging from
            # the if body) — either way exactly one ERROR record is emitted.
            try:
                if('hello' > 3):
                    logger.exception(exc_msg)
            except:
                logger.exception(exc_msg)
            # Expected records, in emission order.
            l.check(
                ('root', 'INFO', 'a info message'),
                ('root', 'ERROR', 'a error message'),
                ('root', 'DEBUG', 'a debug message'),
                ('root', 'CRITICAL', 'a critical message'),
                ('root', 'WARNING', 'a warning message'),
                ('root', 'ERROR', exc_msg),
            )
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.