repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
eshijia/SUR
magnum/tests/unit/api/controllers/v1/test_pod.py
7
25660
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import mock from oslo_config import cfg from oslo_policy import policy from oslo_utils import timeutils from six.moves.urllib import parse as urlparse from wsme import types as wtypes from magnum.api.controllers.v1 import pod as api_pod from magnum.common import utils from magnum.conductor import api as rpcapi from magnum import objects from magnum.tests import base from magnum.tests.unit.api import base as api_base from magnum.tests.unit.api import utils as apiutils from magnum.tests.unit.objects import utils as obj_utils class TestPodObject(base.TestCase): def test_pod_init(self): pod_dict = apiutils.pod_post_data(bay_uuid=None) del pod_dict['desc'] pod = api_pod.Pod(**pod_dict) self.assertEqual(wtypes.Unset, pod.desc) class TestListPod(api_base.FunctionalTest): def setUp(self): super(TestListPod, self).setUp() obj_utils.create_test_bay(self.context) def test_empty(self): response = self.get_json('/pods') self.assertEqual([], response['pods']) def _assert_pod_fields(self, pod): pod_fields = ['name', 'bay_uuid', 'desc', 'images', 'labels', 'status', 'host'] for field in pod_fields: self.assertIn(field, pod) def test_one(self): pod = obj_utils.create_test_pod(self.context) response = self.get_json('/pods') self.assertEqual(pod.uuid, response['pods'][0]["uuid"]) self._assert_pod_fields(response['pods'][0]) def test_get_one(self): pod = obj_utils.create_test_pod(self.context) response = self.get_json('/pods/%s' % 
pod['uuid']) self.assertEqual(pod.uuid, response['uuid']) self._assert_pod_fields(response) def test_get_one_by_name(self): pod = obj_utils.create_test_pod(self.context) response = self.get_json('/pods/%s' % pod['name']) self.assertEqual(pod.uuid, response['uuid']) self._assert_pod_fields(response) def test_get_one_by_name_not_found(self): response = self.get_json('/pods/not_found', expect_errors=True) self.assertEqual(response.status_int, 404) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_get_one_by_name_multiple_pod(self): obj_utils.create_test_pod(self.context, name='test_pod', uuid=utils.generate_uuid()) obj_utils.create_test_pod(self.context, name='test_pod', uuid=utils.generate_uuid()) response = self.get_json('/pods/test_pod', expect_errors=True) self.assertEqual(response.status_int, 409) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_get_all_with_pagination_marker(self): pod_list = [] for id_ in range(4): pod = obj_utils.create_test_pod(self.context, id=id_, uuid=utils.generate_uuid()) pod_list.append(pod.uuid) response = self.get_json('/pods?limit=3&marker=%s' % pod_list[2]) self.assertEqual(1, len(response['pods'])) self.assertEqual(pod_list[-1], response['pods'][0]['uuid']) def test_detail(self): pod = obj_utils.create_test_pod(self.context) response = self.get_json('/pods/detail') self.assertEqual(pod.uuid, response['pods'][0]["uuid"]) self._assert_pod_fields(response['pods'][0]) def test_detail_with_pagination_marker(self): pod_list = [] for id_ in range(4): pod = obj_utils.create_test_pod(self.context, id=id_, uuid=utils.generate_uuid()) pod_list.append(pod.uuid) response = self.get_json('/pods/detail?limit=3&marker=%s' % pod_list[2]) self.assertEqual(1, len(response['pods'])) self.assertEqual(pod_list[-1], response['pods'][0]['uuid']) self._assert_pod_fields(response['pods'][0]) def 
test_detail_against_single(self): pod = obj_utils.create_test_pod(self.context) response = self.get_json('/pods/%s/detail' % pod['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): pod_list = [] for id_ in range(5): pod = obj_utils.create_test_pod(self.context, id=id_, uuid=utils.generate_uuid()) pod_list.append(pod.uuid) response = self.get_json('/pods') self.assertEqual(len(pod_list), len(response['pods'])) uuids = [p['uuid'] for p in response['pods']] self.assertEqual(sorted(pod_list), sorted(uuids)) def test_links(self): uuid = utils.generate_uuid() obj_utils.create_test_pod(self.context, id=1, uuid=uuid) response = self.get_json('/pods/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for l in response['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_pod(self.context, id=id_, uuid=utils.generate_uuid()) response = self.get_json('/pods/?limit=3') self.assertEqual(3, len(response['pods'])) next_marker = response['pods'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_pod(self.context, id=id_, uuid=utils.generate_uuid()) response = self.get_json('/pods') self.assertEqual(3, len(response['pods'])) next_marker = response['pods'][-1]['uuid'] self.assertIn(next_marker, response['next']) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() obj_utils.create_test_bay(self.context) self.pod = obj_utils.create_test_pod(self.context, desc='pod_example_A_desc', status='Running') @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) 
mock_utcnow.return_value = test_time new_desc = 'pod_example_B_desc' response = self.get_json('/pods/%s' % self.pod.uuid) self.assertNotEqual(new_desc, response['desc']) response = self.patch_json('/pods/%s' % self.pod.uuid, [{'path': '/desc', 'value': new_desc, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/pods/%s' % self.pod.uuid) self.assertEqual(new_desc, response['desc']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) def test_replace_bay_uuid(self): another_bay = obj_utils.create_test_bay(self.context, uuid=utils.generate_uuid()) response = self.patch_json('/pods/%s' % self.pod.uuid, [{'path': '/bay_uuid', 'value': another_bay.uuid, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) def test_replace_non_existent_bay_uuid(self): response = self.patch_json('/pods/%s' % self.pod.uuid, [{'path': '/bay_uuid', 'value': utils.generate_uuid(), 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['error_message']) def test_replace_internal_field(self): response = self.patch_json( '/pods/%s' % self.pod.uuid, [{'path': '/labels', 'value': {}, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['error_message']) def test_replace_non_existent_pod(self): response = self.patch_json('/pods/%s' % utils.generate_uuid(), [{'path': '/desc', 'value': 'pod_example_B_desc', 'op': 'replace'}], expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) 
@mock.patch.object(rpcapi.API, 'pod_update') @mock.patch.object(api_pod.Pod, 'parse_manifest') def test_replace_with_manifest(self, parse_manifest, pod_update): response = self.patch_json('/pods/%s' % self.pod.uuid, [{'path': '/manifest', 'value': '{}', 'op': 'replace'}]) self.assertEqual(200, response.status_int) self.assertEqual('application/json', response.content_type) parse_manifest.assert_called_once_with() self.assertTrue(pod_update.is_called) def test_add_ok(self): new_desc = 'pod_example_B_desc' response = self.patch_json( '/pods/%s' % self.pod.uuid, [{'path': '/desc', 'value': new_desc, 'op': 'add'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_int) response = self.get_json('/pods/%s' % self.pod.uuid) self.assertEqual(new_desc, response['desc']) def test_add_multi(self): new_status = 'Stopped' new_desc = 'pod_example_B_desc' response = self.get_json('/pods/%s' % self.pod.uuid) self.assertNotEqual(new_status, response['status']) self.assertNotEqual(new_desc, response['desc']) json = [ { 'path': '/status', 'value': new_status, 'op': 'add' }, { 'path': '/desc', 'value': new_desc, 'op': 'add' } ] response = self.patch_json('/pods/%s' % self.pod.uuid, json) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/pods/%s' % self.pod.uuid) self.assertEqual(new_status, response['status']) self.assertEqual(new_desc, response['desc']) def test_add_non_existent_property(self): response = self.patch_json( '/pods/%s' % self.pod.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['error_message']) def test_remove_ok(self): response = self.get_json('/pods/%s' % self.pod.uuid) self.assertIsNotNone(response['desc']) response = self.patch_json('/pods/%s' % self.pod.uuid, [{'path': '/desc', 
'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/pods/%s' % self.pod.uuid) self.assertIsNone(response['desc']) def test_remove_uuid(self): response = self.patch_json('/pods/%s' % self.pod.uuid, [{'path': '/uuid', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_remove_bay_uuid(self): response = self.patch_json('/pods/%s' % self.pod.uuid, [{'path': '/bay_uuid', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_remove_internal_field(self): response = self.patch_json('/pods/%s' % self.pod.uuid, [{'path': '/labels', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_remove_non_existent_property(self): response = self.patch_json( '/pods/%s' % self.pod.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_code) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok_by_name(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/pods/%s' % self.pod.name, [{'path': '/desc', 'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/pods/%s' % self.pod.uuid) self.assertEqual('pod1', response['name']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) 
self.assertEqual(test_time, return_updated_at) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok_by_name_not_found(self, mock_utcnow): name = 'not_found' test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.patch_json('/pods/%s' % name, [{'path': '/desc', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(404, response.status_code) @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok_by_name_multiple_pod(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time obj_utils.create_test_pod(self.context, name='test_pod', uuid=utils.generate_uuid()) obj_utils.create_test_pod(self.context, name='test_pod', uuid=utils.generate_uuid()) response = self.patch_json('/pods/test_pod', [{'path': '/desc', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(409, response.status_code) class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() obj_utils.create_test_bay(self.context) p = mock.patch.object(rpcapi.API, 'pod_create') self.mock_pod_create = p.start() self.mock_pod_create.side_effect = self._simulate_rpc_pod_create self.addCleanup(p.stop) p = mock.patch('magnum.objects.BayModel.get_by_uuid') self.mock_baymodel_get_by_uuid = p.start() self.mock_baymodel_get_by_uuid.return_value.coe = 'kubernetes' self.addCleanup(p.stop) def _simulate_rpc_pod_create(self, pod): pod.create() return pod @mock.patch('oslo_utils.timeutils.utcnow') def test_create_pod(self, mock_utcnow): pdict = apiutils.pod_post_data() test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.post_json('/pods', pdict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) # Check location header self.assertIsNotNone(response.location) expected_location 
= '/v1/pods/%s' % pdict['uuid'] self.assertEqual(urlparse.urlparse(response.location).path, expected_location) self.assertEqual(pdict['uuid'], response.json['uuid']) self.assertNotIn('updated_at', response.json.keys) return_created_at = timeutils.parse_isotime( response.json['created_at']).replace(tzinfo=None) self.assertEqual(test_time, return_created_at) def test_create_pod_set_project_id_and_user_id(self): pdict = apiutils.pod_post_data() def _simulate_rpc_pod_create(pod): self.assertEqual(pod.project_id, self.context.project_id) self.assertEqual(pod.user_id, self.context.user_id) pod.create() return pod self.mock_pod_create.side_effect = _simulate_rpc_pod_create self.post_json('/pods', pdict) def test_create_pod_doesnt_contain_id(self): with mock.patch.object(self.dbapi, 'create_pod', wraps=self.dbapi.create_pod) as cc_mock: pdict = apiutils.pod_post_data(desc='pod_example_A_desc') response = self.post_json('/pods', pdict) self.assertEqual(pdict['desc'], response.json['desc']) cc_mock.assert_called_once_with(mock.ANY) # Check that 'id' is not in first arg of positional args self.assertNotIn('id', cc_mock.call_args[0][0]) def test_create_pod_generate_uuid(self): pdict = apiutils.pod_post_data() del pdict['uuid'] response = self.post_json('/pods', pdict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(pdict['desc'], response.json['desc']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) def test_create_pod_no_bay_uuid(self): pdict = apiutils.pod_post_data() del pdict['bay_uuid'] response = self.post_json('/pods', pdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) def test_create_pod_with_non_existent_bay_uuid(self): pdict = apiutils.pod_post_data(bay_uuid=utils.generate_uuid()) response = self.post_json('/pods', pdict, expect_errors=True) self.assertEqual('application/json', response.content_type) 
self.assertEqual(400, response.status_int) self.assertTrue(response.json['error_message']) def test_create_pod_with_invalid_manifest(self): pdict = apiutils.pod_post_data() pdict['manifest'] = 'wrong manifest' response = self.post_json('/pods', pdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['error_message']) def test_create_pod_no_manifest(self): pdict = apiutils.pod_post_data() del pdict['manifest'] response = self.post_json('/pods', pdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['error_message']) def test_create_pod_no_id_in_manifest(self): pdict = apiutils.pod_post_data() pdict['manifest'] = {} response = self.post_json('/pods', pdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['error_message']) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() obj_utils.create_test_bay(self.context) self.pod = obj_utils.create_test_pod(self.context) p = mock.patch.object(rpcapi.API, 'pod_delete') self.mock_pod_delete = p.start() self.mock_pod_delete.side_effect = self._simulate_rpc_pod_delete self.addCleanup(p.stop) def _simulate_rpc_pod_delete(self, pod_uuid): pod = objects.Pod.get_by_uuid(self.context, pod_uuid) pod.destroy() def test_delete_pod(self): self.delete('/pods/%s' % self.pod.uuid) response = self.get_json('/pods/%s' % self.pod.uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_delete_pod_by_name(self): self.delete('/pods/%s' % self.pod.name) response = self.get_json('/pods/%s' % self.pod.name, expect_errors=True) self.assertEqual(404, response.status_int) 
self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_delete_pod_by_name_not_found(self): response = self.delete('/pods/not_found', expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_delete_multiple_pod_by_name(self): obj_utils.create_test_pod(self.context, name='test_pod', uuid=utils.generate_uuid()) obj_utils.create_test_pod(self.context, name='test_pod', uuid=utils.generate_uuid()) response = self.delete('/pods/test_pod', expect_errors=True) self.assertEqual(409, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_delete_pod_not_found(self): uuid = utils.generate_uuid() response = self.delete('/pods/%s' % uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) class TestPodPolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({rule: 'project:non_fake'}) exc = self.assertRaises(policy.PolicyNotAuthorized, func, *arg, **kwarg) self.assertTrue(exc.message.startswith(rule)) self.assertTrue(exc.message.endswith('disallowed by policy')) def test_policy_disallow_get_all(self): self._common_policy_check( 'pod:get_all', self.get_json, '/pods') def test_policy_disallow_get_one(self): self._common_policy_check( 'pod:get', self.get_json, '/pods/111-222-333') def test_policy_disallow_detail(self): self._common_policy_check( 'pod:detail', self.get_json, '/pods/111-222-333/detail') def test_policy_disallow_update(self): pod = obj_utils.create_test_pod(self.context, desc='test pod', uuid=utils.generate_uuid()) self._common_policy_check( 'pod:update', self.patch_json, '/pods/%s' % pod.uuid, [{'path': '/desc', 'value': 
'new test pod', 'op': 'replace'}]) def test_policy_disallow_create(self): pdict = apiutils.pod_post_data() self._common_policy_check( 'pod:create', self.post_json, '/pods', pdict) def test_policy_disallow_delete(self): pod = obj_utils.create_test_pod(self.context, name='test_pod', uuid=utils.generate_uuid()) self._common_policy_check( 'pod:delete', self.delete, '/pods/%s' % pod.uuid)
apache-2.0
LazyOpser/recruit
yuanxin/201708/201708-07-RE-demo01.py
1
1343
#!/usr/bin/python # encoding:utf-8 """ @author: yuanxin contact: @file: 2017/8/15-RE-demo01.py @time: 2017/8/15 """ import re print('01',re.findall("a..c",'aaabbccc')) # 01 ['abbc'] , ^ 从字符串开始匹配的位置,找到所有元素,返回一个列表 print('01',re.findall("a..c",'aaabbbbbbccc')) # 01 [] , " .. " 仅能匹配2个字符 print('02',re.findall('leon$','leonABCleonDEF')) # 02 [] , 使用 $ ,从字符串结尾开始匹配 print('03',re.findall('leon',"leonABCleonDEF")) # 03 ['leon', 'leon'] , findall ,匹配全部的 leon print('04',re.findall('leon$',"leonABCleonDEF")) # 04 [] , 使用 $ ,从字符串结尾开始匹配 print('05',re.match('leon','leonABCleonDEF')) # 05 <_sre.SRE_Match object; span=(0, 4), match='leon'> , print('06',re.match('leon$','leonABCleonDEF')) # 06 None ; match,只在字符串开始的地方查找; """ * + ? {} : 的匹配""" print('07',re.findall('xin*','yuanxi')) # 07 ['xin'] ; * 号匹配前一个字符 0 - 无限次; print('07-02',re.findall('xin*','yuanxinnnnnnnn')) # 07-02 ['xinnnnnnnn'] ; * 号匹配前一个字符 0 - 无限次 ; print('08',re.findall('xin+','yuanxin')) # 08 ['xin'] ; + 号匹配前一个字符 1 - 无限次; print('08-02',re.findall('xin+','yuanxinnnnnnn')) # 08 ['xin'] ; + 号匹配前一个字符 1 - 无限次;
gpl-3.0
tschijnmo/GCMCbyGULP
GCMCbyGULP/utils.py
1
1172
""" Small utility functions ======================= """ import collections def ensure_list_of_str(val, tag): """Ensures that the given value is a list of strings It ensures that the given value is a list of strings and return them, or value error will be raised. If a single string is given, a singleton list will be returned. :param val: The value to be ensured to be a list of strings. :param str tag: A tag for the value, used for error reporting. :returns: The ensured list of strings. """ try: if isinstance(val, basestring): return [str(val), ] elif isinstance(val, collections.Iterable): ret_val = [] for i in val: if isinstance(i, basestring): ret_val.append(str(i)) else: raise ValueError(i) continue return ret_val else: raise ValueError(val) except ValueError as exc: raise ValueError( 'Invalid value {val} for tag {tag}, string expected!'.format( tag=tag, val=exc.args[0] ) )
mit
iulian787/spack
var/spack/repos/builtin/packages/py-opt-einsum/package.py
3
1209
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyOptEinsum(PythonPackage): """Optimized Einsum: A tensor contraction order optimizer.""" homepage = "https://github.com/dgasmith/opt_einsum" url = "https://pypi.io/packages/source/o/opt_einsum/opt_einsum-3.1.0.tar.gz" version('3.2.1', sha256='83b76a98d18ae6a5cc7a0d88955a7f74881f0e567a0f4c949d24c942753eb998') version('3.2.0', sha256='738b0a1db1d3084d360081bb64d826f9db06d2df7cc0bf8e2c9356028da1fa31') version('3.1.0', sha256='edfada4b1d0b3b782ace8bc14e80618ff629abf53143e1e6bbf9bd00b11ece77') version('2.3.2', sha256='d3d464b4da7ef09e444c30e4003a27def37f85ff10ff2671e5f7d7813adac35b') depends_on('python@:2', type=('build', 'run'), when='@2') depends_on('python@3.5:', type=('build', 'run'), when='@3:') depends_on('py-setuptools', type='build') depends_on('py-numpy@1.7:', type=('build', 'run')) depends_on('py-pytest', type='test') depends_on('py-pytest-cov', type='test') depends_on('py-pytest-pep8', type='test')
lgpl-2.1
elkingtonmcb/django
tests/template_tests/test_loaders.py
263
14253
# -*- coding: utf-8 -*- from __future__ import unicode_literals import os.path import sys import tempfile import types import unittest from contextlib import contextmanager from django.template import Context, TemplateDoesNotExist from django.template.engine import Engine from django.test import SimpleTestCase, ignore_warnings, override_settings from django.utils import six from django.utils.deprecation import RemovedInDjango20Warning from .utils import TEMPLATE_DIR try: import pkg_resources except ImportError: pkg_resources = None class CachedLoaderTests(SimpleTestCase): def setUp(self): self.engine = Engine( dirs=[TEMPLATE_DIR], loaders=[ ('django.template.loaders.cached.Loader', [ 'django.template.loaders.filesystem.Loader', ]), ], ) def test_get_template(self): template = self.engine.get_template('index.html') self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html')) self.assertEqual(template.origin.template_name, 'index.html') self.assertEqual(template.origin.loader, self.engine.template_loaders[0].loaders[0]) cache = self.engine.template_loaders[0].get_template_cache self.assertEqual(cache['index.html'], template) # Run a second time from cache template = self.engine.get_template('index.html') self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html')) self.assertEqual(template.origin.template_name, 'index.html') self.assertEqual(template.origin.loader, self.engine.template_loaders[0].loaders[0]) def test_get_template_missing(self): with self.assertRaises(TemplateDoesNotExist): self.engine.get_template('doesnotexist.html') e = self.engine.template_loaders[0].get_template_cache['doesnotexist.html'] self.assertEqual(e.args[0], 'doesnotexist.html') @ignore_warnings(category=RemovedInDjango20Warning) def test_load_template(self): loader = self.engine.template_loaders[0] template, origin = loader.load_template('index.html') self.assertEqual(template.origin.template_name, 'index.html') cache = 
self.engine.template_loaders[0].template_cache self.assertEqual(cache['index.html'][0], template) # Run a second time from cache loader = self.engine.template_loaders[0] source, name = loader.load_template('index.html') self.assertEqual(template.origin.template_name, 'index.html') @ignore_warnings(category=RemovedInDjango20Warning) def test_load_template_missing(self): """ #19949 -- TemplateDoesNotExist exceptions should be cached. """ loader = self.engine.template_loaders[0] self.assertFalse('missing.html' in loader.template_cache) with self.assertRaises(TemplateDoesNotExist): loader.load_template("missing.html") self.assertEqual( loader.template_cache["missing.html"], TemplateDoesNotExist, "Cached loader failed to cache the TemplateDoesNotExist exception", ) def test_templatedir_caching(self): """ #13573 -- Template directories should be part of the cache key. """ # Retrieve a template specifying a template directory to check t1, name = self.engine.find_template('test.html', (os.path.join(TEMPLATE_DIR, 'first'),)) # Now retrieve the same template name, but from a different directory t2, name = self.engine.find_template('test.html', (os.path.join(TEMPLATE_DIR, 'second'),)) # The two templates should not have the same content self.assertNotEqual(t1.render(Context({})), t2.render(Context({}))) @unittest.skipUnless(pkg_resources, 'setuptools is not installed') class EggLoaderTests(SimpleTestCase): @contextmanager def create_egg(self, name, resources): """ Creates a mock egg with a list of resources. name: The name of the module. resources: A dictionary of template names mapped to file-like objects. 
""" if six.PY2: name = name.encode('utf-8') class MockLoader(object): pass class MockProvider(pkg_resources.NullProvider): def __init__(self, module): pkg_resources.NullProvider.__init__(self, module) self.module = module def _has(self, path): return path in self.module._resources def _isdir(self, path): return False def get_resource_stream(self, manager, resource_name): return self.module._resources[resource_name] def _get(self, path): return self.module._resources[path].read() def _fn(self, base, resource_name): return os.path.normcase(resource_name) egg = types.ModuleType(name) egg.__loader__ = MockLoader() egg.__path__ = ['/some/bogus/path/'] egg.__file__ = '/some/bogus/path/__init__.pyc' egg._resources = resources sys.modules[name] = egg pkg_resources._provider_factories[MockLoader] = MockProvider try: yield finally: del sys.modules[name] del pkg_resources._provider_factories[MockLoader] @classmethod @ignore_warnings(category=RemovedInDjango20Warning) def setUpClass(cls): cls.engine = Engine(loaders=[ 'django.template.loaders.eggs.Loader', ]) cls.loader = cls.engine.template_loaders[0] super(EggLoaderTests, cls).setUpClass() def test_get_template(self): templates = { os.path.normcase('templates/y.html'): six.StringIO("y"), } with self.create_egg('egg', templates): with override_settings(INSTALLED_APPS=['egg']): template = self.engine.get_template("y.html") self.assertEqual(template.origin.name, 'egg:egg:templates/y.html') self.assertEqual(template.origin.template_name, 'y.html') self.assertEqual(template.origin.loader, self.engine.template_loaders[0]) output = template.render(Context({})) self.assertEqual(output, "y") @ignore_warnings(category=RemovedInDjango20Warning) def test_load_template_source(self): loader = self.engine.template_loaders[0] templates = { os.path.normcase('templates/y.html'): six.StringIO("y"), } with self.create_egg('egg', templates): with override_settings(INSTALLED_APPS=['egg']): source, name = loader.load_template_source('y.html') 
self.assertEqual(source.strip(), 'y') self.assertEqual(name, 'egg:egg:templates/y.html') def test_non_existing(self): """ Template loading fails if the template is not in the egg. """ with self.create_egg('egg', {}): with override_settings(INSTALLED_APPS=['egg']): with self.assertRaises(TemplateDoesNotExist): self.engine.get_template('not-existing.html') def test_not_installed(self): """ Template loading fails if the egg is not in INSTALLED_APPS. """ templates = { os.path.normcase('templates/y.html'): six.StringIO("y"), } with self.create_egg('egg', templates): with self.assertRaises(TemplateDoesNotExist): self.engine.get_template('y.html') class FileSystemLoaderTests(SimpleTestCase): @classmethod def setUpClass(cls): cls.engine = Engine(dirs=[TEMPLATE_DIR]) super(FileSystemLoaderTests, cls).setUpClass() @contextmanager def set_dirs(self, dirs): original_dirs = self.engine.dirs self.engine.dirs = dirs try: yield finally: self.engine.dirs = original_dirs @contextmanager def source_checker(self, dirs): loader = self.engine.template_loaders[0] def check_sources(path, expected_sources): expected_sources = [os.path.abspath(s) for s in expected_sources] self.assertEqual( [origin.name for origin in loader.get_template_sources(path)], expected_sources, ) with self.set_dirs(dirs): yield check_sources def test_get_template(self): template = self.engine.get_template('index.html') self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html')) self.assertEqual(template.origin.template_name, 'index.html') self.assertEqual(template.origin.loader, self.engine.template_loaders[0]) self.assertEqual(template.origin.loader_name, 'django.template.loaders.filesystem.Loader') @ignore_warnings(category=RemovedInDjango20Warning) def test_load_template_source(self): loader = self.engine.template_loaders[0] source, name = loader.load_template_source('index.html') self.assertEqual(source.strip(), 'index') self.assertEqual(name, os.path.join(TEMPLATE_DIR, 'index.html')) def 
test_directory_security(self): with self.source_checker(['/dir1', '/dir2']) as check_sources: check_sources('index.html', ['/dir1/index.html', '/dir2/index.html']) check_sources('/etc/passwd', []) check_sources('etc/passwd', ['/dir1/etc/passwd', '/dir2/etc/passwd']) check_sources('../etc/passwd', []) check_sources('../../../etc/passwd', []) check_sources('/dir1/index.html', ['/dir1/index.html']) check_sources('../dir2/index.html', ['/dir2/index.html']) check_sources('/dir1blah', []) check_sources('../dir1blah', []) def test_unicode_template_name(self): with self.source_checker(['/dir1', '/dir2']) as check_sources: # UTF-8 bytestrings are permitted. check_sources(b'\xc3\x85ngstr\xc3\xb6m', ['/dir1/Ångström', '/dir2/Ångström']) # Unicode strings are permitted. check_sources('Ångström', ['/dir1/Ångström', '/dir2/Ångström']) def test_utf8_bytestring(self): """ Invalid UTF-8 encoding in bytestrings should raise a useful error """ engine = Engine() loader = engine.template_loaders[0] with self.assertRaises(UnicodeDecodeError): list(loader.get_template_sources(b'\xc3\xc3', ['/dir1'])) def test_unicode_dir_name(self): with self.source_checker([b'/Stra\xc3\x9fe']) as check_sources: check_sources('Ångström', ['/Straße/Ångström']) check_sources(b'\xc3\x85ngstr\xc3\xb6m', ['/Straße/Ångström']) @unittest.skipUnless( os.path.normcase('/TEST') == os.path.normpath('/test'), "This test only runs on case-sensitive file systems.", ) def test_case_sensitivity(self): with self.source_checker(['/dir1', '/DIR2']) as check_sources: check_sources('index.html', ['/dir1/index.html', '/DIR2/index.html']) check_sources('/DIR1/index.HTML', ['/DIR1/index.HTML']) def test_file_does_not_exist(self): with self.assertRaises(TemplateDoesNotExist): self.engine.get_template('doesnotexist.html') @unittest.skipIf( sys.platform == 'win32', "Python on Windows doesn't have working os.chmod().", ) def test_permissions_error(self): with tempfile.NamedTemporaryFile() as tmpfile: tmpdir = 
os.path.dirname(tmpfile.name) tmppath = os.path.join(tmpdir, tmpfile.name) os.chmod(tmppath, 0o0222) with self.set_dirs([tmpdir]): with self.assertRaisesMessage(IOError, 'Permission denied'): self.engine.get_template(tmpfile.name) def test_notafile_error(self): with self.assertRaises(IOError): self.engine.get_template('first') class AppDirectoriesLoaderTests(SimpleTestCase): @classmethod def setUpClass(cls): cls.engine = Engine( loaders=['django.template.loaders.app_directories.Loader'], ) super(AppDirectoriesLoaderTests, cls).setUpClass() @override_settings(INSTALLED_APPS=['template_tests']) def test_get_template(self): template = self.engine.get_template('index.html') self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, 'index.html')) self.assertEqual(template.origin.template_name, 'index.html') self.assertEqual(template.origin.loader, self.engine.template_loaders[0]) @ignore_warnings(category=RemovedInDjango20Warning) @override_settings(INSTALLED_APPS=['template_tests']) def test_load_template_source(self): loader = self.engine.template_loaders[0] source, name = loader.load_template_source('index.html') self.assertEqual(source.strip(), 'index') self.assertEqual(name, os.path.join(TEMPLATE_DIR, 'index.html')) @override_settings(INSTALLED_APPS=[]) def test_not_installed(self): with self.assertRaises(TemplateDoesNotExist): self.engine.get_template('index.html') class LocmemLoaderTests(SimpleTestCase): @classmethod def setUpClass(cls): cls.engine = Engine( loaders=[('django.template.loaders.locmem.Loader', { 'index.html': 'index', })], ) super(LocmemLoaderTests, cls).setUpClass() def test_get_template(self): template = self.engine.get_template('index.html') self.assertEqual(template.origin.name, 'index.html') self.assertEqual(template.origin.template_name, 'index.html') self.assertEqual(template.origin.loader, self.engine.template_loaders[0]) @ignore_warnings(category=RemovedInDjango20Warning) def test_load_template_source(self): loader = 
self.engine.template_loaders[0] source, name = loader.load_template_source('index.html') self.assertEqual(source.strip(), 'index') self.assertEqual(name, 'index.html')
bsd-3-clause
foodszhang/kbengine
kbe/res/scripts/common/Lib/idlelib/idle_test/test_rstrip.py
143
1613
import unittest
import idlelib.RstripExtension as rs
from idlelib.idle_test.mock_idle import Editor


class rstripTest(unittest.TestCase):
    """Exercise RstripExtension.do_rstrip against the mock Editor."""

    @staticmethod
    def _make_stripper():
        # Fresh mock editor plus a bound do_rstrip callable for each test.
        editor = Editor()
        return editor.text, rs.RstripExtension(editor).do_rstrip

    def test_rstrip_line(self):
        text, do_rstrip = self._make_stripper()

        # Empty buffer: stripping is a no-op.
        do_rstrip()
        self.assertEqual(text.get('1.0', 'insert'), '')
        # A line of only whitespace is stripped to nothing.
        text.insert('1.0', ' ')
        do_rstrip()
        self.assertEqual(text.get('1.0', 'insert'), '')
        # Trailing whitespace before the newline goes; the newline stays.
        text.insert('1.0', ' \n')
        do_rstrip()
        self.assertEqual(text.get('1.0', 'insert'), '\n')

    def test_rstrip_multiple(self):
        text, do_rstrip = self._make_stripper()
        # To verify against real widgets, replace the mock with
        # idlelib.EditorWindow.EditorWindow(root=tkinter.Tk()).
        original = (
            "Line with an ending tab \n"
            "Line ending in 5 spaces \n"
            "Linewithnospaces\n"
            " indented line\n"
            " indented line with trailing space \n"
            " ")
        stripped = (
            "Line with an ending tab\n"
            "Line ending in 5 spaces\n"
            "Linewithnospaces\n"
            " indented line\n"
            " indented line with trailing space\n")
        text.insert('1.0', original)
        do_rstrip()
        self.assertEqual(text.get('1.0', 'insert'), stripped)

if __name__ == '__main__':
    unittest.main(verbosity=2, exit=False)
lgpl-3.0
elfnor/sverchok
nodes/vector/variable_lacunarity.py
3
3958
# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

import bpy
from bpy.props import EnumProperty, IntProperty, FloatProperty
from mathutils import noise

from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
from sverchok.utils.sv_seed_funcs import get_offset, seed_adjusted

# (noise type name, mathutils noise-type id) pairs; ids match
# http://www.blender.org/documentation/blender_python_api_current/mathutils.noise.html
noise_options = [
    ('BLENDER', 0),
    ('STDPERLIN', 1),
    ('NEWPERLIN', 2),
    ('VORONOI_F1', 3),
    ('VORONOI_F2', 4),
    ('VORONOI_F3', 5),
    ('VORONOI_F4', 6),
    ('VORONOI_F2F1', 7),
    ('VORONOI_CRACKLE', 8),
    ('CELLNOISE', 14)
]


def var_func(position, distortion, _noise_type1, _noise_type2):
    """Evaluate mathutils variable lacunarity at a single position."""
    return noise.variable_lacunarity(position, distortion,
                                     _noise_type1, _noise_type2)


# name -> numeric id, and the EnumProperty items derived from the same table
noise_dict = dict(noise_options)
avail_noise = [
    (name, name.title(), name.title(), '', idx)
    for name, idx in noise_options
]


class SvLacunarityNode(bpy.types.Node, SverchCustomTreeNode):
    '''Variable lacunarity node'''
    bl_idname = 'SvLacunarityNode'
    bl_label = 'Variable Lacunarity'
    bl_icon = 'FORCE_TURBULENCE'

    noise_type1 = EnumProperty(
        items=avail_noise,
        default='STDPERLIN',
        description="Noise type",
        update=updateNode)

    noise_type2 = EnumProperty(
        items=avail_noise,
        default='STDPERLIN',
        description="Noise type",
        update=updateNode)

    distortion = FloatProperty(
        default=0.2, name="Distortion", update=updateNode)

    seed = IntProperty(default=0, name='Seed', update=updateNode)

    def sv_init(self, context):
        """Create the node's sockets."""
        # NOTE(review): 'Distrortion' is a typo, but it is the persisted
        # socket identifier — renaming it would break saved node trees.
        self.inputs.new('VerticesSocket', 'Vertices')
        self.inputs.new('StringsSocket', 'Seed').prop_name = 'seed'
        self.inputs.new('StringsSocket', 'Distrortion').prop_name = 'distortion'
        self.outputs.new('StringsSocket', 'Value')

    def draw_buttons(self, context, layout):
        """Expose both noise-type selectors in the node UI."""
        layout.prop(self, 'noise_type1', text="Type")
        layout.prop(self, 'noise_type2', text="Type")

    def process(self):
        """Compute a lacunarity value per (seed-adjusted) input vertex."""
        if not self.outputs[0].is_linked:
            return

        verts = self.inputs['Vertices'].sv_get(deepcopy=False)
        seed_value = self.inputs['Seed'].sv_get()[0][0]
        distortion_value = self.inputs['Distrortion'].sv_get()[0][0]
        ntype_a = noise_dict[self.noise_type1]
        ntype_b = noise_dict[self.noise_type2]

        out = [
            [var_func(v, distortion_value, ntype_a, ntype_b)
             for v in seed_adjusted(vert_list, seed_value)]
            for vert_list in verts
        ]
        self.outputs[0].sv_set(out)

    def draw_label(self):
        """Show a compact summary when the node is collapsed."""
        if not self.hide:
            return self.label or self.name
        if self.inputs['Seed'].is_linked:
            seed = ' + seed(s)'
        else:
            seed = ' + ({0})'.format(str(int(self.seed)))
        return self.noise_type1.title() + ' + ' + self.noise_type2.title() + seed


def register():
    bpy.utils.register_class(SvLacunarityNode)


def unregister():
    bpy.utils.unregister_class(SvLacunarityNode)
gpl-3.0
hurrinico/server-tools
database_cleanup/model/purge_models.py
23
5351
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    This module copyright (C) 2014 Therp BV (<http://therp.nl>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
from openerp.addons.base.ir.ir_model import MODULE_UNINSTALL_FLAG


class IrModel(orm.Model):
    # Extends the core ir.model so the purge wizard can delete a model
    # record without the ORM trying to drop its (possibly broken) table.
    _inherit = 'ir.model'

    def _drop_table(self, cr, uid, ids, context=None):
        # Allow to skip this step during model unlink
        # The super method crashes if the model cannot be instantiated
        if context and context.get('no_drop_table'):
            return True
        return super(IrModel, self)._drop_table(cr, uid, ids, context=context)


class CleanupPurgeLineModel(orm.TransientModel):
    # One wizard line per orphaned model to purge.
    _inherit = 'cleanup.purge.line'
    _name = 'cleanup.purge.line.model'

    _columns = {
        'wizard_id': fields.many2one(
            'cleanup.purge.wizard.model', 'Purge Wizard', readonly=True),
    }

    def purge(self, cr, uid, ids, context=None):
        """
        Unlink models upon manual confirmation.

        For each selected line: detach attachments pointing at the model,
        drop its constraints, unlink fields that relate to it, remove its
        relation records and finally delete the ir.model row itself.
        """
        model_pool = self.pool['ir.model']
        attachment_pool = self.pool['ir.attachment']
        constraint_pool = self.pool['ir.model.constraint']
        fields_pool = self.pool['ir.model.fields']
        relation_pool = self.pool['ir.model.relation']

        # MODULE_UNINSTALL_FLAG lets the ORM delete base records;
        # no_drop_table triggers the IrModel._drop_table override above.
        local_context = (context or {}).copy()
        local_context.update({
            MODULE_UNINSTALL_FLAG: True,
            'no_drop_table': True,
        })

        for line in self.browse(cr, uid, ids, context=context):
            # Raw SQL: the model may not be instantiable via the ORM.
            cr.execute(
                "SELECT id, model from ir_model WHERE model = %s",
                (line.name,))
            row = cr.fetchone()
            if row:
                self.logger.info('Purging model %s', row[1])
                # Detach attachments instead of deleting them, so their
                # data survives the model purge.
                attachment_ids = attachment_pool.search(
                    cr, uid, [('res_model', '=', line.name)], context=context)
                if attachment_ids:
                    cr.execute(
                        "UPDATE ir_attachment SET res_model = FALSE "
                        "WHERE id in %s",
                        (tuple(attachment_ids), ))
                constraint_ids = constraint_pool.search(
                    cr, uid, [('model', '=', line.name)], context=context)
                if constraint_ids:
                    constraint_pool.unlink(
                        cr, uid, constraint_ids, context=context)
                # Fields on *other* models that point at the purged one.
                relation_ids = fields_pool.search(
                    cr, uid, [('relation', '=', row[1])], context=context)
                for relation in relation_ids:
                    try:
                        # Fails if the model on the target side
                        # cannot be instantiated
                        fields_pool.unlink(cr, uid, [relation],
                                           context=local_context)
                    except KeyError:
                        pass
                    except AttributeError:
                        pass
                relation_ids = relation_pool.search(
                    cr, uid, [('model', '=', line.name)], context=context)
                for relation in relation_ids:
                    relation_pool.unlink(cr, uid, [relation],
                                         context=local_context)
                model_pool.unlink(cr, uid, [row[0]], context=local_context)
                line.write({'purged': True})
                # Commit per purged model so earlier work survives a
                # failure on a later line.
                cr.commit()
        return True


class CleanupPurgeWizardModel(orm.TransientModel):
    # Wizard that lists all models present in ir_model but absent from
    # the registry, and offers to purge them.
    _inherit = 'cleanup.purge.wizard'
    _name = 'cleanup.purge.wizard.model'

    def default_get(self, cr, uid, fields, context=None):
        res = super(CleanupPurgeWizardModel, self).default_get(
            cr, uid, fields, context=context)
        if 'name' in fields:
            res['name'] = _('Purge models')
        return res

    def find(self, cr, uid, context=None):
        """
        Search for models that cannot be instantiated.

        Returns one2many create-commands (0, 0, vals) for each orphaned
        model, or raises if there is nothing to purge.
        """
        res = []
        cr.execute("SELECT model from ir_model")
        for (model,) in cr.fetchall():
            # Present in the database but missing from the registry
            # means the defining module is gone.
            if not self.pool.get(model):
                res.append((0, 0, {'name': model}))
        if not res:
            raise orm.except_orm(
                _('Nothing to do'),
                _('No orphaned models found'))
        return res

    _columns = {
        'purge_line_ids': fields.one2many(
            'cleanup.purge.line.model', 'wizard_id', 'Models to purge'),
    }
agpl-3.0
michaelgugino/turbo-lister
sqlalchemy/ext/instrumentation.py
4
14672
"""Extensible class instrumentation. The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate systems of class instrumentation within the ORM. Class instrumentation refers to how the ORM places attributes on the class which maintain data and track changes to that data, as well as event hooks installed on the class. .. note:: The extension package is provided for the benefit of integration with other object management packages, which already perform their own instrumentation. It is not intended for general use. For examples of how the instrumentation extension is used, see the example :ref:`examples_instrumentation`. .. versionchanged:: 0.8 The :mod:`sqlalchemy.orm.instrumentation` was split out so that all functionality having to do with non-standard instrumentation was moved out to :mod:`sqlalchemy.ext.instrumentation`. When imported, the module installs itself within :mod:`sqlalchemy.orm.instrumentation` so that it takes effect, including recognition of ``__sa_instrumentation_manager__`` on mapped classes, as well :attr:`.instrumentation_finders` being used to determine class instrumentation resolution. """ from ..orm import instrumentation as orm_instrumentation from ..orm.instrumentation import ( ClassManager, InstrumentationFactory, _default_state_getter, _default_dict_getter, _default_manager_getter ) from ..orm import attributes, collections, base as orm_base from .. import util from ..orm import exc as orm_exc import weakref INSTRUMENTATION_MANAGER = '__sa_instrumentation_manager__' """Attribute, elects custom instrumentation when present on a mapped class. Allows a class to specify a slightly or wildly different technique for tracking changes made to mapped attributes and collections. Only one instrumentation implementation is allowed in a given object inheritance hierarchy. The value of this attribute must be a callable and will be passed a class object. 
The callable must return one of: - An instance of an InstrumentationManager or subclass - An object implementing all or some of InstrumentationManager (TODO) - A dictionary of callables, implementing all or some of the above (TODO) - An instance of a ClassManager or subclass This attribute is consulted by SQLAlchemy instrumentation resolution, once the :mod:`sqlalchemy.ext.instrumentation` module has been imported. If custom finders are installed in the global instrumentation_finders list, they may or may not choose to honor this attribute. """ def find_native_user_instrumentation_hook(cls): """Find user-specified instrumentation management for a class.""" return getattr(cls, INSTRUMENTATION_MANAGER, None) instrumentation_finders = [find_native_user_instrumentation_hook] """An extensible sequence of callables which return instrumentation implementations When a class is registered, each callable will be passed a class object. If None is returned, the next finder in the sequence is consulted. Otherwise the return must be an instrumentation factory that follows the same guidelines as sqlalchemy.ext.instrumentation.INSTRUMENTATION_MANAGER. By default, the only finder is find_native_user_instrumentation_hook, which searches for INSTRUMENTATION_MANAGER. If all finders return None, standard ClassManager instrumentation is used. """ class ExtendedInstrumentationRegistry(InstrumentationFactory): """Extends :class:`.InstrumentationFactory` with additional bookkeeping, to accommodate multiple types of class managers. 
""" _manager_finders = weakref.WeakKeyDictionary() _state_finders = weakref.WeakKeyDictionary() _dict_finders = weakref.WeakKeyDictionary() _extended = False def _locate_extended_factory(self, class_): for finder in instrumentation_finders: factory = finder(class_) if factory is not None: manager = self._extended_class_manager(class_, factory) return manager, factory else: return None, None def _check_conflicts(self, class_, factory): existing_factories = self._collect_management_factories_for(class_).\ difference([factory]) if existing_factories: raise TypeError( "multiple instrumentation implementations specified " "in %s inheritance hierarchy: %r" % ( class_.__name__, list(existing_factories))) def _extended_class_manager(self, class_, factory): manager = factory(class_) if not isinstance(manager, ClassManager): manager = _ClassInstrumentationAdapter(class_, manager) if factory != ClassManager and not self._extended: # somebody invoked a custom ClassManager. # reinstall global "getter" functions with the more # expensive ones. self._extended = True _install_instrumented_lookups() self._manager_finders[class_] = manager.manager_getter() self._state_finders[class_] = manager.state_getter() self._dict_finders[class_] = manager.dict_getter() return manager def _collect_management_factories_for(self, cls): """Return a collection of factories in play or specified for a hierarchy. Traverses the entire inheritance graph of a cls and returns a collection of instrumentation factories for those classes. Factories are extracted from active ClassManagers, if available, otherwise instrumentation_finders is consulted. 
""" hierarchy = util.class_hierarchy(cls) factories = set() for member in hierarchy: manager = self.manager_of_class(member) if manager is not None: factories.add(manager.factory) else: for finder in instrumentation_finders: factory = finder(member) if factory is not None: break else: factory = None factories.add(factory) factories.discard(None) return factories def unregister(self, class_): if class_ in self._manager_finders: del self._manager_finders[class_] del self._state_finders[class_] del self._dict_finders[class_] super(ExtendedInstrumentationRegistry, self).unregister(class_) def manager_of_class(self, cls): if cls is None: return None return self._manager_finders.get(cls, _default_manager_getter)(cls) def state_of(self, instance): if instance is None: raise AttributeError("None has no persistent state.") return self._state_finders.get( instance.__class__, _default_state_getter)(instance) def dict_of(self, instance): if instance is None: raise AttributeError("None has no persistent state.") return self._dict_finders.get( instance.__class__, _default_dict_getter)(instance) orm_instrumentation._instrumentation_factory = \ _instrumentation_factory = ExtendedInstrumentationRegistry() orm_instrumentation.instrumentation_finders = instrumentation_finders class InstrumentationManager(object): """User-defined class instrumentation extension. :class:`.InstrumentationManager` can be subclassed in order to change how class instrumentation proceeds. This class exists for the purposes of integration with other object management frameworks which would like to entirely modify the instrumentation methodology of the ORM, and is not intended for regular usage. For interception of class instrumentation events, see :class:`.InstrumentationEvents`. The API for this class should be considered as semi-stable, and may change slightly with new releases. .. 
versionchanged:: 0.8 :class:`.InstrumentationManager` was moved from :mod:`sqlalchemy.orm.instrumentation` to :mod:`sqlalchemy.ext.instrumentation`. """ # r4361 added a mandatory (cls) constructor to this interface. # given that, perhaps class_ should be dropped from all of these # signatures. def __init__(self, class_): pass def manage(self, class_, manager): setattr(class_, '_default_class_manager', manager) def dispose(self, class_, manager): delattr(class_, '_default_class_manager') def manager_getter(self, class_): def get(cls): return cls._default_class_manager return get def instrument_attribute(self, class_, key, inst): pass def post_configure_attribute(self, class_, key, inst): pass def install_descriptor(self, class_, key, inst): setattr(class_, key, inst) def uninstall_descriptor(self, class_, key): delattr(class_, key) def install_member(self, class_, key, implementation): setattr(class_, key, implementation) def uninstall_member(self, class_, key): delattr(class_, key) def instrument_collection_class(self, class_, key, collection_class): return collections.prepare_instrumentation(collection_class) def get_instance_dict(self, class_, instance): return instance.__dict__ def initialize_instance_dict(self, class_, instance): pass def install_state(self, class_, instance, state): setattr(instance, '_default_state', state) def remove_state(self, class_, instance): delattr(instance, '_default_state') def state_getter(self, class_): return lambda instance: getattr(instance, '_default_state') def dict_getter(self, class_): return lambda inst: self.get_instance_dict(class_, inst) class _ClassInstrumentationAdapter(ClassManager): """Adapts a user-defined InstrumentationManager to a ClassManager.""" def __init__(self, class_, override): self._adapted = override self._get_state = self._adapted.state_getter(class_) self._get_dict = self._adapted.dict_getter(class_) ClassManager.__init__(self, class_) def manage(self): self._adapted.manage(self.class_, self) def 
dispose(self): self._adapted.dispose(self.class_) def manager_getter(self): return self._adapted.manager_getter(self.class_) def instrument_attribute(self, key, inst, propagated=False): ClassManager.instrument_attribute(self, key, inst, propagated) if not propagated: self._adapted.instrument_attribute(self.class_, key, inst) def post_configure_attribute(self, key): super(_ClassInstrumentationAdapter, self).post_configure_attribute(key) self._adapted.post_configure_attribute(self.class_, key, self[key]) def install_descriptor(self, key, inst): self._adapted.install_descriptor(self.class_, key, inst) def uninstall_descriptor(self, key): self._adapted.uninstall_descriptor(self.class_, key) def install_member(self, key, implementation): self._adapted.install_member(self.class_, key, implementation) def uninstall_member(self, key): self._adapted.uninstall_member(self.class_, key) def instrument_collection_class(self, key, collection_class): return self._adapted.instrument_collection_class( self.class_, key, collection_class) def initialize_collection(self, key, state, factory): delegate = getattr(self._adapted, 'initialize_collection', None) if delegate: return delegate(key, state, factory) else: return ClassManager.initialize_collection(self, key, state, factory) def new_instance(self, state=None): instance = self.class_.__new__(self.class_) self.setup_instance(instance, state) return instance def _new_state_if_none(self, instance): """Install a default InstanceState if none is present. A private convenience method used by the __init__ decorator. 
""" if self.has_state(instance): return False else: return self.setup_instance(instance) def setup_instance(self, instance, state=None): self._adapted.initialize_instance_dict(self.class_, instance) if state is None: state = self._state_constructor(instance, self) # the given instance is assumed to have no state self._adapted.install_state(self.class_, instance, state) return state def teardown_instance(self, instance): self._adapted.remove_state(self.class_, instance) def has_state(self, instance): try: self._get_state(instance) except orm_exc.NO_STATE: return False else: return True def state_getter(self): return self._get_state def dict_getter(self): return self._get_dict def _install_instrumented_lookups(): """Replace global class/object management functions with ExtendedInstrumentationRegistry implementations, which allow multiple types of class managers to be present, at the cost of performance. This function is called only by ExtendedInstrumentationRegistry and unit tests specific to this behavior. The _reinstall_default_lookups() function can be called after this one to re-establish the default functions. 
""" _install_lookups( dict( instance_state=_instrumentation_factory.state_of, instance_dict=_instrumentation_factory.dict_of, manager_of_class=_instrumentation_factory.manager_of_class ) ) def _reinstall_default_lookups(): """Restore simplified lookups.""" _install_lookups( dict( instance_state=_default_state_getter, instance_dict=_default_dict_getter, manager_of_class=_default_manager_getter ) ) def _install_lookups(lookups): global instance_state, instance_dict, manager_of_class instance_state = lookups['instance_state'] instance_dict = lookups['instance_dict'] manager_of_class = lookups['manager_of_class'] orm_base.instance_state = attributes.instance_state = \ orm_instrumentation.instance_state = instance_state orm_base.instance_dict = attributes.instance_dict = \ orm_instrumentation.instance_dict = instance_dict orm_base.manager_of_class = attributes.manager_of_class = \ orm_instrumentation.manager_of_class = manager_of_class
gpl-3.0
ColinIanKing/autotest
client/kernel.py
3
30723
import os, copy, pickle, re, glob, time, logging from autotest.client import kernel_config, os_dep, kernelexpand from autotest.client import utils from autotest.client.shared import log, error def tee_output_logdir_mark(fn): def tee_logdir_mark_wrapper(self, *args, **dargs): mark = self.__class__.__name__ + "." + fn.__name__ logging.info("--- START %s ---", mark) self.job.logging.tee_redirect_debug_dir(self.log_dir) try: result = fn(self, *args, **dargs) finally: self.job.logging.restore() logging.info("--- END %s ---", mark) return result tee_logdir_mark_wrapper.__name__ = fn.__name__ return tee_logdir_mark_wrapper def _add_kernel_to_bootloader(bootloader, base_args, tag, args, image, initrd): """ Add a kernel with the specified tag to the boot config using the given bootloader object. Also process the base_args and args kernel arguments by removing all root= options and give the last root= option value to the bootloader as a root device. @param bootloader: bootloader object @param base_args: base cmdline kernel arguments @param tag: kernel tag @param args: kernel cmdline arguments that are merged with base_args; a root= option in "args" will override any from base_args @param image: kernel image file @param initrd: initrd file """ # remove existing entry if present bootloader.remove_kernel(tag) if base_args: if args: args = '%s %s' % (base_args, args) else: args = base_args bootloader.add_kernel(path=image, title=tag, initrd=initrd, args=args) class BootableKernel(object): def __init__(self, job): self.job = job self.installed_as = None # kernel choice in bootloader menu self.image = None self.initrd = '' def _boot_kernel(self, args, ident_check, expected_ident, subdir, notes): """ Boot a kernel, with post-boot kernel id check @param args: kernel cmdline arguments @param ident_check: check kernel id after boot @param expected_ident: @param subdir: job-step qualifier in status log @param notes: additional comment in status log """ # If we can check the kernel 
identity do so. if ident_check: when = int(time.time()) args += " IDENT=%d" % when self.job.next_step_prepend(["job.end_reboot_and_verify", when, expected_ident, subdir, notes]) else: self.job.next_step_prepend(["job.end_reboot", subdir, expected_ident, notes]) self.add_to_bootloader(args) # defer fsck for next reboot, to avoid reboots back to default kernel utils.system('touch /fastboot') # this file is removed automatically # Boot it. self.job.start_reboot() self.job.reboot(tag=self.installed_as) def add_to_bootloader(self, args=''): # Point bootloader to the selected tag. _add_kernel_to_bootloader(self.job.bootloader, self.job.config_get('boot.default_args'), self.installed_as, args, self.image, self.initrd) class kernel(BootableKernel): """ Class for compiling kernels. Data for the object includes the src files used to create the kernel, patches applied, config (base + changes), the build directory itself, and logged output Properties: job Backpointer to the job object we're part of autodir Path to the top level autotest dir (see global_config.ini, session COMMON/autotest_top_path) src_dir <tmp_dir>/src/ build_dir <tmp_dir>/linux/ config_dir <results_dir>/config/ log_dir <results_dir>/debug/ results_dir <results_dir>/results/ """ autodir = '' def __init__(self, job, base_tree, subdir, tmp_dir, build_dir, leave=False): """Initialize the kernel build environment job which job this build is part of base_tree base kernel tree. Can be one of the following: 1. A local tarball 2. A URL to a tarball 3. A local directory (will symlink it) 4. 
A shorthand expandable (eg '2.6.11-git3') subdir subdir in the results directory (eg "build") (holds config/, debug/, results/) tmp_dir leave Boolean, whether to leave existing tmpdir or not """ super(kernel, self).__init__(job) self.autodir = job.autodir self.src_dir = os.path.join(tmp_dir, 'src') self.build_dir = os.path.join(tmp_dir, build_dir) # created by get_kernel_tree self.config_dir = os.path.join(subdir, 'config') self.log_dir = os.path.join(subdir, 'debug') self.results_dir = os.path.join(subdir, 'results') self.subdir = os.path.basename(subdir) if not leave: if os.path.isdir(self.src_dir): utils.system('rm -rf ' + self.src_dir) if os.path.isdir(self.build_dir): utils.system('rm -rf ' + self.build_dir) if not os.path.exists(self.src_dir): os.mkdir(self.src_dir) for path in [self.config_dir, self.log_dir, self.results_dir]: if os.path.exists(path): utils.system('rm -rf ' + path) os.mkdir(path) logpath = os.path.join(self.log_dir, 'build_log') self.logfile = open(logpath, 'w+') self.applied_patches = [] self.target_arch = None self.build_target = 'bzImage' self.build_image = None arch = utils.get_current_kernel_arch() if arch == 's390' or arch == 's390x': self.build_target = 'image' elif arch == 'ia64': self.build_target = 'all' self.build_image = 'vmlinux.gz' if not leave: self.logfile.write('BASE: %s\n' % base_tree) # Where we have direct version hint record that # for later configuration selection. shorthand = re.compile(r'^\d+\.\d+\.\d+') if shorthand.match(base_tree): self.base_tree_version = base_tree else: self.base_tree_version = None # Actually extract the tree. Make sure we know it occured self.extract(base_tree) def kernelexpand(self, kernel): # If we have something like a path, just use it as it is if '/' in kernel: return [kernel] # Find the configured mirror list. 
mirrors = self.job.config_get('mirror.mirrors') if not mirrors: # LEGACY: convert the kernel.org mirror mirror = self.job.config_get('mirror.ftp_kernel_org') if mirror: korg = 'http://www.kernel.org/pub/linux/kernel' mirrors = [ [ korg + '/v2.6', mirror + '/v2.6' ], [ korg + '/people/akpm/patches/2.6', mirror + '/akpm' ], [ korg + '/people/mbligh', mirror + '/mbligh' ], ] patches = kernelexpand.expand_classic(kernel, mirrors) print patches return patches @log.record @tee_output_logdir_mark def extract(self, base_tree): if os.path.exists(base_tree): self.get_kernel_tree(base_tree) else: base_components = self.kernelexpand(base_tree) print 'kernelexpand: ' print base_components self.get_kernel_tree(base_components.pop(0)) if base_components: # apply remaining patches self.patch(*base_components) @log.record @tee_output_logdir_mark def patch(self, *patches): """Apply a list of patches (in order)""" if not patches: return print 'Applying patches: ', patches self.apply_patches(self.get_patches(patches)) @log.record @tee_output_logdir_mark def config(self, config_file='', config_list=None, defconfig=False, make=None): self.set_cross_cc() kernel_config.kernel_config(self.job, self.build_dir, self.config_dir, config_file, config_list, defconfig, self.base_tree_version, make) def get_patches(self, patches): """fetch the patches to the local src_dir""" local_patches = [] for patch in patches: dest = os.path.join(self.src_dir, os.path.basename(patch)) # FIXME: this isn't unique. Append something to it # like wget does if it's not there? 
print "get_file %s %s %s %s" % (patch, dest, self.src_dir, os.path.basename(patch)) utils.get_file(patch, dest) # probably safer to use the command, not python library md5sum = utils.system_output('md5sum ' + dest).split()[0] local_patches.append((patch, dest, md5sum)) return local_patches def apply_patches(self, local_patches): """apply the list of patches, in order""" builddir = self.build_dir os.chdir(builddir) if not local_patches: return None for (spec, local, md5sum) in local_patches: if local.endswith('.bz2') or local.endswith('.gz'): ref = spec else: ref = utils.force_copy(local, self.results_dir) ref = self.job.relative_path(ref) patch_id = "%s %s %s" % (spec, ref, md5sum) log = "PATCH: " + patch_id + "\n" print log utils.cat_file_to_cmd(local, 'patch -p1 > /dev/null') self.logfile.write(log) self.applied_patches.append(patch_id) def get_kernel_tree(self, base_tree): """Extract/link base_tree to self.build_dir""" # if base_tree is a dir, assume uncompressed kernel if os.path.isdir(base_tree): print 'Symlinking existing kernel source' if os.path.islink(self.build_dir): os.remove(self.build_dir) os.symlink(base_tree, self.build_dir) # otherwise, extract tarball else: os.chdir(os.path.dirname(self.src_dir)) # Figure out local destination for tarball tarball = os.path.join(self.src_dir, os.path.basename(base_tree.split(';')[0])) utils.get_file(base_tree, tarball) print 'Extracting kernel tarball:', tarball, '...' 
utils.extract_tarball_to_dir(tarball, self.build_dir) def extraversion(self, tag, append=True): os.chdir(self.build_dir) extraversion_sub = r's/^CONFIG_LOCALVERSION=\s*"\(.*\)"/CONFIG_LOCALVERSION=' cfg = self.build_dir + '/.config' if append: p = extraversion_sub + '"\\1-%s"/' % tag else: p = extraversion_sub + '"-%s"/' % tag if os.path.exists(cfg): utils.system('mv %s %s.old' % (cfg, cfg)) utils.system("sed '%s' < %s.old > %s" % (p, cfg, cfg)) self.config(make='oldconfig') else: self.config() @log.record @tee_output_logdir_mark def build(self, make_opts='', logfile ='', extraversion='autotest'): """build the kernel make_opts additional options to make, if any """ os_dep.commands('gcc', 'make') if logfile == '': logfile = os.path.join(self.log_dir, 'kernel_build') os.chdir(self.build_dir) if extraversion: self.extraversion(extraversion) self.set_cross_cc() # setup_config_file(config_file, config_overrides) # Not needed on 2.6, but hard to tell -- handle failure utils.system('make dep', ignore_status=True) threads = 2 * utils.count_cpus() build_string = 'make -j %d %s %s' % (threads, make_opts, self.build_target) # eg make bzImage, or make zImage print build_string utils.system(build_string) if kernel_config.modules_needed('.config'): utils.system('make -j %d modules' % (threads)) kernel_version = self.get_kernel_build_ver() kernel_version = re.sub('-autotest', '', kernel_version) self.logfile.write('BUILD VERSION: %s\n' % kernel_version) utils.force_copy(self.build_dir + '/System.map', self.results_dir) def build_timed(self, threads, timefile='/dev/null', make_opts='', output='/dev/null'): """time the bulding of the kernel""" os.chdir(self.build_dir) self.set_cross_cc() self.clean() build_string = ("/usr/bin/time -o %s make %s -j %s vmlinux" % (timefile, make_opts, threads)) build_string += ' > %s 2>&1' % output print build_string utils.system(build_string) if (not os.path.isfile('vmlinux')): errmsg = "no vmlinux found, kernel build failed" raise 
error.TestError(errmsg) @log.record @tee_output_logdir_mark def clean(self): """make clean in the kernel tree""" os.chdir(self.build_dir) print "make clean" utils.system('make clean > /dev/null 2> /dev/null') @log.record @tee_output_logdir_mark def mkinitrd(self, version, image, system_map, initrd): """Build kernel initrd image. Try to use distro specific way to build initrd image. Parameters: version new kernel version image new kernel image file system_map System.map file initrd initrd image file to build """ vendor = utils.get_os_vendor() if os.path.isfile(initrd): print "Existing %s file, will remove it." % initrd os.remove(initrd) args = self.job.config_get('kernel.mkinitrd_extra_args') # don't leak 'None' into mkinitrd command if not args: args = '' # It is important to match the version with a real directory inside # /lib/modules real_version_list = glob.glob('/lib/modules/%s*' % version) rl = len(real_version_list) if rl == 0: logging.error("No directory %s found under /lib/modules. 
Initramfs" "creation will most likely fail and your new kernel" "will fail to build", version) else: if rl > 1: logging.warning("Found more than one possible match for " "kernel version %s under /lib/modules", version) version = os.path.basename(real_version_list[0]) if vendor in ['Red Hat', 'Fedora']: try: cmd = os_dep.command('dracut') full_cmd = '%s -f %s %s' % (cmd, initrd, version) except ValueError: cmd = os_dep.command('mkinitrd') full_cmd = '%s %s %s %s' % (cmd, args, initrd, version) utils.system(full_cmd) elif vendor in ['SUSE']: utils.system('mkinitrd %s -k %s -i %s -M %s' % (args, image, initrd, system_map)) elif vendor in ['Debian', 'Ubuntu']: if os.path.isfile('/usr/sbin/mkinitrd'): cmd = '/usr/sbin/mkinitrd' elif os.path.isfile('/usr/sbin/mkinitramfs'): cmd = '/usr/sbin/mkinitramfs' else: raise error.TestError('No Debian initrd builder') utils.system('%s %s -o %s %s' % (cmd, args, initrd, version)) else: raise error.TestError('Unsupported vendor %s' % vendor) def set_build_image(self, image): self.build_image = image @log.record @tee_output_logdir_mark def install(self, tag='autotest', prefix='/', install_vmlinux=True): """make install in the kernel tree""" # Record that we have installed the kernel, and # the tag under which we installed it. 
self.installed_as = tag os.chdir(self.build_dir) if not os.path.isdir(prefix): os.mkdir(prefix) self.boot_dir = os.path.join(prefix, 'boot') if not os.path.isdir(self.boot_dir): os.mkdir(self.boot_dir) if not self.build_image: images = glob.glob('arch/*/boot/' + self.build_target) if len(images): self.build_image = images[0] else: self.build_image = self.build_target # remember installed files self.vmlinux = self.boot_dir + '/vmlinux-' + tag if (self.build_image != 'vmlinux'): self.image = self.boot_dir + '/vmlinuz-' + tag else: self.image = self.vmlinux install_vmlinux = True self.system_map = self.boot_dir + '/System.map-' + tag self.config_file = self.boot_dir + '/config-' + tag self.initrd = '' # copy to boot dir if install_vmlinux: utils.force_copy('vmlinux', self.vmlinux) if (self.build_image != 'vmlinux'): utils.force_copy(self.build_image, self.image) utils.force_copy('System.map', self.system_map) utils.force_copy('.config', self.config_file) if not kernel_config.modules_needed('.config'): return utils.system('make modules_install INSTALL_MOD_PATH=%s' % prefix) if prefix == '/': self.initrd = self.boot_dir + '/initrd-' + tag self.mkinitrd(self.get_kernel_build_ver(), self.image, self.system_map, self.initrd) def get_kernel_build_arch(self, arch=None): """ Work out the current kernel architecture (as a kernel arch) """ if not arch: arch = utils.get_current_kernel_arch() if re.match('i.86', arch): return 'i386' elif re.match('sun4u', arch): return 'sparc64' elif re.match('arm.*', arch): return 'arm' elif re.match('sa110', arch): return 'arm' elif re.match('s390x', arch): return 's390' elif re.match('parisc64', arch): return 'parisc' elif re.match('ppc.*', arch): return 'powerpc' elif re.match('mips.*', arch): return 'mips' else: return arch def get_kernel_build_release(self): releasem = re.compile(r'.*UTS_RELEASE\s+"([^"]+)".*'); versionm = re.compile(r'.*UTS_VERSION\s+"([^"]+)".*'); release = None version = None for f in [self.build_dir + 
"/include/linux/version.h", self.build_dir + "/include/linux/utsrelease.h", self.build_dir + "/include/linux/compile.h", self.build_dir + "/include/generated/utsrelease.h", self.build_dir + "/include/generated/compile.h"]: if os.path.exists(f): fd = open(f, 'r') for line in fd.readlines(): m = releasem.match(line) if m: release = m.groups()[0] m = versionm.match(line) if m: version = m.groups()[0] fd.close() return (release, version) def get_kernel_build_ident(self): (release, version) = self.get_kernel_build_release() if not release or not version: raise error.JobError('kernel has no identity') return release + '::' + version def boot(self, args='', ident=True): """ install and boot this kernel, do not care how just make it happen. """ # If the kernel has not yet been installed, # install it now as default tag. if not self.installed_as: self.install() expected_ident = self.get_kernel_build_ident() self._boot_kernel(args, ident, expected_ident, self.subdir, self.applied_patches) def get_kernel_build_ver(self): """Check Makefile and .config to return kernel version""" version = patchlevel = sublevel = extraversion = localversion = '' for line in open(self.build_dir + '/Makefile', 'r').readlines(): if line.startswith('VERSION'): version = line[line.index('=') + 1:].strip() if line.startswith('PATCHLEVEL'): patchlevel = line[line.index('=') + 1:].strip() if line.startswith('SUBLEVEL'): sublevel = line[line.index('=') + 1:].strip() if line.startswith('EXTRAVERSION'): extraversion = line[line.index('=') + 1:].strip() for line in open(self.build_dir + '/.config', 'r').readlines(): if line.startswith('CONFIG_LOCALVERSION='): localversion = line.rstrip().split('"')[1] return "%s.%s.%s%s%s" %(version, patchlevel, sublevel, extraversion, localversion) def set_build_target(self, build_target): if build_target: self.build_target = build_target print 'BUILD TARGET: %s' % self.build_target def set_cross_cc(self, target_arch=None, cross_compile=None, build_target='bzImage'): 
"""Set up to cross-compile. This is broken. We need to work out what the default compile produces, and if not, THEN set the cross compiler. """ if self.target_arch: return # if someone has set build_target, don't clobber in set_cross_cc # run set_build_target before calling set_cross_cc if not self.build_target: self.set_build_target(build_target) # If no 'target_arch' given assume native compilation if target_arch is None: target_arch = utils.get_current_kernel_arch() if target_arch == 'ppc64': if self.build_target == 'bzImage': self.build_target = 'vmlinux' if not cross_compile: cross_compile = self.job.config_get('kernel.cross_cc') if cross_compile: os.environ['CROSS_COMPILE'] = cross_compile else: if os.environ.has_key('CROSS_COMPILE'): del os.environ['CROSS_COMPILE'] return # HACK. Crap out for now. # At this point I know what arch I *want* to build for # but have no way of working out what arch the default # compiler DOES build for. def install_package(package): raise NotImplementedError("I don't exist yet!") if target_arch == 'ppc64': install_package('ppc64-cross') cross_compile = os.path.join(self.autodir, 'sources/ppc64-cross/bin') elif target_arch == 'x86_64': install_package('x86_64-cross') cross_compile = os.path.join(self.autodir, 'sources/x86_64-cross/bin') os.environ['ARCH'] = self.target_arch = target_arch self.cross_compile = cross_compile if self.cross_compile: os.environ['CROSS_COMPILE'] = self.cross_compile def pickle_dump(self, filename): """dump a pickle of ourself out to the specified filename we can't pickle the backreference to job (it contains fd's), nor would we want to. Same for logfile (fd's). 
""" temp = copy.copy(self) temp.job = None temp.logfile = None pickle.dump(temp, open(filename, 'w')) class rpm_kernel(BootableKernel): """ Class for installing a binary rpm kernel package """ def __init__(self, job, rpm_package, subdir): super(rpm_kernel, self).__init__(job) self.rpm_package = rpm_package self.log_dir = os.path.join(subdir, 'debug') self.subdir = os.path.basename(subdir) if os.path.exists(self.log_dir): utils.system('rm -rf ' + self.log_dir) os.mkdir(self.log_dir) def build(self, *args, **dargs): """ Dummy function, binary kernel so nothing to build. """ pass @log.record @tee_output_logdir_mark def install(self, tag='autotest', install_vmlinux=True): self.installed_as = tag self.image = None self.initrd = '' for rpm_pack in self.rpm_package: rpm_name = utils.system_output('rpm -qp ' + rpm_pack) # install utils.system('rpm -i --force ' + rpm_pack) # get file list files = utils.system_output('rpm -ql ' + rpm_name).splitlines() # search for vmlinuz for file in files: if file.startswith('/boot/vmlinuz'): self.full_version = file[len('/boot/vmlinuz-'):] self.image = file self.rpm_flavour = rpm_name.split('-')[1] # get version and release number self.version, self.release = utils.system_output( 'rpm --queryformat="%{VERSION}\\n%{RELEASE}\\n" -q ' + rpm_name).splitlines()[0:2] # prefer /boot/kernel-version before /boot/kernel if self.full_version: break # search for initrd for file in files: if file.startswith('/boot/init'): self.initrd = file # prefer /boot/initrd-version before /boot/initrd if len(file) > len('/boot/initrd'): break if self.image == None: errmsg = "specified rpm file(s) don't contain /boot/vmlinuz" raise error.TestError(errmsg) # install vmlinux if install_vmlinux: for rpm_pack in self.rpm_package: vmlinux = utils.system_output( 'rpm -q -l -p %s | grep /boot/vmlinux' % rpm_pack) utils.system('cd /; rpm2cpio %s | cpio -imuv .%s 2>&1' % (rpm_pack, vmlinux)) if not os.path.exists(vmlinux): raise error.TestError('%s does not exist after 
installing %s' % (vmlinux, rpm_pack)) def boot(self, args='', ident=True): """ install and boot this kernel """ # If the kernel has not yet been installed, # install it now as default tag. if not self.installed_as: self.install() expected_ident = self.full_version if not expected_ident: expected_ident = '-'.join([self.version, self.rpm_flavour, self.release]) self._boot_kernel(args, ident, expected_ident, None, 'rpm') class rpm_kernel_suse(rpm_kernel): """ Class for installing openSUSE/SLE rpm kernel package """ def install(self): # do not set the new kernel as the default one os.environ['PBL_AUTOTEST'] = '1' rpm_kernel.install(self, 'dummy') self.installed_as = self.job.bootloader.get_title_for_kernel(self.image) if not self.installed_as: errmsg = "cannot find installed kernel in bootloader configuration" raise error.TestError(errmsg) def add_to_bootloader(self, tag='dummy', args=''): """ Set parameters of this kernel in bootloader """ # pull the base argument set from the job config baseargs = self.job.config_get('boot.default_args') if baseargs: args = baseargs + ' ' + args self.job.bootloader.add_args(tag, args) def rpm_kernel_vendor(job, rpm_package, subdir): vendor = utils.get_os_vendor() if vendor == "SUSE": return rpm_kernel_suse(job, rpm_package, subdir) else: return rpm_kernel(job, rpm_package, subdir) # just make the preprocessor a nop def _preprocess_path_dummy(path): return path.strip() # pull in some optional site-specific path pre-processing preprocess_path = utils.import_site_function(__file__, "autotest.client.site_kernel", "preprocess_path", _preprocess_path_dummy) def auto_kernel(job, path, subdir, tmp_dir, build_dir, leave=False): """ Create a kernel object, dynamically selecting the appropriate class to use based on the path provided. 
""" kernel_paths = [preprocess_path(path)] if kernel_paths[0].endswith('.list'): # Fetch the list of packages to install kernel_list = os.path.join(tmp_dir, 'kernel.list') utils.get_file(kernel_paths[0], kernel_list) kernel_paths = [p.strip() for p in open(kernel_list).readlines()] if kernel_paths[0].endswith('.rpm'): rpm_paths = [] for kernel_path in kernel_paths: if os.path.exists(kernel_path): rpm_paths.append(kernel_path) else: # Fetch the rpm into the job's packages directory and pass it to # rpm_kernel rpm_name = os.path.basename(kernel_path) # If the preprocessed path (kernel_path) is only a name then # search for the kernel in all the repositories, else fetch the # kernel from that specific path. job.pkgmgr.fetch_pkg(rpm_name, os.path.join(job.pkgdir, rpm_name), repo_url=os.path.dirname(kernel_path)) rpm_paths.append(os.path.join(job.pkgdir, rpm_name)) return rpm_kernel_vendor(job, rpm_paths, subdir) else: if len(kernel_paths) > 1: raise error.TestError("don't know what to do with more than one non-rpm kernel file") return kernel(job, kernel_paths[0], subdir, tmp_dir, build_dir, leave)
gpl-2.0
batxes/4Cin
Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/mtx1_models/Six_zebra_models39759.py
4
13921
# Chimera marker-placement script, generated from a 3D genome model
# (Six_zebra_models). Places 56 spherical markers ("particles") into
# per-particle Chimera marker sets. The original generator emitted one
# copy-pasted stanza per particle; the data is factored into a table here.
import _surface
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    # Newer VolumePath API exposes Marker_Set directly.
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except:
    # Older API: marker sets are created through the volume path dialog.
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set

marker_sets = {}
surf_sets = {}

# Default particle colour; a handful of particles are highlighted below.
_GREY = (0.7, 0.7, 0.7)

# One entry per particle, index == particle id:
# ((x, y, z), (r, g, b), radius)
_PARTICLES = [
    ((11940.8, 9337.41, -158.497), _GREY, 507.685),
    ((12195.5, 9106.45, -1101.02), _GREY, 479.978),
    ((10973.7, 8381.8, 247.435), _GREY, 681.834),
    ((9509.51, 7502.26, 1857.76), _GREY, 522.532),
    ((9049.4, 7235.22, 2374.42), (0, 1, 0), 751.925),
    ((9271.81, 5480.11, 1126.19), _GREY, 437.001),
    ((8891.71, 4722.9, 2841.27), _GREY, 710.767),
    ((9264.8, 3068.69, 3090.96), _GREY, 762.077),
    ((9587.39, 2345.5, 4456.43), _GREY, 726.799),
    ((9372.65, 1421.29, 6023.79), _GREY, 885.508),
    ((10384.3, 2018.85, 7266.34), _GREY, 778.489),
    ((12030.1, 740.68, 7673.3), _GREY, 790.333),
    ((13581.2, -581.047, 7931.29), _GREY, 707.721),
    ((13475.7, 146.89, 6484.97), _GREY, 651.166),
    ((12253.4, -548.309, 7403.08), _GREY, 708.61),
    ((10716, -250.452, 7660.92), _GREY, 490.595),
    ((9688.92, 505.105, 6940.12), _GREY, 591.565),
    ((8489.39, 1369.57, 6361.24), _GREY, 581.287),
    ((8300.87, 988.724, 4612.07), _GREY, 789.529),
    ((6821.07, 1179.69, 4341.4), _GREY, 623.587),
    ((5037.12, 1018.56, 4308.68), _GREY, 1083.56),
    ((3860.26, -168.363, 4360.48), _GREY, 504.258),
    ((4417.72, 1037.95, 4913.69), _GREY, 805.519),
    ((5257.27, 2248.03, 6444.98), _GREY, 631.708),
    ((5966.74, 2786.32, 8402.05), _GREY, 805.942),
    ((6289.63, 2979.25, 9397.68), (1, 0.7, 0), 672.697),
    ((5529.08, 5570.16, 9035.92), _GREY, 797.863),
    ((4760.85, 7131.28, 9708.64), (1, 0.7, 0), 735.682),
    ((3786.6, 7360.01, 8945.25), _GREY, 602.14),
    ((1654.98, 7410.22, 8021.9), _GREY, 954.796),
    ((2487.12, 7674.97, 7874.06), _GREY, 1021.88),
    ((2296.04, 8839.24, 8689.6), _GREY, 909.323),
    ((1332.12, 10891.2, 8555.1), _GREY, 621.049),
    ((2253.76, 11876.7, 7994.34), _GREY, 525.154),
    ((2973.08, 12304, 6725.2), _GREY, 890.246),
    ((2849.51, 13287.1, 5241.82), _GREY, 671.216),
    ((2150.15, 13246.5, 3662.35), _GREY, 662.672),
    ((1533.01, 11876.1, 4094.41), _GREY, 646.682),
    ((930.542, 12322.5, 5434.01), _GREY, 769.945),
    ((2140.21, 11729.7, 6912.62), _GREY, 606.92),
    ((2388.73, 12815.5, 7471.97), _GREY, 622.571),
    ((2840.28, 11558.1, 7238.14), _GREY, 466.865),
    ((3207.73, 11615.9, 6493.22), _GREY, 682.933),
    ((2851.89, 11765.5, 7132.13), _GREY, 809.326),
    ((2411.48, 10531.3, 8401.7), _GREY, 796.72),
    ((4521.95, 8917.17, 9506.66), _GREY, 870.026),
    ((6159.18, 9125.57, 10404.4), _GREY, 909.577),
    ((7182.6, 9642.03, 10404.9), (0, 1, 0), 500.536),
    ((7812.64, 11430.5, 11020.9), _GREY, 725.276),
    ((8034.26, 13635.6, 12511.2), _GREY, 570.331),
    ((6557.43, 14208, 11872), _GREY, 492.203),
    ((5027.99, 11762.6, 12234.2), (0, 1, 0), 547.7),
    ((5139.78, 11881.3, 11476.4), _GREY, 581.921),
    ((4503.82, 13181.9, 10230.1), _GREY, 555.314),
    ((4542.31, 13826.8, 8823.6), _GREY, 404.219),
    ((5277.87, 12715, 7563.24), _GREY, 764.234),
]

# Create (or reuse) one marker set per particle and place its marker.
# As in the generated original, `s` and `mark` remain bound to the last
# set/marker after the loop.
for _idx, (_xyz, _rgb, _radius) in enumerate(_PARTICLES):
    _name = "particle_%d geometry" % _idx
    if _name not in marker_sets:
        marker_sets[_name] = new_marker_set(_name)
    s = marker_sets[_name]
    mark = s.place_marker(_xyz, _rgb, _radius)

# surf_sets is never populated in this script; the loop is kept for parity
# with sibling generated scripts that do fill it.
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
gpl-3.0
antonve/s4-project-mooc
cms/djangoapps/contentstore/management/commands/migrate_to_split.py
185
2174
"""
Django management command to migrate a course from the old Mongo modulestore
to the new split-Mongo modulestore.
"""
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.split_migrator import SplitMigrator
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from xmodule.modulestore import ModuleStoreEnum
from contentstore.management.commands.utils import user_from_str


class Command(BaseCommand):
    """
    Migrate a course from old-Mongo to split-Mongo, reusing the old course id
    unless a new org/course/run is given on the command line.
    """
    help = "Migrate a course from old-Mongo to split-Mongo. The new org, course, and run will default to the old one unless overridden"
    args = "course_key email <new org> <new course> <new run>"

    def parse_args(self, *args):
        """
        Return a 5-tuple of passed in values for (course_key, user, org, course, run).
        """
        if len(args) < 2:
            raise CommandError(
                "migrate_to_split requires at least two arguments: "
                "a course_key and a user identifier (email or ID)"
            )

        try:
            course_key = CourseKey.from_string(args[0])
        except InvalidKeyError:
            raise CommandError("Invalid location string")

        try:
            user = user_from_str(args[1])
        except User.DoesNotExist:
            raise CommandError("No user found identified by {}".format(args[1]))

        # Optional positional overrides; any position not supplied stays None.
        overrides = list(args[2:5])
        overrides += [None] * (3 - len(overrides))
        org, course, run = overrides

        return course_key, user.id, org, course, run

    def handle(self, *args, **options):
        """Run the migration with the parsed arguments."""
        course_key, user, org, course, run = self.parse_args(*args)

        split_store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.split)
        migrator = SplitMigrator(
            source_modulestore=modulestore(),
            split_modulestore=split_store,
        )

        migrator.migrate_mongo_course(course_key, user, org, course, run)
agpl-3.0
FCP-INDI/nipype
nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py
10
1440
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from ....testing import assert_equal from ..utils import SurfaceSmooth def test_SurfaceSmooth_inputs(): input_map = dict(args=dict(argstr='%s', ), cortex=dict(argstr='--cortex', usedefault=True, ), environ=dict(nohash=True, usedefault=True, ), fwhm=dict(argstr='--fwhm %.4f', xor=['smooth_iters'], ), hemi=dict(argstr='--hemi %s', mandatory=True, ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--sval %s', mandatory=True, ), out_file=dict(argstr='--tval %s', genfile=True, ), reshape=dict(argstr='--reshape', ), smooth_iters=dict(argstr='--smooth %d', xor=['fwhm'], ), subject_id=dict(argstr='--s %s', mandatory=True, ), subjects_dir=dict(), terminal_output=dict(nohash=True, ), ) inputs = SurfaceSmooth.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SurfaceSmooth_outputs(): output_map = dict(out_file=dict(), ) outputs = SurfaceSmooth.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(outputs.traits()[key], metakey), value
bsd-3-clause
dlutxx/memo
python/web.py
1
3133
# NOTE: Python 2 script (urllib2 / cStringIO); not runnable under Python 3.
from __future__ import print_function
from urllib2 import Request, urlopen, URLError
from urllib import urlencode
from cStringIO import StringIO
from gzip import GzipFile
import time
import sys


class Hacker(object):
    """Repeatedly POST a fixed form to the weibo 'recharge' endpoint.

    headers: iterable of (name, value) pairs added to every request.
    data: dict of form fields, urlencoded once in setUp and re-sent each call.
    """

    url = 'http://weirenwu.weibo.com/taskv2/?c=Person.recharge'

    def __init__(self, headers, data):
        self.setUp(headers, data)

    def setUp(self, headers, data):
        # Build the Request object once; run() re-uses it for every POST.
        q = Request(self.__class__.url)
        for k, v in headers:
            q.add_header(k, v)
        self.data = urlencode(data)
        self.req = q

    def run(self, n=2, sgap=.4):
        """POST n times with sgap seconds between attempts, then print a tally.

        A response is counted 'ok' on HTTP 200, 'failure' on any other status
        or on URLError (timeout is 2 seconds per request).
        """
        cnt = ok = fail = 0
        while n > 0:
            cnt += 1
            n -= 1
            print('%s...' % n)
            try:
                res = urlopen(self.req, self.data, 2)
                if res.getcode() == 200:
                    ok += 1
                else:
                    fail += 1
                # Fix: the original used res.headers['Content-Encoding'],
                # which raises KeyError for any response without that header
                # (py2 mimetools.Message[...] raises on absent keys).
                # .get() returns None instead, so plain responses pass through.
                if res.headers.get('Content-Encoding') == 'gzip':
                    res = GzipFile(fileobj=StringIO(res.read()), mode='r')
                # print res.read()
            except URLError as e:
                fail += 1
                print('-------------' * 2)
                print(e)
            except Exception as e:
                # Deliberate best-effort: log and keep hammering.
                print(e)
            if n > 0:
                time.sleep(sgap)
        print("total:{}, ok:{}, failure:{}".format(cnt, ok, fail))


if '__main__' == __name__:
    # Captured browser headers, one "Name: value" per line; split into pairs
    # below. NOTE(review): the Cookie header is assumed to be a single line --
    # a wrapped continuation line without ': ' would break the (k, v) unpack
    # in setUp. Verify against the original capture.
    headers = '''Host: weirenwu.weibo.com
Connection: keep-alive
Content-Length: 8
Cache-Control: max-age=0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Origin: http://weirenwu.weibo.com
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36
Content-Type: application/x-www-form-urlencoded
Referer: http://weirenwu.weibo.com/taskv2/?c=Person.recharge
Accept-Encoding: gzip,deflate,sdch
Accept-Language: zh-CN,zh;q=0.8
Cookie: SINAGLOBAL=3716629899572.581.1367569870099; UOR=,weibo.com,login.sina.com.cn; login_sid_t=ec5b61b2f73bf09def68c397a02af937; _s_tentry=-; Apache=9951985371299.088.1375709465592; ULV=1375709465597:10:1:1:9951985371299.088.1375709465592:1368514636396; SUE=es%3D2e07d6801092975a2a5f4399f49520a9%26ev%3Dv1%26es2%3D8c2bfdde52cb896ba41c5defa6e95232%26rs0%3D5nxSvjgmHeB9hNFv9ihZZ3Io1ASiOnMNZItgR4GN6LVIXVU9alY2fqxpiShXflxD0pUorjT3ilNxqUp3xZZ07l14WzpR1RbnZ0%252Fj%252FIsQRxCh9GAVR%252Frq3SBMa61ChaS29Dt6Vp8KLR7zxpPStdclI0IaVnhjIrO2i9n8GteS6vY%253D%26rv%3D0; SUP=cv%3D1%26bt%3D1375709475%26et%3D1375795875%26d%3Dc909%26i%3D7a40%26us%3D1%26vf%3D0%26vt%3D0%26ac%3D2%26st%3D0%26uid%3D2416364815%26name%3Drasell%2540163.com%26nick%3D%25E7%2594%25A8%25E6%2588%25B72416364815%26fmp%3D%26lcp%3D; SUS=SID-2416364815-1375709475-XD-qcy30-817d35c07eaab637c55a2ba6f78378ec; ALF=1378301474; SSOLoginState=1375709475; un=rasell@163.com; wvr=5; PHPSESSID=cp858gemm0o5iplenrencgt6t1; USRANIME=usrmdins31125; WBStore=81f33f6776a2f793|undefined'''.split(
        '\n')
    hd = []
    for i in headers:
        i = i.split(': ', 1)
        hd.append(i)
    data = {'amount': '1'}
    h = Hacker(hd, data)
    h.run(1000)
mit
ligature/ansible-modules-extras
windows/win_scheduled_task.py
59
1439
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Peter Mounce <public@neverrunwithscissors.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name DOCUMENTATION = ''' --- module: win_scheduled_task version_added: "2.0" short_description: Manage scheduled tasks description: - Manage scheduled tasks options: name: description: - Name of the scheduled task - Supports * as wildcard required: true enabled: description: - State that the task should become required: false choices: - yes - no default: yes author: Peter Mounce ''' EXAMPLES = ''' # Disable the scheduled tasks with "WindowsUpdate" in their name win_scheduled_task: name="*WindowsUpdate*" enabled=no '''
gpl-3.0
kustodian/ansible
lib/ansible/modules/cloud/amazon/ec2_group.py
8
57078
#!/usr/bin/python # -*- coding: utf-8 -*- # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: ec2_group author: "Andrew de Quincey (@adq)" version_added: "1.3" requirements: [ boto3 ] short_description: maintain an ec2 VPC security group. description: - Maintains ec2 security groups. This module has a dependency on python-boto >= 2.5. options: name: description: - Name of the security group. - One of and only one of I(name) or I(group_id) is required. - Required if I(state=present). required: false type: str group_id: description: - Id of group to delete (works only with absent). - One of and only one of I(name) or I(group_id) is required. required: false version_added: "2.4" type: str description: description: - Description of the security group. Required when C(state) is C(present). required: false type: str vpc_id: description: - ID of the VPC to create the group in. required: false type: str rules: description: - List of firewall inbound rules to enforce in this group (see example). If none are supplied, no inbound rules will be enabled. Rules list may include its own name in `group_name`. This allows idempotent loopback additions (e.g. allow group to access itself). Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule. Prior to 2.4 an individual source is allowed. In version 2.5 support for rule descriptions was added. required: false type: list elements: dict suboptions: cidr_ip: type: str description: - The IPv4 CIDR range traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). 
cidr_ipv6: type: str description: - The IPv6 CIDR range traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). ip_prefix: type: str description: - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) that traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_id: type: str description: - The ID of the Security Group that traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_name: type: str description: - Name of the Security Group that traffic is coming from. - If the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_desc: type: str description: - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. proto: type: str description: - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) from_port: type: int description: The start of the range of ports that traffic is coming from. A value of C(-1) indicates all ports. to_port: type: int description: The end of the range of ports that traffic is coming from. A value of C(-1) indicates all ports. rule_desc: type: str description: A description for the rule. rules_egress: description: - List of firewall outbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled. Rule Egress sources list support was added in version 2.4. In version 2.5 support for rule descriptions was added. 
required: false version_added: "1.6" type: list elements: dict suboptions: cidr_ip: type: str description: - The IPv4 CIDR range traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). cidr_ipv6: type: str description: - The IPv6 CIDR range traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). ip_prefix: type: str description: - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) that traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_id: type: str description: - The ID of the Security Group that traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_name: type: str description: - Name of the Security Group that traffic is going to. - If the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_desc: type: str description: - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. proto: type: str description: - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) from_port: type: int description: The start of the range of ports that traffic is going to. A value of C(-1) indicates all ports. to_port: type: int description: The end of the range of ports that traffic is going to. A value of C(-1) indicates all ports. rule_desc: type: str description: A description for the rule. state: version_added: "1.4" description: - Create or delete a security group. 
required: false default: 'present' choices: [ "present", "absent" ] aliases: [] type: str purge_rules: version_added: "1.8" description: - Purge existing rules on security group that are not found in rules. required: false default: 'true' aliases: [] type: bool purge_rules_egress: version_added: "1.8" description: - Purge existing rules_egress on security group that are not found in rules_egress. required: false default: 'true' aliases: [] type: bool tags: version_added: "2.4" description: - A dictionary of one or more tags to assign to the security group. required: false type: dict aliases: ['resource_tags'] purge_tags: version_added: "2.4" description: - If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter is not set then tags will not be modified. required: false default: yes type: bool extends_documentation_fragment: - aws - ec2 notes: - If a rule declares a group_name and that group doesn't exist, it will be automatically created. In that case, group_desc should be provided as well. The module will refuse to create a depended-on group without a description. - Preview diff mode support is added in version 2.7. 
''' EXAMPLES = ''' - name: example using security group rule descriptions ec2_group: name: "{{ name }}" description: sg with rule descriptions vpc_id: vpc-xxxxxxxx profile: "{{ aws_profile }}" region: us-east-1 rules: - proto: tcp ports: - 80 cidr_ip: 0.0.0.0/0 rule_desc: allow all on port 80 - name: example ec2 group ec2_group: name: example description: an example EC2 group vpc_id: 12345 region: eu-west-1 aws_secret_key: SECRET aws_access_key: ACCESS rules: - proto: tcp from_port: 80 to_port: 80 cidr_ip: 0.0.0.0/0 - proto: tcp from_port: 22 to_port: 22 cidr_ip: 10.0.0.0/8 - proto: tcp from_port: 443 to_port: 443 # this should only be needed for EC2 Classic security group rules # because in a VPC an ELB will use a user-account security group group_id: amazon-elb/sg-87654321/amazon-elb-sg - proto: tcp from_port: 3306 to_port: 3306 group_id: 123412341234/sg-87654321/exact-name-of-sg - proto: udp from_port: 10050 to_port: 10050 cidr_ip: 10.0.0.0/8 - proto: udp from_port: 10051 to_port: 10051 group_id: sg-12345678 - proto: icmp from_port: 8 # icmp type, -1 = any type to_port: -1 # icmp subtype, -1 = any subtype cidr_ip: 10.0.0.0/8 - proto: all # the containing group name may be specified here group_name: example - proto: all # in the 'proto' attribute, if you specify -1, all, or a protocol number other than tcp, udp, icmp, or 58 (ICMPv6), # traffic on all ports is allowed, regardless of any ports you specify from_port: 10050 # this value is ignored to_port: 10050 # this value is ignored cidr_ip: 10.0.0.0/8 rules_egress: - proto: tcp from_port: 80 to_port: 80 cidr_ip: 0.0.0.0/0 cidr_ipv6: 64:ff9b::/96 group_name: example-other # description to use if example-other needs to be created group_desc: other example EC2 group - name: example2 ec2 group ec2_group: name: example2 description: an example2 EC2 group vpc_id: 12345 region: eu-west-1 rules: # 'ports' rule keyword was introduced in version 2.4. 
It accepts a single port value or a list of values including ranges (from_port-to_port). - proto: tcp ports: 22 group_name: example-vpn - proto: tcp ports: - 80 - 443 - 8080-8099 cidr_ip: 0.0.0.0/0 # Rule sources list support was added in version 2.4. This allows to define multiple sources per source type as well as multiple source types per rule. - proto: tcp ports: - 6379 - 26379 group_name: - example-vpn - example-redis - proto: tcp ports: 5665 group_name: example-vpn cidr_ip: - 172.16.1.0/24 - 172.16.17.0/24 cidr_ipv6: - 2607:F8B0::/32 - 64:ff9b::/96 group_id: - sg-edcd9784 diff: True - name: "Delete group by its id" ec2_group: region: eu-west-1 group_id: sg-33b4ee5b state: absent ''' RETURN = ''' group_name: description: Security group name sample: My Security Group type: str returned: on create/update group_id: description: Security group id sample: sg-abcd1234 type: str returned: on create/update description: description: Description of security group sample: My Security Group type: str returned: on create/update tags: description: Tags associated with the security group sample: Name: My Security Group Purpose: protecting stuff type: dict returned: on create/update vpc_id: description: ID of VPC to which the security group belongs sample: vpc-abcd1234 type: str returned: on create/update ip_permissions: description: Inbound rules associated with the security group. sample: - from_port: 8182 ip_protocol: tcp ip_ranges: - cidr_ip: "1.1.1.1/32" ipv6_ranges: [] prefix_list_ids: [] to_port: 8182 user_id_group_pairs: [] type: list returned: on create/update ip_permissions_egress: description: Outbound rules associated with the security group. 
sample: - ip_protocol: -1 ip_ranges: - cidr_ip: "0.0.0.0/0" ipv6_ranges: [] prefix_list_ids: [] user_id_group_pairs: [] type: list returned: on create/update owner_id: description: AWS Account ID of the security group sample: 123456789012 type: int returned: on create/update ''' import json import re import itertools from copy import deepcopy from time import sleep from collections import namedtuple from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code from ansible.module_utils.aws.iam import get_aws_account_id from ansible.module_utils.aws.waiters import get_waiter from ansible.module_utils.ec2 import AWSRetry, camel_dict_to_snake_dict, compare_aws_tags from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list from ansible.module_utils.common.network import to_ipv6_subnet, to_subnet from ansible.module_utils.compat.ipaddress import ip_network, IPv6Network from ansible.module_utils._text import to_text from ansible.module_utils.six import string_types try: from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # caught by AnsibleAWSModule Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description']) valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix']) current_account_id = None def rule_cmp(a, b): """Compare rules without descriptions""" for prop in ['port_range', 'protocol', 'target', 'target_type']: if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol): # equal protocols can interchange `(-1, -1)` and `(None, None)` if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)): continue elif getattr(a, prop) != getattr(b, prop): return False elif getattr(a, prop) != getattr(b, prop): return False return True def rules_to_permissions(rules): return [to_permission(rule) for rule in rules] def to_permission(rule): # take a Rule, output the serialized grant 
perm = { 'IpProtocol': rule.protocol, } perm['FromPort'], perm['ToPort'] = rule.port_range if rule.target_type == 'ipv4': perm['IpRanges'] = [{ 'CidrIp': rule.target, }] if rule.description: perm['IpRanges'][0]['Description'] = rule.description elif rule.target_type == 'ipv6': perm['Ipv6Ranges'] = [{ 'CidrIpv6': rule.target, }] if rule.description: perm['Ipv6Ranges'][0]['Description'] = rule.description elif rule.target_type == 'group': if isinstance(rule.target, tuple): pair = {} if rule.target[0]: pair['UserId'] = rule.target[0] # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific if rule.target[1]: pair['GroupId'] = rule.target[1] elif rule.target[2]: pair['GroupName'] = rule.target[2] perm['UserIdGroupPairs'] = [pair] else: perm['UserIdGroupPairs'] = [{ 'GroupId': rule.target }] if rule.description: perm['UserIdGroupPairs'][0]['Description'] = rule.description elif rule.target_type == 'ip_prefix': perm['PrefixListIds'] = [{ 'PrefixListId': rule.target, }] if rule.description: perm['PrefixListIds'][0]['Description'] = rule.description elif rule.target_type not in valid_targets: raise ValueError('Invalid target type for rule {0}'.format(rule)) return fix_port_and_protocol(perm) def rule_from_group_permission(perm): def ports_from_permission(p): if 'FromPort' not in p and 'ToPort' not in p: return (None, None) return (int(perm['FromPort']), int(perm['ToPort'])) # outputs a rule tuple for target_key, target_subkey, target_type in [ ('IpRanges', 'CidrIp', 'ipv4'), ('Ipv6Ranges', 'CidrIpv6', 'ipv6'), ('PrefixListIds', 'PrefixListId', 'ip_prefix'), ]: if target_key not in perm: continue for r in perm[target_key]: # there may be several IP ranges here, which is ok yield Rule( ports_from_permission(perm), to_text(perm['IpProtocol']), r[target_subkey], target_type, r.get('Description') ) if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']: for pair in perm['UserIdGroupPairs']: target = ( pair.get('UserId', None), 
pair.get('GroupId', None), pair.get('GroupName', None), ) if pair.get('UserId', '').startswith('amazon-'): # amazon-elb and amazon-prefix rules don't need # group-id specified, so remove it when querying # from permission target = ( target[0], None, target[2], ) elif 'VpcPeeringConnectionId' in pair or pair['UserId'] != current_account_id: target = ( pair.get('UserId', None), pair.get('GroupId', None), pair.get('GroupName', None), ) yield Rule( ports_from_permission(perm), to_text(perm['IpProtocol']), target, 'group', pair.get('Description') ) @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) def get_security_groups_with_backoff(connection, **kwargs): return connection.describe_security_groups(**kwargs) @AWSRetry.backoff(tries=5, delay=5, backoff=2.0) def sg_exists_with_backoff(connection, **kwargs): try: return connection.describe_security_groups(**kwargs) except is_boto3_error_code('InvalidGroup.NotFound'): return {'SecurityGroups': []} def deduplicate_rules_args(rules): """Returns unique rules""" if rules is None: return None return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values()) def validate_rule(module, rule): VALID_PARAMS = ('cidr_ip', 'cidr_ipv6', 'ip_prefix', 'group_id', 'group_name', 'group_desc', 'proto', 'from_port', 'to_port', 'rule_desc') if not isinstance(rule, dict): module.fail_json(msg='Invalid rule parameter type [%s].' 
% type(rule)) for k in rule: if k not in VALID_PARAMS: module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule)) if 'group_id' in rule and 'cidr_ip' in rule: module.fail_json(msg='Specify group_id OR cidr_ip, not both') elif 'group_name' in rule and 'cidr_ip' in rule: module.fail_json(msg='Specify group_name OR cidr_ip, not both') elif 'group_id' in rule and 'cidr_ipv6' in rule: module.fail_json(msg="Specify group_id OR cidr_ipv6, not both") elif 'group_name' in rule and 'cidr_ipv6' in rule: module.fail_json(msg="Specify group_name OR cidr_ipv6, not both") elif 'cidr_ip' in rule and 'cidr_ipv6' in rule: module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both") elif 'group_id' in rule and 'group_name' in rule: module.fail_json(msg='Specify group_id OR group_name, not both') def get_target_from_rule(module, client, rule, name, group, groups, vpc_id): """ Returns tuple of (target_type, target, group_created) after validating rule params. rule: Dict describing a rule. name: Name of the security group being managed. groups: Dict of all available security groups. AWS accepts an ip range or a security group as target of a rule. This function validate the rule specification and return either a non-None group_id or a non-None ip range. """ FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)' group_id = None group_name = None target_group_created = False validate_rule(module, rule) if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']): # this is a foreign Security Group. 
Since you can't fetch it you must create an instance of it owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups() group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name) groups[group_id] = group_instance groups[group_name] = group_instance # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific if group_id and group_name: group_name = None return 'group', (owner_id, group_id, group_name), False elif 'group_id' in rule: return 'group', rule['group_id'], False elif 'group_name' in rule: group_name = rule['group_name'] if group_name == name: group_id = group['GroupId'] groups[group_id] = group groups[group_name] = group elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'): # both are VPC groups, this is ok group_id = groups[group_name]['GroupId'] elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')): # both are EC2 classic, this is ok group_id = groups[group_name]['GroupId'] else: auto_group = None filters = {'group-name': group_name} if vpc_id: filters['vpc-id'] = vpc_id # if we got here, either the target group does not exist, or there # is a mix of EC2 classic + VPC groups. 
Mixing of EC2 classic + VPC # is bad, so we have to create a new SG because no compatible group # exists if not rule.get('group_desc', '').strip(): # retry describing the group once try: auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError): module.fail_json(msg="group %s will be automatically created by rule %s but " "no description was provided" % (group_name, rule)) except ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e) elif not module.check_mode: params = dict(GroupName=group_name, Description=rule['group_desc']) if vpc_id: params['VpcId'] = vpc_id try: auto_group = client.create_security_group(**params) get_waiter( client, 'security_group_exists', ).wait( GroupIds=[auto_group['GroupId']], ) except is_boto3_error_code('InvalidGroup.Duplicate'): # The group exists, but didn't show up in any of our describe-security-groups calls # Try searching on a filter for the name, and allow a retry window for AWS to update # the model on their end. try: auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] except IndexError as e: module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name)) except ClientError as e: module.fail_json_aws( e, msg="Could not create or use existing group '{0}' in rule. 
Make sure the group exists".format(group_name)) if auto_group is not None: group_id = auto_group['GroupId'] groups[group_id] = auto_group groups[group_name] = auto_group target_group_created = True return 'group', group_id, target_group_created elif 'cidr_ip' in rule: return 'ipv4', validate_ip(module, rule['cidr_ip']), False elif 'cidr_ipv6' in rule: return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False elif 'ip_prefix' in rule: return 'ip_prefix', rule['ip_prefix'], False module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule) def ports_expand(ports): # takes a list of ports and returns a list of (port_from, port_to) ports_expanded = [] for port in ports: if not isinstance(port, string_types): ports_expanded.append((port,) * 2) elif '-' in port: ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1))) else: ports_expanded.append((int(port.strip()),) * 2) return ports_expanded def rule_expand_ports(rule): # takes a rule dict and returns a list of expanded rule dicts if 'ports' not in rule: if isinstance(rule.get('from_port'), string_types): rule['from_port'] = int(rule.get('from_port')) if isinstance(rule.get('to_port'), string_types): rule['to_port'] = int(rule.get('to_port')) return [rule] ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']] rule_expanded = [] for from_to in ports_expand(ports): temp_rule = rule.copy() del temp_rule['ports'] temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to) rule_expanded.append(temp_rule) return rule_expanded def rules_expand_ports(rules): # takes a list of rules and expands it based on 'ports' if not rules: return rules return [rule for rule_complex in rules for rule in rule_expand_ports(rule_complex)] def rule_expand_source(rule, source_type): # takes a rule dict and returns a list of expanded rule dicts for specified source_type sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]] 
source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') rule_expanded = [] for source in sources: temp_rule = rule.copy() for s in source_types_all: temp_rule.pop(s, None) temp_rule[source_type] = source rule_expanded.append(temp_rule) return rule_expanded def rule_expand_sources(rule): # takes a rule dict and returns a list of expanded rule discts source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule) return [r for stype in source_types for r in rule_expand_source(rule, stype)] def rules_expand_sources(rules): # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name' if not rules: return rules return [rule for rule_complex in rules for rule in rule_expand_sources(rule_complex)] def update_rules_description(module, client, rule_type, group_id, ip_permissions): if module.check_mode: return try: if rule_type == "in": client.update_security_group_rule_descriptions_ingress(GroupId=group_id, IpPermissions=ip_permissions) if rule_type == "out": client.update_security_group_rule_descriptions_egress(GroupId=group_id, IpPermissions=ip_permissions) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id) def fix_port_and_protocol(permission): for key in ('FromPort', 'ToPort'): if key in permission: if permission[key] is None: del permission[key] else: permission[key] = int(permission[key]) permission['IpProtocol'] = to_text(permission['IpProtocol']) return permission def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id): if revoke_ingress: revoke(client, module, revoke_ingress, group_id, 'in') if revoke_egress: revoke(client, module, revoke_egress, group_id, 'out') return bool(revoke_ingress or revoke_egress) def revoke(client, module, ip_permissions, group_id, rule_type): if not module.check_mode: try: if rule_type == 'in': 
client.revoke_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions) elif rule_type == 'out': client.revoke_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions) except (BotoCoreError, ClientError) as e: rules = 'ingress rules' if rule_type == 'in' else 'egress rules' module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions)) def add_new_permissions(client, module, new_ingress, new_egress, group_id): if new_ingress: authorize(client, module, new_ingress, group_id, 'in') if new_egress: authorize(client, module, new_egress, group_id, 'out') return bool(new_ingress or new_egress) def authorize(client, module, ip_permissions, group_id, rule_type): if not module.check_mode: try: if rule_type == 'in': client.authorize_security_group_ingress(GroupId=group_id, IpPermissions=ip_permissions) elif rule_type == 'out': client.authorize_security_group_egress(GroupId=group_id, IpPermissions=ip_permissions) except (BotoCoreError, ClientError) as e: rules = 'ingress rules' if rule_type == 'in' else 'egress rules' module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions)) def validate_ip(module, cidr_ip): split_addr = cidr_ip.split('/') if len(split_addr) == 2: # this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set # Get the network bits if IPv4, and validate if IPv6. try: ip = to_subnet(split_addr[0], split_addr[1]) if ip != cidr_ip: module.warn("One of your CIDR addresses ({0}) has host bits set. 
To get rid of this warning, " "check the network mask and make sure that only network bits are set: {1}.".format( cidr_ip, ip)) except ValueError: # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here try: isinstance(ip_network(to_text(cidr_ip)), IPv6Network) ip = cidr_ip except ValueError: # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1] if ip6 != cidr_ip: module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, " "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6)) return ip6 return ip return cidr_ip def update_tags(client, module, group_id, current_tags, tags, purge_tags): tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags) if not module.check_mode: if tags_to_delete: try: client.delete_tags(Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete]) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete)) # Add/update tags if tags_need_modify: try: client.create_tags(Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)) except (BotoCoreError, ClientError) as e: module.fail_json(e, msg="Unable to add tags {0}".format(tags_need_modify)) return bool(tags_need_modify or tags_to_delete) def update_rule_descriptions(module, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list): changed = False client = module.client('ec2') ingress_needs_desc_update = [] egress_needs_desc_update = [] for present_rule in present_egress: needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description] for r in needs_update: 
named_tuple_egress_list.remove(r) egress_needs_desc_update.extend(needs_update) for present_rule in present_ingress: needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description] for r in needs_update: named_tuple_ingress_list.remove(r) ingress_needs_desc_update.extend(needs_update) if ingress_needs_desc_update: update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update)) changed |= True if egress_needs_desc_update: update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update)) changed |= True return changed def create_security_group(client, module, name, description, vpc_id): if not module.check_mode: params = dict(GroupName=name, Description=description) if vpc_id: params['VpcId'] = vpc_id try: group = client.create_security_group(**params) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Unable to create security group") # When a group is created, an egress_rule ALLOW ALL # to 0.0.0.0/0 is added automatically but it's not # reflected in the object returned by the AWS API # call. 
We re-read the group for getting an updated object # amazon sometimes takes a couple seconds to update the security group so wait till it exists while True: sleep(3) group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] if group.get('VpcId') and not group.get('IpPermissionsEgress'): pass else: break return group return None def wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_ingress, purge_egress): group_id = group['GroupId'] tries = 6 def await_rules(group, desired_rules, purge, rule_key): for i in range(tries): current_rules = set(sum([list(rule_from_group_permission(p)) for p in group[rule_key]], [])) if purge and len(current_rules ^ set(desired_rules)) == 0: return group elif purge: conflicts = current_rules ^ set(desired_rules) # For cases where set comparison is equivalent, but invalid port/proto exist for a, b in itertools.combinations(conflicts, 2): if rule_cmp(a, b): conflicts.discard(a) conflicts.discard(b) if not len(conflicts): return group elif current_rules.issuperset(desired_rules) and not purge: return group sleep(10) group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0] module.warn("Ran out of time waiting for {0} {1}. 
Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules)) return group group = get_security_groups_with_backoff(module.client('ec2'), GroupIds=[group_id])['SecurityGroups'][0] if 'VpcId' in group and module.params.get('rules_egress') is not None: group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress') return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions') def group_exists(client, module, vpc_id, group_id, name): params = {'Filters': []} if group_id: params['GroupIds'] = [group_id] if name: # Add name to filters rather than params['GroupNames'] # because params['GroupNames'] only checks the default vpc if no vpc is provided params['Filters'].append({'Name': 'group-name', 'Values': [name]}) if vpc_id: params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]}) # Don't filter by description to maintain backwards compatibility try: security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', []) all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', []) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Error in describe_security_groups") if security_groups: groups = dict((group['GroupId'], group) for group in all_groups) groups.update(dict((group['GroupName'], group) for group in all_groups)) if vpc_id: vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id) groups.update(vpc_wins) # maintain backwards compatibility by using the last matching group return security_groups[-1], groups return None, {} def verify_rules_with_descriptions_permitted(client, module, rules, rules_egress): if not hasattr(client, "update_security_group_rule_descriptions_egress"): all_rules = rules if rules else [] + rules_egress if rules_egress else [] if any('rule_desc' in rule for rule in all_rules): module.fail_json(msg="Using rule descriptions requires 
botocore version >= 1.7.2.") def get_diff_final_resource(client, module, security_group): def get_account_id(security_group, module): try: owner_id = security_group.get('owner_id', module.client('sts').get_caller_identity()['Account']) except (BotoCoreError, ClientError) as e: owner_id = "Unable to determine owner_id: {0}".format(to_text(e)) return owner_id def get_final_tags(security_group_tags, specified_tags, purge_tags): if specified_tags is None: return security_group_tags tags_need_modify, tags_to_delete = compare_aws_tags(security_group_tags, specified_tags, purge_tags) end_result_tags = dict((k, v) for k, v in specified_tags.items() if k not in tags_to_delete) end_result_tags.update(dict((k, v) for k, v in security_group_tags.items() if k not in tags_to_delete)) end_result_tags.update(tags_need_modify) return end_result_tags def get_final_rules(client, module, security_group_rules, specified_rules, purge_rules): if specified_rules is None: return security_group_rules if purge_rules: final_rules = [] else: final_rules = list(security_group_rules) specified_rules = flatten_nested_targets(module, deepcopy(specified_rules)) for rule in specified_rules: format_rule = { 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'), 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': [] } if rule.get('proto', 'tcp') in ('all', '-1', -1): format_rule['ip_protocol'] = '-1' format_rule.pop('from_port') format_rule.pop('to_port') elif rule.get('ports'): if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)): rule['ports'] = [rule['ports']] for port in rule.get('ports'): if isinstance(port, string_types) and '-' in port: format_rule['from_port'], format_rule['to_port'] = port.split('-') else: format_rule['from_port'] = format_rule['to_port'] = port elif rule.get('from_port') or rule.get('to_port'): format_rule['from_port'] = rule.get('from_port', rule.get('to_port')) 
format_rule['to_port'] = rule.get('to_port', rule.get('from_port')) for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'): if rule.get(source_type): rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type) if rule.get('rule_desc'): format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}] else: if not isinstance(rule[source_type], list): rule[source_type] = [rule[source_type]] format_rule[rule_key] = [{source_type: target} for target in rule[source_type]] if rule.get('group_id') or rule.get('group_name'): rule_sg = camel_dict_to_snake_dict(group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0]) format_rule['user_id_group_pairs'] = [{ 'description': rule_sg.get('description', rule_sg.get('group_desc')), 'group_id': rule_sg.get('group_id', rule.get('group_id')), 'group_name': rule_sg.get('group_name', rule.get('group_name')), 'peering_status': rule_sg.get('peering_status'), 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)), 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']), 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id') }] for k, v in list(format_rule['user_id_group_pairs'][0].items()): if v is None: format_rule['user_id_group_pairs'][0].pop(k) final_rules.append(format_rule) # Order final rules consistently final_rules.sort(key=get_ip_permissions_sort_key) return final_rules security_group_ingress = security_group.get('ip_permissions', []) specified_ingress = module.params['rules'] purge_ingress = module.params['purge_rules'] security_group_egress = security_group.get('ip_permissions_egress', []) specified_egress = module.params['rules_egress'] purge_egress = module.params['purge_rules_egress'] return { 'description': module.params['description'], 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'), 'group_name': security_group.get('group_name', 
module.params['name']), 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress), 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, purge_egress), 'owner_id': get_account_id(security_group, module), 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']), 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])} def flatten_nested_targets(module, rules): def _flatten(targets): for target in targets: if isinstance(target, list): for t in _flatten(target): yield t elif isinstance(target, string_types): yield target if rules is not None: for rule in rules: target_list_type = None if isinstance(rule.get('cidr_ip'), list): target_list_type = 'cidr_ip' elif isinstance(rule.get('cidr_ipv6'), list): target_list_type = 'cidr_ipv6' if target_list_type is not None: rule[target_list_type] = list(_flatten(rule[target_list_type])) return rules def get_rule_sort_key(dicts): if dicts.get('cidr_ip'): return dicts.get('cidr_ip') elif dicts.get('cidr_ipv6'): return dicts.get('cidr_ipv6') elif dicts.get('prefix_list_id'): return dicts.get('prefix_list_id') elif dicts.get('group_id'): return dicts.get('group_id') return None def get_ip_permissions_sort_key(rule): if rule.get('ip_ranges'): rule.get('ip_ranges').sort(key=get_rule_sort_key) return rule.get('ip_ranges')[0]['cidr_ip'] elif rule.get('ipv6_ranges'): rule.get('ipv6_ranges').sort(key=get_rule_sort_key) return rule.get('ipv6_ranges')[0]['cidr_ipv6'] elif rule.get('prefix_list_ids'): rule.get('prefix_list_ids').sort(key=get_rule_sort_key) return rule.get('prefix_list_ids')[0]['prefix_list_id'] elif rule.get('user_id_group_pairs'): rule.get('user_id_group_pairs').sort(key=get_rule_sort_key) return rule.get('user_id_group_pairs')[0]['group_id'] return None def main(): argument_spec = dict( name=dict(), group_id=dict(), description=dict(), vpc_id=dict(), rules=dict(type='list'), 
rules_egress=dict(type='list'), state=dict(default='present', type='str', choices=['present', 'absent']), purge_rules=dict(default=True, required=False, type='bool'), purge_rules_egress=dict(default=True, required=False, type='bool'), tags=dict(required=False, type='dict', aliases=['resource_tags']), purge_tags=dict(default=True, required=False, type='bool') ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, required_one_of=[['name', 'group_id']], required_if=[['state', 'present', ['name']]], ) name = module.params['name'] group_id = module.params['group_id'] description = module.params['description'] vpc_id = module.params['vpc_id'] rules = flatten_nested_targets(module, deepcopy(module.params['rules'])) rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress'])) rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules))) rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress))) state = module.params.get('state') purge_rules = module.params['purge_rules'] purge_rules_egress = module.params['purge_rules_egress'] tags = module.params['tags'] purge_tags = module.params['purge_tags'] if state == 'present' and not description: module.fail_json(msg='Must provide description when state is present.') changed = False client = module.client('ec2') verify_rules_with_descriptions_permitted(client, module, rules, rules_egress) group, groups = group_exists(client, module, vpc_id, group_id, name) group_created_new = not bool(group) global current_account_id current_account_id = get_aws_account_id(module) before = {} after = {} # Ensure requested group is absent if state == 'absent': if group: # found a match, delete it before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) try: if not module.check_mode: client.delete_security_group(GroupId=group['GroupId']) except (BotoCoreError, 
ClientError) as e: module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group) else: group = None changed = True else: # no match found, no changes required pass # Ensure requested group is present elif state == 'present': if group: # existing group before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) if group['Description'] != description: module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting " "and re-creating the security group. Try using state=absent to delete, then rerunning this task.") else: # no match found, create it group = create_security_group(client, module, name, description, vpc_id) changed = True if tags is not None and group is not None: current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', [])) changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags) if group: named_tuple_ingress_list = [] named_tuple_egress_list = [] current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], []) current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], []) for new_rules, rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list), (rules_egress, 'out', named_tuple_egress_list)]: if new_rules is None: continue for rule in new_rules: target_type, target, target_group_created = get_target_from_rule( module, client, rule, name, group, groups, vpc_id) changed |= target_group_created if rule.get('proto', 'tcp') in ('all', '-1', -1): rule['proto'] = '-1' rule['from_port'] = None rule['to_port'] = None try: int(rule.get('proto', 'tcp')) rule['proto'] = to_text(rule.get('proto', 'tcp')) rule['from_port'] = None rule['to_port'] = None except ValueError: # rule does not use numeric protocol spec pass named_tuple_rule_list.append( Rule( port_range=(rule['from_port'], rule['to_port']), 
protocol=to_text(rule.get('proto', 'tcp')), target=target, target_type=target_type, description=rule.get('rule_desc'), ) ) # List comprehensions for rules to add, rules to modify, and rule ids to determine purging new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))] new_egress_permissions = [to_permission(r) for r in (set(named_tuple_egress_list) - set(current_egress))] if module.params.get('rules_egress') is None and 'VpcId' in group: # when no egress rules are specified and we're in a VPC, # we add in a default allow all out rule, which was the # default behavior before egress rules were added rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None) if rule in current_egress: named_tuple_egress_list.append(rule) if rule not in current_egress: current_egress.append(rule) # List comprehensions for rules to add, rules to modify, and rule ids to determine purging present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress))) present_egress = list(set(named_tuple_egress_list).union(set(current_egress))) if purge_rules: revoke_ingress = [] for p in present_ingress: if not any([rule_cmp(p, b) for b in named_tuple_ingress_list]): revoke_ingress.append(to_permission(p)) else: revoke_ingress = [] if purge_rules_egress and module.params.get('rules_egress') is not None: if module.params.get('rules_egress') is []: revoke_egress = [ to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list) if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None) ] else: revoke_egress = [] for p in present_egress: if not any([rule_cmp(p, b) for b in named_tuple_egress_list]): revoke_egress.append(to_permission(p)) else: revoke_egress = [] # named_tuple_ingress_list and named_tuple_egress_list got updated by # method update_rule_descriptions, deep copy these two lists to new # variables for the record of the 'desired' ingress and egress sg permissions desired_ingress = 
deepcopy(named_tuple_ingress_list) desired_egress = deepcopy(named_tuple_egress_list) changed |= update_rule_descriptions(module, group['GroupId'], present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list) # Revoke old rules changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId']) rule_msg = 'Revoking {0}, and egress {1}'.format(revoke_ingress, revoke_egress) new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))] new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress)) new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress)) # Authorize new rules changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId']) if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None: # A new group with no rules provided is already being awaited. 
# When it is created we wait for the default egress rule to be added by AWS security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] elif changed and not module.check_mode: # keep pulling until current security group rules match the desired ingress and egress rules security_group = wait_for_rule_propagation(module, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress) else: security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags']) security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', [])) else: security_group = {'group_id': None} if module._diff: if module.params['state'] == 'present': after = get_diff_final_resource(client, module, security_group) if before.get('ip_permissions'): before['ip_permissions'].sort(key=get_ip_permissions_sort_key) security_group['diff'] = [{'before': before, 'after': after}] module.exit_json(changed=changed, **security_group) if __name__ == '__main__': main()
gpl-3.0
MoisesTedeschi/python
Scripts-Python/Modulos-Diversos/deteccao-de-faces-com-python-e-opencv/Lib/copyreg.py
14
7017
"""Helper to provide extensibility for pickle. This is only useful to add pickle support for extension types defined in C, not for instances of user-defined classes. """ __all__ = ["pickle", "constructor", "add_extension", "remove_extension", "clear_extension_cache"] dispatch_table = {} def pickle(ob_type, pickle_function, constructor_ob=None): if not callable(pickle_function): raise TypeError("reduction functions must be callable") dispatch_table[ob_type] = pickle_function # The constructor_ob function is a vestige of safe for unpickling. # There is no reason for the caller to pass it anymore. if constructor_ob is not None: constructor(constructor_ob) def constructor(object): if not callable(object): raise TypeError("constructors must be callable") # Example: provide pickling support for complex numbers. try: complex except NameError: pass else: def pickle_complex(c): return complex, (c.real, c.imag) pickle(complex, pickle_complex, complex) # Support for pickling new-style objects def _reconstructor(cls, base, state): if base is object: obj = object.__new__(cls) else: obj = base.__new__(cls, state) if base.__init__ != object.__init__: base.__init__(obj, state) return obj _HEAPTYPE = 1<<9 # Python code for object.__reduce_ex__ for protocols 0 and 1 def _reduce_ex(self, proto): assert proto < 2 for base in self.__class__.__mro__: if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE: break else: base = object # not really reachable if base is object: state = None else: if base is self.__class__: raise TypeError("can't pickle %s objects" % base.__name__) state = base(self) args = (self.__class__, base, state) try: getstate = self.__getstate__ except AttributeError: if getattr(self, "__slots__", None): raise TypeError("a class that defines __slots__ without " "defining __getstate__ cannot be pickled") from None try: dict = self.__dict__ except AttributeError: dict = None else: dict = getstate() if dict: return _reconstructor, args, dict else: return 
_reconstructor, args # Helper for __reduce_ex__ protocol 2 def __newobj__(cls, *args): return cls.__new__(cls, *args) def __newobj_ex__(cls, args, kwargs): """Used by pickle protocol 4, instead of __newobj__ to allow classes with keyword-only arguments to be pickled correctly. """ return cls.__new__(cls, *args, **kwargs) def _slotnames(cls): """Return a list of slot names for a given class. This needs to find slots defined by the class and its bases, so we can't simply return the __slots__ attribute. We must walk down the Method Resolution Order and concatenate the __slots__ of each class found there. (This assumes classes don't modify their __slots__ attribute to misrepresent their slots after the class is defined.) """ # Get the value from a cache in the class if possible names = cls.__dict__.get("__slotnames__") if names is not None: return names # Not cached -- calculate the value names = [] if not hasattr(cls, "__slots__"): # This class has no slots pass else: # Slots found -- gather slot names from all base classes for c in cls.__mro__: if "__slots__" in c.__dict__: slots = c.__dict__['__slots__'] # if class has a single slot, it can be given as a string if isinstance(slots, str): slots = (slots,) for name in slots: # special descriptors if name in ("__dict__", "__weakref__"): continue # mangled names elif name.startswith('__') and not name.endswith('__'): stripped = c.__name__.lstrip('_') if stripped: names.append('_%s%s' % (stripped, name)) else: names.append(name) else: names.append(name) # Cache the outcome in the class if at all possible try: cls.__slotnames__ = names except: pass # But don't die if we can't return names # A registry of extension codes. This is an ad-hoc compression # mechanism. Whenever a global reference to <module>, <name> is about # to be pickled, the (<module>, <name>) tuple is looked up here to see # if it is a registered extension code for it. 
Extension codes are # universal, so that the meaning of a pickle does not depend on # context. (There are also some codes reserved for local use that # don't have this restriction.) Codes are positive ints; 0 is # reserved. _extension_registry = {} # key -> code _inverted_registry = {} # code -> key _extension_cache = {} # code -> object # Don't ever rebind those names: pickling grabs a reference to them when # it's initialized, and won't see a rebinding. def add_extension(module, name, code): """Register an extension code.""" code = int(code) if not 1 <= code <= 0x7fffffff: raise ValueError("code out of range") key = (module, name) if (_extension_registry.get(key) == code and _inverted_registry.get(code) == key): return # Redundant registrations are benign if key in _extension_registry: raise ValueError("key %s is already registered with code %s" % (key, _extension_registry[key])) if code in _inverted_registry: raise ValueError("code %s is already in use for key %s" % (code, _inverted_registry[code])) _extension_registry[key] = code _inverted_registry[code] = key def remove_extension(module, name, code): """Unregister an extension code. For testing only.""" key = (module, name) if (_extension_registry.get(key) != code or _inverted_registry.get(code) != key): raise ValueError("key %s is not registered with code %s" % (key, code)) del _extension_registry[key] del _inverted_registry[code] if code in _extension_cache: del _extension_cache[code] def clear_extension_cache(): _extension_cache.clear() # Standard extension code assignments # Reserved ranges # First Last Count Purpose # 1 127 127 Reserved for Python standard library # 128 191 64 Reserved for Zope # 192 239 48 Reserved for 3rd parties # 240 255 16 Reserved for private use (will never be assigned) # 256 Inf Inf Reserved for future assignment # Extension codes are assigned by the Python Software Foundation.
gpl-3.0
yongshengwang/hue
build/env/lib/python2.7/site-packages/pylint-0.28.0-py2.7.egg/pylint/reporters/text.py
2
5074
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE). # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. """Plain text reporters: :text: the default one grouping messages by module :parseable: standard parseable output with full module path on each message (for editor integration) :colorized: an ANSI colorized text reporter """ import os from logilab.common.ureports import TextWriter from logilab.common.textutils import colorize_ansi from pylint.interfaces import IReporter from pylint.reporters import BaseReporter TITLE_UNDERLINES = ['', '=', '-', '.'] class TextReporter(BaseReporter): """reports messages and layouts in plain text """ __implements__ = IReporter extension = 'txt' def __init__(self, output=None): BaseReporter.__init__(self, output) self._modules = {} def add_message(self, msg_id, location, msg): """manage message of different type and in the context of path""" module, obj, line, col_offset = location[1:] if module not in self._modules: if module: self.writeln('************* Module %s' % module) self._modules[module] = 1 else: self.writeln('************* %s' % module) if obj: obj = ':%s' % obj sigle = self.make_sigle(msg_id) self.writeln('%s:%3s,%s%s: %s' % (sigle, line, col_offset, obj, msg)) def _display(self, layout): """launch layouts display""" print >> self.out TextWriter().format(layout, self.out) class 
ParseableTextReporter(TextReporter): """a reporter very similar to TextReporter, but display messages in a form recognized by most text editors : <filename>:<linenum>:<msg> """ line_format = '%(path)s:%(line)s: [%(sigle)s%(obj)s] %(msg)s' def __init__(self, output=None, relative=True): TextReporter.__init__(self, output) if relative: self._prefix = os.getcwd() + os.sep else: self._prefix = '' def add_message(self, msg_id, location, msg): """manage message of different type and in the context of path""" path, _, obj, line, _ = location if obj: obj = ', %s' % obj sigle = self.make_sigle(msg_id) if self._prefix: path = path.replace(self._prefix, '') self.writeln(self.line_format % locals()) class VSTextReporter(ParseableTextReporter): """Visual studio text reporter""" line_format = '%(path)s(%(line)s): [%(sigle)s%(obj)s] %(msg)s' class ColorizedTextReporter(TextReporter): """Simple TextReporter that colorizes text output""" COLOR_MAPPING = { "I" : ("green", None), 'C' : (None, "bold"), 'R' : ("magenta", "bold, italic"), 'W' : ("blue", None), 'E' : ("red", "bold"), 'F' : ("red", "bold, underline"), 'S' : ("yellow", "inverse"), # S stands for module Separator } def __init__(self, output=None, color_mapping=None): TextReporter.__init__(self, output) self.color_mapping = color_mapping or \ dict(ColorizedTextReporter.COLOR_MAPPING) def _get_decoration(self, msg_id): """Returns the tuple color, style associated with msg_id as defined in self.color_mapping """ try: return self.color_mapping[msg_id[0]] except KeyError: return None, None def add_message(self, msg_id, location, msg): """manage message of different types, and colorize output using ansi escape codes """ module, obj, line, _ = location[1:] if module not in self._modules: color, style = self._get_decoration('S') if module: modsep = colorize_ansi('************* Module %s' % module, color, style) else: modsep = colorize_ansi('************* %s' % module, color, style) self.writeln(modsep) self._modules[module] = 1 if 
obj: obj = ':%s' % obj sigle = self.make_sigle(msg_id) color, style = self._get_decoration(sigle) msg = colorize_ansi(msg, color, style) sigle = colorize_ansi(sigle, color, style) self.writeln('%s:%3s%s: %s' % (sigle, line, obj, msg))
apache-2.0
skearnes/pylearn2
pylearn2/sandbox/cuda_convnet/tests/test_image_acts_strided.py
5
6085
__authors__ = "Heng Luo" from pylearn2.testing.skip import skip_if_no_gpu skip_if_no_gpu() import numpy as np from theano import shared from theano.tensor import grad, constant from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs from pylearn2.sandbox.cuda_convnet.filter_acts import ImageActs from theano.sandbox.cuda import gpu_from_host from theano.sandbox.cuda import host_from_gpu from theano.sandbox.rng_mrg import MRG_RandomStreams from theano.tensor.nnet.conv import conv2d from theano.tensor import as_tensor_variable from theano import function from theano import tensor as T import warnings from theano.sandbox import cuda from theano.sandbox.cuda.var import float32_shared_constructor from test_filter_acts_strided import FilterActs_python def ImageActs_python(filters, hidacts, stride=1, img_shape=None, ): if int(stride) != stride: raise TypeError('stride must be an int', stride) stride = int(stride) num_filters, h_rows, h_cols, batch_size = hidacts.shape channels, filter_rows, filter_cols, _num_filters = filters.shape assert filter_cols == filter_cols assert num_filters == _num_filters assert stride <= filter_rows and stride >= 1 if stride > 1: assert img_shape!= None rows, cols = img_shape if (rows - filter_rows)%stride == 0: stride_padding_rows = 0 else: stride_padding_rows = ((rows - filter_rows)/stride + 1)*stride + filter_rows - rows idx_rows = (rows + stride_padding_rows - filter_rows)/stride if (cols - filter_cols)%stride == 0: stride_padding_cols = 0 else: stride_padding_cols = ((cols - filter_cols)/stride + 1)*stride + filter_cols - cols idx_cols = (cols + stride_padding_cols - filter_cols)/stride new_rows = rows + stride_padding_rows new_cols = cols + stride_padding_cols idx_rows = (new_rows - filter_rows)/stride idx_cols = (new_cols - filter_cols)/stride images = np.zeros((channels,new_rows,new_cols,batch_size),dtype='float32') else: rows = h_rows+filter_rows-1 cols = h_cols+filter_cols-1 img_shape = (channels, rows, cols, batch_size) 
images = np.zeros(img_shape,dtype='float32') n_dim_filter = channels*filter_rows*filter_cols vector_filters = filters.reshape(n_dim_filter,num_filters).T for idx_h_rows in xrange(h_rows): for idx_h_cols in xrange(h_cols): rc_hidacts = hidacts[:,idx_h_rows,idx_h_cols,:] rc_image = (np.dot( rc_hidacts.T, vector_filters).T).reshape(channels,filter_rows,filter_cols,batch_size) images[:, idx_h_rows*stride:idx_h_rows*stride+filter_rows, idx_h_cols*stride:idx_h_cols*stride+filter_cols, :] += rc_image rval = images[:,:rows,:cols,:] return rval def test_image_acts_strided(): # Tests that running FilterActs with all possible strides rng = np.random.RandomState([2012,10,9]) #Each list in shape_list : #[img_shape,filter_shape] #[(channels, rows, cols, batch_size),(channels, filter_rows, filter_cols, num_filters)] shape_list = [[(1, 7, 8, 5), (1, 2, 2, 16)], [(3, 7, 8, 5), (3, 3, 3, 16)], [(16, 11, 11, 4), (16, 4, 4, 16)], [(3, 20, 20, 3), (3, 5, 5, 16)], [(3, 21, 21, 3), (3, 6, 6, 16)], ] for test_idx in xrange(len(shape_list)): images = rng.uniform(-1., 1., shape_list[test_idx][0]).astype('float32') filters = rng.uniform(-1., 1., shape_list[test_idx][1]).astype('float32') gpu_images = float32_shared_constructor(images,name='images') gpu_filters = float32_shared_constructor(filters,name='filters') print "test case %d..."%(test_idx+1) for ii in xrange(filters.shape[1]): stride = ii + 1 output_python = FilterActs_python(images,filters,stride) hidacts = rng.uniform(-1., 1., output_python.shape).astype('float32') gpu_hidacts = float32_shared_constructor(hidacts,name='hidacts') Img_output_python = ImageActs_python(filters,hidacts,stride,(images.shape[1], images.shape[2])) Img_output = ImageActs(stride=stride)(gpu_hidacts, gpu_filters, as_tensor_variable((images.shape[1], images.shape[2]))) Img_output = host_from_gpu(Img_output) f = function([], Img_output) Img_output_val = f() warnings.warn("""test_image_acts_strided success criterion is not very strict.""") if 
np.abs(Img_output_val - Img_output_python).max() > 2.1e-5: assert type(Img_output_val) == type(Img_output_python) assert Img_output_val.dtype == Img_output_python.dtype if Img_output_val.shape != Img_output_python.shape: print 'cuda-convnet shape: ',Img_output_val.shape print 'python conv shape: ',Img_output_python.shape assert False err = np.abs(Img_output_val - Img_output_python) print 'stride %d'%stride print 'absolute error range: ', (err.min(), err.max()) print 'mean absolute error: ', err.mean() print 'cuda-convnet value range: ', (Img_output_val.min(), Img_output_val.max()) print 'python conv value range: ', (Img_output_python.min(), Img_output_python.max()) #assert False #print "pass" if __name__ == '__main__': test_image_acts_strided()
bsd-3-clause
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-3.6.0/Lib/test/test_bytes.py
1
65065
"""Unit tests for the bytes and bytearray types. XXX This is a mess. Common tests should be unified with string_tests.py (and the latter should be modernized). """ import os import re import sys import copy import functools import pickle import tempfile import unittest import test.support import test.string_tests import test.list_tests from test.support import bigaddrspacetest, MAX_Py_ssize_t if sys.flags.bytes_warning: def check_bytes_warnings(func): @functools.wraps(func) def wrapper(*args, **kw): with test.support.check_warnings(('', BytesWarning)): return func(*args, **kw) return wrapper else: # no-op def check_bytes_warnings(func): return func class Indexable: def __init__(self, value=0): self.value = value def __index__(self): return self.value class BaseBytesTest: def test_basics(self): b = self.type2test() self.assertEqual(type(b), self.type2test) self.assertEqual(b.__class__, self.type2test) def test_copy(self): a = self.type2test(b"abcd") for copy_method in (copy.copy, copy.deepcopy): b = copy_method(a) self.assertEqual(a, b) self.assertEqual(type(a), type(b)) def test_empty_sequence(self): b = self.type2test() self.assertEqual(len(b), 0) self.assertRaises(IndexError, lambda: b[0]) self.assertRaises(IndexError, lambda: b[1]) self.assertRaises(IndexError, lambda: b[sys.maxsize]) self.assertRaises(IndexError, lambda: b[sys.maxsize+1]) self.assertRaises(IndexError, lambda: b[10**100]) self.assertRaises(IndexError, lambda: b[-1]) self.assertRaises(IndexError, lambda: b[-2]) self.assertRaises(IndexError, lambda: b[-sys.maxsize]) self.assertRaises(IndexError, lambda: b[-sys.maxsize-1]) self.assertRaises(IndexError, lambda: b[-sys.maxsize-2]) self.assertRaises(IndexError, lambda: b[-10**100]) def test_from_list(self): ints = list(range(256)) b = self.type2test(i for i in ints) self.assertEqual(len(b), 256) self.assertEqual(list(b), ints) def test_from_index(self): b = self.type2test([Indexable(), Indexable(1), Indexable(254), Indexable(255)]) 
self.assertEqual(list(b), [0, 1, 254, 255]) self.assertRaises(ValueError, self.type2test, [Indexable(-1)]) self.assertRaises(ValueError, self.type2test, [Indexable(256)]) def test_from_ssize(self): self.assertEqual(self.type2test(0), b'') self.assertEqual(self.type2test(1), b'\x00') self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00') self.assertRaises(ValueError, self.type2test, -1) self.assertEqual(self.type2test('0', 'ascii'), b'0') self.assertEqual(self.type2test(b'0'), b'0') self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1) def test_constructor_type_errors(self): self.assertRaises(TypeError, self.type2test, 0.0) class C: pass self.assertRaises(TypeError, self.type2test, ["0"]) self.assertRaises(TypeError, self.type2test, [0.0]) self.assertRaises(TypeError, self.type2test, [None]) self.assertRaises(TypeError, self.type2test, [C()]) self.assertRaises(TypeError, self.type2test, 0, 'ascii') self.assertRaises(TypeError, self.type2test, b'', 'ascii') self.assertRaises(TypeError, self.type2test, 0, errors='ignore') self.assertRaises(TypeError, self.type2test, b'', errors='ignore') self.assertRaises(TypeError, self.type2test, '') self.assertRaises(TypeError, self.type2test, '', errors='ignore') self.assertRaises(TypeError, self.type2test, '', b'ascii') self.assertRaises(TypeError, self.type2test, '', 'ascii', b'ignore') def test_constructor_value_errors(self): self.assertRaises(ValueError, self.type2test, [-1]) self.assertRaises(ValueError, self.type2test, [-sys.maxsize]) self.assertRaises(ValueError, self.type2test, [-sys.maxsize-1]) self.assertRaises(ValueError, self.type2test, [-sys.maxsize-2]) self.assertRaises(ValueError, self.type2test, [-10**100]) self.assertRaises(ValueError, self.type2test, [256]) self.assertRaises(ValueError, self.type2test, [257]) self.assertRaises(ValueError, self.type2test, [sys.maxsize]) self.assertRaises(ValueError, self.type2test, [sys.maxsize+1]) self.assertRaises(ValueError, self.type2test, [10**100]) 
@bigaddrspacetest def test_constructor_overflow(self): size = MAX_Py_ssize_t self.assertRaises((OverflowError, MemoryError), self.type2test, size) try: # Should either pass or raise an error (e.g. on debug builds with # additional malloc() overhead), but shouldn't crash. bytearray(size - 4) except (OverflowError, MemoryError): pass def test_compare(self): b1 = self.type2test([1, 2, 3]) b2 = self.type2test([1, 2, 3]) b3 = self.type2test([1, 3]) self.assertEqual(b1, b2) self.assertTrue(b2 != b3) self.assertTrue(b1 <= b2) self.assertTrue(b1 <= b3) self.assertTrue(b1 < b3) self.assertTrue(b1 >= b2) self.assertTrue(b3 >= b2) self.assertTrue(b3 > b2) self.assertFalse(b1 != b2) self.assertFalse(b2 == b3) self.assertFalse(b1 > b2) self.assertFalse(b1 > b3) self.assertFalse(b1 >= b3) self.assertFalse(b1 < b2) self.assertFalse(b3 < b2) self.assertFalse(b3 <= b2) @check_bytes_warnings def test_compare_to_str(self): # Byte comparisons with unicode should always fail! # Test this for all expected byte orders and Unicode character # sizes. 
self.assertEqual(self.type2test(b"\0a\0b\0c") == "abc", False) self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == "abc", False) self.assertEqual(self.type2test(b"a\0b\0c\0") == "abc", False) self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == "abc", False) self.assertEqual(self.type2test() == str(), False) self.assertEqual(self.type2test() != str(), True) def test_reversed(self): input = list(map(ord, "Hello")) b = self.type2test(input) output = list(reversed(b)) input.reverse() self.assertEqual(output, input) def test_getslice(self): def by(s): return self.type2test(map(ord, s)) b = by("Hello, world") self.assertEqual(b[:5], by("Hello")) self.assertEqual(b[1:5], by("ello")) self.assertEqual(b[5:7], by(", ")) self.assertEqual(b[7:], by("world")) self.assertEqual(b[7:12], by("world")) self.assertEqual(b[7:100], by("world")) self.assertEqual(b[:-7], by("Hello")) self.assertEqual(b[-11:-7], by("ello")) self.assertEqual(b[-7:-5], by(", ")) self.assertEqual(b[-5:], by("world")) self.assertEqual(b[-5:12], by("world")) self.assertEqual(b[-5:100], by("world")) self.assertEqual(b[-100:5], by("Hello")) def test_extended_getslice(self): # Test extended slicing by comparing with list slicing. 
L = list(range(255)) b = self.type2test(L) indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100) for start in indices: for stop in indices: # Skip step 0 (invalid) for step in indices[1:]: self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step])) def test_encoding(self): sample = "Hello world\n\u1234\u5678\u9abc" for enc in ("utf-8", "utf-16"): b = self.type2test(sample, enc) self.assertEqual(b, self.type2test(sample.encode(enc))) self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin-1") b = self.type2test(sample, "latin-1", "ignore") self.assertEqual(b, self.type2test(sample[:-3], "utf-8")) def test_decode(self): sample = "Hello world\n\u1234\u5678\u9abc" for enc in ("utf-8", "utf-16"): b = self.type2test(sample, enc) self.assertEqual(b.decode(enc), sample) sample = "Hello world\n\x80\x81\xfe\xff" b = self.type2test(sample, "latin-1") self.assertRaises(UnicodeDecodeError, b.decode, "utf-8") self.assertEqual(b.decode("utf-8", "ignore"), "Hello world\n") self.assertEqual(b.decode(errors="ignore", encoding="utf-8"), "Hello world\n") # Default encoding is utf-8 self.assertEqual(self.type2test(b'\xe2\x98\x83').decode(), '\u2603') def test_from_int(self): b = self.type2test(0) self.assertEqual(b, self.type2test()) b = self.type2test(10) self.assertEqual(b, self.type2test([0]*10)) b = self.type2test(10000) self.assertEqual(b, self.type2test([0]*10000)) def test_concat(self): b1 = self.type2test(b"abc") b2 = self.type2test(b"def") self.assertEqual(b1 + b2, b"abcdef") self.assertEqual(b1 + bytes(b"def"), b"abcdef") self.assertEqual(bytes(b"def") + b1, b"defabc") self.assertRaises(TypeError, lambda: b1 + "def") self.assertRaises(TypeError, lambda: "abc" + b2) def test_repeat(self): for b in b"abc", self.type2test(b"abc"): self.assertEqual(b * 3, b"abcabcabc") self.assertEqual(b * 0, b"") self.assertEqual(b * -1, b"") self.assertRaises(TypeError, lambda: b * 3.14) self.assertRaises(TypeError, lambda: 3.14 * b) # XXX Shouldn't bytes and bytearray 
agree on what to raise? with self.assertRaises((OverflowError, MemoryError)): c = b * sys.maxsize with self.assertRaises((OverflowError, MemoryError)): b *= sys.maxsize def test_repeat_1char(self): self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100)) def test_contains(self): b = self.type2test(b"abc") self.assertIn(ord('a'), b) self.assertIn(int(ord('a')), b) self.assertNotIn(200, b) self.assertRaises(ValueError, lambda: 300 in b) self.assertRaises(ValueError, lambda: -1 in b) self.assertRaises(ValueError, lambda: sys.maxsize+1 in b) self.assertRaises(TypeError, lambda: None in b) self.assertRaises(TypeError, lambda: float(ord('a')) in b) self.assertRaises(TypeError, lambda: "a" in b) for f in bytes, bytearray: self.assertIn(f(b""), b) self.assertIn(f(b"a"), b) self.assertIn(f(b"b"), b) self.assertIn(f(b"c"), b) self.assertIn(f(b"ab"), b) self.assertIn(f(b"bc"), b) self.assertIn(f(b"abc"), b) self.assertNotIn(f(b"ac"), b) self.assertNotIn(f(b"d"), b) self.assertNotIn(f(b"dab"), b) self.assertNotIn(f(b"abd"), b) def test_fromhex(self): self.assertRaises(TypeError, self.type2test.fromhex) self.assertRaises(TypeError, self.type2test.fromhex, 1) self.assertEqual(self.type2test.fromhex(''), self.type2test()) b = bytearray([0x1a, 0x2b, 0x30]) self.assertEqual(self.type2test.fromhex('1a2B30'), b) self.assertEqual(self.type2test.fromhex(' 1A 2B 30 '), b) self.assertEqual(self.type2test.fromhex('0000'), b'\0\0') self.assertRaises(TypeError, self.type2test.fromhex, b'1B') self.assertRaises(ValueError, self.type2test.fromhex, 'a') self.assertRaises(ValueError, self.type2test.fromhex, 'rt') self.assertRaises(ValueError, self.type2test.fromhex, '1a b cd') self.assertRaises(ValueError, self.type2test.fromhex, '\x00') self.assertRaises(ValueError, self.type2test.fromhex, '12 \x00 34') for data, pos in ( # invalid first hexadecimal character ('12 x4 56', 3), # invalid second hexadecimal character ('12 3x 56', 4), # two invalid hexadecimal characters ('12 xy 
56', 3), # test non-ASCII string ('12 3\xff 56', 4), ): with self.assertRaises(ValueError) as cm: self.type2test.fromhex(data) self.assertIn('at position %s' % pos, str(cm.exception)) def test_hex(self): self.assertRaises(TypeError, self.type2test.hex) self.assertRaises(TypeError, self.type2test.hex, 1) self.assertEqual(self.type2test(b"").hex(), "") self.assertEqual(bytearray([0x1a, 0x2b, 0x30]).hex(), '1a2b30') self.assertEqual(self.type2test(b"\x1a\x2b\x30").hex(), '1a2b30') self.assertEqual(memoryview(b"\x1a\x2b\x30").hex(), '1a2b30') def test_join(self): self.assertEqual(self.type2test(b"").join([]), b"") self.assertEqual(self.type2test(b"").join([b""]), b"") for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]: lst = list(map(self.type2test, lst)) self.assertEqual(self.type2test(b"").join(lst), b"abc") self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc") self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc") dot_join = self.type2test(b".:").join self.assertEqual(dot_join([b"ab", b"cd"]), b"ab.:cd") self.assertEqual(dot_join([memoryview(b"ab"), b"cd"]), b"ab.:cd") self.assertEqual(dot_join([b"ab", memoryview(b"cd")]), b"ab.:cd") self.assertEqual(dot_join([bytearray(b"ab"), b"cd"]), b"ab.:cd") self.assertEqual(dot_join([b"ab", bytearray(b"cd")]), b"ab.:cd") # Stress it with many items seq = [b"abc"] * 1000 expected = b"abc" + b".:abc" * 999 self.assertEqual(dot_join(seq), expected) self.assertRaises(TypeError, self.type2test(b" ").join, None) # Error handling and cleanup when some item in the middle of the # sequence has the wrong type. 
with self.assertRaises(TypeError): dot_join([bytearray(b"ab"), "cd", b"ef"]) with self.assertRaises(TypeError): dot_join([memoryview(b"ab"), "cd", b"ef"]) def test_count(self): b = self.type2test(b'mississippi') i = 105 p = 112 w = 119 self.assertEqual(b.count(b'i'), 4) self.assertEqual(b.count(b'ss'), 2) self.assertEqual(b.count(b'w'), 0) self.assertEqual(b.count(i), 4) self.assertEqual(b.count(w), 0) self.assertEqual(b.count(b'i', 6), 2) self.assertEqual(b.count(b'p', 6), 2) self.assertEqual(b.count(b'i', 1, 3), 1) self.assertEqual(b.count(b'p', 7, 9), 1) self.assertEqual(b.count(i, 6), 2) self.assertEqual(b.count(p, 6), 2) self.assertEqual(b.count(i, 1, 3), 1) self.assertEqual(b.count(p, 7, 9), 1) def test_startswith(self): b = self.type2test(b'hello') self.assertFalse(self.type2test().startswith(b"anything")) self.assertTrue(b.startswith(b"hello")) self.assertTrue(b.startswith(b"hel")) self.assertTrue(b.startswith(b"h")) self.assertFalse(b.startswith(b"hellow")) self.assertFalse(b.startswith(b"ha")) with self.assertRaises(TypeError) as cm: b.startswith([b'h']) exc = str(cm.exception) self.assertIn('bytes', exc) self.assertIn('tuple', exc) def test_endswith(self): b = self.type2test(b'hello') self.assertFalse(bytearray().endswith(b"anything")) self.assertTrue(b.endswith(b"hello")) self.assertTrue(b.endswith(b"llo")) self.assertTrue(b.endswith(b"o")) self.assertFalse(b.endswith(b"whello")) self.assertFalse(b.endswith(b"no")) with self.assertRaises(TypeError) as cm: b.endswith([b'o']) exc = str(cm.exception) self.assertIn('bytes', exc) self.assertIn('tuple', exc) def test_find(self): b = self.type2test(b'mississippi') i = 105 w = 119 self.assertEqual(b.find(b'ss'), 2) self.assertEqual(b.find(b'w'), -1) self.assertEqual(b.find(b'mississippian'), -1) self.assertEqual(b.find(i), 1) self.assertEqual(b.find(w), -1) self.assertEqual(b.find(b'ss', 3), 5) self.assertEqual(b.find(b'ss', 1, 7), 2) self.assertEqual(b.find(b'ss', 1, 3), -1) self.assertEqual(b.find(i, 6), 7) 
self.assertEqual(b.find(i, 1, 3), 1) self.assertEqual(b.find(w, 1, 3), -1) for index in (-1, 256, sys.maxsize + 1): self.assertRaisesRegex( ValueError, r'byte must be in range\(0, 256\)', b.find, index) def test_rfind(self): b = self.type2test(b'mississippi') i = 105 w = 119 self.assertEqual(b.rfind(b'ss'), 5) self.assertEqual(b.rfind(b'w'), -1) self.assertEqual(b.rfind(b'mississippian'), -1) self.assertEqual(b.rfind(i), 10) self.assertEqual(b.rfind(w), -1) self.assertEqual(b.rfind(b'ss', 3), 5) self.assertEqual(b.rfind(b'ss', 0, 6), 2) self.assertEqual(b.rfind(i, 1, 3), 1) self.assertEqual(b.rfind(i, 3, 9), 7) self.assertEqual(b.rfind(w, 1, 3), -1) def test_index(self): b = self.type2test(b'mississippi') i = 105 w = 119 self.assertEqual(b.index(b'ss'), 2) self.assertRaises(ValueError, b.index, b'w') self.assertRaises(ValueError, b.index, b'mississippian') self.assertEqual(b.index(i), 1) self.assertRaises(ValueError, b.index, w) self.assertEqual(b.index(b'ss', 3), 5) self.assertEqual(b.index(b'ss', 1, 7), 2) self.assertRaises(ValueError, b.index, b'ss', 1, 3) self.assertEqual(b.index(i, 6), 7) self.assertEqual(b.index(i, 1, 3), 1) self.assertRaises(ValueError, b.index, w, 1, 3) def test_rindex(self): b = self.type2test(b'mississippi') i = 105 w = 119 self.assertEqual(b.rindex(b'ss'), 5) self.assertRaises(ValueError, b.rindex, b'w') self.assertRaises(ValueError, b.rindex, b'mississippian') self.assertEqual(b.rindex(i), 10) self.assertRaises(ValueError, b.rindex, w) self.assertEqual(b.rindex(b'ss', 3), 5) self.assertEqual(b.rindex(b'ss', 0, 6), 2) self.assertEqual(b.rindex(i, 1, 3), 1) self.assertEqual(b.rindex(i, 3, 9), 7) self.assertRaises(ValueError, b.rindex, w, 1, 3) def test_mod(self): b = self.type2test(b'hello, %b!') orig = b b = b % b'world' self.assertEqual(b, b'hello, world!') self.assertEqual(orig, b'hello, %b!') self.assertFalse(b is orig) b = self.type2test(b'%s / 100 = %d%%') a = b % (b'seventy-nine', 79) self.assertEqual(a, b'seventy-nine / 100 = 
79%') self.assertIs(type(a), self.type2test) def test_imod(self): b = self.type2test(b'hello, %b!') orig = b b %= b'world' self.assertEqual(b, b'hello, world!') self.assertEqual(orig, b'hello, %b!') self.assertFalse(b is orig) b = self.type2test(b'%s / 100 = %d%%') b %= (b'seventy-nine', 79) self.assertEqual(b, b'seventy-nine / 100 = 79%') self.assertIs(type(b), self.type2test) def test_rmod(self): with self.assertRaises(TypeError): object() % self.type2test(b'abc') self.assertIs(self.type2test(b'abc').__rmod__('%r'), NotImplemented) def test_replace(self): b = self.type2test(b'mississippi') self.assertEqual(b.replace(b'i', b'a'), b'massassappa') self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi') def test_split_string_error(self): self.assertRaises(TypeError, self.type2test(b'a b').split, ' ') def test_split_unicodewhitespace(self): for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'): b = self.type2test(b) self.assertEqual(b.split(), [b]) b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F") self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f']) def test_rsplit_string_error(self): self.assertRaises(TypeError, self.type2test(b'a b').rsplit, ' ') def test_rsplit_unicodewhitespace(self): b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F") self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f']) def test_partition(self): b = self.type2test(b'mississippi') self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi')) self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b'')) def test_rpartition(self): b = self.type2test(b'mississippi') self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi')) self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b'')) self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi')) def test_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0": b = self.type2test(b) ps = pickle.dumps(b, proto) q = pickle.loads(ps) self.assertEqual(b, q) 
def test_iterator_pickling(self): for proto in range(pickle.HIGHEST_PROTOCOL + 1): for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0": it = itorg = iter(self.type2test(b)) data = list(self.type2test(b)) d = pickle.dumps(it, proto) it = pickle.loads(d) self.assertEqual(type(itorg), type(it)) self.assertEqual(list(it), data) it = pickle.loads(d) if not b: continue next(it) d = pickle.dumps(it, proto) it = pickle.loads(d) self.assertEqual(list(it), data[1:]) def test_strip_bytearray(self): self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b') self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc') self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab') def test_strip_string_error(self): self.assertRaises(TypeError, self.type2test(b'abc').strip, 'b') self.assertRaises(TypeError, self.type2test(b'abc').lstrip, 'b') self.assertRaises(TypeError, self.type2test(b'abc').rstrip, 'b') def test_center(self): # Fill character can be either bytes or bytearray (issue 12380) b = self.type2test(b'abc') for fill_type in (bytes, bytearray): self.assertEqual(b.center(7, fill_type(b'-')), self.type2test(b'--abc--')) def test_ljust(self): # Fill character can be either bytes or bytearray (issue 12380) b = self.type2test(b'abc') for fill_type in (bytes, bytearray): self.assertEqual(b.ljust(7, fill_type(b'-')), self.type2test(b'abc----')) def test_rjust(self): # Fill character can be either bytes or bytearray (issue 12380) b = self.type2test(b'abc') for fill_type in (bytes, bytearray): self.assertEqual(b.rjust(7, fill_type(b'-')), self.type2test(b'----abc')) def test_ord(self): b = self.type2test(b'\0A\x7f\x80\xff') self.assertEqual([ord(b[i:i+1]) for i in range(len(b))], [0, 65, 127, 128, 255]) def test_maketrans(self): transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 
!"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377' self.assertEqual(self.type2test.maketrans(b'abc', b'xyz'), transtable) transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374xyz' self.assertEqual(self.type2test.maketrans(b'\375\376\377', b'xyz'), transtable) self.assertRaises(ValueError, self.type2test.maketrans, b'abc', b'xyzq') self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def') def test_none_arguments(self): # issue 11828 b = self.type2test(b'hello') l = self.type2test(b'l') h = self.type2test(b'h') x = self.type2test(b'x') o = self.type2test(b'o') self.assertEqual(2, b.find(l, None)) self.assertEqual(3, b.find(l, -2, None)) self.assertEqual(2, b.find(l, None, -2)) self.assertEqual(0, b.find(h, None, None)) 
self.assertEqual(3, b.rfind(l, None)) self.assertEqual(3, b.rfind(l, -2, None)) self.assertEqual(2, b.rfind(l, None, -2)) self.assertEqual(0, b.rfind(h, None, None)) self.assertEqual(2, b.index(l, None)) self.assertEqual(3, b.index(l, -2, None)) self.assertEqual(2, b.index(l, None, -2)) self.assertEqual(0, b.index(h, None, None)) self.assertEqual(3, b.rindex(l, None)) self.assertEqual(3, b.rindex(l, -2, None)) self.assertEqual(2, b.rindex(l, None, -2)) self.assertEqual(0, b.rindex(h, None, None)) self.assertEqual(2, b.count(l, None)) self.assertEqual(1, b.count(l, -2, None)) self.assertEqual(1, b.count(l, None, -2)) self.assertEqual(0, b.count(x, None, None)) self.assertEqual(True, b.endswith(o, None)) self.assertEqual(True, b.endswith(o, -2, None)) self.assertEqual(True, b.endswith(l, None, -2)) self.assertEqual(False, b.endswith(x, None, None)) self.assertEqual(True, b.startswith(h, None)) self.assertEqual(True, b.startswith(l, -2, None)) self.assertEqual(True, b.startswith(h, None, -2)) self.assertEqual(False, b.startswith(x, None, None)) def test_integer_arguments_out_of_byte_range(self): b = self.type2test(b'hello') for method in (b.count, b.find, b.index, b.rfind, b.rindex): self.assertRaises(ValueError, method, -1) self.assertRaises(ValueError, method, 256) self.assertRaises(ValueError, method, 9999) def test_find_etc_raise_correct_error_messages(self): # issue 11828 b = self.type2test(b'hello') x = self.type2test(b'x') self.assertRaisesRegex(TypeError, r'\bfind\b', b.find, x, None, None, None) self.assertRaisesRegex(TypeError, r'\brfind\b', b.rfind, x, None, None, None) self.assertRaisesRegex(TypeError, r'\bindex\b', b.index, x, None, None, None) self.assertRaisesRegex(TypeError, r'\brindex\b', b.rindex, x, None, None, None) self.assertRaisesRegex(TypeError, r'\bcount\b', b.count, x, None, None, None) self.assertRaisesRegex(TypeError, r'\bstartswith\b', b.startswith, x, None, None, None) self.assertRaisesRegex(TypeError, r'\bendswith\b', b.endswith, x, 
None, None, None) def test_free_after_iterating(self): test.support.check_free_after_iterating(self, iter, self.type2test) test.support.check_free_after_iterating(self, reversed, self.type2test) def test_translate(self): b = self.type2test(b'hello') rosetta = bytearray(range(256)) rosetta[ord('o')] = ord('e') self.assertRaises(TypeError, b.translate) self.assertRaises(TypeError, b.translate, None, None) self.assertRaises(ValueError, b.translate, bytes(range(255))) c = b.translate(rosetta, b'hello') self.assertEqual(b, b'hello') self.assertIsInstance(c, self.type2test) c = b.translate(rosetta) d = b.translate(rosetta, b'') self.assertEqual(c, d) self.assertEqual(c, b'helle') c = b.translate(rosetta, b'l') self.assertEqual(c, b'hee') c = b.translate(None, b'e') self.assertEqual(c, b'hllo') # test delete as a keyword argument c = b.translate(rosetta, delete=b'') self.assertEqual(c, b'helle') c = b.translate(rosetta, delete=b'l') self.assertEqual(c, b'hee') c = b.translate(None, delete=b'e') self.assertEqual(c, b'hllo') class BytesTest(BaseBytesTest, unittest.TestCase): type2test = bytes def test_getitem_error(self): msg = "byte indices must be integers or slices" with self.assertRaisesRegex(TypeError, msg): b'python'['a'] def test_buffer_is_readonly(self): fd = os.open(__file__, os.O_RDONLY) with open(fd, "rb", buffering=0) as f: self.assertRaises(TypeError, f.readinto, b"") def test_custom(self): class A: def __bytes__(self): return b'abc' self.assertEqual(bytes(A()), b'abc') class A: pass self.assertRaises(TypeError, bytes, A()) class A: def __bytes__(self): return None self.assertRaises(TypeError, bytes, A()) class A: def __bytes__(self): return b'a' def __index__(self): return 42 self.assertEqual(bytes(A()), b'a') # Issue #25766 class A(str): def __bytes__(self): return b'abc' self.assertEqual(bytes(A('\u20ac')), b'abc') self.assertEqual(bytes(A('\u20ac'), 'iso8859-15'), b'\xa4') # Issue #24731 class A: def __bytes__(self): return OtherBytesSubclass(b'abc') 
self.assertEqual(bytes(A()), b'abc') self.assertIs(type(bytes(A())), OtherBytesSubclass) self.assertEqual(BytesSubclass(A()), b'abc') self.assertIs(type(BytesSubclass(A())), BytesSubclass) # Test PyBytes_FromFormat() def test_from_format(self): ctypes = test.support.import_module('ctypes') _testcapi = test.support.import_module('_testcapi') from ctypes import pythonapi, py_object from ctypes import ( c_int, c_uint, c_long, c_ulong, c_size_t, c_ssize_t, c_char_p) PyBytes_FromFormat = pythonapi.PyBytes_FromFormat PyBytes_FromFormat.restype = py_object # basic tests self.assertEqual(PyBytes_FromFormat(b'format'), b'format') self.assertEqual(PyBytes_FromFormat(b'Hello %s !', b'world'), b'Hello world !') # test formatters self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(0)), b'c=\0') self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(ord('@'))), b'c=@') self.assertEqual(PyBytes_FromFormat(b'c=%c', c_int(255)), b'c=\xff') self.assertEqual(PyBytes_FromFormat(b'd=%d ld=%ld zd=%zd', c_int(1), c_long(2), c_size_t(3)), b'd=1 ld=2 zd=3') self.assertEqual(PyBytes_FromFormat(b'd=%d ld=%ld zd=%zd', c_int(-1), c_long(-2), c_size_t(-3)), b'd=-1 ld=-2 zd=-3') self.assertEqual(PyBytes_FromFormat(b'u=%u lu=%lu zu=%zu', c_uint(123), c_ulong(456), c_size_t(789)), b'u=123 lu=456 zu=789') self.assertEqual(PyBytes_FromFormat(b'i=%i', c_int(123)), b'i=123') self.assertEqual(PyBytes_FromFormat(b'i=%i', c_int(-123)), b'i=-123') self.assertEqual(PyBytes_FromFormat(b'x=%x', c_int(0xabc)), b'x=abc') sizeof_ptr = ctypes.sizeof(c_char_p) if os.name == 'nt': # Windows (MSCRT) ptr_format = '0x%0{}X'.format(2 * sizeof_ptr) def ptr_formatter(ptr): return (ptr_format % ptr) else: # UNIX (glibc) def ptr_formatter(ptr): return '%#x' % ptr ptr = 0xabcdef self.assertEqual(PyBytes_FromFormat(b'ptr=%p', c_char_p(ptr)), ('ptr=' + ptr_formatter(ptr)).encode('ascii')) self.assertEqual(PyBytes_FromFormat(b's=%s', c_char_p(b'cstr')), b's=cstr') # test minimum and maximum integer values size_max = 
c_size_t(-1).value for formatstr, ctypes_type, value, py_formatter in ( (b'%d', c_int, _testcapi.INT_MIN, str), (b'%d', c_int, _testcapi.INT_MAX, str), (b'%ld', c_long, _testcapi.LONG_MIN, str), (b'%ld', c_long, _testcapi.LONG_MAX, str), (b'%lu', c_ulong, _testcapi.ULONG_MAX, str), (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MIN, str), (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MAX, str), (b'%zu', c_size_t, size_max, str), (b'%p', c_char_p, size_max, ptr_formatter), ): self.assertEqual(PyBytes_FromFormat(formatstr, ctypes_type(value)), py_formatter(value).encode('ascii')), # width and precision (width is currently ignored) self.assertEqual(PyBytes_FromFormat(b'%5s', b'a'), b'a') self.assertEqual(PyBytes_FromFormat(b'%.3s', b'abcdef'), b'abc') # '%%' formatter self.assertEqual(PyBytes_FromFormat(b'%%'), b'%') self.assertEqual(PyBytes_FromFormat(b'[%%]'), b'[%]') self.assertEqual(PyBytes_FromFormat(b'%%%c', c_int(ord('_'))), b'%_') self.assertEqual(PyBytes_FromFormat(b'%%s'), b'%s') # Invalid formats and partial formatting self.assertEqual(PyBytes_FromFormat(b'%'), b'%') self.assertEqual(PyBytes_FromFormat(b'x=%i y=%', c_int(2), c_int(3)), b'x=2 y=%') # Issue #19969: %c must raise OverflowError for values # not in the range [0; 255] self.assertRaises(OverflowError, PyBytes_FromFormat, b'%c', c_int(-1)) self.assertRaises(OverflowError, PyBytes_FromFormat, b'%c', c_int(256)) def test_bytes_blocking(self): class IterationBlocked(list): __bytes__ = None i = [0, 1, 2, 3] self.assertEqual(bytes(i), b'\x00\x01\x02\x03') self.assertRaises(TypeError, bytes, IterationBlocked(i)) # At least in CPython, because bytes.__new__ and the C API # PyBytes_FromObject have different fallback rules, integer # fallback is handled specially, so test separately. 
class IntBlocked(int): __bytes__ = None self.assertEqual(bytes(3), b'\0\0\0') self.assertRaises(TypeError, bytes, IntBlocked(3)) # While there is no separately-defined rule for handling bytes # subclasses differently from other buffer-interface classes, # an implementation may well special-case them (as CPython 2.x # str did), so test them separately. class BytesSubclassBlocked(bytes): __bytes__ = None self.assertEqual(bytes(b'ab'), b'ab') self.assertRaises(TypeError, bytes, BytesSubclassBlocked(b'ab')) class BufferBlocked(bytearray): __bytes__ = None ba, bb = bytearray(b'ab'), BufferBlocked(b'ab') self.assertEqual(bytes(ba), b'ab') self.assertRaises(TypeError, bytes, bb) class ByteArrayTest(BaseBytesTest, unittest.TestCase): type2test = bytearray def test_getitem_error(self): msg = "bytearray indices must be integers or slices" with self.assertRaisesRegex(TypeError, msg): bytearray(b'python')['a'] def test_setitem_error(self): msg = "bytearray indices must be integers or slices" with self.assertRaisesRegex(TypeError, msg): b = bytearray(b'python') b['a'] = "python" def test_nohash(self): self.assertRaises(TypeError, hash, bytearray()) def test_bytearray_api(self): short_sample = b"Hello world\n" sample = short_sample + b"\0"*(20 - len(short_sample)) tfn = tempfile.mktemp() try: # Prepare with open(tfn, "wb") as f: f.write(short_sample) # Test readinto with open(tfn, "rb") as f: b = bytearray(20) n = f.readinto(b) self.assertEqual(n, len(short_sample)) self.assertEqual(list(b), list(sample)) # Test writing in binary mode with open(tfn, "wb") as f: f.write(b) with open(tfn, "rb") as f: self.assertEqual(f.read(), sample) # Text mode is ambiguous; don't test finally: try: os.remove(tfn) except OSError: pass def test_reverse(self): b = bytearray(b'hello') self.assertEqual(b.reverse(), None) self.assertEqual(b, b'olleh') b = bytearray(b'hello1') # test even number of items b.reverse() self.assertEqual(b, b'1olleh') b = bytearray() b.reverse() self.assertFalse(b) def 
test_clear(self): b = bytearray(b'python') b.clear() self.assertEqual(b, b'') b = bytearray(b'') b.clear() self.assertEqual(b, b'') b = bytearray(b'') b.append(ord('r')) b.clear() b.append(ord('p')) self.assertEqual(b, b'p') def test_copy(self): b = bytearray(b'abc') bb = b.copy() self.assertEqual(bb, b'abc') b = bytearray(b'') bb = b.copy() self.assertEqual(bb, b'') # test that it's indeed a copy and not a reference b = bytearray(b'abc') bb = b.copy() self.assertEqual(b, bb) self.assertIsNot(b, bb) bb.append(ord('d')) self.assertEqual(bb, b'abcd') self.assertEqual(b, b'abc') def test_regexps(self): def by(s): return bytearray(map(ord, s)) b = by("Hello, world") self.assertEqual(re.findall(br"\w+", b), [by("Hello"), by("world")]) def test_setitem(self): b = bytearray([1, 2, 3]) b[1] = 100 self.assertEqual(b, bytearray([1, 100, 3])) b[-1] = 200 self.assertEqual(b, bytearray([1, 100, 200])) b[0] = Indexable(10) self.assertEqual(b, bytearray([10, 100, 200])) try: b[3] = 0 self.fail("Didn't raise IndexError") except IndexError: pass try: b[-10] = 0 self.fail("Didn't raise IndexError") except IndexError: pass try: b[0] = 256 self.fail("Didn't raise ValueError") except ValueError: pass try: b[0] = Indexable(-1) self.fail("Didn't raise ValueError") except ValueError: pass try: b[0] = None self.fail("Didn't raise TypeError") except TypeError: pass def test_delitem(self): b = bytearray(range(10)) del b[0] self.assertEqual(b, bytearray(range(1, 10))) del b[-1] self.assertEqual(b, bytearray(range(1, 9))) del b[4] self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8])) def test_setslice(self): b = bytearray(range(10)) self.assertEqual(list(b), list(range(10))) b[0:5] = bytearray([1, 1, 1, 1, 1]) self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9])) del b[0:-5] self.assertEqual(b, bytearray([5, 6, 7, 8, 9])) b[0:0] = bytearray([0, 1, 2, 3, 4]) self.assertEqual(b, bytearray(range(10))) b[-7:-3] = bytearray([100, 101]) self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 
8, 9])) b[3:5] = [3, 4, 5, 6] self.assertEqual(b, bytearray(range(10))) b[3:0] = [42, 42, 42] self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9])) b[3:] = b'foo' self.assertEqual(b, bytearray([0, 1, 2, 102, 111, 111])) b[:3] = memoryview(b'foo') self.assertEqual(b, bytearray([102, 111, 111, 102, 111, 111])) b[3:4] = [] self.assertEqual(b, bytearray([102, 111, 111, 111, 111])) for elem in [5, -5, 0, int(10e20), 'str', 2.3, ['a', 'b'], [b'a', b'b'], [[]]]: with self.assertRaises(TypeError): b[3:4] = elem for elem in [[254, 255, 256], [-256, 9000]]: with self.assertRaises(ValueError): b[3:4] = elem def test_setslice_extend(self): # Exercise the resizing logic (see issue #19087) b = bytearray(range(100)) self.assertEqual(list(b), list(range(100))) del b[:10] self.assertEqual(list(b), list(range(10, 100))) b.extend(range(100, 110)) self.assertEqual(list(b), list(range(10, 110))) def test_fifo_overrun(self): # Test for issue #23985, a buffer overrun when implementing a FIFO # Build Python in pydebug mode for best results. b = bytearray(10) b.pop() # Defeat expanding buffer off-by-one quirk del b[:1] # Advance start pointer without reallocating b += bytes(2) # Append exactly the number of deleted bytes del b # Free memory buffer, allowing pydebug verification def test_del_expand(self): # Reducing the size should not expand the buffer (issue #23985) b = bytearray(10) size = sys.getsizeof(b) del b[:1] self.assertLessEqual(sys.getsizeof(b), size) def test_extended_set_del_slice(self): indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300) for start in indices: for stop in indices: # Skip invalid step 0 for step in indices[1:]: L = list(range(255)) b = bytearray(L) # Make sure we have a slice of exactly the right length, # but with different data. 
data = L[start:stop:step] data.reverse() L[start:stop:step] = data b[start:stop:step] = data self.assertEqual(b, bytearray(L)) del L[start:stop:step] del b[start:stop:step] self.assertEqual(b, bytearray(L)) def test_setslice_trap(self): # This test verifies that we correctly handle assigning self # to a slice of self (the old Lambert Meertens trap). b = bytearray(range(256)) b[8:] = b self.assertEqual(b, bytearray(list(range(8)) + list(range(256)))) def test_iconcat(self): b = bytearray(b"abc") b1 = b b += b"def" self.assertEqual(b, b"abcdef") self.assertEqual(b, b1) self.assertTrue(b is b1) b += b"xyz" self.assertEqual(b, b"abcdefxyz") try: b += "" except TypeError: pass else: self.fail("bytes += unicode didn't raise TypeError") def test_irepeat(self): b = bytearray(b"abc") b1 = b b *= 3 self.assertEqual(b, b"abcabcabc") self.assertEqual(b, b1) self.assertTrue(b is b1) def test_irepeat_1char(self): b = bytearray(b"x") b1 = b b *= 100 self.assertEqual(b, b"x"*100) self.assertEqual(b, b1) self.assertTrue(b is b1) def test_alloc(self): b = bytearray() alloc = b.__alloc__() self.assertTrue(alloc >= 0) seq = [alloc] for i in range(100): b += b"x" alloc = b.__alloc__() self.assertGreater(alloc, len(b)) # including trailing null byte if alloc not in seq: seq.append(alloc) def test_init_alloc(self): b = bytearray() def g(): for i in range(1, 100): yield i a = list(b) self.assertEqual(a, list(range(1, len(a)+1))) self.assertEqual(len(b), len(a)) self.assertLessEqual(len(b), i) alloc = b.__alloc__() self.assertGreater(alloc, len(b)) # including trailing null byte b.__init__(g()) self.assertEqual(list(b), list(range(1, 100))) self.assertEqual(len(b), 99) alloc = b.__alloc__() self.assertGreater(alloc, len(b)) def test_extend(self): orig = b'hello' a = bytearray(orig) a.extend(a) self.assertEqual(a, orig + orig) self.assertEqual(a[5:], orig) a = bytearray(b'') # Test iterators that don't have a __length_hint__ a.extend(map(int, orig * 25)) a.extend(int(x) for x in orig * 25) 
self.assertEqual(a, orig * 50) self.assertEqual(a[-5:], orig) a = bytearray(b'') a.extend(iter(map(int, orig * 50))) self.assertEqual(a, orig * 50) self.assertEqual(a[-5:], orig) a = bytearray(b'') a.extend(list(map(int, orig * 50))) self.assertEqual(a, orig * 50) self.assertEqual(a[-5:], orig) a = bytearray(b'') self.assertRaises(ValueError, a.extend, [0, 1, 2, 256]) self.assertRaises(ValueError, a.extend, [0, 1, 2, -1]) self.assertEqual(len(a), 0) a = bytearray(b'') a.extend([Indexable(ord('a'))]) self.assertEqual(a, b'a') def test_remove(self): b = bytearray(b'hello') b.remove(ord('l')) self.assertEqual(b, b'helo') b.remove(ord('l')) self.assertEqual(b, b'heo') self.assertRaises(ValueError, lambda: b.remove(ord('l'))) self.assertRaises(ValueError, lambda: b.remove(400)) self.assertRaises(TypeError, lambda: b.remove('e')) # remove first and last b.remove(ord('o')) b.remove(ord('h')) self.assertEqual(b, b'e') self.assertRaises(TypeError, lambda: b.remove(b'e')) b.remove(Indexable(ord('e'))) self.assertEqual(b, b'') # test values outside of the ascii range: (0, 127) c = bytearray([126, 127, 128, 129]) c.remove(127) self.assertEqual(c, bytes([126, 128, 129])) c.remove(129) self.assertEqual(c, bytes([126, 128])) def test_pop(self): b = bytearray(b'world') self.assertEqual(b.pop(), ord('d')) self.assertEqual(b.pop(0), ord('w')) self.assertEqual(b.pop(-2), ord('r')) self.assertRaises(IndexError, lambda: b.pop(10)) self.assertRaises(IndexError, lambda: bytearray().pop()) # test for issue #6846 self.assertEqual(bytearray(b'\xff').pop(), 0xff) def test_nosort(self): self.assertRaises(AttributeError, lambda: bytearray().sort()) def test_append(self): b = bytearray(b'hell') b.append(ord('o')) self.assertEqual(b, b'hello') self.assertEqual(b.append(100), None) b = bytearray() b.append(ord('A')) self.assertEqual(len(b), 1) self.assertRaises(TypeError, lambda: b.append(b'o')) b = bytearray() b.append(Indexable(ord('A'))) self.assertEqual(b, b'A') def test_insert(self): b = 
bytearray(b'msssspp') b.insert(1, ord('i')) b.insert(4, ord('i')) b.insert(-2, ord('i')) b.insert(1000, ord('i')) self.assertEqual(b, b'mississippi') self.assertRaises(TypeError, lambda: b.insert(0, b'1')) b = bytearray() b.insert(0, Indexable(ord('A'))) self.assertEqual(b, b'A') def test_copied(self): # Issue 4348. Make sure that operations that don't mutate the array # copy the bytes. b = bytearray(b'abc') self.assertFalse(b is b.replace(b'abc', b'cde', 0)) t = bytearray([i for i in range(256)]) x = bytearray(b'') self.assertFalse(x is x.translate(t)) def test_partition_bytearray_doesnt_share_nullstring(self): a, b, c = bytearray(b"x").partition(b"y") self.assertEqual(b, b"") self.assertEqual(c, b"") self.assertTrue(b is not c) b += b"!" self.assertEqual(c, b"") a, b, c = bytearray(b"x").partition(b"y") self.assertEqual(b, b"") self.assertEqual(c, b"") # Same for rpartition b, c, a = bytearray(b"x").rpartition(b"y") self.assertEqual(b, b"") self.assertEqual(c, b"") self.assertTrue(b is not c) b += b"!" self.assertEqual(c, b"") c, b, a = bytearray(b"x").rpartition(b"y") self.assertEqual(b, b"") self.assertEqual(c, b"") def test_resize_forbidden(self): # #4509: can't resize a bytearray when there are buffer exports, even # if it wouldn't reallocate the underlying buffer. # Furthermore, no destructive changes to the buffer may be applied # before raising the error. 
b = bytearray(range(10)) v = memoryview(b) def resize(n): b[1:-1] = range(n + 1, 2*n - 1) resize(10) orig = b[:] self.assertRaises(BufferError, resize, 11) self.assertEqual(b, orig) self.assertRaises(BufferError, resize, 9) self.assertEqual(b, orig) self.assertRaises(BufferError, resize, 0) self.assertEqual(b, orig) # Other operations implying resize self.assertRaises(BufferError, b.pop, 0) self.assertEqual(b, orig) self.assertRaises(BufferError, b.remove, b[1]) self.assertEqual(b, orig) def delitem(): del b[1] self.assertRaises(BufferError, delitem) self.assertEqual(b, orig) # deleting a non-contiguous slice def delslice(): b[1:-1:2] = b"" self.assertRaises(BufferError, delslice) self.assertEqual(b, orig) @test.support.cpython_only def test_obsolete_write_lock(self): from _testcapi import getbuffer_with_null_view self.assertRaises(BufferError, getbuffer_with_null_view, bytearray()) def test_iterator_pickling2(self): orig = bytearray(b'abc') data = list(b'qwerty') for proto in range(pickle.HIGHEST_PROTOCOL + 1): # initial iterator itorig = iter(orig) d = pickle.dumps((itorig, orig), proto) it, b = pickle.loads(d) b[:] = data self.assertEqual(type(it), type(itorig)) self.assertEqual(list(it), data) # running iterator next(itorig) d = pickle.dumps((itorig, orig), proto) it, b = pickle.loads(d) b[:] = data self.assertEqual(type(it), type(itorig)) self.assertEqual(list(it), data[1:]) # empty iterator for i in range(1, len(orig)): next(itorig) d = pickle.dumps((itorig, orig), proto) it, b = pickle.loads(d) b[:] = data self.assertEqual(type(it), type(itorig)) self.assertEqual(list(it), data[len(orig):]) # exhausted iterator self.assertRaises(StopIteration, next, itorig) d = pickle.dumps((itorig, orig), proto) it, b = pickle.loads(d) b[:] = data self.assertEqual(list(it), []) test_exhausted_iterator = test.list_tests.CommonTest.test_exhausted_iterator def test_iterator_length_hint(self): # Issue 27443: __length_hint__ can return negative integer ba = bytearray(b'ab') it = 
iter(ba) next(it) ba.clear() # Shouldn't raise an error self.assertEqual(list(it), []) class AssortedBytesTest(unittest.TestCase): # # Test various combinations of bytes and bytearray # @check_bytes_warnings def test_repr_str(self): for f in str, repr: self.assertEqual(f(bytearray()), "bytearray(b'')") self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')") self.assertEqual(f(bytearray([0, 1, 254, 255])), "bytearray(b'\\x00\\x01\\xfe\\xff')") self.assertEqual(f(b"abc"), "b'abc'") self.assertEqual(f(b"'"), '''b"'"''') # ''' self.assertEqual(f(b"'\""), r"""b'\'"'""") # ' def test_compare_bytes_to_bytearray(self): self.assertEqual(b"abc" == bytes(b"abc"), True) self.assertEqual(b"ab" != bytes(b"abc"), True) self.assertEqual(b"ab" <= bytes(b"abc"), True) self.assertEqual(b"ab" < bytes(b"abc"), True) self.assertEqual(b"abc" >= bytes(b"ab"), True) self.assertEqual(b"abc" > bytes(b"ab"), True) self.assertEqual(b"abc" != bytes(b"abc"), False) self.assertEqual(b"ab" == bytes(b"abc"), False) self.assertEqual(b"ab" > bytes(b"abc"), False) self.assertEqual(b"ab" >= bytes(b"abc"), False) self.assertEqual(b"abc" < bytes(b"ab"), False) self.assertEqual(b"abc" <= bytes(b"ab"), False) self.assertEqual(bytes(b"abc") == b"abc", True) self.assertEqual(bytes(b"ab") != b"abc", True) self.assertEqual(bytes(b"ab") <= b"abc", True) self.assertEqual(bytes(b"ab") < b"abc", True) self.assertEqual(bytes(b"abc") >= b"ab", True) self.assertEqual(bytes(b"abc") > b"ab", True) self.assertEqual(bytes(b"abc") != b"abc", False) self.assertEqual(bytes(b"ab") == b"abc", False) self.assertEqual(bytes(b"ab") > b"abc", False) self.assertEqual(bytes(b"ab") >= b"abc", False) self.assertEqual(bytes(b"abc") < b"ab", False) self.assertEqual(bytes(b"abc") <= b"ab", False) @test.support.requires_docstrings def test_doc(self): self.assertIsNotNone(bytearray.__doc__) self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__) self.assertIsNotNone(bytes.__doc__) 
self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__) def test_from_bytearray(self): sample = bytes(b"Hello world\n\x80\x81\xfe\xff") buf = memoryview(sample) b = bytearray(buf) self.assertEqual(b, bytearray(sample)) @check_bytes_warnings def test_to_str(self): self.assertEqual(str(b''), "b''") self.assertEqual(str(b'x'), "b'x'") self.assertEqual(str(b'\x80'), "b'\\x80'") self.assertEqual(str(bytearray(b'')), "bytearray(b'')") self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')") self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')") def test_literal(self): tests = [ (b"Wonderful spam", "Wonderful spam"), (br"Wonderful spam too", "Wonderful spam too"), (b"\xaa\x00\000\200", "\xaa\x00\000\200"), (br"\xaa\x00\000\200", r"\xaa\x00\000\200"), ] for b, s in tests: self.assertEqual(b, bytearray(s, 'latin-1')) for c in range(128, 256): self.assertRaises(SyntaxError, eval, 'b"%s"' % chr(c)) def test_split_bytearray(self): self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b']) def test_rsplit_bytearray(self): self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b']) def test_return_self(self): # bytearray.replace must always return a new bytearray b = bytearray() self.assertFalse(b.replace(b'', b'') is b) @unittest.skipUnless(sys.flags.bytes_warning, "BytesWarning is needed for this test: use -bb option") def test_compare(self): def bytes_warning(): return test.support.check_warnings(('', BytesWarning)) with bytes_warning(): b'' == '' with bytes_warning(): '' == b'' with bytes_warning(): b'' != '' with bytes_warning(): '' != b'' with bytes_warning(): bytearray(b'') == '' with bytes_warning(): '' == bytearray(b'') with bytes_warning(): bytearray(b'') != '' with bytes_warning(): '' != bytearray(b'') with bytes_warning(): b'\0' == 0 with bytes_warning(): 0 == b'\0' with bytes_warning(): b'\0' != 0 with bytes_warning(): 0 != b'\0' # Optimizations: # __iter__? (optimization) # __reversed__? (optimization) # XXX More string methods? 
(Those that don't use character properties) # There are tests in string_tests.py that are more # comprehensive for things like partition, etc. # Unfortunately they are all bundled with tests that # are not appropriate for bytes # I've started porting some of those into bytearray_tests.py, we should port # the rest that make sense (the code can be cleaned up to use modern # unittest methods at the same time). class BytearrayPEP3137Test(unittest.TestCase): def marshal(self, x): return bytearray(x) def test_returns_new_copy(self): val = self.marshal(b'1234') # On immutable types these MAY return a reference to themselves # but on mutable types like bytearray they MUST return a new copy. for methname in ('zfill', 'rjust', 'ljust', 'center'): method = getattr(val, methname) newval = method(3) self.assertEqual(val, newval) self.assertTrue(val is not newval, methname+' returned self on a mutable object') for expr in ('val.split()[0]', 'val.rsplit()[0]', 'val.partition(b".")[0]', 'val.rpartition(b".")[2]', 'val.splitlines()[0]', 'val.replace(b"", b"")'): newval = eval(expr) self.assertEqual(val, newval) self.assertTrue(val is not newval, expr+' returned val on a mutable object') sep = self.marshal(b'') newval = sep.join([val]) self.assertEqual(val, newval) self.assertIsNot(val, newval) class FixedStringTest(test.string_tests.BaseTest): def fixtype(self, obj): if isinstance(obj, str): return self.type2test(obj.encode("utf-8")) return super().fixtype(obj) contains_bytes = True class ByteArrayAsStringTest(FixedStringTest, unittest.TestCase): type2test = bytearray class BytesAsStringTest(FixedStringTest, unittest.TestCase): type2test = bytes class SubclassTest: def test_basic(self): self.assertTrue(issubclass(self.type2test, self.basetype)) self.assertIsInstance(self.type2test(), self.basetype) a, b = b"abcd", b"efgh" _a, _b = self.type2test(a), self.type2test(b) # test comparison operators with subclass instances self.assertTrue(_a == _a) self.assertTrue(_a != _b) 
self.assertTrue(_a < _b) self.assertTrue(_a <= _b) self.assertTrue(_b >= _a) self.assertTrue(_b > _a) self.assertTrue(_a is not a) # test concat of subclass instances self.assertEqual(a + b, _a + _b) self.assertEqual(a + b, a + _b) self.assertEqual(a + b, _a + b) # test repeat self.assertTrue(a*5 == _a*5) def test_join(self): # Make sure join returns a NEW object for single item sequences # involving a subclass. # Make sure that it is of the appropriate type. s1 = self.type2test(b"abcd") s2 = self.basetype().join([s1]) self.assertTrue(s1 is not s2) self.assertTrue(type(s2) is self.basetype, type(s2)) # Test reverse, calling join on subclass s3 = s1.join([b"abcd"]) self.assertTrue(type(s3) is self.basetype) def test_pickle(self): a = self.type2test(b"abcd") a.x = 10 a.y = self.type2test(b"efgh") for proto in range(pickle.HIGHEST_PROTOCOL + 1): b = pickle.loads(pickle.dumps(a, proto)) self.assertNotEqual(id(a), id(b)) self.assertEqual(a, b) self.assertEqual(a.x, b.x) self.assertEqual(a.y, b.y) self.assertEqual(type(a), type(b)) self.assertEqual(type(a.y), type(b.y)) def test_copy(self): a = self.type2test(b"abcd") a.x = 10 a.y = self.type2test(b"efgh") for copy_method in (copy.copy, copy.deepcopy): b = copy_method(a) self.assertNotEqual(id(a), id(b)) self.assertEqual(a, b) self.assertEqual(a.x, b.x) self.assertEqual(a.y, b.y) self.assertEqual(type(a), type(b)) self.assertEqual(type(a.y), type(b.y)) def test_fromhex(self): b = self.type2test.fromhex('1a2B30') self.assertEqual(b, b'\x1a\x2b\x30') self.assertIs(type(b), self.type2test) class B1(self.basetype): def __new__(cls, value): me = self.basetype.__new__(cls, value) me.foo = 'bar' return me b = B1.fromhex('1a2B30') self.assertEqual(b, b'\x1a\x2b\x30') self.assertIs(type(b), B1) self.assertEqual(b.foo, 'bar') class B2(self.basetype): def __init__(me, *args, **kwargs): if self.basetype is not bytes: self.basetype.__init__(me, *args, **kwargs) me.foo = 'bar' b = B2.fromhex('1a2B30') self.assertEqual(b, 
b'\x1a\x2b\x30') self.assertIs(type(b), B2) self.assertEqual(b.foo, 'bar') class ByteArraySubclass(bytearray): pass class BytesSubclass(bytes): pass class OtherBytesSubclass(bytes): pass class ByteArraySubclassTest(SubclassTest, unittest.TestCase): basetype = bytearray type2test = ByteArraySubclass def test_init_override(self): class subclass(bytearray): def __init__(me, newarg=1, *args, **kwargs): bytearray.__init__(me, *args, **kwargs) x = subclass(4, b"abcd") x = subclass(4, source=b"abcd") self.assertEqual(x, b"abcd") x = subclass(newarg=4, source=b"abcd") self.assertEqual(x, b"abcd") class BytesSubclassTest(SubclassTest, unittest.TestCase): basetype = bytes type2test = BytesSubclass if __name__ == "__main__": unittest.main()
mit
dentaku65/pelisalacarta
python/main-classic/servers/veoh.py
43
2852
# -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para Veoh
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import os
import urlparse,urllib2,urllib,re

from core import scrapertools
from core import logger
from core import config


# Returns an array of possible video url's from the page_url
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve a Veoh page (full URL or bare permalink id) into direct video URLs.

    Returns a list of [label, url] pairs; empty when no direct link is found.
    The actual extraction is delegated to flashvideodownloader.org.
    """
    logger.info("[veoh.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    # Build the flashvideodownloader.org request from either a full URL
    # or a bare Veoh permalink id.
    if page_url.startswith("http://"):
        url = 'http://www.flashvideodownloader.org/download.php?u='+page_url
    else:
        url = 'http://www.flashvideodownloader.org/download.php?u=http://www.veoh.com/watch/'+page_url
    logger.info("[veoh.py] url="+url)
    data = scrapertools.cachePage(url)

    # Extract the direct link served from content.veoh.com.
    patronvideos = '<a href="(http://content.veoh.com.*?)"'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    if matches:
        video_urls.append( ["[veoh]",matches[0]] )

    for video_url in video_urls:
        logger.info("[veoh.py] %s - %s" % (video_url[0],video_url[1]))

    return video_urls

def _collect_permalink_ids(data, patronvideos, devuelve, encontrados):
    """Find Veoh permalink ids matching *patronvideos* in *data*.

    Appends [title, id, 'veoh'] entries to *devuelve*, using the
    *encontrados* set to skip ids that were already collected.
    (Shared by the two scan passes in find_videos, which previously
    duplicated this loop verbatim.)
    """
    logger.info("[veoh.py] find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    for match in matches:
        titulo = "[veoh]"
        # Keep only the permalink id, dropping any query-string tail.
        url = match.split("&", 1)[0]
        if url not in encontrados:
            logger.info("  url="+url)
            devuelve.append( [ titulo , url , 'veoh' ] )
            encontrados.add(url)
        else:
            logger.info("  url duplicada="+url)

# Finds videos from this server in the given text
def find_videos(data):
    """Scan *data* for Veoh links and embeds; return [title, id, server] triples."""
    encontrados = set()
    devuelve = []

    # Pass 1: plain links to veoh.com carrying a permalinkId query parameter.
    _collect_permalink_ids(
        data,
        '"http://www.veoh.com/.*?permalinkId=([^"]+)"',
        devuelve, encontrados)

    # Pass 2: embedded web-player objects.
    _collect_permalink_ids(
        data,
        'var embed_code[^>]+> <param name="movie" value="http://www.veoh.com/static/swf/webplayer/WebPlayer.swf.*?permalinkId=(.*?)&player=videodetailsembedded&videoAutoPlay=0&id=anonymous"></param>',
        devuelve, encontrados)

    return devuelve
gpl-3.0
edlabh/SickRage
lib/github/PullRequestMergeStatus.py
74
3202
# -*- coding: utf-8 -*-

# ########################## Copyrights and license ############################
#                                                                              #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net>                 #
# Copyright 2012 Zearin <zearin@gonk.net>                                      #
# Copyright 2013 AKFish <akfish@gmail.com>                                     #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
# Copyright 2013 martinqt <m.ki2@laposte.net>                                  #
#                                                                              #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/          #
#                                                                              #
# PyGithub is free software: you can redistribute it and/or modify it under    #
# the terms of the GNU Lesser General Public License as published by the Free  #
# Software Foundation, either version 3 of the License, or (at your option)    #
# any later version.                                                           #
#                                                                              #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY  #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS    #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details.                                                                     #
#                                                                              #
# You should have received a copy of the GNU Lesser General Public License     #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
#                                                                              #
# ##############################################################################

import github.GithubObject


class PullRequestMergeStatus(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents PullRequestMergeStatuss. The reference can be found here http://developer.github.com/v3/pulls/#get-if-a-pull-request-has-been-merged
    """

    @property
    def merged(self):
        """
        :type: bool
        """
        return self._merged.value

    @property
    def message(self):
        """
        :type: string
        """
        return self._message.value

    @property
    def sha(self):
        """
        :type: string
        """
        return self._sha.value

    def _initAttributes(self):
        # Every attribute starts out as the NotSet sentinel until the
        # API payload supplies a value.
        for name in ("merged", "message", "sha"):
            setattr(self, "_" + name, github.GithubObject.NotSet)

    def _useAttributes(self, attributes):
        # Table of (payload key, attribute factory) pairs; keeps the
        # conversion logic in one place.
        converters = (
            ("merged", self._makeBoolAttribute),
            ("message", self._makeStringAttribute),
            ("sha", self._makeStringAttribute),
        )
        for name, make in converters:
            if name in attributes:  # pragma no branch
                setattr(self, "_" + name, make(attributes[name]))
gpl-3.0
EricSekyere/python
neo1218/0023/web/config.py
38
1075
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# Application configuration module.
import os

# Directory containing this file; the SQLite databases live next to it.
basedir = os.path.abspath(os.path.dirname(__file__))


def _sqlite_uri(filename):
    """Build a SQLite URI for a database file stored alongside this module."""
    return 'sqlite:///' + os.path.join(basedir, filename)


class Config:
    """Base configuration shared by every environment."""
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True

    @staticmethod
    def init_app(app):
        """Hook for environment-specific app initialisation; no-op by default."""
        pass


class DevelopmentConfig(Config):
    """Development settings: debug on, local development database."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = (os.environ.get('DEV_DATABASE_URL')
                               or _sqlite_uri('data-dev.sqlite'))


class TestingConfig(Config):
    """Testing settings: testing flag on, throwaway test database."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = (os.environ.get('TEST_DATABASE_URL')
                               or _sqlite_uri('data-test.sqlite'))


class ProductionConfig(Config):
    """Production settings: database taken from the environment."""
    SQLALCHEMY_DATABASE_URI = (os.environ.get('DATABASE_URL')
                               or _sqlite_uri('data.sqlite'))


# Registry mapping configuration names to classes; 'default' is development.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
mit
m-ober/byceps
testfixtures/user.py
1
1690
""" testfixtures.user ~~~~~~~~~~~~~~~~~ :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from datetime import date, datetime from byceps.database import generate_uuid from byceps.services.user.models.detail import UserDetail from byceps.services.user import creation_service as user_creation_service def create_user( screen_name='Faith', *, user_id=None, created_at=None, email_address=None, email_address_verified=False, initialized=True, ): if not user_id: user_id = generate_uuid() if not created_at: created_at = datetime.utcnow() if not email_address: email_address = f'user{user_id}@example.com' user = user_creation_service.build_user( created_at, screen_name, email_address ) user.id = user_id user.email_address_verified = email_address_verified user.initialized = initialized return user def create_user_with_detail( screen_name='Faith', *, user_id=None, email_address=None, initialized=True, first_names='John Joseph', last_name='Doe', date_of_birth=None, ): user = create_user( screen_name, user_id=user_id, email_address=email_address, initialized=initialized, ) detail = UserDetail(user=user) detail.first_names = first_names detail.last_name = last_name detail.date_of_birth = (date_of_birth if date_of_birth else date(1993, 2, 15)) detail.country = 'State of Mind' detail.zip_code = '31337' detail.city = 'Atrocity' detail.street = 'Elite Street 1337' detail.phone_number = '555-CALL-ME-MAYBE' return user
bsd-3-clause
mancoast/CPythonPyc_test
fail/310_test_descrtut.py
56
11739
# This contains most of the executable examples from Guido's descr # tutorial, once at # # http://www.python.org/2.2/descrintro.html # # A few examples left implicit in the writeup were fleshed out, a few were # skipped due to lack of interest (e.g., faking super() by hand isn't # of much interest anymore), and a few were fiddled to make the output # deterministic. from test.support import sortdict import pprint class defaultdict(dict): def __init__(self, default=None): dict.__init__(self) self.default = default def __getitem__(self, key): try: return dict.__getitem__(self, key) except KeyError: return self.default def get(self, key, *args): if not args: args = (self.default,) return dict.get(self, key, *args) def merge(self, other): for key in other: if key not in self: self[key] = other[key] test_1 = """ Here's the new type at work: >>> print(defaultdict) # show our type <class 'test.test_descrtut.defaultdict'> >>> print(type(defaultdict)) # its metatype <class 'type'> >>> a = defaultdict(default=0.0) # create an instance >>> print(a) # show the instance {} >>> print(type(a)) # show its type <class 'test.test_descrtut.defaultdict'> >>> print(a.__class__) # show its class <class 'test.test_descrtut.defaultdict'> >>> print(type(a) is a.__class__) # its type is its class True >>> a[1] = 3.25 # modify the instance >>> print(a) # show the new value {1: 3.25} >>> print(a[1]) # show the new item 3.25 >>> print(a[0]) # a non-existent item 0.0 >>> a.merge({1:100, 2:200}) # use a dict method >>> print(sortdict(a)) # show the result {1: 3.25, 2: 200} >>> We can also use the new type in contexts where classic only allows "real" dictionaries, such as the locals/globals dictionaries for the exec statement or the built-in function eval(): >>> print(sorted(a.keys())) [1, 2] >>> a['print'] = print # need the print function here >>> exec("x = 3; print(x)", a) 3 >>> print(sorted(a.keys(), key=lambda x: (str(type(x)), x))) [1, 2, '__builtins__', 'print', 'x'] >>> print(a['x']) 3 >>> 
Now I'll show that defaultdict instances have dynamic instance variables, just like classic classes: >>> a.default = -1 >>> print(a["noway"]) -1 >>> a.default = -1000 >>> print(a["noway"]) -1000 >>> 'default' in dir(a) True >>> a.x1 = 100 >>> a.x2 = 200 >>> print(a.x1) 100 >>> d = dir(a) >>> 'default' in d and 'x1' in d and 'x2' in d True >>> print(sortdict(a.__dict__)) {'default': -1000, 'x1': 100, 'x2': 200} >>> """ class defaultdict2(dict): __slots__ = ['default'] def __init__(self, default=None): dict.__init__(self) self.default = default def __getitem__(self, key): try: return dict.__getitem__(self, key) except KeyError: return self.default def get(self, key, *args): if not args: args = (self.default,) return dict.get(self, key, *args) def merge(self, other): for key in other: if key not in self: self[key] = other[key] test_2 = """ The __slots__ declaration takes a list of instance variables, and reserves space for exactly these in the instance. When __slots__ is used, other instance variables cannot be assigned to: >>> a = defaultdict2(default=0.0) >>> a[1] 0.0 >>> a.default = -1 >>> a[1] -1 >>> a.x1 = 1 Traceback (most recent call last): File "<stdin>", line 1, in ? 
AttributeError: 'defaultdict2' object has no attribute 'x1' >>> """ test_3 = """ Introspecting instances of built-in types For instance of built-in types, x.__class__ is now the same as type(x): >>> type([]) <class 'list'> >>> [].__class__ <class 'list'> >>> list <class 'list'> >>> isinstance([], list) True >>> isinstance([], dict) False >>> isinstance([], object) True >>> You can get the information from the list type: >>> pprint.pprint(dir(list)) # like list.__dict__.keys(), but sorted ['__add__', '__class__', '__contains__', '__delattr__', '__delitem__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__iadd__', '__imul__', '__init__', '__iter__', '__le__', '__len__', '__lt__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__rmul__', '__setattr__', '__setitem__', '__sizeof__', '__str__', '__subclasshook__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort'] The new introspection API gives more information than the old one: in addition to the regular methods, it also shows the methods that are normally invoked through special notations, e.g. __iadd__ (+=), __len__ (len), __ne__ (!=). You can invoke any method from this list directly: >>> a = ['tic', 'tac'] >>> list.__len__(a) # same as len(a) 2 >>> a.__len__() # ditto 2 >>> list.append(a, 'toe') # same as a.append('toe') >>> a ['tic', 'tac', 'toe'] >>> This is just like it is for user-defined classes. """ test_4 = """ Static methods and class methods The new introspection API makes it possible to add static methods and class methods. Static methods are easy to describe: they behave pretty much like static methods in C++ or Java. Here's an example: >>> class C: ... ... @staticmethod ... def foo(x, y): ... 
print("staticmethod", x, y) >>> C.foo(1, 2) staticmethod 1 2 >>> c = C() >>> c.foo(1, 2) staticmethod 1 2 Class methods use a similar pattern to declare methods that receive an implicit first argument that is the *class* for which they are invoked. >>> class C: ... @classmethod ... def foo(cls, y): ... print("classmethod", cls, y) >>> C.foo(1) classmethod <class 'test.test_descrtut.C'> 1 >>> c = C() >>> c.foo(1) classmethod <class 'test.test_descrtut.C'> 1 >>> class D(C): ... pass >>> D.foo(1) classmethod <class 'test.test_descrtut.D'> 1 >>> d = D() >>> d.foo(1) classmethod <class 'test.test_descrtut.D'> 1 This prints "classmethod __main__.D 1" both times; in other words, the class passed as the first argument of foo() is the class involved in the call, not the class involved in the definition of foo(). But notice this: >>> class E(C): ... @classmethod ... def foo(cls, y): # override C.foo ... print("E.foo() called") ... C.foo(y) >>> E.foo(1) E.foo() called classmethod <class 'test.test_descrtut.C'> 1 >>> e = E() >>> e.foo(1) E.foo() called classmethod <class 'test.test_descrtut.C'> 1 In this example, the call to C.foo() from E.foo() will see class C as its first argument, not class E. This is to be expected, since the call specifies the class C. But it stresses the difference between these class methods and methods defined in metaclasses (where an upcall to a metamethod would pass the target class as an explicit first argument). """ test_5 = """ Attributes defined by get/set methods >>> class property(object): ... ... def __init__(self, get, set=None): ... self.__get = get ... self.__set = set ... ... def __get__(self, inst, type=None): ... return self.__get(inst) ... ... def __set__(self, inst, value): ... if self.__set is None: ... raise AttributeError("this attribute is read-only") ... return self.__set(inst, value) Now let's define a class with an attribute x defined by a pair of methods, getx() and and setx(): >>> class C(object): ... ... 
def __init__(self): ... self.__x = 0 ... ... def getx(self): ... return self.__x ... ... def setx(self, x): ... if x < 0: x = 0 ... self.__x = x ... ... x = property(getx, setx) Here's a small demonstration: >>> a = C() >>> a.x = 10 >>> print(a.x) 10 >>> a.x = -10 >>> print(a.x) 0 >>> Hmm -- property is builtin now, so let's try it that way too. >>> del property # unmask the builtin >>> property <class 'property'> >>> class C(object): ... def __init__(self): ... self.__x = 0 ... def getx(self): ... return self.__x ... def setx(self, x): ... if x < 0: x = 0 ... self.__x = x ... x = property(getx, setx) >>> a = C() >>> a.x = 10 >>> print(a.x) 10 >>> a.x = -10 >>> print(a.x) 0 >>> """ test_6 = """ Method resolution order This example is implicit in the writeup. >>> class A: # implicit new-style class ... def save(self): ... print("called A.save()") >>> class B(A): ... pass >>> class C(A): ... def save(self): ... print("called C.save()") >>> class D(B, C): ... pass >>> D().save() called C.save() >>> class A(object): # explicit new-style class ... def save(self): ... print("called A.save()") >>> class B(A): ... pass >>> class C(A): ... def save(self): ... print("called C.save()") >>> class D(B, C): ... pass >>> D().save() called C.save() """ class A(object): def m(self): return "A" class B(A): def m(self): return "B" + super(B, self).m() class C(A): def m(self): return "C" + super(C, self).m() class D(C, B): def m(self): return "D" + super(D, self).m() test_7 = """ Cooperative methods and "super" >>> print(D().m()) # "DCBA" DCBA """ test_8 = """ Backwards incompatibilities >>> class A: ... def foo(self): ... print("called A.foo()") >>> class B(A): ... pass >>> class C(A): ... def foo(self): ... B.foo(self) >>> C().foo() called A.foo() >>> class C(A): ... def foo(self): ... 
A.foo(self) >>> C().foo() called A.foo() """ __test__ = {"tut1": test_1, "tut2": test_2, "tut3": test_3, "tut4": test_4, "tut5": test_5, "tut6": test_6, "tut7": test_7, "tut8": test_8} # Magic test name that regrtest.py invokes *after* importing this module. # This worms around a bootstrap problem. # Note that doctest and regrtest both look in sys.argv for a "-v" argument, # so this works as expected in both ways of running regrtest. def test_main(verbose=None): # Obscure: import this module as test.test_descrtut instead of as # plain test_descrtut because the name of this module works its way # into the doctest examples, and unless the full test.test_descrtut # business is used the name can change depending on how the test is # invoked. from test import support, test_descrtut support.run_doctest(test_descrtut, verbose) # This part isn't needed for regrtest, but for running the test directly. if __name__ == "__main__": test_main(1)
gpl-3.0
2014c2g3/2015cd_midterm
static/Brython3.1.1-20150328-091302/Lib/xml/sax/_exceptions.py
625
4885
"""Different kinds of SAX Exceptions""" #in brython the 4 lines below causes an $globals['Exception'] error #import sys #if sys.platform[:4] == "java": # from java.lang import Exception #del sys # ===== SAXEXCEPTION ===== class SAXException(Exception): """Encapsulate an XML error or warning. This class can contain basic error or warning information from either the XML parser or the application: you can subclass it to provide additional functionality, or to add localization. Note that although you will receive a SAXException as the argument to the handlers in the ErrorHandler interface, you are not actually required to raise the exception; instead, you can simply read the information in it.""" def __init__(self, msg, exception=None): """Creates an exception. The message is required, but the exception is optional.""" self._msg = msg self._exception = exception Exception.__init__(self, msg) def getMessage(self): "Return a message for this exception." return self._msg def getException(self): "Return the embedded exception, or None if there was none." return self._exception def __str__(self): "Create a string representation of the exception." return self._msg def __getitem__(self, ix): """Avoids weird error messages if someone does exception[ix] by mistake, since Exception has __getitem__ defined.""" raise AttributeError("__getitem__") # ===== SAXPARSEEXCEPTION ===== class SAXParseException(SAXException): """Encapsulate an XML parse error or warning. This exception will include information for locating the error in the original XML document. Note that although the application will receive a SAXParseException as the argument to the handlers in the ErrorHandler interface, the application is not actually required to raise the exception; instead, it can simply read the information in it and take a different action. 
Since this exception is a subclass of SAXException, it inherits the ability to wrap another exception.""" def __init__(self, msg, exception, locator): "Creates the exception. The exception parameter is allowed to be None." SAXException.__init__(self, msg, exception) self._locator = locator # We need to cache this stuff at construction time. # If this exception is raised, the objects through which we must # traverse to get this information may be deleted by the time # it gets caught. self._systemId = self._locator.getSystemId() self._colnum = self._locator.getColumnNumber() self._linenum = self._locator.getLineNumber() def getColumnNumber(self): """The column number of the end of the text where the exception occurred.""" return self._colnum def getLineNumber(self): "The line number of the end of the text where the exception occurred." return self._linenum def getPublicId(self): "Get the public identifier of the entity where the exception occurred." return self._locator.getPublicId() def getSystemId(self): "Get the system identifier of the entity where the exception occurred." return self._systemId def __str__(self): "Create a string representation of the exception." sysid = self.getSystemId() if sysid is None: sysid = "<unknown>" linenum = self.getLineNumber() if linenum is None: linenum = "?" colnum = self.getColumnNumber() if colnum is None: colnum = "?" return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg) # ===== SAXNOTRECOGNIZEDEXCEPTION ===== class SAXNotRecognizedException(SAXException): """Exception class for an unrecognized identifier. An XMLReader will raise this exception when it is confronted with an unrecognized feature or property. SAX applications and extensions may use this class for similar purposes.""" pass # ===== SAXNOTSUPPORTEDEXCEPTION ===== class SAXNotSupportedException(SAXException): """Exception class for an unsupported operation. 
An XMLReader will raise this exception when a service it cannot perform is requested (specifically setting a state or value). SAX applications and extensions may use this class for similar purposes.""" pass # ===== SAXNOTSUPPORTEDEXCEPTION ===== class SAXReaderNotAvailable(SAXNotSupportedException): """Exception class for a missing driver. An XMLReader module (driver) should raise this exception when it is first imported, e.g. when a support module cannot be imported. It also may be raised during parsing, e.g. if executing an external program is not permitted.""" pass
gpl-3.0
StackStorm/st2
scripts/fixate-requirements.py
3
9493
#!/usr/bin/env python # Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is used to automate generation of requirements.txt for st2 components. The idea behind this script is that that each component has it's own requirements "in-requirements.txt" file. in-requirements.txt is an input requirements file - a requirements file with dependencies but WITHOUT any version restrictions. In addition to this file, there's also the top-level "fixed-requirements.txt" which pins production versions for the whole st2 stack. During production use (building, packaging, etc) requirements.txt is generated from in-requirements.txt where version of packages are fixed according to fixed-requirements.txt. 
""" from __future__ import absolute_import, print_function import argparse import os import os.path import sys from distutils.version import StrictVersion # NOTE: This script can't rely on any 3rd party dependency so we need to use this code here PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 if PY3: text_type = str else: text_type = unicode # noqa # pylint: disable=E0602 OSCWD = os.path.abspath(os.curdir) GET_PIP = " curl https://bootstrap.pypa.io/get-pip.py | python" try: import pip from pip import __version__ as pip_version except ImportError as e: print("Failed to import pip: %s" % (text_type(e))) print("") print("Download pip:\n%s" % (GET_PIP)) sys.exit(1) try: # pip < 10.0 from pip.req import parse_requirements except ImportError: # pip >= 10.0 try: from pip._internal.req.req_file import parse_requirements except ImportError as e: print("Failed to import parse_requirements from pip: %s" % (text_type(e))) print("Using pip: %s" % (str(pip_version))) sys.exit(1) try: from pip._internal.req.constructors import parse_req_from_line except ImportError: # Do not error, as will only use on pip >= 20 pass def parse_args(): parser = argparse.ArgumentParser( description="Tool for requirements.txt generation." ) parser.add_argument( "-s", "--source-requirements", nargs="+", required=True, help="Specify paths to requirements file(s). " "In case several requirements files are given their content is merged.", ) parser.add_argument( "-f", "--fixed-requirements", required=True, help="Specify path to fixed-requirements.txt file.", ) parser.add_argument( "-o", "--output-file", default="requirements.txt", help="Specify path to the resulting requirements file.", ) parser.add_argument( "--skip", default=None, help=( "Comma delimited list of requirements to not " "include in the generated file." 
), ) if len(sys.argv) < 2: parser.print_help() sys.exit(1) return vars(parser.parse_args()) def check_pip_version(): if StrictVersion(pip.__version__) < StrictVersion("6.1.0"): print( "Upgrade pip, your version `{0}' " "is outdated:\n".format(pip.__version__), GET_PIP, ) sys.exit(1) def load_requirements(file_path): return tuple((r for r in parse_requirements(file_path, session=False))) def locate_file(path, must_exist=False): if not os.path.isabs(path): path = os.path.join(OSCWD, path) if must_exist and not os.path.isfile(path): print("Error: couldn't locate file `{0}'".format(path)) return path def merge_source_requirements(sources): """ Read requirements source files and merge it's content. """ projects = set() merged_requirements = [] for infile_path in (locate_file(p, must_exist=True) for p in sources): for req in load_requirements(infile_path): if hasattr(req, "requirement"): # Requirements starting with project name "project ..." parsedreq = parse_req_from_line(req.requirement, req.line_source) if parsedreq.requirement: # Skip already added project name if parsedreq.requirement.name in projects: continue projects.add(parsedreq.requirement.name) merged_requirements.append(req) # Requirements lines like "vcs+proto://url" elif parsedreq.link: merged_requirements.append(req) else: raise RuntimeError("Unexpected requirement {0}".format(req)) else: if req.req: # Skip already added project name if req.name in projects: continue projects.add(req.name) merged_requirements.append(req) # Requirements lines like "vcs+proto://url" elif req.link: merged_requirements.append(req) else: raise RuntimeError("Unexpected requirement {0}".format(req)) return merged_requirements def write_requirements( sources=None, fixed_requirements=None, output_file=None, skip=None ): """ Write resulting requirements taking versions from the fixed_requirements. 
""" skip = skip or [] requirements = merge_source_requirements(sources) fixed = load_requirements(locate_file(fixed_requirements, must_exist=True)) # Make sure there are no duplicate / conflicting definitions fixedreq_hash = {} for req in fixed: if hasattr(req, "requirement"): parsedreq = parse_req_from_line(req.requirement, req.line_source) project_name = parsedreq.requirement.name if not req.requirement: continue else: project_name = req.name if not req.req: continue if project_name in fixedreq_hash: raise ValueError( 'Duplicate definition for dependency "%s"' % (project_name) ) fixedreq_hash[project_name] = req lines_to_write = [] links = set() for req in requirements: if hasattr(req, "requirement"): parsedreq = parse_req_from_line(req.requirement, req.line_source) project_name = parsedreq.requirement.name linkreq = parsedreq else: project_name = req.name linkreq = req if project_name in skip: continue # we don't have any idea how to process links, so just add them if linkreq.link and linkreq.link not in links: links.add(linkreq.link) rline = str(linkreq.link) if (hasattr(req, "is_editable") and req.is_editable) or ( hasattr(req, "editable") and req.editable ): rline = "-e %s" % (rline) elif hasattr(req, "requirement") and req.requirement: project = parsedreq.requirement.name req_obj = fixedreq_hash.get(project, req) rline = str(req_obj.requirement) # Markers are included in req_obj.requirement, so no # special processing required elif hasattr(req, "req") and req.req: project = req.name req_obj = fixedreq_hash.get(project, req) rline = str(req_obj.req) # Also write out environment markers if req_obj.markers: rline += " ; {}".format(str(req_obj.markers)) lines_to_write.append(rline) # Sort the lines to guarantee a stable order lines_to_write = sorted(lines_to_write) data = "\n".join(lines_to_write) + "\n" with open(output_file, "w") as fp: fp.write("# Don't edit this file. 
It's generated automatically!\n") fp.write( "# If you want to update global dependencies, modify fixed-requirements.txt\n" ) fp.write( "# and then run 'make requirements' to update requirements.txt for all\n" ) fp.write("# components.\n") fp.write( "# If you want to update depdencies for a single component, modify the\n" ) fp.write( "# in-requirements.txt for that component and then run 'make requirements' to\n" ) fp.write("# update the component requirements.txt\n") fp.write(data) print("Requirements written to: {0}".format(output_file)) if __name__ == "__main__": check_pip_version() args = parse_args() if args["skip"]: skip = args["skip"].split(",") else: skip = None write_requirements( sources=args["source_requirements"], fixed_requirements=args["fixed_requirements"], output_file=args["output_file"], skip=skip, )
apache-2.0
timothycrosley/hug_explainable
hug_explainable/_version.py
1
1185
"""hug_explainable/_version.py Stores the current version for easy use accross the code-base Copyright (C) 2016 Timothy Edmund Crosley Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ current = "0.2.1"
mit
westinedu/wrgroups
django/contrib/admin/models.py
228
2207
from django.db import models from django.contrib.contenttypes.models import ContentType from django.contrib.auth.models import User from django.contrib.admin.util import quote from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import smart_unicode from django.utils.safestring import mark_safe ADDITION = 1 CHANGE = 2 DELETION = 3 class LogEntryManager(models.Manager): def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''): e = self.model(None, None, user_id, content_type_id, smart_unicode(object_id), object_repr[:200], action_flag, change_message) e.save() class LogEntry(models.Model): action_time = models.DateTimeField(_('action time'), auto_now=True) user = models.ForeignKey(User) content_type = models.ForeignKey(ContentType, blank=True, null=True) object_id = models.TextField(_('object id'), blank=True, null=True) object_repr = models.CharField(_('object repr'), max_length=200) action_flag = models.PositiveSmallIntegerField(_('action flag')) change_message = models.TextField(_('change message'), blank=True) objects = LogEntryManager() class Meta: verbose_name = _('log entry') verbose_name_plural = _('log entries') db_table = 'django_admin_log' ordering = ('-action_time',) def __repr__(self): return smart_unicode(self.action_time) def is_addition(self): return self.action_flag == ADDITION def is_change(self): return self.action_flag == CHANGE def is_deletion(self): return self.action_flag == DELETION def get_edited_object(self): "Returns the edited object represented by this log entry" return self.content_type.get_object_for_this_type(pk=self.object_id) def get_admin_url(self): """ Returns the admin URL to edit the object represented by this log entry. This is relative to the Django admin index page. """ if self.content_type and self.object_id: return mark_safe(u"%s/%s/%s/" % (self.content_type.app_label, self.content_type.model, quote(self.object_id))) return None
bsd-3-clause
csrocha/OpenUpgrade
openerp/addons/test_converter/tests/test_html.py
66
13431
# -*- encoding: utf-8 -*- import json import os import datetime from lxml import etree from openerp.tests import common from openerp.tools import html_escape as e from openerp.addons.base.ir import ir_qweb directory = os.path.dirname(__file__) class TestExport(common.TransactionCase): _model = None def setUp(self): super(TestExport, self).setUp() self.Model = self.registry(self._model) def get_field(self, name): return self.Model._fields[name] def get_converter(self, name, type=None): field = self.get_field(name) for postfix in type, field.type, '': fs = ['ir', 'qweb', 'field'] if postfix is None: continue if postfix: fs.append(postfix) try: model = self.registry('.'.join(fs)) break except KeyError: pass return lambda value, options=None, context=None: e(model.value_to_html( self.cr, self.uid, value, field, options=options, context=context)) class TestBasicExport(TestExport): _model = 'test_converter.test_model' class TestCharExport(TestBasicExport): def test_char(self): converter = self.get_converter('char') value = converter('foo') self.assertEqual(value, 'foo') value = converter("foo<bar>") self.assertEqual(value, "foo&lt;bar&gt;") class TestIntegerExport(TestBasicExport): def test_integer(self): converter = self.get_converter('integer') value = converter(42) self.assertEqual(value, "42") class TestFloatExport(TestBasicExport): def setUp(self): super(TestFloatExport, self).setUp() self.registry('res.lang').write(self.cr, self.uid, [1], { 'grouping': '[3,0]' }) def test_float(self): converter = self.get_converter('float') value = converter(42.0) self.assertEqual(value, "42.0") value = converter(42.0100) self.assertEqual(value, "42.01") value = converter(42.01234) self.assertEqual(value, "42.01234") value = converter(1234567.89) self.assertEqual(value, '1,234,567.89') def test_numeric(self): converter = self.get_converter('numeric') value = converter(42.0) self.assertEqual(value, '42.00') value = converter(42.01234) self.assertEqual(value, '42.01') class 
TestCurrencyExport(TestExport): _model = 'test_converter.monetary' def setUp(self): super(TestCurrencyExport, self).setUp() self.Currency = self.registry('res.currency') self.base = self.create(self.Currency, name="Source", symbol=u'source') def create(self, model, context=None, **values): return model.browse( self.cr, self.uid, model.create(self.cr, self.uid, values, context=context), context=context) def convert(self, obj, dest): converter = self.registry('ir.qweb.field.monetary') options = { 'widget': 'monetary', 'display_currency': 'c2' } context = dict(inherit_branding=True) converted = converter.to_html( self.cr, self.uid, 'value', obj, options, etree.Element('span'), {'field': 'obj.value', 'field-options': json.dumps(options)}, '', ir_qweb.QWebContext(self.cr, self.uid, {'obj': obj, 'c2': dest, }), context=context, ) return converted def test_currency_post(self): currency = self.create(self.Currency, name="Test", symbol=u"test") obj = self.create(self.Model, value=0.12) converted = self.convert(obj, dest=currency) self.assertEqual( converted, '<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" ' 'data-oe-field="value" data-oe-type="monetary" ' 'data-oe-expression="obj.value">' '<span class="oe_currency_value">0.12</span>' ' {symbol}</span>'.format( obj=obj, symbol=currency.symbol.encode('utf-8') ),) def test_currency_pre(self): currency = self.create( self.Currency, name="Test", symbol=u"test", position='before') obj = self.create(self.Model, value=0.12) converted = self.convert(obj, dest=currency) self.assertEqual( converted, '<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" ' 'data-oe-field="value" data-oe-type="monetary" ' 'data-oe-expression="obj.value">' '{symbol} ' '<span class="oe_currency_value">0.12</span>' '</span>'.format( obj=obj, symbol=currency.symbol.encode('utf-8') ),) def test_currency_precision(self): """ Precision should be the currency's, not the float field's """ currency = self.create(self.Currency, 
name="Test", symbol=u"test",) obj = self.create(self.Model, value=0.1234567) converted = self.convert(obj, dest=currency) self.assertEqual( converted, '<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" ' 'data-oe-field="value" data-oe-type="monetary" ' 'data-oe-expression="obj.value">' '<span class="oe_currency_value">0.12</span>' ' {symbol}</span>'.format( obj=obj, symbol=currency.symbol.encode('utf-8') ),) class TestTextExport(TestBasicExport): def test_text(self): converter = self.get_converter('text') value = converter("This is my text-kai") self.assertEqual(value, "This is my text-kai") value = converter(""" . The current line (address) in the buffer. $ The last line in the buffer. n The nth, line in the buffer where n is a number in the range [0,$]. $ The last line in the buffer. - The previous line. This is equivalent to -1 and may be repeated with cumulative effect. -n The nth previous line, where n is a non-negative number. + The next line. This is equivalent to +1 and may be repeated with cumulative effect. """) self.assertEqual(value, """<br> . The current line (address) in the buffer.<br> $ The last line in the buffer.<br> n The nth, line in the buffer where n is a number in the range [0,$].<br> $ The last line in the buffer.<br> - The previous line. This is equivalent to -1 and may be repeated with cumulative effect.<br> -n The nth previous line, where n is a non-negative number.<br> + The next line. 
This is equivalent to +1 and may be repeated with cumulative effect.<br> """) value = converter(""" fgdkls;hjas;lj <b>fdslkj</b> d;lasjfa lkdja <a href=http://spam.com>lfks</a> fldkjsfhs <i style="color: red"><a href="http://spamspam.com">fldskjh</a></i> """) self.assertEqual(value, """<br> fgdkls;hjas;lj &lt;b&gt;fdslkj&lt;/b&gt; d;lasjfa lkdja &lt;a href=http://spam.com&gt;lfks&lt;/a&gt;<br> fldkjsfhs &lt;i style=&quot;color: red&quot;&gt;&lt;a href=&quot;http://spamspam.com&quot;&gt;fldskjh&lt;/a&gt;&lt;/i&gt;<br> """) class TestMany2OneExport(TestBasicExport): def test_many2one(self): Sub = self.registry('test_converter.test_model.sub') id0 = self.Model.create(self.cr, self.uid, { 'many2one': Sub.create(self.cr, self.uid, {'name': "Foo"}) }) id1 = self.Model.create(self.cr, self.uid, { 'many2one': Sub.create(self.cr, self.uid, {'name': "Fo<b>o</b>"}) }) def converter(record): model = self.registry('ir.qweb.field.many2one') return e(model.record_to_html(self.cr, self.uid, 'many2one', record)) value = converter(self.Model.browse(self.cr, self.uid, id0)) self.assertEqual(value, "Foo") value = converter(self.Model.browse(self.cr, self.uid, id1)) self.assertEqual(value, "Fo&lt;b&gt;o&lt;/b&gt;") class TestBinaryExport(TestBasicExport): def test_image(self): field = self.get_field('binary') converter = self.registry('ir.qweb.field.image') with open(os.path.join(directory, 'test_vectors', 'image'), 'rb') as f: content = f.read() encoded_content = content.encode('base64') value = e(converter.value_to_html( self.cr, self.uid, encoded_content, field)) self.assertEqual( value, '<img src="data:image/jpeg;base64,%s">' % ( encoded_content )) with open(os.path.join(directory, 'test_vectors', 'pdf'), 'rb') as f: content = f.read() with self.assertRaises(ValueError): e(converter.value_to_html( self.cr, self.uid, 'binary', content.encode('base64'), field)) with open(os.path.join(directory, 'test_vectors', 'pptx'), 'rb') as f: content = f.read() with 
self.assertRaises(ValueError): e(converter.value_to_html( self.cr, self.uid, 'binary', content.encode('base64'), field)) class TestSelectionExport(TestBasicExport): def test_selection(self): [record] = self.Model.browse(self.cr, self.uid, [self.Model.create(self.cr, self.uid, { 'selection': 2, 'selection_str': 'C', })]) converter = self.registry('ir.qweb.field.selection') field_name = 'selection' value = converter.record_to_html(self.cr, self.uid, field_name, record) self.assertEqual(value, "réponse B") field_name = 'selection_str' value = converter.record_to_html(self.cr, self.uid, field_name, record) self.assertEqual(value, "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?") class TestHTMLExport(TestBasicExport): def test_html(self): converter = self.get_converter('html') input = '<span>span</span>' value = converter(input) self.assertEqual(value, input) class TestDatetimeExport(TestBasicExport): def setUp(self): super(TestDatetimeExport, self).setUp() # set user tz to known value Users = self.registry('res.users') Users.write(self.cr, self.uid, self.uid, { 'tz': 'Pacific/Niue' }, context=None) def test_date(self): converter = self.get_converter('date') value = converter('2011-05-03') # default lang/format is US self.assertEqual(value, '05/03/2011') def test_datetime(self): converter = self.get_converter('datetime') value = converter('2011-05-03 11:12:13') # default lang/format is US self.assertEqual(value, '05/03/2011 00:12:13') def test_custom_format(self): converter = self.get_converter('datetime') converter2 = self.get_converter('date') opts = {'format': 'MMMM d'} value = converter('2011-03-02 11:12:13', options=opts) value2 = converter2('2001-03-02', options=opts) self.assertEqual( value, 'March 2' ) self.assertEqual( value2, 'March 2' ) class TestDurationExport(TestBasicExport): def setUp(self): super(TestDurationExport, self).setUp() # needs to have lang installed otherwise falls back on en_US self.registry('res.lang').load_lang(self.cr, self.uid, 
'fr_FR') def test_negative(self): converter = self.get_converter('float', 'duration') with self.assertRaises(ValueError): converter(-4) def test_missing_unit(self): converter = self.get_converter('float', 'duration') with self.assertRaises(ValueError): converter(4) def test_basic(self): converter = self.get_converter('float', 'duration') result = converter(4, {'unit': 'hour'}, {'lang': 'fr_FR'}) self.assertEqual(result, u'4 heures') result = converter(50, {'unit': 'second'}, {'lang': 'fr_FR'}) self.assertEqual(result, u'50 secondes') def test_multiple(self): converter = self.get_converter('float', 'duration') result = converter(1.5, {'unit': 'hour'}, {'lang': 'fr_FR'}) self.assertEqual(result, u"1 heure 30 minutes") result = converter(72, {'unit': 'second'}, {'lang': 'fr_FR'}) self.assertEqual(result, u"1 minute 12 secondes") class TestRelativeDatetime(TestBasicExport): # not sure how a test based on "current time" should be tested. Even less # so as it would mostly be a test of babel... def setUp(self): super(TestRelativeDatetime, self).setUp() # needs to have lang installed otherwise falls back on en_US self.registry('res.lang').load_lang(self.cr, self.uid, 'fr_FR') def test_basic(self): converter = self.get_converter('datetime', 'relative') t = datetime.datetime.utcnow() - datetime.timedelta(hours=1) result = converter(t, context={'lang': 'fr_FR'}) self.assertEqual(result, u"il y a 1 heure")
agpl-3.0
xupit3r/askpgh
askbot/conf/license.py
15
2171
"""settings that allow changing of the license clause used in askbot instances""" from askbot import const from askbot.conf.settings_wrapper import settings from askbot.conf.super_groups import CONTENT_AND_UI from askbot.deps import livesettings from askbot.skins import utils as skin_utils from django.utils.translation import ugettext_lazy as _ from django.conf import settings as django_settings LICENSE_SETTINGS = livesettings.ConfigurationGroup( 'LICENSE_SETTINGS', _('Content License'), super_group = CONTENT_AND_UI ) settings.register( livesettings.BooleanValue( LICENSE_SETTINGS, 'USE_LICENSE', description = _('Show license clause in the site footer'), default = True ) ) settings.register( livesettings.StringValue( LICENSE_SETTINGS, 'LICENSE_ACRONYM', description = _('Short name for the license'), default = 'cc-by-sa' ) ) settings.register( livesettings.StringValue( LICENSE_SETTINGS, 'LICENSE_TITLE', description = _('Full name of the license'), default = _('Creative Commons Attribution Share Alike 3.0'), ) ) settings.register( livesettings.BooleanValue( LICENSE_SETTINGS, 'LICENSE_USE_URL', description = _('Add link to the license page'), default = True ) ) settings.register( livesettings.URLValue( LICENSE_SETTINGS, 'LICENSE_URL', description = _('License homepage'), help_text = _( 'URL of the official page with all the license legal clauses' ), default = const.DEPENDENCY_URLS['cc-by-sa'] ) ) settings.register( livesettings.BooleanValue( LICENSE_SETTINGS, 'LICENSE_USE_LOGO', description = _('Use license logo'), default = True ) ) settings.register( livesettings.ImageValue( LICENSE_SETTINGS, 'LICENSE_LOGO_URL', description = _('License logo image'), default = '/images/cc-by-sa.png', url_resolver = skin_utils.get_media_url ) )
gpl-3.0
sunils34/buffer-django-nonrel
django/contrib/gis/gdal/srs.py
291
11717
""" The Spatial Reference class, represensents OGR Spatial Reference objects. Example: >>> from django.contrib.gis.gdal import SpatialReference >>> srs = SpatialReference('WGS84') >>> print srs GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]], TOWGS84[0,0,0,0,0,0,0], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0, AUTHORITY["EPSG","8901"]], UNIT["degree",0.01745329251994328, AUTHORITY["EPSG","9122"]], AUTHORITY["EPSG","4326"]] >>> print srs.proj +proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs >>> print srs.ellipsoid (6378137.0, 6356752.3142451793, 298.25722356300003) >>> print srs.projected, srs.geographic False True >>> srs.import_epsg(32140) >>> print srs.name NAD83 / Texas South Central """ import re from ctypes import byref, c_char_p, c_int, c_void_p # Getting the error checking routine and exceptions from django.contrib.gis.gdal.base import GDALBase from django.contrib.gis.gdal.error import OGRException, SRSException from django.contrib.gis.gdal.prototypes import srs as capi #### Spatial Reference class. #### class SpatialReference(GDALBase): """ A wrapper for the OGRSpatialReference object. According to the GDAL Web site, the SpatialReference object "provide[s] services to represent coordinate systems (projections and datums) and to transform between them." """ #### Python 'magic' routines #### def __init__(self, srs_input=''): """ Creates a GDAL OSR Spatial Reference object from the given input. The input may be string of OGC Well Known Text (WKT), an integer EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83'). """ buf = c_char_p('') srs_type = 'user' if isinstance(srs_input, basestring): # Encoding to ASCII if unicode passed in. if isinstance(srs_input, unicode): srs_input = srs_input.encode('ascii') try: # If SRID is a string, e.g., '4326', then make acceptable # as user input. 
srid = int(srs_input) srs_input = 'EPSG:%d' % srid except ValueError: pass elif isinstance(srs_input, (int, long)): # EPSG integer code was input. srs_type = 'epsg' elif isinstance(srs_input, self.ptr_type): srs = srs_input srs_type = 'ogr' else: raise TypeError('Invalid SRS type "%s"' % srs_type) if srs_type == 'ogr': # Input is already an SRS pointer. srs = srs_input else: # Creating a new SRS pointer, using the string buffer. srs = capi.new_srs(buf) # If the pointer is NULL, throw an exception. if not srs: raise SRSException('Could not create spatial reference from: %s' % srs_input) else: self.ptr = srs # Importing from either the user input string or an integer SRID. if srs_type == 'user': self.import_user_input(srs_input) elif srs_type == 'epsg': self.import_epsg(srs_input) def __del__(self): "Destroys this spatial reference." if self._ptr: capi.release_srs(self._ptr) def __getitem__(self, target): """ Returns the value of the given string attribute node, None if the node doesn't exist. Can also take a tuple as a parameter, (target, child), where child is the index of the attribute in the WKT. For example: >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]') >>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326 >>> print srs['GEOGCS'] WGS 84 >>> print srs['DATUM'] WGS_1984 >>> print srs['AUTHORITY'] EPSG >>> print srs['AUTHORITY', 1] # The authority value 4326 >>> print srs['TOWGS84', 4] # the fourth value in this wkt 0 >>> print srs['UNIT|AUTHORITY'] # For the units authority, have to use the pipe symbole. EPSG >>> print srs['UNIT|AUTHORITY', 1] # The authority value for the untis 9122 """ if isinstance(target, tuple): return self.attr_value(*target) else: return self.attr_value(target) def __str__(self): "The string representation uses 'pretty' WKT." return self.pretty_wkt #### SpatialReference Methods #### def attr_value(self, target, index=0): """ The attribute value for the given target node (e.g. 'PROJCS'). 
The index keyword specifies an index of the child node to return. """ if not isinstance(target, basestring) or not isinstance(index, int): raise TypeError return capi.get_attr_value(self.ptr, target, index) def auth_name(self, target): "Returns the authority name for the given string target node." return capi.get_auth_name(self.ptr, target) def auth_code(self, target): "Returns the authority code for the given string target node." return capi.get_auth_code(self.ptr, target) def clone(self): "Returns a clone of this SpatialReference object." return SpatialReference(capi.clone_srs(self.ptr)) def from_esri(self): "Morphs this SpatialReference from ESRI's format to EPSG." capi.morph_from_esri(self.ptr) def identify_epsg(self): """ This method inspects the WKT of this SpatialReference, and will add EPSG authority nodes where an EPSG identifier is applicable. """ capi.identify_epsg(self.ptr) def to_esri(self): "Morphs this SpatialReference to ESRI's format." capi.morph_to_esri(self.ptr) def validate(self): "Checks to see if the given spatial reference is valid." capi.srs_validate(self.ptr) #### Name & SRID properties #### @property def name(self): "Returns the name of this Spatial Reference." if self.projected: return self.attr_value('PROJCS') elif self.geographic: return self.attr_value('GEOGCS') elif self.local: return self.attr_value('LOCAL_CS') else: return None @property def srid(self): "Returns the SRID of top-level authority, or None if undefined." try: return int(self.attr_value('AUTHORITY', 1)) except (TypeError, ValueError): return None #### Unit Properties #### @property def linear_name(self): "Returns the name of the linear units." units, name = capi.linear_units(self.ptr, byref(c_char_p())) return name @property def linear_units(self): "Returns the value of the linear units." units, name = capi.linear_units(self.ptr, byref(c_char_p())) return units @property def angular_name(self): "Returns the name of the angular units." 
units, name = capi.angular_units(self.ptr, byref(c_char_p())) return name @property def angular_units(self): "Returns the value of the angular units." units, name = capi.angular_units(self.ptr, byref(c_char_p())) return units @property def units(self): """ Returns a 2-tuple of the units value and the units name, and will automatically determines whether to return the linear or angular units. """ if self.projected or self.local: return capi.linear_units(self.ptr, byref(c_char_p())) elif self.geographic: return capi.angular_units(self.ptr, byref(c_char_p())) else: return (None, None) #### Spheroid/Ellipsoid Properties #### @property def ellipsoid(self): """ Returns a tuple of the ellipsoid parameters: (semimajor axis, semiminor axis, and inverse flattening) """ return (self.semi_major, self.semi_minor, self.inverse_flattening) @property def semi_major(self): "Returns the Semi Major Axis for this Spatial Reference." return capi.semi_major(self.ptr, byref(c_int())) @property def semi_minor(self): "Returns the Semi Minor Axis for this Spatial Reference." return capi.semi_minor(self.ptr, byref(c_int())) @property def inverse_flattening(self): "Returns the Inverse Flattening for this Spatial Reference." return capi.invflattening(self.ptr, byref(c_int())) #### Boolean Properties #### @property def geographic(self): """ Returns True if this SpatialReference is geographic (root node is GEOGCS). """ return bool(capi.isgeographic(self.ptr)) @property def local(self): "Returns True if this SpatialReference is local (root node is LOCAL_CS)." return bool(capi.islocal(self.ptr)) @property def projected(self): """ Returns True if this SpatialReference is a projected coordinate system (root node is PROJCS). """ return bool(capi.isprojected(self.ptr)) #### Import Routines ##### def import_epsg(self, epsg): "Imports the Spatial Reference from the EPSG code (an integer)." capi.from_epsg(self.ptr, epsg) def import_proj(self, proj): "Imports the Spatial Reference from a PROJ.4 string." 
capi.from_proj(self.ptr, proj) def import_user_input(self, user_input): "Imports the Spatial Reference from the given user input string." capi.from_user_input(self.ptr, user_input) def import_wkt(self, wkt): "Imports the Spatial Reference from OGC WKT (string)" capi.from_wkt(self.ptr, byref(c_char_p(wkt))) def import_xml(self, xml): "Imports the Spatial Reference from an XML string." capi.from_xml(self.ptr, xml) #### Export Properties #### @property def wkt(self): "Returns the WKT representation of this Spatial Reference." return capi.to_wkt(self.ptr, byref(c_char_p())) @property def pretty_wkt(self, simplify=0): "Returns the 'pretty' representation of the WKT." return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify) @property def proj(self): "Returns the PROJ.4 representation for this Spatial Reference." return capi.to_proj(self.ptr, byref(c_char_p())) @property def proj4(self): "Alias for proj()." return self.proj @property def xml(self, dialect=''): "Returns the XML representation of this Spatial Reference." return capi.to_xml(self.ptr, byref(c_char_p()), dialect) class CoordTransform(GDALBase): "The coordinate system transformation object." def __init__(self, source, target): "Initializes on a source and target SpatialReference objects." if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference): raise TypeError('source and target must be of type SpatialReference') self.ptr = capi.new_ct(source._ptr, target._ptr) self._srs1_name = source.name self._srs2_name = target.name def __del__(self): "Deletes this Coordinate Transformation object." if self._ptr: capi.destroy_ct(self._ptr) def __str__(self): return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
bsd-3-clause
jhawkesworth/ansible
lib/ansible/modules/storage/netapp/na_elementsw_volume_pair.py
31
10254
#!/usr/bin/python # (c) 2017, NetApp, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = ''' module: na_elementsw_volume_pair short_description: NetApp Element Software Volume Pair extends_documentation_fragment: - netapp.solidfire version_added: '2.7' author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com> description: - Create, delete volume pair options: state: description: - Whether the specified volume pair should exist or not. choices: ['present', 'absent'] default: present src_volume: description: - Source volume name or volume ID required: true src_account: description: - Source account name or ID required: true dest_volume: description: - Destination volume name or volume ID required: true dest_account: description: - Destination account name or ID required: true mode: description: - Mode to start the volume pairing choices: ['async', 'sync', 'snapshotsonly'] default: async dest_mvip: description: - Destination IP address of the paired cluster. required: true dest_username: description: - Destination username for the paired cluster - Optional if this is same as source cluster username. dest_password: description: - Destination password for the paired cluster - Optional if this is same as source cluster password. 
''' EXAMPLES = """ - name: Create volume pair na_elementsw_volume_pair: hostname: "{{ src_cluster_hostname }}" username: "{{ src_cluster_username }}" password: "{{ src_cluster_password }}" state: present src_volume: test1 src_account: test2 dest_volume: test3 dest_account: test4 mode: sync dest_mvip: "{{ dest_cluster_hostname }}" - name: Delete volume pair na_elementsw_volume_pair: hostname: "{{ src_cluster_hostname }}" username: "{{ src_cluster_username }}" password: "{{ src_cluster_password }}" state: absent src_volume: 3 src_account: 1 dest_volume: 2 dest_account: 1 dest_mvip: "{{ dest_cluster_hostname }}" dest_username: "{{ dest_cluster_username }}" dest_password: "{{ dest_cluster_password }}" """ RETURN = """ """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native import ansible.module_utils.netapp as netapp_utils from ansible.module_utils.netapp_elementsw_module import NaElementSWModule from ansible.module_utils.netapp_module import NetAppModule HAS_SF_SDK = netapp_utils.has_sf_sdk() try: import solidfire.common except Exception: HAS_SF_SDK = False class ElementSWVolumePair(object): ''' class to handle volume pairing operations ''' def __init__(self): """ Setup Ansible parameters and SolidFire connection """ self.argument_spec = netapp_utils.ontap_sf_host_argument_spec() self.argument_spec.update(dict( state=dict(required=False, choices=['present', 'absent'], default='present'), src_volume=dict(required=True, type='str'), src_account=dict(required=True, type='str'), dest_volume=dict(required=True, type='str'), dest_account=dict(required=True, type='str'), mode=dict(required=False, type='str', choices=['async', 'sync', 'snapshotsonly'], default='async'), dest_mvip=dict(required=True, type='str'), dest_username=dict(required=False, type='str'), dest_password=dict(required=False, type='str', no_log=True) )) self.module = AnsibleModule( argument_spec=self.argument_spec, supports_check_mode=True ) if HAS_SF_SDK is 
False: self.module.fail_json(msg="Unable to import the SolidFire Python SDK") else: self.elem = netapp_utils.create_sf_connection(module=self.module) self.elementsw_helper = NaElementSWModule(self.elem) self.na_helper = NetAppModule() self.parameters = self.na_helper.set_parameters(self.module.params) # get element_sw_connection for destination cluster # overwrite existing source host, user and password with destination credentials self.module.params['hostname'] = self.parameters['dest_mvip'] # username and password is same as source, # if dest_username and dest_password aren't specified if self.parameters.get('dest_username'): self.module.params['username'] = self.parameters['dest_username'] if self.parameters.get('dest_password'): self.module.params['password'] = self.parameters['dest_password'] self.dest_elem = netapp_utils.create_sf_connection(module=self.module) self.dest_elementsw_helper = NaElementSWModule(self.dest_elem) def check_if_already_paired(self, vol_id): """ Check for idempotency A volume can have only one pair Return paired-volume-id if volume is paired already None if volume is not paired """ paired_volumes = self.elem.list_volumes(volume_ids=[vol_id], is_paired=True) for vol in paired_volumes.volumes: for pair in vol.volume_pairs: if pair is not None: return pair.remote_volume_id return None def pair_volumes(self): """ Start volume pairing on source, and complete on target volume """ try: pair_key = self.elem.start_volume_pairing( volume_id=self.parameters['src_vol_id'], mode=self.parameters['mode']) self.dest_elem.complete_volume_pairing( volume_pairing_key=pair_key.volume_pairing_key, volume_id=self.parameters['dest_vol_id']) except solidfire.common.ApiServerError as err: self.module.fail_json(msg="Error pairing volume id %s" % (self.parameters['src_vol_id']), exception=to_native(err)) def pairing_exists(self, src_id, dest_id): src_paired = self.check_if_already_paired(self.parameters['src_vol_id']) dest_paired = 
self.check_if_already_paired(self.parameters['dest_vol_id']) if src_paired is not None or dest_paired is not None: return True return None def unpair_volumes(self): """ Delete volume pair """ try: self.elem.remove_volume_pair(volume_id=self.parameters['src_vol_id']) self.dest_elem.remove_volume_pair(volume_id=self.parameters['dest_vol_id']) except solidfire.common.ApiServerError as err: self.module.fail_json(msg="Error unpairing volume ids %s and %s" % (self.parameters['src_vol_id'], self.parameters['dest_vol_id']), exception=to_native(err)) def get_account_id(self, account, type): """ Get source and destination account IDs """ try: if type == 'src': self.parameters['src_account_id'] = self.elementsw_helper.account_exists(account) elif type == 'dest': self.parameters['dest_account_id'] = self.dest_elementsw_helper.account_exists(account) except solidfire.common.ApiServerError as err: self.module.fail_json(msg="Error: either account %s or %s does not exist" % (self.parameters['src_account'], self.parameters['dest_account']), exception=to_native(err)) def get_volume_id(self, volume, type): """ Get source and destination volume IDs """ if type == 'src': self.parameters['src_vol_id'] = self.elementsw_helper.volume_exists(volume, self.parameters['src_account_id']) if self.parameters['src_vol_id'] is None: self.module.fail_json(msg="Error: source volume %s does not exist" % (self.parameters['src_volume'])) elif type == 'dest': self.parameters['dest_vol_id'] = self.dest_elementsw_helper.volume_exists(volume, self.parameters['dest_account_id']) if self.parameters['dest_vol_id'] is None: self.module.fail_json(msg="Error: destination volume %s does not exist" % (self.parameters['dest_volume'])) def get_ids(self): """ Get IDs for volumes and accounts """ self.get_account_id(self.parameters['src_account'], 'src') self.get_account_id(self.parameters['dest_account'], 'dest') self.get_volume_id(self.parameters['src_volume'], 'src') 
self.get_volume_id(self.parameters['dest_volume'], 'dest') def apply(self): """ Call create / delete volume pair methods """ self.get_ids() paired = self.pairing_exists(self.parameters['src_vol_id'], self.parameters['dest_vol_id']) # calling helper to determine action cd_action = self.na_helper.get_cd_action(paired, self.parameters) if cd_action == "create": self.pair_volumes() elif cd_action == "delete": self.unpair_volumes() self.module.exit_json(changed=self.na_helper.changed) def main(): """ Apply volume pair actions """ vol_obj = ElementSWVolumePair() vol_obj.apply() if __name__ == '__main__': main()
gpl-3.0
proversity-org/edx-platform
common/djangoapps/track/contexts.py
13
1431
"""Generates common contexts""" import logging from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import CourseKey from six import text_type from util.request import COURSE_REGEX log = logging.getLogger(__name__) def course_context_from_url(url): """ Extracts the course_context from the given `url` and passes it on to `course_context_from_course_id()`. """ url = url or '' match = COURSE_REGEX.match(url) course_id = None if match: course_id_string = match.group('course_id') try: course_id = CourseKey.from_string(course_id_string) except InvalidKeyError: log.warning( 'unable to parse course_id "{course_id}"'.format( course_id=course_id_string ), exc_info=True ) return course_context_from_course_id(course_id) def course_context_from_course_id(course_id): """ Creates a course context from a `course_id`. Example Returned Context:: { 'course_id': 'org/course/run', 'org_id': 'org' } """ if course_id is None: return {'course_id': '', 'org_id': ''} # TODO: Make this accept any CourseKey, and serialize it using .to_string assert isinstance(course_id, CourseKey) return { 'course_id': text_type(course_id), 'org_id': course_id.org, }
agpl-3.0
romain-dartigues/ansible
lib/ansible/modules/cloud/cloudstack/cs_instance.py
5
38896
#!/usr/bin/python # -*- coding: utf-8 -*- # # (c) 2015, René Moser <mail@renemoser.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cs_instance short_description: Manages instances and virtual machines on Apache CloudStack based clouds. description: - Deploy, start, update, scale, restart, restore, stop and destroy instances. version_added: '2.0' author: "René Moser (@resmo)" options: name: description: - Host name of the instance. C(name) can only contain ASCII letters. - Name will be generated (UUID) by CloudStack if not specified and can not be changed afterwards. - Either C(name) or C(display_name) is required. display_name: description: - Custom display name of the instances. - Display name will be set to C(name) if not specified. - Either C(name) or C(display_name) is required. group: description: - Group in where the new instance should be in. state: description: - State of the instance. default: present choices: [ deployed, started, stopped, restarted, restored, destroyed, expunged, present, absent ] service_offering: description: - Name or id of the service offering of the new instance. - If not set, first found service offering is used. cpu: description: - The number of CPUs to allocate to the instance, used with custom service offerings cpu_speed: description: - The clock speed/shares allocated to the instance, used with custom service offerings memory: description: - The memory allocated to the instance, used with custom service offerings template: description: - Name, display text or id of the template to be used for creating the new instance. - Required when using I(state=present). - Mutually exclusive with C(ISO) option. 
iso: description: - Name or id of the ISO to be used for creating the new instance. - Required when using I(state=present). - Mutually exclusive with C(template) option. template_filter: description: - Name of the filter used to search for the template or iso. - Used for params C(iso) or C(template) on I(state=present). - The filter C(all) was added in 2.6. default: executable choices: [ all, featured, self, selfexecutable, sharedexecutable, executable, community ] aliases: [ iso_filter ] version_added: '2.1' hypervisor: description: - Name the hypervisor to be used for creating the new instance. - Relevant when using I(state=present), but only considered if not set on ISO/template. - If not set or found on ISO/template, first found hypervisor will be used. choices: [ KVM, kvm, VMware, vmware, BareMetal, baremetal, XenServer, xenserver, LXC, lxc, HyperV, hyperv, UCS, ucs, OVM, ovm, Simulator, simulator ] keyboard: description: - Keyboard device type for the instance. choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ] networks: description: - List of networks to use for the new instance. aliases: [ network ] ip_address: description: - IPv4 address for default instance's network during creation. ip6_address: description: - IPv6 address for default instance's network. ip_to_networks: description: - "List of mappings in the form I({'network': NetworkName, 'ip': 1.2.3.4})" - Mutually exclusive with C(networks) option. aliases: [ ip_to_network ] disk_offering: description: - Name of the disk offering to be used. disk_size: description: - Disk size in GByte required if deploying instance from ISO. 
root_disk_size: description: - Root disk size in GByte required if deploying instance with KVM hypervisor and want resize the root disk size at startup (need CloudStack >= 4.4, cloud-initramfs-growroot installed and enabled in the template) security_groups: description: - List of security groups the instance to be applied to. aliases: [ security_group ] host: description: - Host on which an instance should be deployed or started on. - Only considered when I(state=started) or instance is running. - Requires root admin privileges. version_added: 2.6 domain: description: - Domain the instance is related to. account: description: - Account the instance is related to. project: description: - Name of the project the instance to be deployed in. zone: description: - Name of the zone in which the instance should be deployed. - If not set, default zone is used. ssh_key: description: - Name of the SSH key to be deployed on the new instance. affinity_groups: description: - Affinity groups names to be applied to the new instance. aliases: [ affinity_group ] user_data: description: - Optional data (ASCII) that can be sent to the instance upon a successful deployment. - The data will be automatically base64 encoded. - Consider switching to HTTP_POST by using I(CLOUDSTACK_METHOD=post) to increase the HTTP_GET size limit of 2KB to 32 KB. force: description: - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed. type: bool default: no allow_root_disk_shrink: description: - Enables a volume shrinkage when the new size is smaller than the old one. type: bool default: no version_added: '2.7' tags: description: - List of tags. Tags are a list of dictionaries having keys C(key) and C(value). - "If you want to delete all tags, set a empty list e.g. I(tags: [])." aliases: [ tag ] poll_async: description: - Poll async jobs until job has finished. type: bool default: yes details: description: - Map to specify custom parameters. 
version_added: '2.6' extends_documentation_fragment: cloudstack ''' EXAMPLES = ''' # NOTE: Names of offerings and ISOs depending on the CloudStack configuration. - name: create a instance from an ISO cs_instance: name: web-vm-1 iso: Linux Debian 7 64-bit hypervisor: VMware project: Integration zone: ch-zrh-ix-01 service_offering: 1cpu_1gb disk_offering: PerfPlus Storage disk_size: 20 networks: - Server Integration - Sync Integration - Storage Integration delegate_to: localhost - name: for changing a running instance, use the 'force' parameter cs_instance: name: web-vm-1 display_name: web-vm-01.example.com iso: Linux Debian 7 64-bit service_offering: 2cpu_2gb force: yes delegate_to: localhost # NOTE: user_data can be used to kickstart the instance using cloud-init yaml config. - name: create or update a instance on Exoscale's public cloud using display_name. cs_instance: display_name: web-vm-1 template: Linux Debian 7 64-bit service_offering: Tiny ssh_key: john@example.com tags: - key: admin value: john - key: foo value: bar user_data: | #cloud-config packages: - nginx delegate_to: localhost - name: create an instance with multiple interfaces specifying the IP addresses cs_instance: name: web-vm-1 template: Linux Debian 7 64-bit service_offering: Tiny ip_to_networks: - network: NetworkA ip: 10.1.1.1 - network: NetworkB ip: 192.0.2.1 delegate_to: localhost - name: ensure an instance is stopped cs_instance: name: web-vm-1 state: stopped delegate_to: localhost - name: ensure an instance is running cs_instance: name: web-vm-1 state: started delegate_to: localhost - name: remove an instance cs_instance: name: web-vm-1 state: absent delegate_to: localhost ''' RETURN = ''' --- id: description: UUID of the instance. returned: success type: string sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 name: description: Name of the instance. returned: success type: string sample: web-01 display_name: description: Display name of the instance. 
returned: success type: string sample: web-01 group: description: Group name of the instance is related. returned: success type: string sample: web created: description: Date of the instance was created. returned: success type: string sample: 2014-12-01T14:57:57+0100 password_enabled: description: True if password setting is enabled. returned: success type: boolean sample: true password: description: The password of the instance if exists. returned: success type: string sample: Ge2oe7Do ssh_key: description: Name of SSH key deployed to instance. returned: success type: string sample: key@work domain: description: Domain the instance is related to. returned: success type: string sample: example domain account: description: Account the instance is related to. returned: success type: string sample: example account project: description: Name of project the instance is related to. returned: success type: string sample: Production default_ip: description: Default IP address of the instance. returned: success type: string sample: 10.23.37.42 default_ip6: description: Default IPv6 address of the instance. returned: success type: string sample: 2a04:c43:c00:a07:4b4:beff:fe00:74 version_added: '2.6' public_ip: description: Public IP address with instance via static NAT rule. returned: success type: string sample: 1.2.3.4 iso: description: Name of ISO the instance was deployed with. returned: success type: string sample: Debian-8-64bit template: description: Name of template the instance was deployed with. returned: success type: string sample: Linux Debian 9 64-bit template_display_text: description: Display text of template the instance was deployed with. returned: success type: string sample: Linux Debian 9 64-bit 200G Disk (2017-10-08-622866) version_added: 2.6 service_offering: description: Name of the service offering the instance has. returned: success type: string sample: 2cpu_2gb zone: description: Name of zone the instance is in. 
returned: success type: string sample: ch-gva-2 state: description: State of the instance. returned: success type: string sample: Running security_groups: description: Security groups the instance is in. returned: success type: list sample: '[ "default" ]' affinity_groups: description: Affinity groups the instance is in. returned: success type: list sample: '[ "webservers" ]' tags: description: List of resource tags associated with the instance. returned: success type: dict sample: '[ { "key": "foo", "value": "bar" } ]' hypervisor: description: Hypervisor related to this instance. returned: success type: string sample: KVM host: description: Hostname of hypervisor an instance is running on. returned: success and instance is running type: string sample: host-01.example.com version_added: 2.6 instance_name: description: Internal name of the instance (ROOT admin only). returned: success type: string sample: i-44-3992-VM ''' import base64 from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_bytes, to_text from ansible.module_utils.cloudstack import ( AnsibleCloudStack, CS_HYPERVISORS, cs_argument_spec, cs_required_together ) class AnsibleCloudStackInstance(AnsibleCloudStack): def __init__(self, module): super(AnsibleCloudStackInstance, self).__init__(module) self.returns = { 'group': 'group', 'hypervisor': 'hypervisor', 'instancename': 'instance_name', 'publicip': 'public_ip', 'passwordenabled': 'password_enabled', 'password': 'password', 'serviceofferingname': 'service_offering', 'isoname': 'iso', 'templatename': 'template', 'templatedisplaytext': 'template_display_text', 'keypair': 'ssh_key', 'hostname': 'host', } self.instance = None self.template = None self.iso = None def get_service_offering_id(self): service_offering = self.module.params.get('service_offering') service_offerings = self.query_api('listServiceOfferings') if service_offerings: if not service_offering: return service_offerings['serviceoffering'][0]['id'] for 
s in service_offerings['serviceoffering']: if service_offering in [s['name'], s['id']]: return s['id'] self.fail_json(msg="Service offering '%s' not found" % service_offering) def get_host_id(self): host_name = self.module.params.get('host') if not host_name: return None args = { 'type': 'routing', 'zoneid': self.get_zone(key='id'), } hosts = self.query_api('listHosts', **args) if hosts: for h in hosts['host']: if h['name'] == host_name: return h['id'] self.fail_json(msg="Host '%s' not found" % host_name) def get_template_or_iso(self, key=None): template = self.module.params.get('template') iso = self.module.params.get('iso') if not template and not iso: return None args = { 'account': self.get_account(key='name'), 'domainid': self.get_domain(key='id'), 'projectid': self.get_project(key='id'), 'zoneid': self.get_zone(key='id'), 'isrecursive': True, 'fetch_list': True, } if template: if self.template: return self._get_by_key(key, self.template) rootdisksize = self.module.params.get('root_disk_size') args['templatefilter'] = self.module.params.get('template_filter') args['fetch_list'] = True templates = self.query_api('listTemplates', **args) if templates: for t in templates: if template in [t['displaytext'], t['name'], t['id']]: if rootdisksize and t['size'] > rootdisksize * 1024 ** 3: continue self.template = t return self._get_by_key(key, self.template) if rootdisksize: more_info = " (with size <= %s)" % rootdisksize else: more_info = "" self.module.fail_json(msg="Template '%s' not found%s" % (template, more_info)) elif iso: if self.iso: return self._get_by_key(key, self.iso) args['isofilter'] = self.module.params.get('template_filter') args['fetch_list'] = True isos = self.query_api('listIsos', **args) if isos: for i in isos: if iso in [i['displaytext'], i['name'], i['id']]: self.iso = i return self._get_by_key(key, self.iso) self.module.fail_json(msg="ISO '%s' not found" % iso) def get_instance(self): instance = self.instance if not instance: instance_name = 
self.get_or_fallback('name', 'display_name') args = { 'account': self.get_account(key='name'), 'domainid': self.get_domain(key='id'), 'projectid': self.get_project(key='id'), 'fetch_list': True, } # Do not pass zoneid, as the instance name must be unique across zones. instances = self.query_api('listVirtualMachines', **args) if instances: for v in instances: if instance_name.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]: self.instance = v break return self.instance def _get_instance_user_data(self, instance): # Query the user data if we need to if 'userdata' in instance: return instance['userdata'] user_data = "" if self.get_user_data() is not None and instance.get('id'): res = self.query_api('getVirtualMachineUserData', virtualmachineid=instance['id']) user_data = res['virtualmachineuserdata'].get('userdata', "") return user_data def get_iptonetwork_mappings(self): network_mappings = self.module.params.get('ip_to_networks') if network_mappings is None: return if network_mappings and self.module.params.get('networks'): self.module.fail_json(msg="networks and ip_to_networks are mutually exclusive.") network_names = [n['network'] for n in network_mappings] ids = self.get_network_ids(network_names) res = [] for i, data in enumerate(network_mappings): res.append({'networkid': ids[i], 'ip': data['ip']}) return res def get_ssh_keypair(self, key=None, name=None, fail_on_missing=True): ssh_key_name = name or self.module.params.get('ssh_key') if ssh_key_name is None: return args = { 'domainid': self.get_domain('id'), 'account': self.get_account('name'), 'projectid': self.get_project('id'), 'name': ssh_key_name, } ssh_key_pairs = self.query_api('listSSHKeyPairs', **args) if 'sshkeypair' in ssh_key_pairs: return self._get_by_key(key=key, my_dict=ssh_key_pairs['sshkeypair'][0]) elif fail_on_missing: self.module.fail_json(msg="SSH key not found: %s" % ssh_key_name) def ssh_key_has_changed(self): ssh_key_name = self.module.params.get('ssh_key') if 
        # NOTE(review): chunk begins mid-`if` — the condition below is the body of
        # ssh_key_has_changed(); its `if ` opener sits on the previous chunk line.
        ssh_key_name is None:
            return False

        # Fails if keypair for param is inexistent
        param_ssh_key_fp = self.get_ssh_keypair(key='fingerprint')

        # CloudStack 4.5 does return keypair on instance for a non existent key.
        instance_ssh_key_name = self.instance.get('keypair')
        if instance_ssh_key_name is None:
            return True

        # Get fingerprint for keypair of instance but do not fail if inexistent.
        instance_ssh_key_fp = self.get_ssh_keypair(key='fingerprint', name=instance_ssh_key_name, fail_on_missing=False)
        if not instance_ssh_key_fp:
            return True

        # Compare fingerprints to ensure the keypair changed
        if instance_ssh_key_fp != param_ssh_key_fp:
            return True
        return False

    def security_groups_has_changed(self):
        """
        Return True when the security_groups module param differs from the
        instance's current groups (case-insensitive name comparison), False
        when the param is unset or both sides list the same names.
        """
        security_groups = self.module.params.get('security_groups')
        if security_groups is None:
            return False

        security_groups = [s.lower() for s in security_groups]
        instance_security_groups = self.instance.get('securitygroup') or []

        instance_security_group_names = []
        for instance_security_group in instance_security_groups:
            if instance_security_group['name'].lower() not in security_groups:
                # Instance carries a group the param no longer lists.
                return True
            else:
                instance_security_group_names.append(instance_security_group['name'].lower())

        for security_group in security_groups:
            if security_group not in instance_security_group_names:
                # Param lists a group the instance is not yet in.
                return True
        return False

    def get_network_ids(self, network_names=None):
        """
        Resolve network names (default: the 'networks' module param) to IDs.

        Each requested name is matched against displaytext, name, or id of the
        networks visible in the configured account/domain/project/zone.
        Returns None when nothing was requested; fails the module when the
        zone exposes no networks at all.
        """
        if network_names is None:
            network_names = self.module.params.get('networks')

        if not network_names:
            return None

        args = {
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
            'zoneid': self.get_zone(key='id'),
            'fetch_list': True,
        }
        networks = self.query_api('listNetworks', **args)
        if not networks:
            self.module.fail_json(msg="No networks available")

        network_ids = []
        network_displaytexts = []
        for network_name in network_names:
            for n in networks:
                if network_name in [n['displaytext'], n['name'], n['id']]:
                    network_ids.append(n['id'])
                    network_displaytexts.append(n['name'])
                    break

        # NOTE(review): chunk ends mid-statement — the dangling `if` below
        # continues on the next chunk line (length check of resolved IDs).
        if
len(network_ids) != len(network_names): self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts) return network_ids def present_instance(self, start_vm=True): instance = self.get_instance() if not instance: instance = self.deploy_instance(start_vm=start_vm) else: instance = self.recover_instance(instance=instance) instance = self.update_instance(instance=instance, start_vm=start_vm) # In check mode, we do not necessarily have an instance if instance: instance = self.ensure_tags(resource=instance, resource_type='UserVm') # refresh instance data self.instance = instance return instance def get_user_data(self): user_data = self.module.params.get('user_data') if user_data is not None: user_data = to_text(base64.b64encode(to_bytes(user_data))) return user_data def get_details(self): details = self.module.params.get('details') cpu = self.module.params.get('cpu') cpu_speed = self.module.params.get('cpu_speed') memory = self.module.params.get('memory') if all([cpu, cpu_speed, memory]): details.extends({ 'cpuNumber': cpu, 'cpuSpeed': cpu_speed, 'memory': memory, }) return details def deploy_instance(self, start_vm=True): self.result['changed'] = True networkids = self.get_network_ids() if networkids is not None: networkids = ','.join(networkids) args = {} args['templateid'] = self.get_template_or_iso(key='id') if not args['templateid']: self.module.fail_json(msg="Template or ISO is required.") args['zoneid'] = self.get_zone(key='id') args['serviceofferingid'] = self.get_service_offering_id() args['account'] = self.get_account(key='name') args['domainid'] = self.get_domain(key='id') args['projectid'] = self.get_project(key='id') args['diskofferingid'] = self.get_disk_offering(key='id') args['networkids'] = networkids args['iptonetworklist'] = self.get_iptonetwork_mappings() args['userdata'] = self.get_user_data() args['keyboard'] = self.module.params.get('keyboard') args['ipaddress'] = self.module.params.get('ip_address') 
args['ip6address'] = self.module.params.get('ip6_address') args['name'] = self.module.params.get('name') args['displayname'] = self.get_or_fallback('display_name', 'name') args['group'] = self.module.params.get('group') args['keypair'] = self.get_ssh_keypair(key='name') args['size'] = self.module.params.get('disk_size') args['startvm'] = start_vm args['rootdisksize'] = self.module.params.get('root_disk_size') args['affinitygroupnames'] = self.module.params.get('affinity_groups') args['details'] = self.get_details() args['securitygroupnames'] = self.module.params.get('security_groups') args['hostid'] = self.get_host_id() template_iso = self.get_template_or_iso() if 'hypervisor' not in template_iso: args['hypervisor'] = self.get_hypervisor() instance = None if not self.module.check_mode: instance = self.query_api('deployVirtualMachine', **args) poll_async = self.module.params.get('poll_async') if poll_async: instance = self.poll_job(instance, 'virtualmachine') return instance def update_instance(self, instance, start_vm=True): # Service offering data args_service_offering = { 'id': instance['id'], } if self.module.params.get('service_offering'): args_service_offering['serviceofferingid'] = self.get_service_offering_id() service_offering_changed = self.has_changed(args_service_offering, instance) # Instance data args_instance_update = { 'id': instance['id'], 'userdata': self.get_user_data(), } instance['userdata'] = self._get_instance_user_data(instance) args_instance_update['ostypeid'] = self.get_os_type(key='id') if self.module.params.get('group'): args_instance_update['group'] = self.module.params.get('group') if self.module.params.get('display_name'): args_instance_update['displayname'] = self.module.params.get('display_name') instance_changed = self.has_changed(args_instance_update, instance) ssh_key_changed = self.ssh_key_has_changed() security_groups_changed = self.security_groups_has_changed() # Volume data args_volume_update = {} root_disk_size = 
self.module.params.get('root_disk_size') root_disk_size_changed = False if root_disk_size is not None: res = self.query_api('listVolumes', type='ROOT', virtualmachineid=instance['id']) [volume] = res['volume'] size = volume['size'] >> 30 args_volume_update['id'] = volume['id'] args_volume_update['size'] = root_disk_size shrinkok = self.module.params.get('allow_root_disk_shrink') if shrinkok: args_volume_update['shrinkok'] = shrinkok root_disk_size_changed = root_disk_size != size changed = [ service_offering_changed, instance_changed, security_groups_changed, ssh_key_changed, root_disk_size_changed, ] if any(changed): force = self.module.params.get('force') instance_state = instance['state'].lower() if instance_state == 'stopped' or force: self.result['changed'] = True if not self.module.check_mode: # Ensure VM has stopped instance = self.stop_instance() instance = self.poll_job(instance, 'virtualmachine') self.instance = instance # Change service offering if service_offering_changed: res = self.query_api('changeServiceForVirtualMachine', **args_service_offering) instance = res['virtualmachine'] self.instance = instance # Update VM if instance_changed or security_groups_changed: if security_groups_changed: args_instance_update['securitygroupnames'] = ','.join(self.module.params.get('security_groups')) res = self.query_api('updateVirtualMachine', **args_instance_update) instance = res['virtualmachine'] self.instance = instance # Reset SSH key if ssh_key_changed: # SSH key data args_ssh_key = {} args_ssh_key['id'] = instance['id'] args_ssh_key['projectid'] = self.get_project(key='id') args_ssh_key['keypair'] = self.module.params.get('ssh_key') instance = self.query_api('resetSSHKeyForVirtualMachine', **args_ssh_key) instance = self.poll_job(instance, 'virtualmachine') self.instance = instance # Root disk size if root_disk_size_changed: async_result = self.query_api('resizeVolume', **args_volume_update) self.poll_job(async_result, 'volume') # Start VM again if it was 
running before if instance_state == 'running' and start_vm: instance = self.start_instance() else: self.module.warn("Changes won't be applied to running instances. " + "Use force=true to allow the instance %s to be stopped/started." % instance['name']) # migrate to other host host_changed = all([ instance['state'].lower() == 'running', self.module.params.get('host'), self.module.params.get('host') != instance.get('hostname') ]) if host_changed: self.result['changed'] = True args_host = { 'virtualmachineid': instance['id'], 'hostid': self.get_host_id(), } if not self.module.check_mode: res = self.query_api('migrateVirtualMachine', **args_host) instance = self.poll_job(res, 'virtualmachine') return instance def recover_instance(self, instance): if instance['state'].lower() in ['destroying', 'destroyed']: self.result['changed'] = True if not self.module.check_mode: res = self.query_api('recoverVirtualMachine', id=instance['id']) instance = res['virtualmachine'] return instance def absent_instance(self): instance = self.get_instance() if instance: if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']: self.result['changed'] = True if not self.module.check_mode: res = self.query_api('destroyVirtualMachine', id=instance['id']) poll_async = self.module.params.get('poll_async') if poll_async: instance = self.poll_job(res, 'virtualmachine') return instance def expunge_instance(self): instance = self.get_instance() if instance: res = {} if instance['state'].lower() in ['destroying', 'destroyed']: self.result['changed'] = True if not self.module.check_mode: res = self.query_api('destroyVirtualMachine', id=instance['id'], expunge=True) elif instance['state'].lower() not in ['expunging']: self.result['changed'] = True if not self.module.check_mode: res = self.query_api('destroyVirtualMachine', id=instance['id'], expunge=True) poll_async = self.module.params.get('poll_async') if poll_async: res = self.poll_job(res, 'virtualmachine') return instance def 
stop_instance(self): instance = self.get_instance() # in check mode instance may not be instanciated if instance: if instance['state'].lower() in ['stopping', 'stopped']: return instance if instance['state'].lower() in ['starting', 'running']: self.result['changed'] = True if not self.module.check_mode: instance = self.query_api('stopVirtualMachine', id=instance['id']) poll_async = self.module.params.get('poll_async') if poll_async: instance = self.poll_job(instance, 'virtualmachine') return instance def start_instance(self): instance = self.get_instance() # in check mode instance may not be instanciated if instance: if instance['state'].lower() in ['starting', 'running']: return instance if instance['state'].lower() in ['stopped', 'stopping']: self.result['changed'] = True if not self.module.check_mode: args = { 'id': instance['id'], 'hostid': self.get_host_id(), } instance = self.query_api('startVirtualMachine', **args) poll_async = self.module.params.get('poll_async') if poll_async: instance = self.poll_job(instance, 'virtualmachine') return instance def restart_instance(self): instance = self.get_instance() # in check mode instance may not be instanciated if instance: if instance['state'].lower() in ['running', 'starting']: self.result['changed'] = True if not self.module.check_mode: instance = self.query_api('rebootVirtualMachine', id=instance['id']) poll_async = self.module.params.get('poll_async') if poll_async: instance = self.poll_job(instance, 'virtualmachine') elif instance['state'].lower() in ['stopping', 'stopped']: instance = self.start_instance() return instance def restore_instance(self): instance = self.get_instance() self.result['changed'] = True # in check mode instance may not be instanciated if instance: args = {} args['templateid'] = self.get_template_or_iso(key='id') args['virtualmachineid'] = instance['id'] res = self.query_api('restoreVirtualMachine', **args) poll_async = self.module.params.get('poll_async') if poll_async: instance = 
self.poll_job(res, 'virtualmachine') return instance def get_result(self, instance): super(AnsibleCloudStackInstance, self).get_result(instance) if instance: self.result['user_data'] = self._get_instance_user_data(instance) if 'securitygroup' in instance: security_groups = [] for securitygroup in instance['securitygroup']: security_groups.append(securitygroup['name']) self.result['security_groups'] = security_groups if 'affinitygroup' in instance: affinity_groups = [] for affinitygroup in instance['affinitygroup']: affinity_groups.append(affinitygroup['name']) self.result['affinity_groups'] = affinity_groups if 'nic' in instance: for nic in instance['nic']: if nic['isdefault']: if 'ipaddress' in nic: self.result['default_ip'] = nic['ipaddress'] if 'ip6address' in nic: self.result['default_ip6'] = nic['ip6address'] return self.result def main(): argument_spec = cs_argument_spec() argument_spec.update(dict( name=dict(), display_name=dict(), group=dict(), state=dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'restored', 'absent', 'destroyed', 'expunged'], default='present'), service_offering=dict(), cpu=dict(type='int'), cpu_speed=dict(type='int'), memory=dict(type='int'), template=dict(), iso=dict(), template_filter=dict( default="executable", aliases=['iso_filter'], choices=['all', 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community'] ), networks=dict(type='list', aliases=['network']), ip_to_networks=dict(type='list', aliases=['ip_to_network']), ip_address=dict(), ip6_address=dict(), disk_offering=dict(), disk_size=dict(type='int'), root_disk_size=dict(type='int'), keyboard=dict(type='str', choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us']), hypervisor=dict(choices=CS_HYPERVISORS), host=dict(), security_groups=dict(type='list', aliases=['security_group']), affinity_groups=dict(type='list', aliases=['affinity_group']), domain=dict(), account=dict(), 
project=dict(), user_data=dict(), zone=dict(), ssh_key=dict(), force=dict(type='bool', default=False), tags=dict(type='list', aliases=['tag']), details=dict(type='dict'), poll_async=dict(type='bool', default=True), allow_root_disk_shrink=dict(type='bool', default=False), )) required_together = cs_required_together() required_together.extend([ ['cpu', 'cpu_speed', 'memory'], ]) module = AnsibleModule( argument_spec=argument_spec, required_together=required_together, required_one_of=( ['display_name', 'name'], ), mutually_exclusive=( ['template', 'iso'], ), supports_check_mode=True ) acs_instance = AnsibleCloudStackInstance(module) state = module.params.get('state') if state in ['absent', 'destroyed']: instance = acs_instance.absent_instance() elif state in ['expunged']: instance = acs_instance.expunge_instance() elif state in ['restored']: acs_instance.present_instance() instance = acs_instance.restore_instance() elif state in ['present', 'deployed']: instance = acs_instance.present_instance() elif state in ['stopped']: acs_instance.present_instance(start_vm=False) instance = acs_instance.stop_instance() elif state in ['started']: acs_instance.present_instance() instance = acs_instance.start_instance() elif state in ['restarted']: acs_instance.present_instance() instance = acs_instance.restart_instance() if instance and 'state' in instance and instance['state'].lower() == 'error': module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name')) result = acs_instance.get_result(instance) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
shiora/The-Perfect-Pokemon-Team-Balancer
libs/env/Lib/stat.py
319
1842
"""Constants/functions for interpreting results of os.stat() and os.lstat(). Suggested usage: from stat import * """ # Indices for stat struct members in the tuple returned by os.stat() ST_MODE = 0 ST_INO = 1 ST_DEV = 2 ST_NLINK = 3 ST_UID = 4 ST_GID = 5 ST_SIZE = 6 ST_ATIME = 7 ST_MTIME = 8 ST_CTIME = 9 # Extract bits from the mode def S_IMODE(mode): return mode & 07777 def S_IFMT(mode): return mode & 0170000 # Constants used as S_IFMT() for various file types # (not all are implemented on all systems) S_IFDIR = 0040000 S_IFCHR = 0020000 S_IFBLK = 0060000 S_IFREG = 0100000 S_IFIFO = 0010000 S_IFLNK = 0120000 S_IFSOCK = 0140000 # Functions to test for each file type def S_ISDIR(mode): return S_IFMT(mode) == S_IFDIR def S_ISCHR(mode): return S_IFMT(mode) == S_IFCHR def S_ISBLK(mode): return S_IFMT(mode) == S_IFBLK def S_ISREG(mode): return S_IFMT(mode) == S_IFREG def S_ISFIFO(mode): return S_IFMT(mode) == S_IFIFO def S_ISLNK(mode): return S_IFMT(mode) == S_IFLNK def S_ISSOCK(mode): return S_IFMT(mode) == S_IFSOCK # Names for permission bits S_ISUID = 04000 S_ISGID = 02000 S_ENFMT = S_ISGID S_ISVTX = 01000 S_IREAD = 00400 S_IWRITE = 00200 S_IEXEC = 00100 S_IRWXU = 00700 S_IRUSR = 00400 S_IWUSR = 00200 S_IXUSR = 00100 S_IRWXG = 00070 S_IRGRP = 00040 S_IWGRP = 00020 S_IXGRP = 00010 S_IRWXO = 00007 S_IROTH = 00004 S_IWOTH = 00002 S_IXOTH = 00001 # Names for file flags UF_NODUMP = 0x00000001 UF_IMMUTABLE = 0x00000002 UF_APPEND = 0x00000004 UF_OPAQUE = 0x00000008 UF_NOUNLINK = 0x00000010 UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed UF_HIDDEN = 0x00008000 # OS X: file should not be displayed SF_ARCHIVED = 0x00010000 SF_IMMUTABLE = 0x00020000 SF_APPEND = 0x00040000 SF_NOUNLINK = 0x00100000 SF_SNAPSHOT = 0x00200000
gpl-2.0
appsembler/awstrial
awstrial/__init__.py
5
1077
# AWSTrial, A mechanism and service for offering a cloud image trial # # Copyright (C) 2010 Scott Moser <smoser@ubuntu.com> # Copyright (C) 2010 Dave Walker (Daviey) <DaveWalker@ubuntu.com> # Copyright (C) 2010 Michael Hall <mhall119@gmail.com> # Copyright (C) 2010 Dustin Kirkland <kirkland@ubuntu.com> # Copyright (C) 2010 Andreas Hasenack <andreas@canonical.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>.
agpl-3.0
mvaled/sentry
src/sentry/south_migrations/0421_auto__del_field_identityprovider_organization_id__del_unique_identityp.py
1
106910
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): # Flag to indicate if this migration is too risky # to run online and needs to be coordinated for offline is_dangerous = False def forwards(self, orm): # Removing unique constraint on 'IdentityProvider', fields ['type', # 'organization_id', 'external_id'] db.delete_unique(u'sentry_identityprovider', ['type', 'organization_id', 'external_id']) # Deleting field 'IdentityProvider.organization_id' db.delete_column(u'sentry_identityprovider', 'organization_id') def backwards(self, orm): # Adding field 'IdentityProvider.organization_id' db.add_column(u'sentry_identityprovider', 'organization_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')( default=0, null=True), keep_default=False) # Adding unique constraint on 'IdentityProvider', fields ['type', # 'organization_id', 'external_id'] db.create_unique(u'sentry_identityprovider', ['type', 'organization_id', 'external_id']) models = { 'sentry.activity': { 'Meta': {'object_name': 'Activity'}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}) }, 'sentry.apiapplication': { 'Meta': 
{'object_name': 'ApiApplication'}, 'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'client_id': ('django.db.models.fields.CharField', [], {'default': "'acb48fa233a74d9cad39997aa92c37975644a40d984546edb1ea5227a0339a57'", 'unique': 'True', 'max_length': '64'}), 'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'4454aeb355684e61b5e6256867f1d89a3b795fe5ab3942ab9509467363afca85'"}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'default': "'Above Kid'", 'max_length': '64', 'blank': 'True'}), 'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}), 'redirect_uris': ('django.db.models.fields.TextField', [], {}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}) }, 'sentry.apiauthorization': { 'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'}, 'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}), 'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}), 
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.apigrant': { 'Meta': {'object_name': 'ApiGrant'}, 'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}), 'code': ('django.db.models.fields.CharField', [], {'default': "'48915bb078f549e4b93cfdafd7cea70e'", 'max_length': '64', 'db_index': 'True'}), 'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 6, 1, 0, 0)', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}), 'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.apikey': { 'Meta': {'object_name': 'ApiKey'}, 'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}), 'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}), 'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}), 'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], 
{'default': '0', 'db_index': 'True'}) }, 'sentry.apitoken': { 'Meta': {'object_name': 'ApiToken'}, 'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 7, 1, 0, 0)', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'00136bcc23a9433a9381a7f88de70aa0a3dbb5670ced4455bd849bdbd1467f3a'", 'max_length': '64', 'unique': 'True', 'null': 'True'}), 'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}), 'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}), 'token': ('django.db.models.fields.CharField', [], {'default': "'f4ea5c4c94ab487ca4a8da2926835130a120ac6765764fcc99b4222a9117d31a'", 'unique': 'True', 'max_length': '64'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.assistantactivity': { 'Meta': {'unique_together': "(('user', 'guide_id'),)", 'object_name': 'AssistantActivity', 'db_table': "'sentry_assistant_activity'"}, 'dismissed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'guide_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'useful': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'viewed_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}) }, 'sentry.auditlogentry': { 'Meta': {'object_name': 'AuditLogEntry'}, 
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}), 'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}), 'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.authenticator': { 'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"}, 'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}), 'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.authidentity': { 'Meta': 
{'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'}, 'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}), 'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.authprovider': { 'Meta': {'object_name': 'AuthProvider'}, 'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}), 'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'sync_time': 
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}) }, 'sentry.broadcast': { 'Meta': {'object_name': 'Broadcast'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 6, 8, 0, 0)', 'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}), 'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}) }, 'sentry.broadcastseen': { 'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'}, 'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}), 'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.commit': { 'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"}, 'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': 
('django.db.models.fields.CharField', [], {'max_length': '64'}), 'message': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.commitauthor': { 'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'}, 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}) }, 'sentry.commitfilechange': { 'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'}, 'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}), 'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '1'}) }, 'sentry.counter': { 'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}), 'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.deletedorganization': { 'Meta': {'object_name': 
'DeletedOrganization'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}) }, 'sentry.deletedproject': { 'Meta': {'object_name': 'DeletedProject'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 
'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}) }, 'sentry.deletedteam': { 'Meta': {'object_name': 'DeletedTeam'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'actor_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'organization_slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}), 'reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}) }, 'sentry.deploy': { 'Meta': {'object_name': 'Deploy'}, 'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'environment_id': 
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, 'sentry.distribution': { 'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.dsymapp': { 'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'}, 'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': 
'0'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}) }, 'sentry.email': { 'Meta': {'object_name': 'Email'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('sentry.db.models.fields.citext.CIEmailField', [], {'unique': 'True', 'max_length': '75'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}) }, 'sentry.environment': { 'Meta': {'unique_together': "(('organization_id', 'name'),)", 'object_name': 'Environment'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'}) }, 'sentry.environmentproject': { 'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'}, 'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_hidden': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.event': { 'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 
'index_together': "(('group_id', 'datetime'),)"}, 'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}), 'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'}) }, 'sentry.eventmapping': { 'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.eventprocessingissue': { 'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}), 'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"}) }, 'sentry.eventtag': { 'Meta': 
{'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('group_id', 'key_id', 'value_id'),)"}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.eventuser': { 'Meta': {'unique_together': "(('project_id', 'ident'), ('project_id', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project_id', 'email'), ('project_id', 'username'), ('project_id', 'ip_address'))"}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}) }, 'sentry.externalissue': { 'Meta': {'unique_together': "(('organization_id', 'integration_id', 'key'),)", 'object_name': 
'ExternalIssue'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'title': ('django.db.models.fields.TextField', [], {'null': 'True'}) }, 'sentry.featureadoption': { 'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'}, 'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}) }, 'sentry.file': { 'Meta': {'object_name': 'File'}, 'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}), 'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}), 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'db_index': 'True'}), 'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], 
{'primary_key': 'True'}), 'name': ('django.db.models.fields.TextField', [], {}), 'path': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.fileblob': { 'Meta': {'object_name': 'FileBlob'}, 'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'path': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}) }, 'sentry.fileblobindex': { 'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'}, 'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.fileblobowner': { 'Meta': {'unique_together': "(('blob', 'organization'),)", 'object_name': 'FileBlobOwner'}, 'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}) }, 'sentry.group': { 'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 
'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"}, 'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}), 'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}), 'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'status': 
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'}) }, 'sentry.groupassignee': { 'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.Team']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.groupbookmark': { 'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}), 'user': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"}) }, 'sentry.groupcommitresolution': { 'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'}, 'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}) }, 'sentry.groupemailthread': { 'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'}, 'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"}) }, 'sentry.groupenvironment': { 'Meta': {'unique_together': "[('group_id', 'environment_id')]", 'object_name': 'GroupEnvironment', 'index_together': "[('environment_id', 'first_release_id')]"}, 'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'first_release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], 
{'primary_key': 'True'}) }, 'sentry.grouphash': { 'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}) }, 'sentry.grouplink': { 'Meta': {'unique_together': "(('group_id', 'linked_type', 'linked_id'),)", 'object_name': 'GroupLink'}, 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'linked_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'linked_type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}), 'relationship': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '2'}) }, 'sentry.groupmeta': { 'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], 
{'max_length': '64'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.groupredirect': { 'Meta': {'object_name': 'GroupRedirect'}, 'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'}) }, 'sentry.grouprelease': { 'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'}, 'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}) }, 'sentry.groupresolution': { 'Meta': {'object_name': 'GroupResolution'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'type': 
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}) }, 'sentry.grouprulestatus': { 'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}), 'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}) }, 'sentry.groupseen': { 'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'}, 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'}) }, 'sentry.groupshare': { 'Meta': {'object_name': 'GroupShare'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'user': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'9d08ed5b01874277a51bbfac9ba774df'", 'unique': 'True', 'max_length': '32'}) }, 'sentry.groupsnooze': { 'Meta': {'object_name': 'GroupSnooze'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'state': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}), 'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}) }, 'sentry.groupsubscription': { 'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}), 'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'user': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.grouptagkey': { 'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'}, 'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.grouptagvalue': { 'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.grouptombstone': { 'Meta': {'object_name': 'GroupTombstone'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'culprit': ('django.db.models.fields.CharField', [], 
{'max_length': '200', 'null': 'True', 'blank': 'True'}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}), 'message': ('django.db.models.fields.TextField', [], {}), 'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.identity': { 'Meta': {'unique_together': "(('idp', 'external_id'), ('idp', 'user'))", 'object_name': 'Identity'}, 'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'idp': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.IdentityProvider']"}), 'scopes': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.identityprovider': { 'Meta': {'unique_together': "(('type', 'external_id'),)", 'object_name': 'IdentityProvider'}, 'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'external_id': 
('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.integration': { 'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'metadata': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'sentry.latestrelease': { 'Meta': {'unique_together': "(('repository_id', 'environment_id'),)", 'object_name': 'LatestRelease'}, 'commit_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'deploy_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'environment_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'release_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}), 'repository_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 
'sentry.lostpasswordhash': { 'Meta': {'object_name': 'LostPasswordHash'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'}) }, 'sentry.option': { 'Meta': {'object_name': 'Option'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}) }, 'sentry.organization': { 'Meta': {'object_name': 'Organization'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.organizationaccessrequest': { 'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], 
{'primary_key': 'True'}), 'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.organizationavatar': { 'Meta': {'object_name': 'OrganizationAvatar'}, 'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"}) }, 'sentry.organizationintegration': { 'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'}, 'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}) }, 'sentry.organizationmember': { 'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 
'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}), 'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}), 'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}), 'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}), 'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.organizationmemberteam': { 'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"}, 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.organizationonboardingtask': { 'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'}, 'data': 
('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}) }, 'sentry.organizationoption': { 'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}) }, 'sentry.processingissue': { 'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'}, 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}), 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '30'}) }, 
'sentry.project': { 'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Project'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}), 'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectTeam']", 'to': "orm['sentry.Team']"}) }, 'sentry.projectavatar': { 'Meta': {'object_name': 'ProjectAvatar'}, 'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': 
"orm['sentry.Project']"}) }, 'sentry.projectbookmark': { 'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.projectdsymfile': { 'Meta': {'unique_together': "(('project', 'debug_id'),)", 'object_name': 'ProjectDSymFile'}, 'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'debug_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_column': "'uuid'"}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'object_name': ('django.db.models.fields.TextField', [], {}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}) }, 'sentry.projectintegration': { 'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'}, 'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.projectkey': { 'Meta': {'object_name': 'ProjectKey'}, 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], 
{'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}), 'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}), 'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'sentry.projectoption': { 'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}) }, 'sentry.projectownership': { 'Meta': {'object_name': 'ProjectOwnership'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'fallthrough': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'last_updated': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}), 'raw': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'schema': ('sentry.db.models.fields.jsonfield.JSONField', [], {'null': 'True'}) }, 'sentry.projectplatform': { 'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.projectredirect': { 'Meta': {'unique_together': "(('organization', 'redirect_slug'),)", 'object_name': 'ProjectRedirect'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'redirect_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}) }, 'sentry.projectsymcachefile': { 'Meta': {'unique_together': "(('project', 'dsym_file'),)", 'object_name': 'ProjectSymCacheFile'}, 'cache_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']"}), 
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.projectteam': { 'Meta': {'unique_together': "(('project', 'team'),)", 'object_name': 'ProjectTeam'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}) }, 'sentry.pullrequest': { 'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'PullRequest', 'db_table': "'sentry_pull_request'", 'index_together': "(('repository_id', 'date_added'),)"}, 'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'merge_commit_sha': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'message': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'title': ('django.db.models.fields.TextField', [], {'null': 'True'}) }, 'sentry.pullrequestcommit': { 'Meta': {'unique_together': "(('pull_request', 'commit'),)", 'object_name': 'PullRequestCommit', 'db_table': "'sentry_pullrequest_commit'"}, 'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], 
{'to': "orm['sentry.Commit']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'pull_request': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.PullRequest']"}) }, 'sentry.rawevent': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'}, 'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.relay': { 'Meta': {'object_name': 'Relay'}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'public_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'relay_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}) }, 'sentry.release': { 'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'}, 'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}), 'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'date_started': 
('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}), 'ref': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, 'sentry.releasecommit': { 'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'}, 'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'project_id': 
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.releaseenvironment': { 'Meta': {'unique_together': "(('organization_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"}, 'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}) }, 'sentry.releasefile': { 'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'}, 'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'name': ('django.db.models.fields.TextField', [], {}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': 
"orm['sentry.Release']"}) }, 'sentry.releaseheadcommit': { 'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'}, 'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}), 'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}) }, 'sentry.releaseproject': { 'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.releaseprojectenvironment': { 'Meta': {'unique_together': "(('project', 'release', 'environment'),)", 'object_name': 'ReleaseProjectEnvironment'}, 'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}), 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}), 'new_issues_count': 
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}) }, 'sentry.repository': { 'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'}, 'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}) }, 'sentry.reprocessingreport': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'}, 'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.rule': { 'Meta': 
{'object_name': 'Rule'}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'sentry.savedsearch': { 'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'query': ('django.db.models.fields.TextField', [], {}) }, 'sentry.savedsearchuserdefault': { 'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}), 'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], 
{'to': "orm['sentry.User']"}) }, 'sentry.scheduleddeletion': { 'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'}, 'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2018, 7, 1, 0, 0)'}), 'guid': ('django.db.models.fields.CharField', [], {'default': "'89620ec318d2433fbaef4af319d04ebb'", 'unique': 'True', 'max_length': '32'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}) }, 'sentry.scheduledjob': { 'Meta': {'object_name': 'ScheduledJob'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'payload': ('sentry.db.models.fields.jsonfield.JSONField', [], {'default': '{}'}) }, 'sentry.servicehook': { 'Meta': {'object_name': 'ServiceHook'}, 'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}), 'date_added': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'events': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}), 'guid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'738a01cb44b54bf1be98ff56debdde7780f53c30b1274e1dbf6ff1b608c1696c'"}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '512'}), 'version': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.tagkey': { 'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.tagvalue': { 'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'", 'index_together': "(('project_id', 'key', 'last_seen'),)"}, 'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}), 'first_seen': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}), 'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}), 'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'sentry.team': { 'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}), 'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}) }, 'sentry.teamavatar': { 'Meta': {'object_name': 'TeamAvatar'}, 'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': 
"'avatar'", 'unique': 'True', 'to': "orm['sentry.Team']"}) }, 'sentry.user': { 'Meta': {'object_name': 'User', 'db_table': "'auth_user'"}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}), 'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}) }, 'sentry.useravatar': { 'Meta': {'object_name': 'UserAvatar'}, 'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}), 'id': 
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"}) }, 'sentry.useremail': { 'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'}, 'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}), 'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'g79L9PRHKGKG4ZcVdOA4CGDf2FFYQ2Zv'", 'max_length': '32'}) }, 'sentry.userip': { 'Meta': {'unique_together': "(('user', 'ip_address'),)", 'object_name': 'UserIP'}, 'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}), 'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.useroption': { 'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'organization': 
('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}), 'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}) }, 'sentry.userpermission': { 'Meta': {'unique_together': "(('user', 'permission'),)", 'object_name': 'UserPermission'}, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'permission': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}) }, 'sentry.userreport': { 'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"}, 'comments': ('django.db.models.fields.TextField', [], {}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']", 'null': 'True'}), 'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}), 'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}) }, 'sentry.versiondsymfile': { 'Meta': {'unique_together': 
"(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'}, 'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}), 'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '32'}) } } complete_apps = ['sentry']
bsd-3-clause
almeidapaulopt/erpnext
erpnext/hub_node/doctype/hub_settings/hub_settings.py
1
3067
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe, requests, json from frappe.model.document import Document from frappe.utils import add_years, now, get_datetime, get_datetime_str from frappe import _ from erpnext.utilities.product import get_price, get_qty_in_stock from six import string_types hub_url = "https://hubmarket.org" class HubSetupError(frappe.ValidationError): pass class HubSettings(Document): def validate(self): if self.publish_pricing and not self.selling_price_list: frappe.throw(_("Please select a Price List to publish pricing")) def get_hub_url(self): return hub_url def sync(self): """Create and execute Data Migration Run for Hub Sync plan""" frappe.has_permission('Hub Settings', throw=True) doc = frappe.get_doc({ 'doctype': 'Data Migration Run', 'data_migration_plan': 'Hub Sync', 'data_migration_connector': 'Hub Connector' }).insert() doc.run() def register(self): """ Create a User on hub.erpnext.org and return username/password """ data = { 'email': frappe.session.user } post_url = hub_url + '/api/method/hub.hub.api.register' response = requests.post(post_url, data=data) response.raise_for_status() message = response.json().get('message') if message and message.get('password'): self.user = frappe.session.user self.create_hub_connector(message) self.company = frappe.defaults.get_user_default('company') self.enabled = 1 self.save() def unregister(self): """ Disable the User on hub.erpnext.org""" hub_connector = frappe.get_doc( 'Data Migration Connector', 'Hub Connector') connection = hub_connector.get_connection() response_doc = connection.update('User', frappe._dict({'enabled': 0}), hub_connector.username) if response_doc['enabled'] == 0: self.enabled = 0 self.save() def create_hub_connector(self, message): if frappe.db.exists('Data Migration Connector', 'Hub Connector'): hub_connector = frappe.get_doc('Data 
Migration Connector', 'Hub Connector') hub_connector.username = message['email'] hub_connector.password = message['password'] hub_connector.save() return frappe.get_doc({ 'doctype': 'Data Migration Connector', 'connector_type': 'Frappe', 'connector_name': 'Hub Connector', 'hostname': hub_url, 'username': message['email'], 'password': message['password'] }).insert() def reset_hub_publishing_settings(last_sync_datetime = ""): doc = frappe.get_doc("Hub Settings", "Hub Settings") doc.reset_publishing_settings(last_sync_datetime) doc.in_callback = 1 doc.save() def reset_hub_settings(last_sync_datetime = ""): doc = frappe.get_doc("Hub Settings", "Hub Settings") doc.reset_publishing_settings(last_sync_datetime) doc.reset_enable() doc.in_callback = 1 doc.save() frappe.msgprint(_("Successfully unregistered.")) @frappe.whitelist() def sync(): hub_settings = frappe.get_doc('Hub Settings') hub_settings.sync()
gpl-3.0
arielrossanigo/fades
tests/test_multiplatform.py
1
2882
# Copyright 2016 Facundo Batista, Nicolás Demarchi # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General # Public License version 3, as published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranties of # MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. # If not, see <http://www.gnu.org/licenses/>. # # For further info, check https://github.com/PyAr/fades """Tests for the helpers in multiplatform.""" import os import threading import time import unittest from fades.multiplatform import filelock class LockChecker(threading.Thread): """Helper to check the lock in other thread.""" def __init__(self, filepath): self.filepath = filepath self.pre_lock = self.in_lock = self.post_work = None self.middle_work = threading.Event() super().__init__() def run(self): self.pre_lock = time.time() with filelock(self.filepath): self.in_lock = time.time() self.middle_work.wait() self.post_work = time.time() class LockCacheTestCase(unittest.TestCase): """Tests for the locking utility.""" def setUp(self): self.test_path = "test_filelock" def tearDown(self): if os.path.exists(self.test_path): os.remove(self.test_path) def wait(self, lock_checker, attr_name): """Wait at most a second for the LockChecker to end.""" for i in range(10): attr = getattr(lock_checker, attr_name) if attr is not None: # ended! 
return time.sleep(.3) self.fail("LC didnt end: %s" % (lock_checker,)) def test_lock_alone(self): lc = LockChecker(self.test_path) lc.start() lc.middle_work.set() self.wait(lc, 'post_work') def test_lock_intermixed(self): lc1 = LockChecker(self.test_path) lc1.start() self.wait(lc1, 'in_lock') lc2 = LockChecker(self.test_path) lc2.start() lc1.middle_work.set() self.wait(lc1, 'post_work') lc2.middle_work.set() self.wait(lc2, 'post_work') # check LC 2 waited to enter self.assertGreater(lc2.in_lock, lc1.post_work) def test_lock_exploding(self): # get the lock and explode in the middle (then ignore the blast) try: with filelock(self.test_path): raise ValueError("pumba") except ValueError: pass # get the lock again with filelock(self.test_path): pass
gpl-3.0
jjenki11/blaze-chem-rendering
build_utils/scons-2.3.0/engine/SCons/Tool/sunf95.py
11
2182
"""SCons.Tool.sunf95 Tool-specific initialization for sunf95, the Sun Studio F95 compiler. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# __revision__ = "src/engine/SCons/Tool/sunf95.py 2013/03/03 09:48:35 garyo" import SCons.Util from FortranCommon import add_all_to_env compilers = ['sunf95', 'f95'] def generate(env): """Add Builders and construction variables for sunf95 to an Environment.""" add_all_to_env(env) fcomp = env.Detect(compilers) or 'f95' env['FORTRAN'] = fcomp env['F95'] = fcomp env['SHFORTRAN'] = '$FORTRAN' env['SHF95'] = '$F95' env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC') env['SHF95FLAGS'] = SCons.Util.CLVar('$F95FLAGS -KPIC') def exists(env): return env.Detect(compilers) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
mit
heke123/chromium-crosswalk
build/android/pylib/perf/test_runner.py
11
15251
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Runs perf tests. Our buildbot infrastructure requires each slave to run steps serially. This is sub-optimal for android, where these steps can run independently on multiple connected devices. The buildbots will run this script multiple times per cycle: - First: all steps listed in --steps in will be executed in parallel using all connected devices. Step results will be pickled to disk. Each step has a unique name. The result code will be ignored if the step name is listed in --flaky-steps. The buildbot will treat this step as a regular step, and will not process any graph data. - Then, with -print-step STEP_NAME: at this stage, we'll simply print the file with the step results previously saved. The buildbot will then process the graph data accordingly. The JSON steps file contains a dictionary in the format: { "version": int, "steps": { "foo": { "device_affinity": int, "cmd": "script_to_execute foo" }, "bar": { "device_affinity": int, "cmd": "script_to_execute bar" } } } The JSON flaky steps file contains a list with step names which results should be ignored: [ "step_name_foo", "step_name_bar" ] Note that script_to_execute necessarily have to take at least the following option: --device: the serial number to be passed to all adb commands. """ import collections import io import json import logging import os import pickle import re import shutil import sys import tempfile import threading import time import zipfile from devil.android import battery_utils from devil.android import device_errors from devil.android import forwarder from devil.constants import exit_codes from devil.utils import cmd_helper from pylib import constants from pylib.base import base_test_result from pylib.base import base_test_runner from pylib.constants import host_paths # Regex for the master branch commit position. 
_GIT_CR_POS_RE = re.compile(r'^Cr-Commit-Position: refs/heads/master@{#(\d+)}$') def _GetChromiumRevision(): # pylint: disable=line-too-long """Get the git hash and commit position of the chromium master branch. See: https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/slave/runtest.py#212 Returns: A dictionary with 'revision' and 'commit_pos' keys. """ # pylint: enable=line-too-long status, output = cmd_helper.GetCmdStatusAndOutput( ['git', 'log', '-n', '1', '--pretty=format:%H%n%B', 'HEAD'], host_paths.DIR_SOURCE_ROOT) revision = None commit_pos = None if not status: lines = output.splitlines() revision = lines[0] for line in reversed(lines): m = _GIT_CR_POS_RE.match(line.strip()) if m: commit_pos = int(m.group(1)) break return {'revision': revision, 'commit_pos': commit_pos} def GetPersistedResult(test_name): file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name) if not os.path.exists(file_name): logging.error('File not found %s', file_name) return None with file(file_name, 'r') as f: return pickle.loads(f.read()) def OutputJsonList(json_input, json_output): with file(json_input, 'r') as i: all_steps = json.load(i) step_values = [] for k, v in all_steps['steps'].iteritems(): data = {'test': k, 'device_affinity': v['device_affinity']} persisted_result = GetPersistedResult(k) if persisted_result: data['start_time'] = persisted_result['start_time'] data['end_time'] = persisted_result['end_time'] data['total_time'] = persisted_result['total_time'] data['has_archive'] = persisted_result['archive_bytes'] is not None step_values.append(data) with file(json_output, 'w') as o: o.write(json.dumps(step_values)) return 0 def PrintTestOutput(test_name, json_file_name=None, archive_file_name=None): """Helper method to print the output of previously executed test_name. Args: test_name: name of the test that has been previously executed. json_file_name: name of the file to output chartjson data to. 
archive_file_name: name of the file to write the compressed ZIP archive. Returns: exit code generated by the test step. """ persisted_result = GetPersistedResult(test_name) if not persisted_result: return exit_codes.INFRA logging.info('*' * 80) logging.info('Output from:') logging.info(persisted_result['cmd']) logging.info('*' * 80) output_formatted = '' persisted_outputs = persisted_result['output'] for i in xrange(len(persisted_outputs)): output_formatted += '\n\nOutput from run #%d:\n\n%s' % ( i, persisted_outputs[i]) print output_formatted if json_file_name: with file(json_file_name, 'w') as f: f.write(persisted_result['chartjson']) if archive_file_name: if persisted_result['archive_bytes'] is not None: with file(archive_file_name, 'wb') as f: f.write(persisted_result['archive_bytes']) else: logging.error('The output dir was not archived.') return persisted_result['exit_code'] def PrintSummary(test_names): logging.info('*' * 80) logging.info('Sharding summary') device_total_time = collections.defaultdict(int) for test_name in test_names: file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name) if not os.path.exists(file_name): logging.info('%s : No status file found', test_name) continue with file(file_name, 'r') as f: result = pickle.loads(f.read()) logging.info('%s : exit_code=%d in %d secs at %s', result['name'], result['exit_code'], result['total_time'], result['device']) device_total_time[result['device']] += result['total_time'] for device, device_time in device_total_time.iteritems(): logging.info('Total for device %s : %d secs', device, device_time) logging.info('Total steps time: %d secs', sum(device_total_time.values())) class _HeartBeatLogger(object): # How often to print the heartbeat on flush(). 
_PRINT_INTERVAL = 30.0 def __init__(self): """A file-like class for keeping the buildbot alive.""" self._len = 0 self._tick = time.time() self._stopped = threading.Event() self._timer = threading.Thread(target=self._runner) self._timer.start() def _runner(self): while not self._stopped.is_set(): self.flush() self._stopped.wait(_HeartBeatLogger._PRINT_INTERVAL) def write(self, data): self._len += len(data) def flush(self): now = time.time() if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL: self._tick = now print '--single-step output length %d' % self._len sys.stdout.flush() def stop(self): self._stopped.set() class TestRunner(base_test_runner.BaseTestRunner): def __init__(self, test_options, device, shard_index, max_shard, tests, flaky_tests): """A TestRunner instance runs a perf test on a single device. Args: test_options: A PerfOptions object. device: Device to run the tests. shard_index: the index of this device. max_shards: the maximum shard index. tests: a dict mapping test_name to command. flaky_tests: a list of flaky test_name. 
""" super(TestRunner, self).__init__(device, None) self._options = test_options self._shard_index = shard_index self._max_shard = max_shard self._tests = tests self._flaky_tests = flaky_tests self._output_dir = None self._device_battery = battery_utils.BatteryUtils(self.device) @staticmethod def _SaveResult(result): pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name']) if os.path.exists(pickled): with file(pickled, 'r') as f: previous = pickle.loads(f.read()) result['output'] = previous['output'] + result['output'] with file(pickled, 'w') as f: f.write(pickle.dumps(result)) def _CheckDeviceAffinity(self, test_name): """Returns True if test_name has affinity for this shard.""" affinity = (self._tests['steps'][test_name]['device_affinity'] % self._max_shard) if self._shard_index == affinity: return True logging.info('Skipping %s on %s (affinity is %s, device is %s)', test_name, self.device_serial, affinity, self._shard_index) return False def _CleanupOutputDirectory(self): if self._output_dir: shutil.rmtree(self._output_dir, ignore_errors=True) self._output_dir = None def _ReadChartjsonOutput(self): if not self._output_dir: return '' json_output_path = os.path.join(self._output_dir, 'results-chart.json') try: with open(json_output_path) as f: return f.read() except IOError: logging.exception('Exception when reading chartjson.') logging.error('This usually means that telemetry did not run, so it could' ' not generate the file. 
Please check the device running' ' the test.') return '' def _WriteBuildBotJson(self): """Write metadata about the buildbot environment to the output dir.""" data = { 'chromium': _GetChromiumRevision(), 'environment': dict(os.environ)} logging.info('BuildBot environment: %s', data) with open(os.path.join(self._output_dir, 'buildbot.json'), 'w') as f: json.dump(data, f, sort_keys=True, indent=2, separators=(',', ': ')) def _ArchiveOutputDir(self): """Archive all files in the output dir, and return as compressed bytes.""" with io.BytesIO() as archive: with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as contents: num_files = 0 for absdir, _, files in os.walk(self._output_dir): reldir = os.path.relpath(absdir, self._output_dir) for filename in files: src_path = os.path.join(absdir, filename) # We use normpath to turn './file.txt' into just 'file.txt'. dst_path = os.path.normpath(os.path.join(reldir, filename)) contents.write(src_path, dst_path) num_files += 1 if num_files: logging.info('%d files in the output dir were archived.', num_files) else: logging.warning('No files in the output dir. Archive is empty.') return archive.getvalue() def _LaunchPerfTest(self, test_name): """Runs a perf test. Args: test_name: the name of the test to be executed. 
Returns: A tuple containing (Output, base_test_result.ResultType) """ if not self._CheckDeviceAffinity(test_name): return '', base_test_result.ResultType.PASS try: logging.warning('Unmapping device ports') forwarder.Forwarder.UnmapAllDevicePorts(self.device) self.device.RestartAdbd() except Exception as e: # pylint: disable=broad-except logging.error('Exception when tearing down device %s', e) test_config = self._tests['steps'][test_name] cmd = ('%s --device %s' % (test_config['cmd'], self.device_serial)) if (self._options.collect_chartjson_data or test_config.get('archive_output_dir')): self._output_dir = tempfile.mkdtemp() self._WriteBuildBotJson() cmd = cmd + ' --output-dir=%s' % self._output_dir logging.info( 'temperature: %s (0.1 C)', str(self._device_battery.GetBatteryInfo().get('temperature'))) if self._options.max_battery_temp: self._device_battery.LetBatteryCoolToTemperature( self._options.max_battery_temp) logging.info('Charge level: %s%%', str(self._device_battery.GetBatteryInfo().get('level'))) if self._options.min_battery_level: self._device_battery.ChargeDeviceToLevel( self._options.min_battery_level) self.device.SetScreen(True) logging.info('%s : %s', test_name, cmd) start_time = time.time() timeout = test_config.get('timeout', 3600) if self._options.no_timeout: timeout = None logging.info('Timeout for %s test: %s', test_name, timeout) full_cmd = cmd if self._options.dry_run: full_cmd = 'echo %s' % cmd logfile = sys.stdout archive_bytes = None if self._options.single_step: # Just print a heart-beat so that the outer buildbot scripts won't timeout # without response. 
logfile = _HeartBeatLogger() cwd = os.path.abspath(host_paths.DIR_SOURCE_ROOT) if full_cmd.startswith('src/'): cwd = os.path.abspath(os.path.join(host_paths.DIR_SOURCE_ROOT, os.pardir)) try: exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout( full_cmd, timeout, cwd=cwd, shell=True, logfile=logfile) json_output = self._ReadChartjsonOutput() if test_config.get('archive_output_dir'): archive_bytes = self._ArchiveOutputDir() except cmd_helper.TimeoutError as e: exit_code = -1 output = e.output json_output = '' finally: self._CleanupOutputDirectory() if self._options.single_step: logfile.stop() end_time = time.time() if exit_code is None: exit_code = -1 logging.info('%s : exit_code=%d in %d secs at %s', test_name, exit_code, end_time - start_time, self.device_serial) if exit_code == 0: result_type = base_test_result.ResultType.PASS else: result_type = base_test_result.ResultType.FAIL # Since perf tests use device affinity, give the device a chance to # recover if it is offline after a failure. Otherwise, the master sharder # will remove it from the pool and future tests on this device will fail. try: self.device.WaitUntilFullyBooted(timeout=120) except device_errors.CommandTimeoutError as e: logging.error('Device failed to return after %s: %s', test_name, e) actual_exit_code = exit_code if test_name in self._flaky_tests: # The exit_code is used at the second stage when printing the # test output. If the test is flaky, force to "0" to get that step green # whilst still gathering data to the perf dashboards. # The result_type is used by the test_dispatcher to retry the test. 
exit_code = 0 persisted_result = { 'name': test_name, 'output': [output], 'chartjson': json_output, 'archive_bytes': archive_bytes, 'exit_code': exit_code, 'actual_exit_code': actual_exit_code, 'result_type': result_type, 'start_time': start_time, 'end_time': end_time, 'total_time': end_time - start_time, 'device': self.device_serial, 'cmd': cmd, } self._SaveResult(persisted_result) return (output, result_type) def RunTest(self, test_name): """Run a perf test on the device. Args: test_name: String to use for logging the test result. Returns: A tuple of (TestRunResults, retry). """ _, result_type = self._LaunchPerfTest(test_name) results = base_test_result.TestRunResults() results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) retry = None if not results.DidRunPass(): retry = test_name return results, retry
bsd-3-clause
abadger/ansible
test/units/modules/test_apt.py
35
1540
from __future__ import (absolute_import, division, print_function) __metaclass__ = type import collections import sys from units.compat import mock from units.compat import unittest try: from ansible.modules.apt import ( expand_pkgspec_from_fnmatches, ) except Exception: # Need some more module_utils work (porting urls.py) before we can test # modules. So don't error out in this case. if sys.version_info[0] >= 3: pass class AptExpandPkgspecTestCase(unittest.TestCase): def setUp(self): FakePackage = collections.namedtuple("Package", ("name",)) self.fake_cache = [ FakePackage("apt"), FakePackage("apt-utils"), FakePackage("not-selected"), ] def test_trivial(self): foo = ["apt"] self.assertEqual( expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo) def test_version_wildcard(self): foo = ["apt=1.0*"] self.assertEqual( expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo) def test_pkgname_wildcard_version_wildcard(self): foo = ["apt*=1.0*"] m_mock = mock.Mock() self.assertEqual( expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache), ['apt', 'apt-utils']) def test_pkgname_expands(self): foo = ["apt*"] m_mock = mock.Mock() self.assertEqual( expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache), ["apt", "apt-utils"])
gpl-3.0
JackpotClavin/android_kernel_samsung_venturi
Documentation/networking/cxacru-cf.py
14668
1626
#!/usr/bin/env python # Copyright 2009 Simon Arlott # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation; either version 2 of the License, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 59 # Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # Usage: cxacru-cf.py < cxacru-cf.bin # Output: values string suitable for the sysfs adsl_config attribute # # Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110 # contains mis-aligned values which will stop the modem from being able # to make a connection. If the first and last two bytes are removed then # the values become valid, but the modulation will be forced to ANSI # T1.413 only which may not be appropriate. # # The original binary format is a packed list of le32 values. import sys import struct i = 0 while True: buf = sys.stdin.read(4) if len(buf) == 0: break elif len(buf) != 4: sys.stdout.write("\n") sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf))) sys.exit(1) if i > 0: sys.stdout.write(" ") sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0])) i += 1 sys.stdout.write("\n")
gpl-2.0
pombredanne/pyotp
src/pyotp/totp.py
2
2457
from __future__ import print_function, unicode_literals, division, absolute_import import datetime import time from pyotp import utils from pyotp.otp import OTP class TOTP(OTP): def __init__(self, *args, **kwargs): """ @option options [Integer] interval (30) the time interval in seconds for OTP This defaults to 30 which is standard. """ self.interval = kwargs.pop('interval', 30) super(TOTP, self).__init__(*args, **kwargs) def at(self, for_time, counter_offset=0): """ Accepts either a Unix timestamp integer or a Time object. Time objects will be adjusted to UTC automatically @param [Time/Integer] time the time to generate an OTP for @param [Integer] counter_offset an amount of ticks to add to the time counter """ if not isinstance(for_time, datetime.datetime): for_time = datetime.datetime.fromtimestamp(int(for_time)) return self.generate_otp(self.timecode(for_time) + counter_offset) def now(self): """ Generate the current time OTP @return [Integer] the OTP as an integer """ return self.generate_otp(self.timecode(datetime.datetime.now())) def verify(self, otp, for_time=None, valid_window=0): """ Verifies the OTP passed in against the current time OTP @param [String/Integer] otp the OTP to check against @param [Integer] valid_window extends the validity to this many counter ticks before and after the current one """ if for_time is None: for_time = datetime.datetime.now() if valid_window: for i in range(-valid_window, valid_window + 1): if utils.strings_equal(str(otp), str(self.at(for_time, i))): return True return False return utils.strings_equal(str(otp), str(self.at(for_time))) def provisioning_uri(self, name, issuer_name=None): """ Returns the provisioning URI for the OTP This can then be encoded in a QR Code and used to provision the Google Authenticator app @param [String] name of the account @return [String] provisioning uri """ return utils.build_uri(self.secret, name, issuer_name=issuer_name) def timecode(self, for_time): i = time.mktime(for_time.timetuple()) 
return int(i / self.interval)
mit
dlebauer/plantcv
scripts/dev/vis_sv_z2500_L2_e82.py
2
4539
#!/usr/bin/python import sys, traceback import cv2 import numpy as np import argparse import string import plantcv as pcv ### Parse command-line arguments def options(): parser = argparse.ArgumentParser(description="Imaging processing with opencv") parser.add_argument("-i", "--image", help="Input image file.", required=True) parser.add_argument("-m", "--roi", help="Input region of interest file.", required=False) parser.add_argument("-o", "--outdir", help="Output directory for image files.", required=True) parser.add_argument("-D", "--debug", help="Turn on debug, prints intermediate images.", action="store_true") args = parser.parse_args() return args ### Main pipeline def main(): # Get options args = options() # Read image img, path, filename = pcv.readimage(args.image) #roi = cv2.imread(args.roi) # Pipeline step device = 0 # Convert RGB to HSV and extract the Saturation channel device, s = pcv.rgb2gray_hsv(img, 's', device, args.debug) # Threshold the Saturation image device, s_thresh = pcv.binary_threshold(s, 36, 255, 'light', device, args.debug) # Median Filter device, s_mblur = pcv.median_blur(s_thresh, 0, device, args.debug) device, s_cnt = pcv.median_blur(s_thresh, 0, device, args.debug) # Fill small objects #device, s_fill = pcv.fill(s_mblur, s_cnt, 0, device, args.debug) # Convert RGB to LAB and extract the Blue channel device, b = pcv.rgb2gray_lab(img, 'b', device, args.debug) # Threshold the blue image device, b_thresh = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug) device, b_cnt = pcv.binary_threshold(b, 137, 255, 'light', device, args.debug) # Fill small objects #device, b_fill = pcv.fill(b_thresh, b_cnt, 10, device, args.debug) # Join the thresholded saturation and blue-yellow images device, bs = pcv.logical_and(s_mblur, b_cnt, device, args.debug) # Apply Mask (for vis images, mask_color=white) device, masked = pcv.apply_mask(img, bs, 'white', device, args.debug) # Convert RGB to LAB and extract the Green-Magenta and Blue-Yellow 
channels device, masked_a = pcv.rgb2gray_lab(masked, 'a', device, args.debug) device, masked_b = pcv.rgb2gray_lab(masked, 'b', device, args.debug) # Threshold the green-magenta and blue images device, maskeda_thresh = pcv.binary_threshold(masked_a, 127, 255, 'dark', device, args.debug) device, maskedb_thresh = pcv.binary_threshold(masked_b, 128, 255, 'light', device, args.debug) # Join the thresholded saturation and blue-yellow images (OR) device, ab = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) device, ab_cnt = pcv.logical_or(maskeda_thresh, maskedb_thresh, device, args.debug) # Fill small objects device, ab_fill = pcv.fill(ab, ab_cnt, 50, device, args.debug) # Apply mask (for vis images, mask_color=white) device, masked2 = pcv.apply_mask(masked, ab_fill, 'white', device, args.debug) # Identify objects device, id_objects,obj_hierarchy = pcv.find_objects(masked2, ab_fill, device, args.debug) # Define ROI device, roi1, roi_hierarchy= pcv.define_roi(masked2,'rectangle', device, None, 'default', args.debug,True, 550, 0,-600,-907) # Decide which objects to keep device,roi_objects, hierarchy3, kept_mask, obj_area = pcv.roi_objects(img,'partial',roi1,roi_hierarchy,id_objects,obj_hierarchy,device, args.debug) # Object combine kept objects device, obj, mask = pcv.object_composition(img, roi_objects, hierarchy3, device, args.debug) ############### Analysis ################ # Find shape properties, output shape image (optional) device, shape_header,shape_data,shape_img = pcv.analyze_object(img, args.image, obj, mask, device,args.debug,args.outdir+'/'+filename) # Shape properties relative to user boundary line (optional) device, boundary_header,boundary_data, boundary_img1= pcv.analyze_bound(img, args.image,obj, mask, 885, device,args.debug,args.outdir+'/'+filename) # Determine color properties: Histograms, Color Slices and Pseudocolored Images, output color analyzed images (optional) device, color_header,color_data,norm_slice= pcv.analyze_color(img, 
args.image, kept_mask, 256, device, args.debug,'all','rgb','v','img',300,args.outdir+'/'+filename) # Output shape and color data pcv.print_results(args.image, shape_header, shape_data) pcv.print_results(args.image, color_header, color_data) pcv.print_results(args.image, boundary_header, boundary_data) if __name__ == '__main__': main()
gpl-2.0
darkleons/odoo
addons/purchase/res_config.py
357
6198
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv from openerp.tools.translate import _ class purchase_config_settings(osv.osv_memory): _name = 'purchase.config.settings' _inherit = 'res.config.settings' _columns = { 'default_invoice_method': fields.selection( [('manual', 'Based on purchase order lines'), ('picking', 'Based on incoming shipments'), ('order', 'Pre-generate draft invoices based on purchase orders'), ], 'Default invoicing control method', required=True, default_model='purchase.order'), 'group_purchase_pricelist':fields.boolean("Manage pricelist per supplier", implied_group='product.group_purchase_pricelist', help='Allows to manage different prices based on rules per category of Supplier.\n' 'Example: 10% for retailers, promotion of 5 EUR on this product, etc.'), 'group_uom':fields.boolean("Manage different units of measure for products", implied_group='product.group_uom', help="""Allows you to select and maintain different units of measure for products."""), 'group_costing_method':fields.boolean("Use 'Real Price' or 'Average' costing 
methods.", implied_group='stock_account.group_inventory_valuation', help="""Allows you to compute product cost price based on average cost."""), 'module_warning': fields.boolean("Alerts by products or supplier", help='Allow to configure notification on products and trigger them when a user wants to purchase a given product or a given supplier.\n' 'Example: Product: this product is deprecated, do not purchase more than 5.\n' 'Supplier: don\'t forget to ask for an express delivery.'), 'module_purchase_double_validation': fields.boolean("Force two levels of approvals", help='Provide a double validation mechanism for purchases exceeding minimum amount.\n' '-This installs the module purchase_double_validation.'), 'module_purchase_requisition': fields.boolean("Manage calls for bids", help="""Calls for bids are used when you want to generate requests for quotations to several suppliers for a given set of products. You can configure per product if you directly do a Request for Quotation to one supplier or if you want a Call for Bids to compare offers from several suppliers."""), 'group_advance_purchase_requisition': fields.boolean("Choose from several bids in a call for bids", implied_group='purchase.group_advance_bidding', help="""In the process of a public bidding, you can compare the bid lines and choose for each requested product from which bid you buy which quantity"""), 'module_purchase_analytic_plans': fields.boolean('Use multiple analytic accounts on purchase orders', help='Allows the user to maintain several analysis plans. 
These let you split lines on a purchase order between several accounts and analytic plans.\n' '-This installs the module purchase_analytic_plans.'), 'group_analytic_account_for_purchases': fields.boolean('Analytic accounting for purchases', implied_group='purchase.group_analytic_accounting', help="Allows you to specify an analytic account on purchase orders."), 'module_stock_dropshipping': fields.boolean("Manage dropshipping", help='\nCreates the dropship route and add more complex tests' '-This installs the module stock_dropshipping.'), } _defaults = { 'default_invoice_method': 'order', } def onchange_purchase_analytic_plans(self, cr, uid, ids, module_purchase_analytic_plans, context=None): """ change group_analytic_account_for_purchases following module_purchase_analytic_plans """ if not module_purchase_analytic_plans: return {} return {'value': {'group_analytic_account_for_purchases': module_purchase_analytic_plans}} class account_config_settings(osv.osv_memory): _inherit = 'account.config.settings' _columns = { 'module_purchase_analytic_plans': fields.boolean('Use multiple analytic accounts on orders', help='Allows the user to maintain several analysis plans. These let you split lines on a purchase order between several accounts and analytic plans.\n' '-This installs the module purchase_analytic_plans.'), 'group_analytic_account_for_purchases': fields.boolean('Analytic accounting for purchases', implied_group='purchase.group_analytic_accounting', help="Allows you to specify an analytic account on purchase orders."), } def onchange_purchase_analytic_plans(self, cr, uid, ids, module_purchase_analytic_plans, context=None): """ change group_analytic_account_for_purchases following module_purchase_analytic_plans """ if not module_purchase_analytic_plans: return {} return {'value': {'group_analytic_account_for_purchases': module_purchase_analytic_plans}} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
pombredanne/MOG
nova/scheduler/filters/core_filter.py
27
4005
# Copyright (c) 2011 OpenStack Foundation # Copyright (c) 2012 Justin Santa Barbara # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova import db from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters LOG = logging.getLogger(__name__) cpu_allocation_ratio_opt = cfg.FloatOpt('cpu_allocation_ratio', default=16.0, help='Virtual CPU to physical CPU allocation ratio which affects ' 'all CPU filters. This configuration specifies a global ratio ' 'for CoreFilter. 
For AggregateCoreFilter, it will fall back to ' 'this configuration value if no per-aggregate setting found.') CONF = cfg.CONF CONF.register_opt(cpu_allocation_ratio_opt) class BaseCoreFilter(filters.BaseHostFilter): def _get_cpu_allocation_ratio(self, host_state, filter_properties): raise NotImplementedError def host_passes(self, host_state, filter_properties): """Return True if host has sufficient CPU cores.""" instance_type = filter_properties.get('instance_type') if not instance_type: return True if not host_state.vcpus_total: # Fail safe LOG.warning(_("VCPUs not set; assuming CPU collection broken")) return True instance_vcpus = instance_type['vcpus'] cpu_allocation_ratio = self._get_cpu_allocation_ratio(host_state, filter_properties) vcpus_total = host_state.vcpus_total * cpu_allocation_ratio # Only provide a VCPU limit to compute if the virt driver is reporting # an accurate count of installed VCPUs. (XenServer driver does not) if vcpus_total > 0: host_state.limits['vcpu'] = vcpus_total return (vcpus_total - host_state.vcpus_used) >= instance_vcpus class CoreFilter(BaseCoreFilter): """CoreFilter filters based on CPU core utilization.""" def _get_cpu_allocation_ratio(self, host_state, filter_properties): return CONF.cpu_allocation_ratio class AggregateCoreFilter(BaseCoreFilter): """AggregateCoreFilter with per-aggregate CPU subscription flag. Fall back to global cpu_allocation_ratio if no per-aggregate setting found. """ def _get_cpu_allocation_ratio(self, host_state, filter_properties): context = filter_properties['context'].elevated() # TODO(uni): DB query in filter is a performance hit, especially for # system with lots of hosts. Will need a general solution here to fix # all filters with aggregate DB call things. 
metadata = db.aggregate_metadata_get_by_host( context, host_state.host, key='cpu_allocation_ratio') aggregate_vals = metadata.get('cpu_allocation_ratio', set()) num_values = len(aggregate_vals) if num_values == 0: return CONF.cpu_allocation_ratio if num_values > 1: LOG.warning(_("%(num_values)d ratio values found, " "of which the minimum value will be used."), {'num_values': num_values}) try: ratio = float(min(aggregate_vals)) except ValueError as e: LOG.warning(_("Could not decode cpu_allocation_ratio: '%s'"), e) ratio = CONF.cpu_allocation_ratio return ratio
apache-2.0
yestech/gae-django-template
django/utils/version.py
320
1361
import django import os.path import re def get_svn_revision(path=None): """ Returns the SVN revision in the form SVN-XXXX, where XXXX is the revision number. Returns SVN-unknown if anything goes wrong, such as an unexpected format of internal SVN files. If path is provided, it should be a directory whose SVN info you want to inspect. If it's not provided, this will use the root django/ package directory. """ rev = None if path is None: path = django.__path__[0] entries_path = '%s/.svn/entries' % path try: entries = open(entries_path, 'r').read() except IOError: pass else: # Versions >= 7 of the entries file are flat text. The first line is # the version number. The next set of digits after 'dir' is the revision. if re.match('(\d+)', entries): rev_match = re.search('\d+\s+dir\s+(\d+)', entries) if rev_match: rev = rev_match.groups()[0] # Older XML versions of the file specify revision as an attribute of # the first entries node. else: from xml.dom import minidom dom = minidom.parse(entries_path) rev = dom.getElementsByTagName('entry')[0].getAttribute('revision') if rev: return u'SVN-%s' % rev return u'SVN-unknown'
bsd-3-clause
scriptZilla/linux
scripts/checkkconfigsymbols.py
371
11716
#!/usr/bin/env python2 """Find Kconfig symbols that are referenced but not defined.""" # (c) 2014-2015 Valentin Rothberg <valentinrothberg@gmail.com> # (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de> # # Licensed under the terms of the GNU GPL License version 2 import os import re import sys from subprocess import Popen, PIPE, STDOUT from optparse import OptionParser # regex expressions OPERATORS = r"&|\(|\)|\||\!" FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}" DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*" EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+" DEFAULT = r"default\s+.*?(?:if\s.+){,1}" STMT = r"^\s*(?:if|select|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")" # regex objects REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$") REGEX_FEATURE = re.compile(r'(?!\B"[^"]*)' + FEATURE + r'(?![^"]*"\B)') REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE) REGEX_KCONFIG_DEF = re.compile(DEF) REGEX_KCONFIG_EXPR = re.compile(EXPR) REGEX_KCONFIG_STMT = re.compile(STMT) REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$") REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$") REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+") def parse_options(): """The user interface of this module.""" usage = "%prog [options]\n\n" \ "Run this tool to detect Kconfig symbols that are referenced but " \ "not defined in\nKconfig. The output of this tool has the " \ "format \'Undefined symbol\\tFile list\'\n\n" \ "If no option is specified, %prog will default to check your\n" \ "current tree. Please note that specifying commits will " \ "\'git reset --hard\'\nyour current tree! You may save " \ "uncommitted changes to avoid losing data." 
parser = OptionParser(usage=usage) parser.add_option('-c', '--commit', dest='commit', action='store', default="", help="Check if the specified commit (hash) introduces " "undefined Kconfig symbols.") parser.add_option('-d', '--diff', dest='diff', action='store', default="", help="Diff undefined symbols between two commits. The " "input format bases on Git log's " "\'commmit1..commit2\'.") parser.add_option('-f', '--find', dest='find', action='store_true', default=False, help="Find and show commits that may cause symbols to be " "missing. Required to run with --diff.") parser.add_option('-i', '--ignore', dest='ignore', action='store', default="", help="Ignore files matching this pattern. Note that " "the pattern needs to be a Python regex. To " "ignore defconfigs, specify -i '.*defconfig'.") parser.add_option('', '--force', dest='force', action='store_true', default=False, help="Reset current Git tree even when it's dirty.") (opts, _) = parser.parse_args() if opts.commit and opts.diff: sys.exit("Please specify only one option at once.") if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff): sys.exit("Please specify valid input in the following format: " "\'commmit1..commit2\'") if opts.commit or opts.diff: if not opts.force and tree_is_dirty(): sys.exit("The current Git tree is dirty (see 'git status'). " "Running this script may\ndelete important data since it " "calls 'git reset --hard' for some performance\nreasons. 
" " Please run this script in a clean Git tree or pass " "'--force' if you\nwant to ignore this warning and " "continue.") if opts.commit: opts.find = False if opts.ignore: try: re.match(opts.ignore, "this/is/just/a/test.c") except: sys.exit("Please specify a valid Python regex.") return opts def main(): """Main function of this module.""" opts = parse_options() if opts.commit or opts.diff: head = get_head() # get commit range commit_a = None commit_b = None if opts.commit: commit_a = opts.commit + "~" commit_b = opts.commit elif opts.diff: split = opts.diff.split("..") commit_a = split[0] commit_b = split[1] undefined_a = {} undefined_b = {} # get undefined items before the commit execute("git reset --hard %s" % commit_a) undefined_a = check_symbols(opts.ignore) # get undefined items for the commit execute("git reset --hard %s" % commit_b) undefined_b = check_symbols(opts.ignore) # report cases that are present for the commit but not before for feature in sorted(undefined_b): # feature has not been undefined before if not feature in undefined_a: files = sorted(undefined_b.get(feature)) print "%s\t%s" % (yel(feature), ", ".join(files)) if opts.find: commits = find_commits(feature, opts.diff) print red(commits) # check if there are new files that reference the undefined feature else: files = sorted(undefined_b.get(feature) - undefined_a.get(feature)) if files: print "%s\t%s" % (yel(feature), ", ".join(files)) if opts.find: commits = find_commits(feature, opts.diff) print red(commits) # reset to head execute("git reset --hard %s" % head) # default to check the entire tree else: undefined = check_symbols(opts.ignore) for feature in sorted(undefined): files = sorted(undefined.get(feature)) print "%s\t%s" % (yel(feature), ", ".join(files)) def yel(string): """ Color %string yellow. """ return "\033[33m%s\033[0m" % string def red(string): """ Color %string red. """ return "\033[31m%s\033[0m" % string def execute(cmd): """Execute %cmd and return stdout. 
Exit in case of error.""" pop = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True) (stdout, _) = pop.communicate() # wait until finished if pop.returncode != 0: sys.exit(stdout) return stdout def find_commits(symbol, diff): """Find commits changing %symbol in the given range of %diff.""" commits = execute("git log --pretty=oneline --abbrev-commit -G %s %s" % (symbol, diff)) return commits def tree_is_dirty(): """Return true if the current working tree is dirty (i.e., if any file has been added, deleted, modified, renamed or copied but not committed).""" stdout = execute("git status --porcelain") for line in stdout: if re.findall(r"[URMADC]{1}", line[:2]): return True return False def get_head(): """Return commit hash of current HEAD.""" stdout = execute("git rev-parse HEAD") return stdout.strip('\n') def check_symbols(ignore): """Find undefined Kconfig symbols and return a dict with the symbol as key and a list of referencing files as value. Files matching %ignore are not checked for undefined symbols.""" source_files = [] kconfig_files = [] defined_features = set() referenced_features = dict() # {feature: [files]} # use 'git ls-files' to get the worklist stdout = execute("git ls-files") if len(stdout) > 0 and stdout[-1] == "\n": stdout = stdout[:-1] for gitfile in stdout.rsplit("\n"): if ".git" in gitfile or "ChangeLog" in gitfile or \ ".log" in gitfile or os.path.isdir(gitfile) or \ gitfile.startswith("tools/"): continue if REGEX_FILE_KCONFIG.match(gitfile): kconfig_files.append(gitfile) else: # all non-Kconfig files are checked for consistency source_files.append(gitfile) for sfile in source_files: if ignore and re.match(ignore, sfile): # do not check files matching %ignore continue parse_source_file(sfile, referenced_features) for kfile in kconfig_files: if ignore and re.match(ignore, kfile): # do not collect references for files matching %ignore parse_kconfig_file(kfile, defined_features, dict()) else: parse_kconfig_file(kfile, defined_features, 
referenced_features) undefined = {} # {feature: [files]} for feature in sorted(referenced_features): # filter some false positives if feature == "FOO" or feature == "BAR" or \ feature == "FOO_BAR" or feature == "XXX": continue if feature not in defined_features: if feature.endswith("_MODULE"): # avoid false positives for kernel modules if feature[:-len("_MODULE")] in defined_features: continue undefined[feature] = referenced_features.get(feature) return undefined def parse_source_file(sfile, referenced_features): """Parse @sfile for referenced Kconfig features.""" lines = [] with open(sfile, "r") as stream: lines = stream.readlines() for line in lines: if not "CONFIG_" in line: continue features = REGEX_SOURCE_FEATURE.findall(line) for feature in features: if not REGEX_FILTER_FEATURES.search(feature): continue sfiles = referenced_features.get(feature, set()) sfiles.add(sfile) referenced_features[feature] = sfiles def get_features_in_line(line): """Return mentioned Kconfig features in @line.""" return REGEX_FEATURE.findall(line) def parse_kconfig_file(kfile, defined_features, referenced_features): """Parse @kfile and update feature definitions and references.""" lines = [] skip = False with open(kfile, "r") as stream: lines = stream.readlines() for i in range(len(lines)): line = lines[i] line = line.strip('\n') line = line.split("#")[0] # ignore comments if REGEX_KCONFIG_DEF.match(line): feature_def = REGEX_KCONFIG_DEF.findall(line) defined_features.add(feature_def[0]) skip = False elif REGEX_KCONFIG_HELP.match(line): skip = True elif skip: # ignore content of help messages pass elif REGEX_KCONFIG_STMT.match(line): features = get_features_in_line(line) # multi-line statements while line.endswith("\\"): i += 1 line = lines[i] line = line.strip('\n') features.extend(get_features_in_line(line)) for feature in set(features): if REGEX_NUMERIC.match(feature): # ignore numeric values continue paths = referenced_features.get(feature, set()) paths.add(kfile) 
referenced_features[feature] = paths if __name__ == "__main__": main()
gpl-2.0
adaussy/eclipse-monkey-revival
plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/mimetypes.py
111
20609
"""Guess the MIME type of a file. This module defines two useful functions: guess_type(url, strict=1) -- guess the MIME type and encoding of a URL. guess_extension(type, strict=1) -- guess the extension for a given MIME type. It also contains the following, for tuning the behavior: Data: knownfiles -- list of files to parse inited -- flag set when init() has been called suffix_map -- dictionary mapping suffixes to suffixes encodings_map -- dictionary mapping suffixes to encodings types_map -- dictionary mapping suffixes to types Functions: init([files]) -- parse a list of files, default knownfiles (on Windows, the default values are taken from the registry) read_mime_types(file) -- parse one file, return a dictionary or None """ import os import sys import posixpath import urllib try: import _winreg except ImportError: _winreg = None __all__ = [ "guess_type","guess_extension","guess_all_extensions", "add_type","read_mime_types","init" ] knownfiles = [ "/etc/mime.types", "/etc/httpd/mime.types", # Mac OS X "/etc/httpd/conf/mime.types", # Apache "/etc/apache/mime.types", # Apache 1 "/etc/apache2/mime.types", # Apache 2 "/usr/local/etc/httpd/conf/mime.types", "/usr/local/lib/netscape/mime.types", "/usr/local/etc/httpd/conf/mime.types", # Apache 1.2 "/usr/local/etc/mime.types", # Apache 1.3 ] inited = False _db = None class MimeTypes: """MIME-types datastore. This datastore can handle information from mime.types-style files and supports basic determination of MIME type from a filename or URL, and can guess a reasonable extension given a MIME type. 
""" def __init__(self, filenames=(), strict=True): if not inited: init() self.encodings_map = encodings_map.copy() self.suffix_map = suffix_map.copy() self.types_map = ({}, {}) # dict for (non-strict, strict) self.types_map_inv = ({}, {}) for (ext, type) in types_map.items(): self.add_type(type, ext, True) for (ext, type) in common_types.items(): self.add_type(type, ext, False) for name in filenames: self.read(name, strict) def add_type(self, type, ext, strict=True): """Add a mapping between a type and an extension. When the extension is already known, the new type will replace the old one. When the type is already known the extension will be added to the list of known extensions. If strict is true, information will be added to list of standard types, else to the list of non-standard types. """ self.types_map[strict][ext] = type exts = self.types_map_inv[strict].setdefault(type, []) if ext not in exts: exts.append(ext) def guess_type(self, url, strict=True): """Guess the type of a file based on its URL. Return value is a tuple (type, encoding) where type is None if the type can't be guessed (no or unknown suffix) or a string of the form type/subtype, usable for a MIME Content-type header; and encoding is None for no encoding or the name of the program used to encode (e.g. compress or gzip). The mappings are table driven. Encoding suffixes are case sensitive; type suffixes are first tried case sensitive, then case insensitive. The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped to '.tar.gz'. (This is table-driven too, using the dictionary suffix_map.) Optional `strict' argument when False adds a bunch of commonly found, but non-standard types. 
""" scheme, url = urllib.splittype(url) if scheme == 'data': # syntax of data URLs: # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data # mediatype := [ type "/" subtype ] *( ";" parameter ) # data := *urlchar # parameter := attribute "=" value # type/subtype defaults to "text/plain" comma = url.find(',') if comma < 0: # bad data URL return None, None semi = url.find(';', 0, comma) if semi >= 0: type = url[:semi] else: type = url[:comma] if '=' in type or '/' not in type: type = 'text/plain' return type, None # never compressed, so encoding is None base, ext = posixpath.splitext(url) while ext in self.suffix_map: base, ext = posixpath.splitext(base + self.suffix_map[ext]) if ext in self.encodings_map: encoding = self.encodings_map[ext] base, ext = posixpath.splitext(base) else: encoding = None types_map = self.types_map[True] if ext in types_map: return types_map[ext], encoding elif ext.lower() in types_map: return types_map[ext.lower()], encoding elif strict: return None, encoding types_map = self.types_map[False] if ext in types_map: return types_map[ext], encoding elif ext.lower() in types_map: return types_map[ext.lower()], encoding else: return None, encoding def guess_all_extensions(self, type, strict=True): """Guess the extensions for a file based on its MIME type. Return value is a list of strings giving the possible filename extensions, including the leading dot ('.'). The extension is not guaranteed to have been associated with any particular data stream, but would be mapped to the MIME type `type' by guess_type(). Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. """ type = type.lower() extensions = self.types_map_inv[True].get(type, []) if not strict: for ext in self.types_map_inv[False].get(type, []): if ext not in extensions: extensions.append(ext) return extensions def guess_extension(self, type, strict=True): """Guess the extension for a file based on its MIME type. 
Return value is a string giving a filename extension, including the leading dot ('.'). The extension is not guaranteed to have been associated with any particular data stream, but would be mapped to the MIME type `type' by guess_type(). If no extension can be guessed for `type', None is returned. Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. """ extensions = self.guess_all_extensions(type, strict) if not extensions: return None return extensions[0] def read(self, filename, strict=True): """ Read a single mime.types-format file, specified by pathname. If strict is true, information will be added to list of standard types, else to the list of non-standard types. """ with open(filename) as fp: self.readfp(fp, strict) def readfp(self, fp, strict=True): """ Read a single mime.types-format file. If strict is true, information will be added to list of standard types, else to the list of non-standard types. """ while 1: line = fp.readline() if not line: break words = line.split() for i in range(len(words)): if words[i][0] == '#': del words[i:] break if not words: continue type, suffixes = words[0], words[1:] for suff in suffixes: self.add_type(type, '.' + suff, strict) def read_windows_registry(self, strict=True): """ Load the MIME types database from Windows registry. If strict is true, information will be added to list of standard types, else to the list of non-standard types. """ # Windows only if not _winreg: return def enum_types(mimedb): i = 0 while True: try: ctype = _winreg.EnumKey(mimedb, i) except EnvironmentError: break try: ctype = ctype.encode(default_encoding) # omit in 3.x! 
except UnicodeEncodeError: pass else: yield ctype i += 1 default_encoding = sys.getdefaultencoding() with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, r'MIME\Database\Content Type') as mimedb: for ctype in enum_types(mimedb): try: with _winreg.OpenKey(mimedb, ctype) as key: suffix, datatype = _winreg.QueryValueEx(key, 'Extension') except EnvironmentError: continue if datatype != _winreg.REG_SZ: continue try: suffix = suffix.encode(default_encoding) # omit in 3.x! except UnicodeEncodeError: continue self.add_type(ctype, suffix, strict) def guess_type(url, strict=True): """Guess the type of a file based on its URL. Return value is a tuple (type, encoding) where type is None if the type can't be guessed (no or unknown suffix) or a string of the form type/subtype, usable for a MIME Content-type header; and encoding is None for no encoding or the name of the program used to encode (e.g. compress or gzip). The mappings are table driven. Encoding suffixes are case sensitive; type suffixes are first tried case sensitive, then case insensitive. The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped to ".tar.gz". (This is table-driven too, using the dictionary suffix_map). Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. """ if _db is None: init() return _db.guess_type(url, strict) def guess_all_extensions(type, strict=True): """Guess the extensions for a file based on its MIME type. Return value is a list of strings giving the possible filename extensions, including the leading dot ('.'). The extension is not guaranteed to have been associated with any particular data stream, but would be mapped to the MIME type `type' by guess_type(). If no extension can be guessed for `type', None is returned. Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. 
""" if _db is None: init() return _db.guess_all_extensions(type, strict) def guess_extension(type, strict=True): """Guess the extension for a file based on its MIME type. Return value is a string giving a filename extension, including the leading dot ('.'). The extension is not guaranteed to have been associated with any particular data stream, but would be mapped to the MIME type `type' by guess_type(). If no extension can be guessed for `type', None is returned. Optional `strict' argument when false adds a bunch of commonly found, but non-standard types. """ if _db is None: init() return _db.guess_extension(type, strict) def add_type(type, ext, strict=True): """Add a mapping between a type and an extension. When the extension is already known, the new type will replace the old one. When the type is already known the extension will be added to the list of known extensions. If strict is true, information will be added to list of standard types, else to the list of non-standard types. """ if _db is None: init() return _db.add_type(type, ext, strict) def init(files=None): global suffix_map, types_map, encodings_map, common_types global inited, _db inited = True # so that MimeTypes.__init__() doesn't call us again db = MimeTypes() if files is None: if _winreg: db.read_windows_registry() files = knownfiles for file in files: if os.path.isfile(file): db.read(file) encodings_map = db.encodings_map suffix_map = db.suffix_map types_map = db.types_map[True] common_types = db.types_map[False] # Make the DB a global variable now that it is fully initialized _db = db def read_mime_types(file): try: f = open(file) except IOError: return None db = MimeTypes() db.readfp(f, True) return db.types_map[True] def _default_mime_types(): global suffix_map global encodings_map global types_map global common_types suffix_map = { '.tgz': '.tar.gz', '.taz': '.tar.gz', '.tz': '.tar.gz', '.tbz2': '.tar.bz2', } encodings_map = { '.gz': 'gzip', '.Z': 'compress', '.bz2': 'bzip2', } # Before 
adding new types, make sure they are either registered with IANA, # at http://www.isi.edu/in-notes/iana/assignments/media-types # or extensions, i.e. using the x- prefix # If you add to these, please keep them sorted! types_map = { '.a' : 'application/octet-stream', '.ai' : 'application/postscript', '.aif' : 'audio/x-aiff', '.aifc' : 'audio/x-aiff', '.aiff' : 'audio/x-aiff', '.au' : 'audio/basic', '.avi' : 'video/x-msvideo', '.bat' : 'text/plain', '.bcpio' : 'application/x-bcpio', '.bin' : 'application/octet-stream', '.bmp' : 'image/x-ms-bmp', '.c' : 'text/plain', # Duplicates :( '.cdf' : 'application/x-cdf', '.cdf' : 'application/x-netcdf', '.cpio' : 'application/x-cpio', '.csh' : 'application/x-csh', '.css' : 'text/css', '.dll' : 'application/octet-stream', '.doc' : 'application/msword', '.dot' : 'application/msword', '.dvi' : 'application/x-dvi', '.eml' : 'message/rfc822', '.eps' : 'application/postscript', '.etx' : 'text/x-setext', '.exe' : 'application/octet-stream', '.gif' : 'image/gif', '.gtar' : 'application/x-gtar', '.h' : 'text/plain', '.hdf' : 'application/x-hdf', '.htm' : 'text/html', '.html' : 'text/html', '.ief' : 'image/ief', '.jpe' : 'image/jpeg', '.jpeg' : 'image/jpeg', '.jpg' : 'image/jpeg', '.js' : 'application/x-javascript', '.ksh' : 'text/plain', '.latex' : 'application/x-latex', '.m1v' : 'video/mpeg', '.man' : 'application/x-troff-man', '.me' : 'application/x-troff-me', '.mht' : 'message/rfc822', '.mhtml' : 'message/rfc822', '.mif' : 'application/x-mif', '.mov' : 'video/quicktime', '.movie' : 'video/x-sgi-movie', '.mp2' : 'audio/mpeg', '.mp3' : 'audio/mpeg', '.mp4' : 'video/mp4', '.mpa' : 'video/mpeg', '.mpe' : 'video/mpeg', '.mpeg' : 'video/mpeg', '.mpg' : 'video/mpeg', '.ms' : 'application/x-troff-ms', '.nc' : 'application/x-netcdf', '.nws' : 'message/rfc822', '.o' : 'application/octet-stream', '.obj' : 'application/octet-stream', '.oda' : 'application/oda', '.p12' : 'application/x-pkcs12', '.p7c' : 'application/pkcs7-mime', '.pbm' : 
'image/x-portable-bitmap', '.pdf' : 'application/pdf', '.pfx' : 'application/x-pkcs12', '.pgm' : 'image/x-portable-graymap', '.pl' : 'text/plain', '.png' : 'image/png', '.pnm' : 'image/x-portable-anymap', '.pot' : 'application/vnd.ms-powerpoint', '.ppa' : 'application/vnd.ms-powerpoint', '.ppm' : 'image/x-portable-pixmap', '.pps' : 'application/vnd.ms-powerpoint', '.ppt' : 'application/vnd.ms-powerpoint', '.ps' : 'application/postscript', '.pwz' : 'application/vnd.ms-powerpoint', '.py' : 'text/x-python', '.pyc' : 'application/x-python-code', '.pyo' : 'application/x-python-code', '.qt' : 'video/quicktime', '.ra' : 'audio/x-pn-realaudio', '.ram' : 'application/x-pn-realaudio', '.ras' : 'image/x-cmu-raster', '.rdf' : 'application/xml', '.rgb' : 'image/x-rgb', '.roff' : 'application/x-troff', '.rtx' : 'text/richtext', '.sgm' : 'text/x-sgml', '.sgml' : 'text/x-sgml', '.sh' : 'application/x-sh', '.shar' : 'application/x-shar', '.snd' : 'audio/basic', '.so' : 'application/octet-stream', '.src' : 'application/x-wais-source', '.sv4cpio': 'application/x-sv4cpio', '.sv4crc' : 'application/x-sv4crc', '.swf' : 'application/x-shockwave-flash', '.t' : 'application/x-troff', '.tar' : 'application/x-tar', '.tcl' : 'application/x-tcl', '.tex' : 'application/x-tex', '.texi' : 'application/x-texinfo', '.texinfo': 'application/x-texinfo', '.tif' : 'image/tiff', '.tiff' : 'image/tiff', '.tr' : 'application/x-troff', '.tsv' : 'text/tab-separated-values', '.txt' : 'text/plain', '.ustar' : 'application/x-ustar', '.vcf' : 'text/x-vcard', '.wav' : 'audio/x-wav', '.wiz' : 'application/msword', '.wsdl' : 'application/xml', '.xbm' : 'image/x-xbitmap', '.xlb' : 'application/vnd.ms-excel', # Duplicates :( '.xls' : 'application/excel', '.xls' : 'application/vnd.ms-excel', '.xml' : 'text/xml', '.xpdl' : 'application/xml', '.xpm' : 'image/x-xpixmap', '.xsl' : 'application/xml', '.xwd' : 'image/x-xwindowdump', '.zip' : 'application/zip', } # These are non-standard types, commonly found in the wild. 
They will # only match if strict=0 flag is given to the API methods. # Please sort these too common_types = { '.jpg' : 'image/jpg', '.mid' : 'audio/midi', '.midi': 'audio/midi', '.pct' : 'image/pict', '.pic' : 'image/pict', '.pict': 'image/pict', '.rtf' : 'application/rtf', '.xul' : 'text/xul' } _default_mime_types() if __name__ == '__main__': import getopt USAGE = """\ Usage: mimetypes.py [options] type Options: --help / -h -- print this message and exit --lenient / -l -- additionally search of some common, but non-standard types. --extension / -e -- guess extension instead of type More than one type argument may be given. """ def usage(code, msg=''): print USAGE if msg: print msg sys.exit(code) try: opts, args = getopt.getopt(sys.argv[1:], 'hle', ['help', 'lenient', 'extension']) except getopt.error, msg: usage(1, msg) strict = 1 extension = 0 for opt, arg in opts: if opt in ('-h', '--help'): usage(0) elif opt in ('-l', '--lenient'): strict = 0 elif opt in ('-e', '--extension'): extension = 1 for gtype in args: if extension: guess = guess_extension(gtype, strict) if not guess: print "I don't know anything about type", gtype else: print guess else: guess, encoding = guess_type(gtype, strict) if not guess: print "I don't know anything about type", gtype else: print 'type:', guess, 'encoding:', encoding
epl-1.0
kaushik94/gambit
src/python/gambit/tests/test_actions.py
1
2928
import gambit
import decimal
import fractions
from nose.tools import assert_raises
import unittest
from gambit.lib.error import UndefinedOperationError


class TestGambitActions(unittest.TestCase):
    """Tests for action objects of an extensive-form game.

    FIX: ``self.failUnlessRaises`` is a deprecated alias of
    ``assertRaises`` (removed in Python 3.12); the canonical name is
    used below.  Behavior of the assertions is unchanged.
    """

    def setUp(self):
        # A fresh game per test so mutations (labels, deletions) do not
        # leak between tests.
        self.extensive_game = gambit.read_game(
            "test_games/complicated_extensive_game.efg")

    def tearDown(self):
        del self.extensive_game

    def test_action_set_label(self):
        "Test to ensure action labels work"
        assert self.extensive_game.root.infoset.actions[0].label == "RED"
        self.extensive_game.root.infoset.actions[0].label = "action label"
        assert self.extensive_game.root.infoset.actions[0].label == "action label"

    def test_action_probability(self):
        "Test to ensure action probabilities work"
        action = self.extensive_game.root.infoset.actions[0]
        assert action.prob == decimal.Decimal('0.500000')
        action.prob = decimal.Decimal('0.97300')
        assert action.prob == decimal.Decimal('0.97300')
        action.prob = fractions.Fraction('1/17')
        assert action.prob == fractions.Fraction('1/17')
        action.prob = 2
        assert action.prob == 2
        # Only Decimal, Fraction and int are accepted probabilities;
        # floats and arbitrary strings must be rejected with TypeError.
        self.assertRaises(TypeError, setattr, action, "prob", 2.0)
        self.assertRaises(TypeError, setattr, action, "prob", "test")
        self.assertRaises(TypeError, setattr, action, "prob", "1/7")
        self.assertRaises(TypeError, setattr, action, "prob", "2.7")

    def test_action_precedes(self):
        "Test to ensure precedes is working"
        assert not self.extensive_game.actions[0].precedes(
            self.extensive_game.root)
        assert self.extensive_game.actions[0].precedes(
            self.extensive_game.root.children[0].children[0])

    def test_action_precedes_error(self):
        "Test to ensure a TypeError is raised when precedes is called without a node"
        assert_raises(TypeError, self.extensive_game.actions[0].precedes, 0)

    def test_action_delete(self):
        "Test to ensure it is possible to delete an action"
        assert len(self.extensive_game.actions) == 6
        self.extensive_game.actions[0].delete()
        assert len(self.extensive_game.actions) == 5

    def test_action_delete_error(self):
        "Test to ensure deleting the last action of an infoset raises an error"
        # An infoset must always keep at least one action.
        assert len(self.extensive_game.infosets[0].actions) == 2
        self.extensive_game.actions[0].delete()
        assert_raises(UndefinedOperationError,
                      self.extensive_game.actions[0].delete)
gpl-2.0
openprocurement/robot_tests
ez_setup.py
4
12349
#!/usr/bin/env python """ Setuptools bootstrapping installer. Maintained at https://github.com/pypa/setuptools/tree/bootstrap. Run this script to install or upgrade setuptools. This method is DEPRECATED. Check https://github.com/pypa/setuptools/issues/581 for more details. """ import os import shutil import sys import tempfile import zipfile import optparse import subprocess import platform import textwrap import contextlib import warnings from distutils import log try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen try: from site import USER_SITE except ImportError: USER_SITE = None # 33.1.1 is the last version that supports setuptools self upgrade/installation. DEFAULT_VERSION = "33.1.1" DEFAULT_URL = "https://pypi.io/packages/source/s/setuptools/" DEFAULT_SAVE_DIR = os.curdir MEANINGFUL_INVALID_ZIP_ERR_MSG = 'Maybe {0} is corrupted, delete it and try again.' def _python_cmd(*args): """ Execute a command. Return True if the command succeeded. """ args = (sys.executable,) + args return subprocess.call(args) == 0 def _install(archive_filename, install_args=()): """Install Setuptools.""" with archive_context(archive_filename): # installing log.warn('Installing Setuptools') if not _python_cmd('setup.py', 'install', *install_args): log.warn('Something went wrong during the installation.') log.warn('See the error message above.') # exitcode will be 2 return 2 def _build_egg(egg, archive_filename, to_dir): """Build Setuptools egg.""" with archive_context(archive_filename): # building an egg log.warn('Building a Setuptools egg in %s', to_dir) _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) # returning the result log.warn(egg) if not os.path.exists(egg): raise IOError('Could not build the egg.') class ContextualZipFile(zipfile.ZipFile): """Supplement ZipFile class to support context manager for Python 2.6.""" def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def __new__(cls, 
*args, **kwargs): """Construct a ZipFile or ContextualZipFile as appropriate.""" if hasattr(zipfile.ZipFile, '__exit__'): return zipfile.ZipFile(*args, **kwargs) return super(ContextualZipFile, cls).__new__(cls) @contextlib.contextmanager def archive_context(filename): """ Unzip filename to a temporary directory, set to the cwd. The unzipped target is cleaned up after. """ tmpdir = tempfile.mkdtemp() log.warn('Extracting in %s', tmpdir) old_wd = os.getcwd() try: os.chdir(tmpdir) try: with ContextualZipFile(filename) as archive: archive.extractall() except zipfile.BadZipfile as err: if not err.args: err.args = ('', ) err.args = err.args + ( MEANINGFUL_INVALID_ZIP_ERR_MSG.format(filename), ) raise # going in the directory subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) os.chdir(subdir) log.warn('Now working in %s', subdir) yield finally: os.chdir(old_wd) shutil.rmtree(tmpdir) def _do_download(version, download_base, to_dir, download_delay): """Download Setuptools.""" py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys) tp = 'setuptools-{version}-{py_desig}.egg' egg = os.path.join(to_dir, tp.format(**locals())) if not os.path.exists(egg): archive = download_setuptools(version, download_base, to_dir, download_delay) _build_egg(egg, archive, to_dir) sys.path.insert(0, egg) # Remove previously-imported pkg_resources if present (see # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). if 'pkg_resources' in sys.modules: _unload_pkg_resources() import setuptools setuptools.bootstrap_install_from = egg def use_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=DEFAULT_SAVE_DIR, download_delay=15): """ Ensure that a setuptools version is installed. Return None. Raise SystemExit if the requested version or later cannot be installed. """ to_dir = os.path.abspath(to_dir) # prior to importing, capture the module state for # representative modules. 
rep_modules = 'pkg_resources', 'setuptools' imported = set(sys.modules).intersection(rep_modules) try: import pkg_resources pkg_resources.require("setuptools>=" + version) # a suitable version is already installed return except ImportError: # pkg_resources not available; setuptools is not installed; download pass except pkg_resources.DistributionNotFound: # no version of setuptools was found; allow download pass except pkg_resources.VersionConflict as VC_err: if imported: _conflict_bail(VC_err, version) # otherwise, unload pkg_resources to allow the downloaded version to # take precedence. del pkg_resources _unload_pkg_resources() return _do_download(version, download_base, to_dir, download_delay) def _conflict_bail(VC_err, version): """ Setuptools was imported prior to invocation, so it is unsafe to unload it. Bail out. """ conflict_tmpl = textwrap.dedent(""" The required version of setuptools (>={version}) is not available, and can't be installed while this script is running. Please install a more recent version first, using 'easy_install -U setuptools'. (Currently using {VC_err.args[0]!r}) """) msg = conflict_tmpl.format(**locals()) sys.stderr.write(msg) sys.exit(2) def _unload_pkg_resources(): sys.meta_path = [ importer for importer in sys.meta_path if importer.__class__.__module__ != 'pkg_resources.extern' ] del_modules = [ name for name in sys.modules if name.startswith('pkg_resources') ] for mod_name in del_modules: del sys.modules[mod_name] def _clean_check(cmd, target): """ Run the command to download target. If the command fails, clean up before re-raising the error. """ try: subprocess.check_call(cmd) except subprocess.CalledProcessError: if os.access(target, os.F_OK): os.unlink(target) raise def download_file_powershell(url, target): """ Download the file at url to target using Powershell. Powershell will validate trust. Raise an exception if the command cannot complete. 
""" target = os.path.abspath(target) ps_cmd = ( "[System.Net.WebRequest]::DefaultWebProxy.Credentials = " "[System.Net.CredentialCache]::DefaultCredentials; " '(new-object System.Net.WebClient).DownloadFile("%(url)s", "%(target)s")' % locals() ) cmd = [ 'powershell', '-Command', ps_cmd, ] _clean_check(cmd, target) def has_powershell(): """Determine if Powershell is available.""" if platform.system() != 'Windows': return False cmd = ['powershell', '-Command', 'echo test'] with open(os.path.devnull, 'wb') as devnull: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except Exception: return False return True download_file_powershell.viable = has_powershell def download_file_curl(url, target): cmd = ['curl', url, '--location', '--silent', '--output', target] _clean_check(cmd, target) def has_curl(): cmd = ['curl', '--version'] with open(os.path.devnull, 'wb') as devnull: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except Exception: return False return True download_file_curl.viable = has_curl def download_file_wget(url, target): cmd = ['wget', url, '--quiet', '--output-document', target] _clean_check(cmd, target) def has_wget(): cmd = ['wget', '--version'] with open(os.path.devnull, 'wb') as devnull: try: subprocess.check_call(cmd, stdout=devnull, stderr=devnull) except Exception: return False return True download_file_wget.viable = has_wget def download_file_insecure(url, target): """Use Python to download the file, without connection authentication.""" src = urlopen(url) try: # Read all the data in one block. data = src.read() finally: src.close() # Write all the data in one block to avoid creating a partial file. 
with open(target, "wb") as dst: dst.write(data) download_file_insecure.viable = lambda: True def get_best_downloader(): downloaders = ( download_file_powershell, download_file_curl, download_file_wget, download_file_insecure, ) viable_downloaders = (dl for dl in downloaders if dl.viable()) return next(viable_downloaders, None) def download_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=DEFAULT_SAVE_DIR, delay=15, downloader_factory=get_best_downloader): """ Download setuptools from a specified location and return its filename. `version` should be a valid setuptools version number that is available as an sdist for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt. ``downloader_factory`` should be a function taking no arguments and returning a function for downloading a URL to a target. """ # making sure we use the absolute path to_dir = os.path.abspath(to_dir) zip_name = "setuptools-%s.zip" % version url = download_base + zip_name saveto = os.path.join(to_dir, zip_name) if not os.path.exists(saveto): # Avoid repeated downloads log.warn("Downloading %s", url) downloader = downloader_factory() downloader(url, saveto) return os.path.realpath(saveto) def _build_install_args(options): """ Build the arguments to 'python setup.py install' on the setuptools package. Returns list of command line arguments. 
""" return ['--user'] if options.user_install else [] def _parse_args(): """Parse the command line for options.""" parser = optparse.OptionParser() parser.add_option( '--user', dest='user_install', action='store_true', default=False, help='install in user site package') parser.add_option( '--download-base', dest='download_base', metavar="URL", default=DEFAULT_URL, help='alternative URL from where to download the setuptools package') parser.add_option( '--insecure', dest='downloader_factory', action='store_const', const=lambda: download_file_insecure, default=get_best_downloader, help='Use internal, non-validating downloader' ) parser.add_option( '--version', help="Specify which version to download", default=DEFAULT_VERSION, ) parser.add_option( '--to-dir', help="Directory to save (and re-use) package", default=DEFAULT_SAVE_DIR, ) options, args = parser.parse_args() # positional arguments are ignored return options def _download_args(options): """Return args for download_setuptools function from cmdline args.""" return dict( version=options.version, download_base=options.download_base, downloader_factory=options.downloader_factory, to_dir=options.to_dir, ) def main(): """Install or upgrade setuptools and EasyInstall.""" options = _parse_args() archive = download_setuptools(**_download_args(options)) return _install(archive, _build_install_args(options)) if __name__ == '__main__': warnings.warn("ez_setup.py is deprecated, check https://github.com/pypa/setuptools/issues/581 for more info; use pip to install setuptools") sys.exit(main())
apache-2.0
dhalperi/beam
sdks/python/apache_beam/internal/util_test.py
25
2278
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Unit tests for the util module."""

import unittest

from apache_beam.internal.util import ArgumentPlaceholder
from apache_beam.internal.util import insert_values_in_args
from apache_beam.internal.util import remove_objects_from_args


class UtilTest(unittest.TestCase):
  """Round-trip tests for remove_objects_from_args / insert_values_in_args.

  FIX: ``assertEquals`` is a deprecated alias of ``assertEqual``
  (removed in Python 3.12); the canonical name is used throughout.
  """

  def test_remove_objects_from_args(self):
    args, kwargs, objs = remove_objects_from_args(
        [1, 'a'], {'x': 1, 'y': 3.14}, (str, float))
    # Values of the listed types are pulled out into objs and replaced by
    # placeholders in args/kwargs.
    self.assertEqual([1, ArgumentPlaceholder()], args)
    self.assertEqual({'x': 1, 'y': ArgumentPlaceholder()}, kwargs)
    self.assertEqual(['a', 3.14], objs)

  def test_remove_objects_from_args_nothing_to_remove(self):
    args, kwargs, objs = remove_objects_from_args(
        [1, 2], {'x': 1, 'y': 2}, (str, float))
    # No values match the types: everything passes through untouched.
    self.assertEqual([1, 2], args)
    self.assertEqual({'x': 1, 'y': 2}, kwargs)
    self.assertEqual([], objs)

  def test_insert_values_in_args(self):
    values = ['a', 'b']
    args = [1, ArgumentPlaceholder()]
    kwargs = {'x': 1, 'y': ArgumentPlaceholder()}
    args, kwargs = insert_values_in_args(args, kwargs, values)
    # Placeholders are filled back in from `values`.
    self.assertEqual([1, 'a'], args)
    self.assertEqual({'x': 1, 'y': 'b'}, kwargs)

  def test_insert_values_in_args_nothing_to_insert(self):
    values = []
    args = [1, 'a']
    kwargs = {'x': 1, 'y': 'b'}
    args, kwargs = insert_values_in_args(args, kwargs, values)
    self.assertEqual([1, 'a'], args)
    self.assertEqual({'x': 1, 'y': 'b'}, kwargs)


if __name__ == '__main__':
  unittest.main()
apache-2.0
kubeflow/examples
financial_time_series/tensorflow_model/run_preprocess_train_deploy.py
1
2019
"""Module for running the training of the machine learning model.

Scripts that performs all the steps to train the ML model.
"""
import logging
import argparse
import sys

from run_preprocess import run_preprocess
from run_train import run_training
from run_deploy import run_deploy


def parse_arguments(argv):
  """Parse command line arguments

  Args:
    argv (list): list of command line arguments including program name

  Returns:
    The parsed arguments as returned by argparse.ArgumentParser
  """
  parser = argparse.ArgumentParser(description='Preprocess and Train')
  parser.add_argument('--cutoff_year',
                      type=str,
                      help='Cutoff year for the stock data',
                      default='2010')
  parser.add_argument('--bucket',
                      type=str,
                      help='GCS bucket to store data and ML models',
                      default='<your-bucket-name>')
  parser.add_argument('--model',
                      type=str,
                      help='model to be used for training',
                      default='DeepModel',
                      choices=['FlatModel', 'DeepModel'])
  parser.add_argument('--epochs',
                      type=int,
                      help='number of epochs to train',
                      default=30001)
  parser.add_argument('--tag',
                      type=str,
                      help='tag of the model',
                      default='v1')
  # parse_known_args tolerates extra flags meant for the individual steps.
  args, _ = parser.parse_known_args(args=argv[1:])
  return args


def run_preprocess_and_train(argv=None):
  """Runs the ML model pipeline: preprocess, train, deploy.

  Args:
    argv: command line arguments (including program name) used for every
      pipeline step; defaults to a copy of sys.argv when not given.

  Returns:
    None
  """
  # FIX: work on a private copy and use one argument list consistently.
  # The previous version parsed `argv` but then ran every step on the
  # global sys.argv (silently ignoring an explicitly supplied argv) and
  # mutated sys.argv in place by appending --blob_path.
  run_args = list(sys.argv) if argv is None else list(argv)
  args = parse_arguments(run_args)
  run_preprocess(run_args)
  # The preprocess step writes its output under this blob path; hand it
  # to the training and deploy steps.
  run_args.append('--blob_path=data/data_{}.csv'.format(args.cutoff_year))
  run_training(run_args)
  run_deploy(run_args)


if __name__ == '__main__':
  logging.basicConfig(level=logging.INFO)
  run_preprocess_and_train()
apache-2.0
enableiot/iotanalytics-rule-engine
pydeps/rules/conditions/processors/conditionProcessorFactory.py
1
1494
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from rules.conditions.processors.basicConditionProcessor import BasicConditionProcessor
from rules.conditions.processors.timebasedConditionProcessor import TimebasedConditionProcessor
from rules.conditions.processors.statisticsConditionProcessor import StatisticsConditionProcessor
from rules.conditions.conditionsBuilder import SingleConditionBuilder


class ConditionProcessorFactory(object):
    """Selects the condition processor matching a condition definition."""

    @staticmethod
    def get_condition_processor(data_dao, condition):
        """Return the processor for `condition`.

        A condition with a 'timeLimit' key is evaluated over that time
        window; a condition of type 'statistics' gets the statistics
        processor; anything else falls back to basic evaluation.
        """
        # All processor variants consume the same single-condition builder.
        builder = SingleConditionBuilder(condition)
        if 'timeLimit' in condition:
            return TimebasedConditionProcessor(data_dao, builder,
                                               condition['timeLimit'])
        if 'type' in condition and condition['type'] == 'statistics':
            return StatisticsConditionProcessor(data_dao, builder)
        return BasicConditionProcessor(data_dao, builder)
apache-2.0
openmv/micropython
tests/basics/int_big1.py
9
2935
# to test arbitrary precision integers
# NOTE: this is an expected-output test — the exact print statements are
# the contract (compared against a .exp file), so do not restyle them.

x = 1000000000000000000000000000000
xn = -1000000000000000000000000000000
y = 2000000000000000000000000000000

# printing
print(x)
print(y)
print('%#X' % (x - x)) # print prefix
print('{:#,}'.format(x)) # print with commas

# addition
print(x + 1)
print(x + y)
print(x + xn == 0)
print(bool(x + xn))

# subtraction
print(x - 1)
print(x - y)
print(y - x)
print(x - x == 0)
print(bool(x - x))

# multiplication
print(x * 2)
print(x * y)

# integer division
print(x // 2)
print(y // x)

# bit inversion
print(~x)
print(~(-x))

# left shift
x = 0x10000000000000000000000
for i in range(32):
    x = x << 1
    print(x)

# right shift
x = 0x10000000000000000000000
for i in range(32):
    x = x >> 1
    print(x)

# left shift of a negative number
for i in range(8):
    print(-10000000000000000000000000 << i)
    print(-10000000000000000000000001 << i)
    print(-10000000000000000000000002 << i)
    print(-10000000000000000000000003 << i)
    print(-10000000000000000000000004 << i)

# right shift of a negative number (arithmetic shift, rounds toward -inf)
for i in range(8):
    print(-10000000000000000000000000 >> i)
    print(-10000000000000000000000001 >> i)
    print(-10000000000000000000000002 >> i)
    print(-10000000000000000000000003 >> i)
    print(-10000000000000000000000004 >> i)

# conversion from string
print(int("123456789012345678901234567890"))
print(int("-123456789012345678901234567890"))
print(int("123456789012345678901234567890abcdef", 16))
print(int("123456789012345678901234567890ABCDEF", 16))
print(int("1234567890abcdefghijklmnopqrstuvwxyz", 36))

# invalid characters in string
try:
    print(int("123456789012345678901234567890abcdef"))
except ValueError:
    print('ValueError');
try:
    print(int("123456789012345678901234567890\x01"))
except ValueError:
    print('ValueError');

# test constant integer with more than 255 chars
x = 0x84ce72aa8699df436059f052ac51b6398d2511e49631bcb7e71f89c499b9ee425dfbc13a5f6d408471b054f2655617cbbaf7937b7c80cd8865cf02c8487d30d2b0fbd8b2c4e102e16d828374bbc47b93852f212d5043c3ea720f086178ff798cc4f63f787b9c2e419efa033e7644ea7936f54462dc21a6c4580725f7f0e7d1aaaaaaa
print(x)

# test parsing ints just on threshold of small to big
# for 32 bit archs
x = 1073741823 # small
x = -1073741823 # small
x = 1073741824 # big
x = -1073741824 # big
# for nan-boxing with 47-bit small ints
print(int('0x3fffffffffff', 16)) # small
print(int('-0x3fffffffffff', 16)) # small
print(int('0x400000000000', 16)) # big
print(int('-0x400000000000', 16)) # big
# for 64 bit archs
x = 4611686018427387903 # small
x = -4611686018427387903 # small
x = 4611686018427387904 # big
x = -4611686018427387904 # big

# sys.maxsize is a constant mpz, so test it's compatible with dynamic ones
try:
    import usys as sys
except ImportError:
    import sys
print(sys.maxsize + 1 - 1 == sys.maxsize)

# test extraction of big int value via mp_obj_get_int_maybe
x = 1 << 70
print('a' * (x + 4 - x))
mit
pczhaoyun/obtainfo
zinnia/tests/test_sitemaps.py
4
4144
"""Test cases for Zinnia's sitemaps"""
from django.test import TestCase
from django.contrib.sites.models import Site
from django.contrib.auth.tests.utils import skipIfCustomUser

from zinnia.managers import PUBLISHED
from zinnia.models.entry import Entry
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.sitemaps import EntrySitemap
from zinnia.sitemaps import CategorySitemap
from zinnia.sitemaps import AuthorSitemap
from zinnia.sitemaps import TagSitemap
from zinnia.signals import disconnect_entry_signals


@skipIfCustomUser
class SitemapsTestCase(TestCase):
    """Test cases for Sitemaps classes provided"""
    # URLconf used when resolving sitemap locations.
    urls = 'zinnia.tests.implementations.urls.default'

    def setUp(self):
        """Build the fixture: two authors, two categories, two published
        entries.  entry_1 carries both authors/categories and the tags
        'zinnia, test'; entry_2 carries only the first author/category and
        the tag 'zinnia' — so the first items of each sitemap are the more
        frequently used ones."""
        # NOTE(review): presumably disables entry signal handlers whose
        # side effects would interfere with fixture creation — confirm
        # against zinnia.signals.
        disconnect_entry_signals()
        self.site = Site.objects.get_current()
        self.authors = [
            Author.objects.create(username='admin',
                                  email='admin@example.com'),
            Author.objects.create(username='user',
                                  email='user@example.com')]
        self.categories = [
            Category.objects.create(title='Category 1',
                                    slug='cat-1'),
            Category.objects.create(title='Category 2',
                                    slug='cat-2')]
        params = {'title': 'My entry 1',
                  'content': 'My content 1',
                  'tags': 'zinnia, test',
                  'slug': 'my-entry-1',
                  'status': PUBLISHED}
        self.entry_1 = Entry.objects.create(**params)
        self.entry_1.authors.add(*self.authors)
        self.entry_1.categories.add(*self.categories)
        self.entry_1.sites.add(self.site)
        params = {'title': 'My entry 2',
                  'content': 'My content 2',
                  'tags': 'zinnia',
                  'slug': 'my-entry-2',
                  'status': PUBLISHED}
        self.entry_2 = Entry.objects.create(**params)
        self.entry_2.authors.add(self.authors[0])
        self.entry_2.categories.add(self.categories[0])
        self.entry_2.sites.add(self.site)

    def test_entry_sitemap(self):
        # Both published entries are listed; lastmod mirrors the entry's
        # last_update field.
        sitemap = EntrySitemap()
        self.assertEqual(len(sitemap.items()), 2)
        self.assertEqual(sitemap.lastmod(self.entry_1),
                         self.entry_1.last_update)

    def test_category_sitemap(self):
        # cat-1 is used by both entries, cat-2 by one — so the fuller
        # category gets the higher priority.
        sitemap = CategorySitemap()
        items = sitemap.items()
        self.assertEqual(len(items), 2)
        self.assertEqual(sitemap.lastmod(items[0]),
                         self.entry_2.last_update)
        self.assertEqual(sitemap.lastmod(items[1]),
                         self.entry_1.last_update)
        self.assertEqual(sitemap.priority(items[0]), '1.0')
        self.assertEqual(sitemap.priority(items[1]), '0.5')

    def test_author_sitemap(self):
        # Same pattern as categories: the author with more entries ranks
        # first with the higher priority.
        sitemap = AuthorSitemap()
        items = sitemap.items()
        self.assertEqual(len(items), 2)
        self.assertEqual(sitemap.lastmod(items[0]),
                         self.entry_2.last_update)
        self.assertEqual(sitemap.lastmod(items[1]),
                         self.entry_1.last_update)
        self.assertEqual(sitemap.priority(items[0]), '1.0')
        self.assertEqual(sitemap.priority(items[1]), '0.5')

    def test_tag_sitemap(self):
        # 'zinnia' is on both entries, 'test' on one; tag locations are
        # served under /tags/<name>/.
        sitemap = TagSitemap()
        items = sitemap.items()
        self.assertEqual(len(items), 2)
        self.assertEqual(sitemap.lastmod(items[1]),
                         self.entry_2.last_update)
        self.assertEqual(sitemap.lastmod(items[0]),
                         self.entry_1.last_update)
        self.assertEqual(sitemap.priority(items[1]), '1.0')
        self.assertEqual(sitemap.priority(items[0]), '0.5')
        self.assertEqual(sitemap.location(items[1]), '/tags/zinnia/')
        self.assertEqual(sitemap.location(items[0]), '/tags/test/')

    def test_empty_sitemap_issue_188(self):
        # Regression test: every sitemap must handle an empty database
        # without raising.
        Entry.objects.all().delete()
        entry_sitemap = EntrySitemap()
        category_sitemap = CategorySitemap()
        author_sitemap = AuthorSitemap()
        tag_sitemap = TagSitemap()
        self.assertEqual(len(entry_sitemap.items()), 0)
        self.assertEqual(len(category_sitemap.items()), 0)
        self.assertEqual(len(author_sitemap.items()), 0)
        self.assertEqual(len(tag_sitemap.items()), 0)
apache-2.0
Jollytown/Garuda
server/garuda/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/ssl_.py
170
8755
from binascii import hexlify, unhexlify from hashlib import md5, sha1 from ..exceptions import SSLError SSLContext = None HAS_SNI = False create_default_context = None import errno import ssl try: # Test for SSL features from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 from ssl import HAS_SNI # Has SNI? except ImportError: pass try: from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION except ImportError: OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000 OP_NO_COMPRESSION = 0x20000 try: from ssl import _DEFAULT_CIPHERS except ImportError: _DEFAULT_CIPHERS = ( 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:' 'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:' 'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5' ) try: from ssl import SSLContext # Modern SSL? except ImportError: import sys class SSLContext(object): # Platform-specific: Python 2 & 3.1 supports_set_ciphers = sys.version_info >= (2, 7) def __init__(self, protocol_version): self.protocol = protocol_version # Use default values from a real SSLContext self.check_hostname = False self.verify_mode = ssl.CERT_NONE self.ca_certs = None self.options = 0 self.certfile = None self.keyfile = None self.ciphers = None def load_cert_chain(self, certfile, keyfile): self.certfile = certfile self.keyfile = keyfile def load_verify_locations(self, location): self.ca_certs = location def set_ciphers(self, cipher_suite): if not self.supports_set_ciphers: raise TypeError( 'Your version of Python does not support setting ' 'a custom cipher suite. Please upgrade to Python ' '2.7, 3.2, or later if you need this functionality.' 
) self.ciphers = cipher_suite def wrap_socket(self, socket, server_hostname=None): kwargs = { 'keyfile': self.keyfile, 'certfile': self.certfile, 'ca_certs': self.ca_certs, 'cert_reqs': self.verify_mode, 'ssl_version': self.protocol, } if self.supports_set_ciphers: # Platform-specific: Python 2.7+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs) else: # Platform-specific: Python 2.6 return wrap_socket(socket, **kwargs) def assert_fingerprint(cert, fingerprint): """ Checks if given fingerprint matches the supplied certificate. :param cert: Certificate as bytes object. :param fingerprint: Fingerprint as string of hexdigits, can be interspersed by colons. """ # Maps the length of a digest to a possible hash function producing # this digest. hashfunc_map = { 16: md5, 20: sha1 } fingerprint = fingerprint.replace(':', '').lower() digest_length, odd = divmod(len(fingerprint), 2) if odd or digest_length not in hashfunc_map: raise SSLError('Fingerprint is of invalid length.') # We need encode() here for py32; works on py2 and p33. fingerprint_bytes = unhexlify(fingerprint.encode()) hashfunc = hashfunc_map[digest_length] cert_digest = hashfunc(cert).digest() if not cert_digest == fingerprint_bytes: raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' .format(hexlify(fingerprint_bytes), hexlify(cert_digest))) def resolve_cert_reqs(candidate): """ Resolves the argument to a numeric constant, which can be passed to the wrap_socket function/method from the ssl module. Defaults to :data:`ssl.CERT_NONE`. If given a string it is assumed to be the name of the constant in the :mod:`ssl` module or its abbrevation. (So you can specify `REQUIRED` instead of `CERT_REQUIRED`. If it's neither `None` nor a string we assume it is already the numeric constant which can directly be passed to wrap_socket. 
""" if candidate is None: return CERT_NONE if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'CERT_' + candidate) return res return candidate def resolve_ssl_version(candidate): """ like resolve_cert_reqs """ if candidate is None: return PROTOCOL_SSLv23 if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'PROTOCOL_' + candidate) return res return candidate def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED, options=None, ciphers=None): """All arguments have the same meaning as ``ssl_wrap_socket``. By default, this function does a lot of the same work that ``ssl.create_default_context`` does on Python 3.4+. It: - Disables SSLv2, SSLv3, and compression - Sets a restricted set of server ciphers If you wish to enable SSLv3, you can do:: from urllib3.util import ssl_ context = ssl_.create_urllib3_context() context.options &= ~ssl_.OP_NO_SSLv3 You can do the same to enable compression (substituting ``COMPRESSION`` for ``SSLv3`` in the last line above). :param ssl_version: The desired protocol version to use. This will default to PROTOCOL_SSLv23 which will negotiate the highest protocol that both the server and your installation of OpenSSL support. :param cert_reqs: Whether to require the certificate verification. This defaults to ``ssl.CERT_REQUIRED``. :param options: Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``, ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``. :param ciphers: Which cipher suites to allow the server to select. 
:returns: Constructed SSLContext object with specified options :rtype: SSLContext """ context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) if options is None: options = 0 # SSLv2 is easily broken and is considered harmful and dangerous options |= OP_NO_SSLv2 # SSLv3 has several problems and is now dangerous options |= OP_NO_SSLv3 # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ # (issue #309) options |= OP_NO_COMPRESSION context.options |= options if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6 context.set_ciphers(ciphers or _DEFAULT_CIPHERS) context.verify_mode = cert_reqs if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED) return context def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, ca_certs=None, server_hostname=None, ssl_version=None, ciphers=None, ssl_context=None): """ All arguments except for server_hostname and ssl_context have the same meaning as they do when using :func:`ssl.wrap_socket`. :param server_hostname: When SNI is supported, the expected hostname of the certificate :param ssl_context: A pre-made :class:`SSLContext` object. If none is provided, one will be created using :func:`create_urllib3_context`. :param ciphers: A string of ciphers we wish the client to support. This is not supported on Python 2.6 as the ssl module does not support it. 
""" context = ssl_context if context is None: context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers) if ca_certs: try: context.load_verify_locations(ca_certs) except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2 raise SSLError(e) # Py33 raises FileNotFoundError which subclasses OSError # These are not equivalent unless we check the errno attribute except OSError as e: # Platform-specific: Python 3.3 and beyond if e.errno == errno.ENOENT: raise SSLError(e) raise if certfile: context.load_cert_chain(certfile, keyfile) if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI return context.wrap_socket(sock, server_hostname=server_hostname) return context.wrap_socket(sock)
mit
mjrulesamrat/merchant
billing/integration.py
3
2402
from django.utils.importlib import import_module
from django.conf import settings
from django.conf.urls import patterns


class IntegrationModuleNotFound(Exception):
    """Raised when no installed app provides a module for the integration name."""
    pass


class IntegrationNotConfigured(Exception):
    """Raised when the integration module exists but lacks the expected class."""
    pass

# Cache of integration name -> class, so the INSTALLED_APPS scan in
# get_integration() only runs once per integration name.
integration_cache = {}


class Integration(object):
    """Base Integration class that needs to be subclassed by implementations"""
    # The mode of the gateway. Looks into the settings else
    # defaults to True
    test_mode = getattr(settings, "MERCHANT_TEST_MODE", True)

    # Name of the integration.
    display_name = 'Base Integration'

    # Template rendered by the templatetag 'billing'
    template = ''

    def __init__(self, options=None):
        """Initialize the integration; *options* pre-populates the form fields."""
        if not options:
            options = {}
        # The form fields that will be rendered in the template
        self.fields = {}
        self.fields.update(options)

    def add_field(self, key, value):
        """Add (or overwrite) a single form field."""
        self.fields[key] = value

    def add_fields(self, params):
        """Add every (key, value) pair of the *params* mapping as a form field."""
        for (key, val) in params.items():
            self.add_field(key, val)

    @property
    def service_url(self):
        """URL of the remote payment service; must be provided by subclasses."""
        # Modified by subclasses
        raise NotImplementedError

    def get_urls(self):
        """Return the urlpatterns contributed by this integration (empty here)."""
        # Method must be subclassed
        urlpatterns = patterns('')
        return urlpatterns

    @property
    def urls(self):
        # Convenience property so callers can include ``integration.urls``.
        return self.get_urls()


def get_integration(integration, *args, **kwargs):
    """Return a integration instance specified by `integration` name"""
    klass = integration_cache.get(integration, None)
    if not klass:
        # e.g. "paypal" -> "paypal_integration", searched inside every
        # installed app's "integrations" package.
        integration_filename = "%s_integration" % integration
        integration_module = None
        for app in settings.INSTALLED_APPS:
            try:
                integration_module = import_module(".integrations.%s" % integration_filename, package=app)
                break
            except ImportError:
                # This app does not ship the integration; keep probing.
                pass
        if not integration_module:
            raise IntegrationModuleNotFound("Missing integration: %s" % (integration))
        # "paypal_integration" -> "PaypalIntegration"
        integration_class_name = "".join(integration_filename.title().split("_"))
        try:
            klass = getattr(integration_module, integration_class_name)
        except AttributeError:
            raise IntegrationNotConfigured("Missing %s class in the integration module." % integration_class_name)
        integration_cache[integration] = klass
    return klass(*args, **kwargs)
bsd-3-clause
Nikoala/CouchPotatoServer
libs/pyasn1/type/namedval.py
200
1605
# ASN.1 named integers
from pyasn1 import error

__all__ = [ 'NamedValues' ]


class NamedValues:
    """Bidirectional mapping between symbolic names and integer values."""

    def __init__(self, *namedValues):
        self.nameToValIdx = {}
        self.valToNameIdx = {}
        self.namedValues = ()
        # A bare name receives its 1-based position as an automatic value;
        # the position counter advances past explicit (name, value) pairs
        # as well, matching historical behaviour.
        for idx, item in enumerate(namedValues):
            if isinstance(item, tuple):
                name, val = item
            else:
                name, val = item, idx + 1
            if name in self.nameToValIdx:
                raise error.PyAsn1Error('Duplicate name %s' % (name,))
            self.nameToValIdx[name] = val
            if val in self.valToNameIdx:
                raise error.PyAsn1Error('Duplicate value %s=%s' % (name, val))
            self.valToNameIdx[val] = name
            self.namedValues += ((name, val),)

    def __str__(self):
        return str(self.namedValues)

    def getName(self, value):
        """Return the name bound to *value*, or None when unknown."""
        return self.valToNameIdx.get(value)

    def getValue(self, name):
        """Return the value bound to *name*, or None when unknown."""
        return self.nameToValIdx.get(name)

    def __getitem__(self, i):
        return self.namedValues[i]

    def __len__(self):
        return len(self.namedValues)

    def __add__(self, extraNamedValues):
        # Right operand is a plain tuple of (name, value) pairs.
        return self.__class__(*self.namedValues + extraNamedValues)

    def __radd__(self, extraNamedValues):
        return self.__class__(*extraNamedValues + tuple(self))

    def clone(self, *extraNamedValues):
        """Return a copy extended with *extraNamedValues*."""
        return self.__class__(*tuple(self) + extraNamedValues)

# XXX clone/subtype?
gpl-3.0
ryuunosukeyoshi/PartnerPoi-Bot
lib/youtube_dl/extractor/yinyuetai.py
64
1908
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import ExtractorError class YinYueTaiIE(InfoExtractor): IE_NAME = 'yinyuetai:video' IE_DESC = '音悦Tai' _VALID_URL = r'https?://v\.yinyuetai\.com/video(?:/h5)?/(?P<id>[0-9]+)' _TESTS = [{ 'url': 'http://v.yinyuetai.com/video/2322376', 'md5': '6e3abe28d38e3a54b591f9f040595ce0', 'info_dict': { 'id': '2322376', 'ext': 'mp4', 'title': '少女时代_PARTY_Music Video Teaser', 'creator': '少女时代', 'duration': 25, 'thumbnail': r're:^https?://.*\.jpg$', }, }, { 'url': 'http://v.yinyuetai.com/video/h5/2322376', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) info = self._download_json( 'http://ext.yinyuetai.com/main/get-h-mv-info?json=true&videoId=%s' % video_id, video_id, 'Downloading mv info')['videoInfo']['coreVideoInfo'] if info['error']: raise ExtractorError(info['errorMsg'], expected=True) formats = [{ 'url': format_info['videoUrl'], 'format_id': format_info['qualityLevel'], 'format': format_info.get('qualityLevelName'), 'filesize': format_info.get('fileSize'), # though URLs ends with .flv, the downloaded files are in fact mp4 'ext': 'mp4', 'tbr': format_info.get('bitrate'), } for format_info in info['videoUrlModels']] self._sort_formats(formats) return { 'id': video_id, 'title': info['videoName'], 'thumbnail': info.get('bigHeadImage'), 'creator': info.get('artistNames'), 'duration': info.get('duration'), 'formats': formats, }
gpl-3.0
Azure/azure-sdk-for-python
sdk/servicebus/azure-servicebus/tests/mgmt_tests/mgmt_test_utilities.py
1
4585
#------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. #------------------------------------------------------------------------- import pytest from azure.core.exceptions import HttpResponseError class MgmtListTestHelperInterface(object): def __init__(self, mgmt_client): self.sb_mgmt_client = mgmt_client def list_resource_method(self, start_index=0, max_count=100): pass def create_resource_method(self, name): pass def delete_resource_by_name_method(self, name): pass def get_resource_name(self, resource): pass class MgmtQueueListTestHelper(MgmtListTestHelperInterface): def list_resource_method(self, start_index=0, max_count=100): return list(self.sb_mgmt_client.list_queues(start_index=start_index, max_count=max_count)) def create_resource_method(self, name): self.sb_mgmt_client.create_queue(name) def delete_resource_by_name_method(self, name): self.sb_mgmt_client.delete_queue(name) def get_resource_name(self, queue): return queue.queue_name class MgmtQueueListRuntimeInfoTestHelper(MgmtListTestHelperInterface): def list_resource_method(self, start_index=0, max_count=100): return list(self.sb_mgmt_client.list_queues_runtime_info(start_index=start_index, max_count=max_count)) def create_resource_method(self, name): self.sb_mgmt_client.create_queue(name) def delete_resource_by_name_method(self, name): self.sb_mgmt_client.delete_queue(name) def get_resource_name(self, queue_info): return queue_info.queue_name def run_test_mgmt_list_with_parameters(test_helper): result = test_helper.list_resource_method() assert len(result) == 0 resources_names = [] for i in range(20): test_helper.create_resource_method("test_resource{}".format(i)) resources_names.append("test_resource{}".format(i)) result = test_helper.list_resource_method() assert len(result) == 20 sorted_resources_names = sorted(resources_names) 
result = test_helper.list_resource_method(start_index=5, max_count=10) expected_result = sorted_resources_names[5:15] assert len(result) == 10 for item in result: expected_result.remove(test_helper.get_resource_name(item)) assert len(expected_result) == 0 result = test_helper.list_resource_method(max_count=0) assert len(result) == 0 queues = test_helper.list_resource_method(start_index=0, max_count=0) assert len(queues) == 0 cnt = 20 for name in resources_names: test_helper.delete_resource_by_name_method(name) cnt -= 1 assert len(test_helper.list_resource_method()) == cnt assert cnt == 0 result = test_helper.list_resource_method() assert len(result) == 0 def run_test_mgmt_list_with_negative_parameters(test_helper): result = test_helper.list_resource_method() assert len(result) == 0 with pytest.raises(HttpResponseError): test_helper.list_resource_method(start_index=-1) with pytest.raises(HttpResponseError): test_helper.list_resource_method(max_count=-1) with pytest.raises(HttpResponseError): test_helper.list_resource_method(start_index=-1, max_count=-1) test_helper.create_resource_method("test_resource") result = test_helper.list_resource_method() assert len(result) == 1 and test_helper.get_resource_name(result[0]) == "test_resource" with pytest.raises(HttpResponseError): test_helper.list_resource_method(start_index=-1) with pytest.raises(HttpResponseError): test_helper.list_resource_method(max_count=-1) with pytest.raises(HttpResponseError): test_helper.list_resource_method(start_index=-1, max_count=-1) test_helper.delete_resource_by_name_method("test_resource") result = test_helper.list_resource_method() assert len(result) == 0 def clear_queues(servicebus_management_client): queues = list(servicebus_management_client.list_queues()) for queue in queues: try: servicebus_management_client.delete_queue(queue.name) except: pass def clear_topics(servicebus_management_client): topics = list(servicebus_management_client.list_topics()) for topic in topics: try: 
servicebus_management_client.delete_topic(topic.name) except: pass
mit
nlholdem/icodoom
.venv/lib/python2.7/site-packages/tensorflow/contrib/layers/ops/gen_bucketization_op.py
4
2296
"""Python wrappers around Brain. This file is MACHINE GENERATED! Do not edit. """ import collections as _collections from google.protobuf import text_format as _text_format from tensorflow.core.framework import op_def_pb2 as _op_def_pb2 # Needed to trigger the call to _set_call_cpp_shape_fn. from tensorflow.python.framework import common_shapes as _common_shapes from tensorflow.python.framework import op_def_registry as _op_def_registry from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import op_def_library as _op_def_library _bucketize_outputs = ["output"] def bucketize(input, boundaries, name=None): r"""Bucketizes 'input' based on 'boundaries'. For example, if the inputs are boundaries = [0, 10, 100] input = [[-5, 10000] [150, 10] [5, 100]] then the output will be output = [[0, 3] [3, 2] [1, 3]] Args: input: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`. Any shape of Tensor contains with int or float type. boundaries: A list of `floats`. A sorted list of floats gives the boundary of the buckets. name: A name for the operation (optional). Returns: A `Tensor` of type `int32`. Same shape with 'input', each value of input replaced with bucket index. """ result = _op_def_lib.apply_op("Bucketize", input=input, boundaries=boundaries, name=name) return result _ops.RegisterShape("Bucketize")(None) def _InitOpDefLibrary(): op_list = _op_def_pb2.OpList() _text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list) _op_def_registry.register_op_list(op_list) op_def_lib = _op_def_library.OpDefLibrary() op_def_lib.add_op_list(op_list) return op_def_lib _InitOpDefLibrary.op_list_ascii = """op { name: "Bucketize" input_arg { name: "input" type_attr: "T" } output_arg { name: "output" type: DT_INT32 } attr { name: "T" type: "type" allowed_values { list { type: DT_INT32 type: DT_INT64 type: DT_FLOAT type: DT_DOUBLE } } } attr { name: "boundaries" type: "list(float)" } } """ _op_def_lib = _InitOpDefLibrary()
gpl-3.0
dpryan79/tools-iuc
tools/genetrack/genetrack_util.py
23
12484
import bisect import math import re import subprocess import sys import tempfile import numpy from six import Iterator GFF_EXT = 'gff' SCIDX_EXT = 'scidx' def noop(data): return data def zeropad_to_numeric(data): return re.sub(r'chr0(\d)', r'chr\1', data) def numeric_to_zeropad(data): return re.sub(r'chr(\d([^\d]|$))', r'chr0\1', data) FORMATS = ['zeropad', 'numeric'] IN_CONVERT = {'zeropad': zeropad_to_numeric, 'numeric': noop} OUT_CONVERT = {'zeropad': numeric_to_zeropad, 'numeric': noop} def conversion_functions(in_fmt, out_fmt): """ Returns the proper list of functions to apply to perform a conversion """ return [IN_CONVERT[in_fmt], OUT_CONVERT[out_fmt]] def convert_data(data, in_fmt, out_fmt): for fn in conversion_functions(in_fmt, out_fmt): data = fn(data) return data class ChromosomeManager(Iterator): """ Manages a CSV reader of an index file to only load one chrom at a time """ def __init__(self, reader): self.done = False self.reader = reader self.processed_chromosomes = [] self.current_index = 0 self.next_valid() def __next__(self): self.line = next(self.reader) def is_valid(self, line): if len(line) not in [4, 5, 9]: return False try: [int(i) for i in line[1:]] self.format = SCIDX_EXT return True except ValueError: try: if len(line) < 6: return False [int(line[4]), int(line[5])] self.format = GFF_EXT return True except ValueError: return False def next_valid(self): """ Advance to the next valid line in the reader """ self.line = next(self.reader) s = 0 while not self.is_valid(self.line): self.line = next(self.reader) s += 1 if s > 0: # Skip initial line(s) of file pass def parse_line(self, line): if self.format == SCIDX_EXT: return [int(line[1]), int(line[2]), int(line[3])] else: return [int(line[3]), line[6], line[5]] def chromosome_name(self): """ Return the name of the chromosome about to be loaded """ return self.line[0] def load_chromosome(self, collect_data=True): """ Load the current chromosome into an array and return it """ cname = 
self.chromosome_name() if cname in self.processed_chromosomes: stop_err('File is not grouped by chromosome') self.data = [] while self.line[0] == cname: if collect_data: read = self.parse_line(self.line) if read[0] < self.current_index: msg = 'Reads in chromosome %s are not sorted by index. (At index %d)' % (cname, self.current_index) stop_err(msg) self.current_index = read[0] self.add_read(read) try: next(self) except StopIteration: self.done = True break self.processed_chromosomes.append(cname) self.current_index = 0 data = self.data # Don't retain reference anymore to save memory del self.data return data def add_read(self, read): if self.format == SCIDX_EXT: self.data.append(read) else: index, strand, value = read if value == '' or value == '.': value = 1 else: value = int(value) if not self.data: self.data.append([index, 0, 0]) current_read = self.data[-1] if self.data[-1][0] == index: current_read = self.data[-1] elif self.data[-1][0] < index: self.data.append([index, 0, 0]) current_read = self.data[-1] else: msg = 'Reads in chromosome %s are not sorted by index. (At index %d)' % (self.chromosome_name(), index) stop_err(msg) if strand == '+': current_read[1] += value elif strand == '-': current_read[2] += value else: msg = 'Strand "%s" at chromosome "%s" index %d is not valid.' % (strand, self.chromosome_name(), index) stop_err(msg) def skip_chromosome(self): """ Skip the current chromosome, discarding data """ self.load_chromosome(collect_data=False) class Peak(object): def __init__(self, index, pos_width, neg_width): self.index = index self.start = index - neg_width self.end = index + pos_width self.value = 0 self.deleted = False self.safe = False def __repr__(self): return '[%d] %d' % (self.index, self.value) def gff_row(cname, start, end, score, source, type='.', strand='.', phase='.', attrs={}): return (cname, source, type, start, end, score, strand, phase, gff_attrs(attrs)) def gff_attrs(d): if not d: return '.' 
return ';'.join('%s=%s' % item for item in d.items()) def stop_err(msg): sys.stderr.write(msg) sys.exit(1) def is_int(i): try: int(i) return True except ValueError: return False def make_keys(data): return [read[0] for read in data] def make_peak_keys(peaks): return [peak.index for peak in peaks] def get_window(data, start, end, keys): """ Returns all reads from the data set with index between the two indexes """ start_index = bisect.bisect_left(keys, start) end_index = bisect.bisect_right(keys, end) return data[start_index:end_index] def get_index(value, keys): """ Returns the index of the value in the keys using bisect """ return bisect.bisect_left(keys, value) def get_range(data): lo = min([item[0] for item in data]) hi = max([item[0] for item in data]) return lo, hi def get_chunks(lo, hi, size, overlap=500): """ Divides a range into chunks of maximum size size. Returns a list of 2-tuples (slice_range, process_range), each a 2-tuple (start, end). process_range has zero overlap and should be given to process_chromosome as-is, and slice_range is overlapped and should be used to slice the data (using get_window) to be given to process_chromosome. """ chunks = [] for start_index in range(lo, hi, size): process_start = start_index # Don't go over upper bound process_end = min(start_index + size, hi) # Don't go under lower bound slice_start = max(process_start - overlap, lo) # Don't go over upper bound slice_end = min(process_end + overlap, hi) chunks.append(((slice_start, slice_end), (process_start, process_end))) return chunks def allocate_array(data, width): """ Allocates a new array with the dimensions required to fit all reads in the argument. The new array is totally empty. Returns the array and the shift (number to add to a read index to get the position in the array it should be at). 
""" lo, hi = get_range(data) rng = hi - lo shift = width - lo return numpy.zeros(rng + width * 2, numpy.float), shift def normal_array(width, sigma, normalize=True): """ Returns an array of the normal distribution of the specified width """ sigma2 = float(sigma)**2 def normal_func(x): return math.exp(-x * x / (2 * sigma2)) # width is the half of the distribution values = list(map(normal_func, range(-width, width))) values = numpy.array(values, numpy.float) # normalization if normalize: values = 1.0 / math.sqrt(2 * numpy.pi * sigma2) * values return values def call_peaks(array, shift, data, keys, direction, down_width, up_width, exclusion): peaks = [] def find_peaks(): # Go through the array and call each peak results = (array > numpy.roll(array, 1)) & (array > numpy.roll(array, -1)) indexes = numpy.where(results) for index in indexes[0]: pos = down_width or exclusion // 2 neg = up_width or exclusion // 2 # Reverse strand if direction == 2: # Swap positive and negative widths pos, neg = neg, pos peaks.append(Peak(int(index) - shift, pos, neg)) find_peaks() def calculate_reads(): # Calculate the number of reads in each peak for peak in peaks: reads = get_window(data, peak.start, peak.end, keys) peak.value = sum([read[direction] for read in reads]) # Flat list of indexes with frequency indexes = [r for read in reads for r in [read[0]] * read[direction]] peak.stddev = numpy.std(indexes) calculate_reads() def perform_exclusion(): # Process the exclusion zone peak_keys = make_peak_keys(peaks) peaks_by_value = peaks[:] peaks_by_value.sort(key=lambda peak: -peak.value) for peak in peaks_by_value: peak.safe = True window = get_window(peaks, peak.index - exclusion // 2, peak.index + exclusion // 2, peak_keys) for excluded in window: if excluded.safe: continue i = get_index(excluded.index, peak_keys) del peak_keys[i] del peaks[i] perform_exclusion() return peaks def process_chromosome(cname, data, writer, process_bounds, width, sigma, down_width, up_width, exclusion, filter): 
""" Process a chromosome. Takes the chromosome name, list of reads, a CSV writer to write processes results to, the bounds (2-tuple) to write results in, and options. """ if not data: return keys = make_keys(data) # Create the arrays that hold the sum of the normals forward_array, forward_shift = allocate_array(data, width) reverse_array, reverse_shift = allocate_array(data, width) normal = normal_array(width, sigma) def populate_array(): # Add each read's normal to the array for read in data: index, forward, reverse = read # Add the normals to the appropriate regions if forward: forward_array[index + forward_shift - width:index + forward_shift + width] += normal * forward if reverse: reverse_array[index + reverse_shift - width:index + reverse_shift + width] += normal * reverse populate_array() forward_peaks = call_peaks(forward_array, forward_shift, data, keys, 1, down_width, up_width, exclusion) reverse_peaks = call_peaks(reverse_array, reverse_shift, data, keys, 2, down_width, up_width, exclusion) # Convert chromosome name in preparation for writing output cname = convert_data(cname, 'zeropad', 'numeric') def write(cname, strand, peak): start = max(peak.start, 1) end = peak.end value = peak.value stddev = peak.stddev if value > filter: # This version of genetrack outputs only gff files. writer.writerow(gff_row(cname=cname, source='genetrack', start=start, end=end, score=value, strand=strand, attrs={'stddev': stddev})) for peak in forward_peaks: if process_bounds[0] < peak.index < process_bounds[1]: write(cname, '+', peak) for peak in reverse_peaks: if process_bounds[0] < peak.index < process_bounds[1]: write(cname, '-', peak) def sort_chromosome_reads_by_index(input_path): """ Return a gff file with chromosome reads sorted by index. """ # Will this sort produce different results across platforms? 
output_path = tempfile.NamedTemporaryFile(delete=False).name command = 'sort -k 1,1 -k 4,4n "%s" > "%s"' % (input_path, output_path) p = subprocess.Popen(command, shell=True) p.wait() return output_path
mit
MrLoick/python-for-android
python-modules/twisted/twisted/trial/test/test_test_visitor.py
90
2282
"""
Tests for test-visitor support in twisted.trial: C{TestCase.visit},
C{TestSuite.visit}, and L{suiteVisit} for stdlib C{unittest} suites.
"""

from twisted.trial import unittest
from twisted.trial.runner import TestSuite, suiteVisit

# The stdlib unittest module, loaded via __import__ because the name
# 'unittest' is already bound to twisted.trial.unittest above.
pyunit = __import__('unittest')


class MockVisitor(object):
    """
    A visitor that records, in order, every test case it is called with
    (in C{self.calls}).
    """

    def __init__(self):
        self.calls = []

    def __call__(self, testCase):
        self.calls.append(testCase)


class TestTestVisitor(unittest.TestCase):
    def setUp(self):
        # Fresh recording visitor for every test method.
        self.visitor = MockVisitor()

    def test_visitCase(self):
        """
        Test that C{visit} works for a single test case.
        """
        # The suite under test is built from this very class.
        testCase = TestTestVisitor('test_visitCase')
        testCase.visit(self.visitor)
        self.assertEqual(self.visitor.calls, [testCase])

    def test_visitSuite(self):
        """
        Test that C{visit} hits all tests in a suite.
        """
        tests = [TestTestVisitor('test_visitCase'),
                 TestTestVisitor('test_visitSuite')]
        testSuite = TestSuite(tests)
        testSuite.visit(self.visitor)
        self.assertEqual(self.visitor.calls, tests)

    def test_visitEmptySuite(self):
        """
        Test that C{visit} on an empty suite hits nothing.
        """
        TestSuite().visit(self.visitor)
        self.assertEqual(self.visitor.calls, [])

    def test_visitNestedSuite(self):
        """
        Test that C{visit} recurses through suites.
        """
        tests = [TestTestVisitor('test_visitCase'),
                 TestTestVisitor('test_visitSuite')]
        # Wrap each test in its own sub-suite to exercise recursion.
        testSuite = TestSuite([TestSuite([test]) for test in tests])
        testSuite.visit(self.visitor)
        self.assertEqual(self.visitor.calls, tests)

    def test_visitPyunitSuite(self):
        """
        Test that C{suiteVisit} visits stdlib unittest suites
        """
        test = TestTestVisitor('test_visitPyunitSuite')
        suite = pyunit.TestSuite([test])
        suiteVisit(suite, self.visitor)
        self.assertEqual(self.visitor.calls, [test])

    def test_visitPyunitCase(self):
        """
        Test that a stdlib test case in a suite gets visited.
        """
        class PyunitCase(pyunit.TestCase):
            def test_foo(self):
                pass
        test = PyunitCase('test_foo')
        TestSuite([test]).visit(self.visitor)
        # Compare by id() because trial may wrap the pyunit case rather
        # than pass the original object to the visitor.
        self.assertEqual(
            [call.id() for call in self.visitor.calls],
            [test.id()])
apache-2.0
Richard32/git
contrib/svn-fe/svnrdump_sim.py
328
2044
#!/usr/bin/python
"""
Simulates svnrdump by replaying an existing dump from a file, taking care
of the specified revision range.
To simulate incremental imports the environment variable SVNRMAX can be set
to the highest revision that should be available.
"""
import sys
import os

if sys.hexversion < 0x02040000:
    # The limiter is the ValueError() calls. This may be too conservative
    sys.stderr.write("svnrdump-sim.py: requires Python 2.4 or later.\n")
    sys.exit(1)


def getrevlimit():
    """Return the SVNRMAX cap (highest revision to replay) or None if unset."""
    var = 'SVNRMAX'
    if var in os.environ:
        return os.environ[var]
    return None


def writedump(url, lower, upper):
    """Replay the dump file named by the sim:// *url* onto stdout.

    Emits the dump header plus revisions in [*lower*, *upper*] (inclusive;
    *upper* may be 'HEAD' meaning "to end of file").  Returns True if at
    least one revision line was written, False otherwise.  Raises
    ValueError for URLs that do not use the sim:// scheme.
    """
    if url.startswith('sim://'):
        filename = url[6:]
        if filename[-1] == '/':
            filename = filename[:-1]  # remove terminating slash
    else:
        raise ValueError('sim:// url required')
    state = 'header'
    wroterev = False
    f = open(filename, 'r')
    # try/finally (not 'with') keeps the claimed Python 2.4 compatibility
    # while fixing the original's leaked file handle on break/raise.
    try:
        while True:
            l = f.readline()
            if l == '':
                break
            if state == 'header' and l.startswith('Revision-number: '):
                state = 'prefix'
            if state == 'prefix' and l == 'Revision-number: %s\n' % lower:
                state = 'selection'
            if not upper == 'HEAD' and state == 'selection' and \
                    l == 'Revision-number: %s\n' % upper:
                break
            if state == 'header' or state == 'selection':
                if state == 'selection':
                    wroterev = True
                sys.stdout.write(l)
    finally:
        f.close()
    return wroterev


if __name__ == "__main__":
    if not (len(sys.argv) in (3, 4, 5)):
        # Bug fix: the format argument was missing, so the literal "%s"
        # was printed instead of the program name.
        print("usage: %s dump URL -rLOWER:UPPER" % sys.argv[0])
        sys.exit(1)
    if not sys.argv[1] == 'dump':
        raise NotImplementedError('only "dump" is supported.')
    url = sys.argv[2]
    # Bug fix: must be a list, not a tuple -- r[1] is reassigned below
    # when SVNRMAX is set and no -r option was given.
    r = ['0', 'HEAD']
    if len(sys.argv) == 4 and sys.argv[3][0:2] == '-r':
        r = sys.argv[3][2:].lstrip().split(':')
    if not getrevlimit() is None:
        r[1] = getrevlimit()
    if writedump(url, r[0], r[1]):
        ret = 0
    else:
        ret = 1
    sys.exit(ret)
gpl-2.0
beck/django
django/conf/locale/ko/formats.py
100
2320
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'Y년 n월 j일' TIME_FORMAT = 'A g:i' DATETIME_FORMAT = 'Y년 n월 j일 g:i A' YEAR_MONTH_FORMAT = 'Y년 F월' MONTH_DAY_FORMAT = 'F월 j일' SHORT_DATE_FORMAT = 'Y-n-j.' SHORT_DATETIME_FORMAT = 'Y-n-j H:i' # FIRST_DAY_OF_WEEK = # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior # Kept ISO formats as they are in first position DATE_INPUT_FORMATS = [ '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06' # '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006' # '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006' # '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006' # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006' '%Y년 %m월 %d일', # '2006년 10월 25일', with localized suffix. ] TIME_INPUT_FORMATS = [ '%H:%M:%S', # '14:30:59' '%H:%M:%S.%f', # '14:30:59.000200' '%H:%M', # '14:30' '%H시 %M분 %S초', # '14시 30분 59초' '%H시 %M분', # '14시 30분' ] DATETIME_INPUT_FORMATS = [ '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59' '%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200' '%m/%d/%Y %H:%M', # '10/25/2006 14:30' '%m/%d/%Y', # '10/25/2006' '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59' '%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200' '%m/%d/%y %H:%M', # '10/25/06 14:30' '%m/%d/%y', # '10/25/06' '%Y년 %m월 %d일 %H시 %M분 %S초', # '2006년 10월 25일 14시 30분 59초' '%Y년 %m월 %d일 %H시 %M분', # '2006년 10월 25일 14시 30분' ] DECIMAL_SEPARATOR = '.' THOUSAND_SEPARATOR = ',' NUMBER_GROUPING = 3
bsd-3-clause
hehongliang/tensorflow
tensorflow/contrib/deprecated/__init__.py
67
4854
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core alias for the deprecated tf.X_summary ops.

For TensorFlow 1.0, we have reorganized the TensorFlow summary ops into a
submodule, and made some semantic tweaks. The first thing to note is that we
moved the APIs around as follows:

```python
tf.scalar_summary -> tf.summary.scalar
tf.histogram_summary -> tf.summary.histogram
tf.audio_summary -> tf.summary.audio
tf.image_summary -> tf.summary.image
tf.merge_summary -> tf.summary.merge
tf.merge_all_summaries -> tf.summary.merge_all
```

We think this API is cleaner and will improve long-term discoverability and
clarity of the TensorFlow API. But we also took the opportunity to make an
important change to how summary "tags" work. The "tag" of a summary is the
string that is associated with the output data, i.e. the key for organizing
the generated protobufs.

Previously, the tag was allowed to be any unique string; it had no relation
to the summary op generating it, and no relation to the TensorFlow name
system. This behavior made it very difficult to write reusable code that
would add summary ops to the graph. If you had a function to add summary ops,
you would need to pass in a `tf.name_scope`, manually, to that function to
create deduplicated tags. Otherwise your program would fail with a runtime
error due to tag collision.
The new summary APIs under `tf.summary` throw away the "tag" as an independent
concept; instead, the first argument is the node name. So summary tags now
automatically inherit the surrounding `tf.name_scope`, and automatically are
deduplicated if there is a conflict. Now however, the only allowed characters
are alphanumerics, underscores, and forward slashes. To make migration easier,
the new APIs automatically convert illegal characters to underscores.

Just as an example, consider the following "before" and "after" code snippets:

```python
# Before
def add_activation_summaries(v, scope):
  tf.scalar_summary("%s/fraction_of_zero" % scope, tf.nn.fraction_of_zero(v))
  tf.histogram_summary("%s/activations" % scope, v)

# After
def add_activation_summaries(v):
  tf.summary.scalar("fraction_of_zero", tf.nn.fraction_of_zero(v))
  tf.summary.histogram("activations", v)
```

Now, so long as the add_activation_summaries function is called from within
the right `tf.name_scope`, the behavior is the same.

Because this change does modify the behavior and could break tests, we can't
automatically migrate usage to the new APIs. That is why we are making the old
APIs temporarily available here at `tf.contrib.deprecated`.

In addition to the name change described above, there are two further changes
to the new summary ops:

- the "max_images" argument for `tf.image_summary` was renamed to
  "max_outputs" for `tf.summary.image`
- `tf.scalar_summary` accepted arbitrary tensors of tags and values. But
  `tf.summary.scalar` requires a single scalar name and scalar value. In most
  cases, you can create `tf.summary.scalar` in a loop to get the same
  behavior.

As before, TensorBoard groups charts by the top-level `tf.name_scope`, which
may be inconvenient, for in the new summary ops, the summary will inherit that
`tf.name_scope` without user control. We plan to add more grouping mechanisms
to TensorBoard, so it will be possible to specify the TensorBoard group for
each summary via the summary API.
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.ops.logging_ops import audio_summary from tensorflow.python.ops.logging_ops import histogram_summary from tensorflow.python.ops.logging_ops import image_summary from tensorflow.python.ops.logging_ops import merge_all_summaries from tensorflow.python.ops.logging_ops import merge_summary from tensorflow.python.ops.logging_ops import scalar_summary from tensorflow.python.util.all_util import remove_undocumented # pylint: enable=unused-import,line-too-long _allowed_symbols = ['audio_summary', 'histogram_summary', 'image_summary', 'merge_all_summaries', 'merge_summary', 'scalar_summary'] remove_undocumented(__name__, _allowed_symbols)
apache-2.0
sander76/home-assistant
tests/helpers/test_config_validation.py
4
30076
"""Test config validators.""" from collections import OrderedDict from datetime import date, datetime, timedelta import enum import os from socket import _GLOBAL_DEFAULT_TIMEOUT from unittest.mock import Mock, patch import uuid import pytest import voluptuous as vol import homeassistant from homeassistant.helpers import config_validation as cv, template def test_boolean(): """Test boolean validation.""" schema = vol.Schema(cv.boolean) for value in ( None, "T", "negative", "lock", "tr ue", [], [1, 2], {"one": "two"}, test_boolean, ): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ("true", "On", "1", "YES", " true ", "enable", 1, 50, True, 0.1): assert schema(value) for value in ("false", "Off", "0", "NO", "disable", 0, False): assert not schema(value) def test_latitude(): """Test latitude validation.""" schema = vol.Schema(cv.latitude) for value in ("invalid", None, -91, 91, "-91", "91", "123.01A"): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ("-89", 89, "12.34"): schema(value) def test_longitude(): """Test longitude validation.""" schema = vol.Schema(cv.longitude) for value in ("invalid", None, -181, 181, "-181", "181", "123.01A"): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ("-179", 179, "12.34"): schema(value) def test_port(): """Test TCP/UDP network port.""" schema = vol.Schema(cv.port) for value in ("invalid", None, -1, 0, 80000, "81000"): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ("1000", 21, 24574): schema(value) def test_isfile(): """Validate that the value is an existing file.""" schema = vol.Schema(cv.isfile) fake_file = "this-file-does-not.exist" assert not os.path.isfile(fake_file) for value in ("invalid", None, -1, 0, 80000, fake_file): with pytest.raises(vol.Invalid): schema(value) # patching methods that allow us to fake a file existing # with write access with patch("os.path.isfile", Mock(return_value=True)), patch( "os.access", Mock(return_value=True) ): 
schema("test.txt") def test_url(): """Test URL.""" schema = vol.Schema(cv.url) for value in ( "invalid", None, 100, "htp://ha.io", "http//ha.io", "http://??,**", "https://??,**", ): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ( "http://localhost", "https://localhost/test/index.html", "http://home-assistant.io", "http://home-assistant.io/test/", "https://community.home-assistant.io/", ): assert schema(value) def test_platform_config(): """Test platform config validation.""" options = ({}, {"hello": "world"}) for value in options: with pytest.raises(vol.MultipleInvalid): cv.PLATFORM_SCHEMA(value) options = ({"platform": "mqtt"}, {"platform": "mqtt", "beer": "yes"}) for value in options: cv.PLATFORM_SCHEMA_BASE(value) def test_ensure_list(): """Test ensure_list.""" schema = vol.Schema(cv.ensure_list) assert [] == schema(None) assert [1] == schema(1) assert [1] == schema([1]) assert ["1"] == schema("1") assert ["1"] == schema(["1"]) assert [{"1": "2"}] == schema({"1": "2"}) def test_entity_id(): """Test entity ID validation.""" schema = vol.Schema(cv.entity_id) with pytest.raises(vol.MultipleInvalid): schema("invalid_entity") assert schema("sensor.LIGHT") == "sensor.light" def test_entity_ids(): """Test entity ID validation.""" schema = vol.Schema(cv.entity_ids) options = ( "invalid_entity", "sensor.light,sensor_invalid", ["invalid_entity"], ["sensor.light", "sensor_invalid"], ["sensor.light,sensor_invalid"], ) for value in options: with pytest.raises(vol.MultipleInvalid): schema(value) options = ([], ["sensor.light"], "sensor.light") for value in options: schema(value) assert schema("sensor.LIGHT, light.kitchen ") == ["sensor.light", "light.kitchen"] def test_entity_domain(): """Test entity domain validation.""" schema = vol.Schema(cv.entity_domain("sensor")) for value in ( "invalid_entity", "cover.demo", "cover.demo,sensor.another_entity", "", ): with pytest.raises(vol.MultipleInvalid): schema(value) assert schema("sensor.LIGHT") == 
"sensor.light" schema = vol.Schema(cv.entity_domain(("sensor", "binary_sensor"))) for value in ("invalid_entity", "cover.demo"): with pytest.raises(vol.MultipleInvalid): schema(value) assert schema("sensor.LIGHT") == "sensor.light" assert schema("binary_sensor.LIGHT") == "binary_sensor.light" def test_entities_domain(): """Test entities domain validation.""" schema = vol.Schema(cv.entities_domain("sensor")) options = ( None, "", "invalid_entity", ["sensor.light", "cover.demo"], ["sensor.light", "sensor_invalid"], ) for value in options: with pytest.raises(vol.MultipleInvalid): schema(value) options = ("sensor.light", ["SENSOR.light"], ["sensor.light", "sensor.demo"]) for value in options: schema(value) assert schema("sensor.LIGHT, sensor.demo ") == ["sensor.light", "sensor.demo"] assert schema(["sensor.light", "SENSOR.demo"]) == ["sensor.light", "sensor.demo"] def test_ensure_list_csv(): """Test ensure_list_csv.""" schema = vol.Schema(cv.ensure_list_csv) options = (None, 12, [], ["string"], "string1,string2") for value in options: schema(value) assert schema("string1, string2 ") == ["string1", "string2"] def test_event_schema(): """Test event_schema validation.""" options = ( {}, None, {"event_data": {}}, {"event": "state_changed", "event_data": 1}, ) for value in options: with pytest.raises(vol.MultipleInvalid): cv.EVENT_SCHEMA(value) options = ( {"event": "state_changed"}, {"event": "state_changed", "event_data": {"hello": "world"}}, ) for value in options: cv.EVENT_SCHEMA(value) def test_icon(): """Test icon validation.""" schema = vol.Schema(cv.icon) for value in (False, "work"): with pytest.raises(vol.MultipleInvalid): schema(value) schema("mdi:work") schema("custom:prefix") def test_time_period(): """Test time_period validation.""" schema = vol.Schema(cv.time_period) options = ( None, "", "hello:world", "12:", "12:34:56:78", {}, {"wrong_key": -10}, "12.5:30", "12:30.5", "12.5:30:30", "12:30.5:30", ) for value in options: with 
pytest.raises(vol.MultipleInvalid): schema(value) options = ( ("8:20", timedelta(hours=8, minutes=20)), ("23:59", timedelta(hours=23, minutes=59)), ("-8:20", -1 * timedelta(hours=8, minutes=20)), ("-1:15", -1 * timedelta(hours=1, minutes=15)), ("-23:59:59", -1 * timedelta(hours=23, minutes=59, seconds=59)), ("-48:00", -1 * timedelta(days=2)), ({"minutes": 5}, timedelta(minutes=5)), (1, timedelta(seconds=1)), ("5", timedelta(seconds=5)), ("180", timedelta(seconds=180)), ("00:08:20.5", timedelta(minutes=8, seconds=20, milliseconds=500)), ("00:23:59.999", timedelta(minutes=23, seconds=59, milliseconds=999)), ("-00:08:20.5", -1 * timedelta(minutes=8, seconds=20, milliseconds=500)), ( "-12:59:59.999", -1 * timedelta(hours=12, minutes=59, seconds=59, milliseconds=999), ), ({"milliseconds": 1.5}, timedelta(milliseconds=1, microseconds=500)), ({"seconds": "1.5"}, timedelta(seconds=1, milliseconds=500)), ({"minutes": "1.5"}, timedelta(minutes=1, seconds=30)), ({"hours": -1.5}, -1 * timedelta(hours=1, minutes=30)), ({"days": "-1.5"}, -1 * timedelta(days=1, hours=12)), ) for value, result in options: assert schema(value) == result def test_remove_falsy(): """Test remove falsy.""" assert cv.remove_falsy([0, None, 1, "1", {}, [], ""]) == [1, "1"] def test_service(): """Test service validation.""" schema = vol.Schema(cv.service) with pytest.raises(vol.MultipleInvalid): schema("invalid_turn_on") schema("homeassistant.turn_on") def test_service_schema(): """Test service_schema validation.""" options = ( {}, None, { "service": "homeassistant.turn_on", "service_template": "homeassistant.turn_on", }, {"data": {"entity_id": "light.kitchen"}}, {"service": "homeassistant.turn_on", "data": None}, { "service": "homeassistant.turn_on", "data_template": {"brightness": "{{ no_end"}, }, ) for value in options: with pytest.raises(vol.MultipleInvalid): cv.SERVICE_SCHEMA(value) options = ( {"service": "homeassistant.turn_on"}, {"service": "homeassistant.turn_on", "entity_id": "light.kitchen"}, 
{"service": "light.turn_on", "entity_id": "all"}, { "service": "homeassistant.turn_on", "entity_id": ["light.kitchen", "light.ceiling"], }, { "service": "light.turn_on", "entity_id": "all", "alias": "turn on kitchen lights", }, ) for value in options: cv.SERVICE_SCHEMA(value) def test_slug(): """Test slug validation.""" schema = vol.Schema(cv.slug) for value in (None, "hello world"): with pytest.raises(vol.MultipleInvalid): schema(value) for value in (12345, "hello"): schema(value) def test_string(hass): """Test string validation.""" schema = vol.Schema(cv.string) with pytest.raises(vol.Invalid): schema(None) with pytest.raises(vol.Invalid): schema([]) with pytest.raises(vol.Invalid): schema({}) for value in (True, 1, "hello"): schema(value) # Test template support for text, native in ( ("[1, 2]", [1, 2]), ("{1, 2}", {1, 2}), ("(1, 2)", (1, 2)), ('{"hello": True}', {"hello": True}), ): tpl = template.Template(text, hass) result = tpl.async_render() assert isinstance(result, template.ResultWrapper) assert result == native assert schema(result) == text def test_string_with_no_html(): """Test string with no html validation.""" schema = vol.Schema(cv.string_with_no_html) with pytest.raises(vol.Invalid): schema("This has HTML in it <a>Link</a>") with pytest.raises(vol.Invalid): schema("<b>Bold</b>") for value in ( True, 3, "Hello", "**Hello**", "This has no HTML [Link](https://home-assistant.io)", ): schema(value) def test_temperature_unit(): """Test temperature unit validation.""" schema = vol.Schema(cv.temperature_unit) with pytest.raises(vol.MultipleInvalid): schema("K") schema("C") schema("F") def test_x10_address(): """Test x10 addr validator.""" schema = vol.Schema(cv.x10_address) with pytest.raises(vol.Invalid): schema("Q1") schema("q55") schema("garbage_addr") schema("a1") schema("C11") def test_template(): """Test template validator.""" schema = vol.Schema(cv.template) for value in (None, "{{ partial_print }", "{% if True %}Hello", ["test"]): with 
pytest.raises(vol.Invalid): schema(value) options = ( 1, "Hello", "{{ beer }}", "{% if 1 == 1 %}Hello{% else %}World{% endif %}", ) for value in options: schema(value) def test_dynamic_template(): """Test dynamic template validator.""" schema = vol.Schema(cv.dynamic_template) for value in ( None, 1, "{{ partial_print }", "{% if True %}Hello", ["test"], "just a string", ): with pytest.raises(vol.Invalid): schema(value) options = ( "{{ beer }}", "{% if 1 == 1 %}Hello{% else %}World{% endif %}", ) for value in options: schema(value) def test_template_complex(): """Test template_complex validator.""" schema = vol.Schema(cv.template_complex) for value in ("{{ partial_print }", "{% if True %}Hello"): with pytest.raises(vol.MultipleInvalid): schema(value) options = ( 1, "Hello", "{{ beer }}", "{% if 1 == 1 %}Hello{% else %}World{% endif %}", {"test": 1, "test2": "{{ beer }}"}, ["{{ beer }}", 1], ) for value in options: schema(value) # ensure the validator didn't mutate the input assert options == ( 1, "Hello", "{{ beer }}", "{% if 1 == 1 %}Hello{% else %}World{% endif %}", {"test": 1, "test2": "{{ beer }}"}, ["{{ beer }}", 1], ) # Ensure we don't mutate non-string types that cannot be templates. 
for value in (1, True, None): assert schema(value) == value def test_time_zone(): """Test time zone validation.""" schema = vol.Schema(cv.time_zone) with pytest.raises(vol.MultipleInvalid): schema("America/Do_Not_Exist") schema("America/Los_Angeles") schema("UTC") def test_date(): """Test date validation.""" schema = vol.Schema(cv.date) for value in ["Not a date", "23:42", "2016-11-23T18:59:08"]: with pytest.raises(vol.Invalid): schema(value) schema(datetime.now().date()) schema("2016-11-23") def test_time(): """Test date validation.""" schema = vol.Schema(cv.time) for value in ["Not a time", "2016-11-23", "2016-11-23T18:59:08"]: with pytest.raises(vol.Invalid): schema(value) schema(datetime.now().time()) schema("23:42:00") schema("23:42") def test_datetime(): """Test date time validation.""" schema = vol.Schema(cv.datetime) for value in [date.today(), "Wrong DateTime"]: with pytest.raises(vol.MultipleInvalid): schema(value) schema(datetime.now()) schema("2016-11-23T18:59:08") def test_multi_select(): """Test multi select validation. 
Expected behavior: - Will not accept any input but a list - Will not accept selections outside of configured scope """ schema = vol.Schema(cv.multi_select({"paulus": "Paulus", "robban": "Robban"})) with pytest.raises(vol.Invalid): schema("robban") schema(["paulus", "martinhj"]) schema(["robban", "paulus"]) def test_multi_select_in_serializer(): """Test multi_select with custom_serializer.""" assert cv.custom_serializer(cv.multi_select({"paulus": "Paulus"})) == { "type": "multi_select", "options": {"paulus": "Paulus"}, } def test_boolean_in_serializer(): """Test boolean with custom_serializer.""" assert cv.custom_serializer(cv.boolean) == { "type": "boolean", } def test_string_in_serializer(): """Test string with custom_serializer.""" assert cv.custom_serializer(cv.string) == { "type": "string", } def test_positive_time_period_dict_in_serializer(): """Test positive_time_period_dict with custom_serializer.""" assert cv.custom_serializer(cv.positive_time_period_dict) == { "type": "positive_time_period_dict", } @pytest.fixture def schema(): """Create a schema used for testing deprecation.""" return vol.Schema({"venus": cv.boolean, "mars": cv.boolean, "jupiter": cv.boolean}) @pytest.fixture def version(monkeypatch): """Patch the version used for testing to 0.5.0.""" monkeypatch.setattr(homeassistant.const, "__version__", "0.5.0") def test_deprecated_with_no_optionals(caplog, schema): """ Test deprecation behaves correctly when optional params are None. 
Expected behavior: - Outputs the appropriate deprecation warning if key is detected - Processes schema without changing any values - No warning or difference in output if key is not provided """ deprecated_schema = vol.All(cv.deprecated("mars"), schema) test_data = {"mars": True} output = deprecated_schema(test_data.copy()) assert len(caplog.records) == 1 assert caplog.records[0].name in [ __name__, "homeassistant.helpers.config_validation", ] assert ( "The 'mars' option is deprecated, please remove it from your configuration" ) in caplog.text assert test_data == output caplog.clear() assert len(caplog.records) == 0 test_data = {"venus": True} output = deprecated_schema(test_data.copy()) assert len(caplog.records) == 0 assert test_data == output def test_deprecated_with_replacement_key(caplog, schema): """ Test deprecation behaves correctly when only a replacement key is provided. Expected behavior: - Outputs the appropriate deprecation warning if key is detected - Processes schema moving the value from key to replacement_key - Processes schema changing nothing if only replacement_key provided - No warning if only replacement_key provided - No warning or difference in output if neither key nor replacement_key are provided """ deprecated_schema = vol.All( cv.deprecated("mars", replacement_key="jupiter"), schema ) test_data = {"mars": True} output = deprecated_schema(test_data.copy()) assert len(caplog.records) == 1 assert ( "The 'mars' option is deprecated, please replace it with 'jupiter'" ) in caplog.text assert {"jupiter": True} == output caplog.clear() assert len(caplog.records) == 0 test_data = {"jupiter": True} output = deprecated_schema(test_data.copy()) assert len(caplog.records) == 0 assert test_data == output test_data = {"venus": True} output = deprecated_schema(test_data.copy()) assert len(caplog.records) == 0 assert test_data == output def test_deprecated_with_default(caplog, schema): """ Test deprecation behaves correctly with a default value. 
This is likely a scenario that would never occur. Expected behavior: - Behaves identically as when the default value was not present """ deprecated_schema = vol.All(cv.deprecated("mars", default=False), schema) test_data = {"mars": True} output = deprecated_schema(test_data.copy()) assert len(caplog.records) == 1 assert caplog.records[0].name == __name__ assert ( "The 'mars' option is deprecated, please remove it from your configuration" ) in caplog.text assert test_data == output caplog.clear() assert len(caplog.records) == 0 test_data = {"venus": True} output = deprecated_schema(test_data.copy()) assert len(caplog.records) == 0 assert test_data == output def test_deprecated_with_replacement_key_and_default(caplog, schema): """ Test deprecation with a replacement key and default. Expected behavior: - Outputs the appropriate deprecation warning if key is detected - Processes schema moving the value from key to replacement_key - Processes schema changing nothing if only replacement_key provided - No warning if only replacement_key provided - No warning if neither key nor replacement_key are provided - Adds replacement_key with default value in this case """ deprecated_schema = vol.All( cv.deprecated("mars", replacement_key="jupiter", default=False), schema ) test_data = {"mars": True} output = deprecated_schema(test_data.copy()) assert len(caplog.records) == 1 assert ( "The 'mars' option is deprecated, please replace it with 'jupiter'" ) in caplog.text assert {"jupiter": True} == output caplog.clear() assert len(caplog.records) == 0 test_data = {"jupiter": True} output = deprecated_schema(test_data.copy()) assert len(caplog.records) == 0 assert test_data == output test_data = {"venus": True} output = deprecated_schema(test_data.copy()) assert len(caplog.records) == 0 assert {"venus": True, "jupiter": False} == output deprecated_schema_with_default = vol.All( vol.Schema( { "venus": cv.boolean, vol.Optional("mars", default=False): cv.boolean, vol.Optional("jupiter", 
default=False): cv.boolean, } ), cv.deprecated("mars", replacement_key="jupiter", default=False), ) test_data = {"mars": True} output = deprecated_schema_with_default(test_data.copy()) assert len(caplog.records) == 1 assert ( "The 'mars' option is deprecated, please replace it with 'jupiter'" ) in caplog.text assert {"jupiter": True} == output def test_deprecated_cant_find_module(): """Test if the current module cannot be inspected.""" with patch("inspect.getmodule", return_value=None): # This used to raise. cv.deprecated( "mars", replacement_key="jupiter", default=False, ) def test_deprecated_logger_with_config_attributes(caplog): """Test if the logger outputs the correct message if the line and file attribute is available in config.""" file: str = "configuration.yaml" line: int = 54 replacement = f"'mars' option near {file}:{line} is deprecated" config = OrderedDict([("mars", "blah")]) setattr(config, "__config_file__", file) setattr(config, "__line__", line) cv.deprecated("mars", replacement_key="jupiter", default=False)(config) assert len(caplog.records) == 1 assert replacement in caplog.text caplog.clear() assert len(caplog.records) == 0 def test_deprecated_logger_with_one_config_attribute(caplog): """Test if the logger outputs the correct message if only one of line and file attribute is available in config.""" file: str = "configuration.yaml" line: int = 54 replacement = f"'mars' option near {file}:{line} is deprecated" config = OrderedDict([("mars", "blah")]) setattr(config, "__config_file__", file) cv.deprecated("mars", replacement_key="jupiter", default=False)(config) assert len(caplog.records) == 1 assert replacement not in caplog.text assert ( "The 'mars' option is deprecated, please replace it with 'jupiter'" ) in caplog.text caplog.clear() assert len(caplog.records) == 0 config = OrderedDict([("mars", "blah")]) setattr(config, "__line__", line) cv.deprecated("mars", replacement_key="jupiter", default=False)(config) assert len(caplog.records) == 1 
assert replacement not in caplog.text assert ( "The 'mars' option is deprecated, please replace it with 'jupiter'" ) in caplog.text caplog.clear() assert len(caplog.records) == 0 def test_deprecated_logger_without_config_attributes(caplog): """Test if the logger outputs the correct message if the line and file attribute is not available in config.""" file: str = "configuration.yaml" line: int = 54 replacement = f"'mars' option near {file}:{line} is deprecated" config = OrderedDict([("mars", "blah")]) cv.deprecated("mars", replacement_key="jupiter", default=False)(config) assert len(caplog.records) == 1 assert replacement not in caplog.text assert ( "The 'mars' option is deprecated, please replace it with 'jupiter'" ) in caplog.text caplog.clear() assert len(caplog.records) == 0 def test_key_dependency(): """Test key_dependency validator.""" schema = vol.Schema(cv.key_dependency("beer", "soda")) options = {"beer": None} for value in options: with pytest.raises(vol.MultipleInvalid): schema(value) options = ({"beer": None, "soda": None}, {"soda": None}, {}) for value in options: schema(value) def test_has_at_most_one_key(): """Test has_at_most_one_key validator.""" schema = vol.Schema(cv.has_at_most_one_key("beer", "soda")) for value in (None, [], {"beer": None, "soda": None}): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ({}, {"beer": None}, {"soda": None}): schema(value) def test_has_at_least_one_key(): """Test has_at_least_one_key validator.""" schema = vol.Schema(cv.has_at_least_one_key("beer", "soda")) for value in (None, [], {}, {"wine": None}): with pytest.raises(vol.MultipleInvalid): schema(value) for value in ({"beer": None}, {"soda": None}): schema(value) def test_enum(): """Test enum validator.""" class TestEnum(enum.Enum): """Test enum.""" value1 = "Value 1" value2 = "Value 2" schema = vol.Schema(cv.enum(TestEnum)) with pytest.raises(vol.Invalid): schema("value3") def test_socket_timeout(): # pylint: disable=invalid-name """Test 
socket timeout validator.""" schema = vol.Schema(cv.socket_timeout) with pytest.raises(vol.Invalid): schema(0.0) with pytest.raises(vol.Invalid): schema(-1) assert schema(None) == _GLOBAL_DEFAULT_TIMEOUT assert schema(1) == 1.0 def test_matches_regex(): """Test matches_regex validator.""" schema = vol.Schema(cv.matches_regex(".*uiae.*")) with pytest.raises(vol.Invalid): schema(1.0) with pytest.raises(vol.Invalid): schema(" nrtd ") test_str = "This is a test including uiae." assert schema(test_str) == test_str def test_is_regex(): """Test the is_regex validator.""" schema = vol.Schema(cv.is_regex) with pytest.raises(vol.Invalid): schema("(") with pytest.raises(vol.Invalid): schema({"a dict": "is not a regex"}) valid_re = ".*" schema(valid_re) def test_comp_entity_ids(): """Test config validation for component entity IDs.""" schema = vol.Schema(cv.comp_entity_ids) for valid in ( "ALL", "all", "AlL", "light.kitchen", ["light.kitchen"], ["light.kitchen", "light.ceiling"], [], ): schema(valid) for invalid in (["light.kitchen", "not-entity-id"], "*", ""): with pytest.raises(vol.Invalid): schema(invalid) def test_uuid4_hex(caplog): """Test uuid validation.""" schema = vol.Schema(cv.uuid4_hex) for value in ["Not a hex string", "0", 0]: with pytest.raises(vol.Invalid): schema(value) with pytest.raises(vol.Invalid): # the 13th char should be 4 schema("a03d31b22eee1acc9b90eec40be6ed23") with pytest.raises(vol.Invalid): # the 17th char should be 8-a schema("a03d31b22eee4acc7b90eec40be6ed23") _hex = uuid.uuid4().hex assert schema(_hex) == _hex assert schema(_hex.upper()) == _hex def test_key_value_schemas(): """Test key value schemas.""" schema = vol.Schema( cv.key_value_schemas( "mode", { "number": vol.Schema({"mode": "number", "data": int}), "string": vol.Schema({"mode": "string", "data": str}), }, ) ) with pytest.raises(vol.Invalid) as excinfo: schema(True) assert str(excinfo.value) == "Expected a dictionary" for mode in None, "invalid": with pytest.raises(vol.Invalid) as 
excinfo: schema({"mode": mode}) assert ( str(excinfo.value) == f"Unexpected value for mode: '{mode}'. Expected number, string" ) with pytest.raises(vol.Invalid) as excinfo: schema({"mode": "number", "data": "string-value"}) assert str(excinfo.value) == "expected int for dictionary value @ data['data']" with pytest.raises(vol.Invalid) as excinfo: schema({"mode": "string", "data": 1}) assert str(excinfo.value) == "expected str for dictionary value @ data['data']" for mode, data in (("number", 1), ("string", "hello")): schema({"mode": mode, "data": data}) def test_script(caplog): """Test script validation is user friendly.""" for data, msg in ( ({"delay": "{{ invalid"}, "should be format 'HH:MM'"), ({"wait_template": "{{ invalid"}, "invalid template"), ({"condition": "invalid"}, "Unexpected value for condition: 'invalid'"), ({"event": None}, "string value is None for dictionary value @ data['event']"), ( {"device_id": None}, "string value is None for dictionary value @ data['device_id']", ), ( {"scene": "light.kitchen"}, "Entity ID 'light.kitchen' does not belong to domain 'scene'", ), ): with pytest.raises(vol.Invalid) as excinfo: cv.script_action(data) assert msg in str(excinfo.value) def test_whitespace(): """Test whitespace validation.""" schema = vol.Schema(cv.whitespace) for value in ( None, "" "T", "negative", "lock", "tr ue", [], [1, 2], {"one": "two"}, ): with pytest.raises(vol.MultipleInvalid): schema(value) for value in (" ", " "): assert schema(value)
apache-2.0
hrh5775/LibraryManager
PythonTestClient/LibraryManagerTestClient/venv/Lib/site-packages/pip/utils/appdirs.py
340
8811
""" This code was taken from https://github.com/ActiveState/appdirs and modified to suit our purposes. """ from __future__ import absolute_import import os import sys from pip.compat import WINDOWS, expanduser from pip._vendor.six import PY2, text_type def user_cache_dir(appname): r""" Return full path to the user-specific cache dir for this application. "appname" is the name of application. Typical user cache directories are: macOS: ~/Library/Caches/<AppName> Unix: ~/.cache/<AppName> (XDG default) Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming app data dir (the default returned by `user_data_dir`). Apps typically put cache data somewhere *under* the given dir here. Some examples: ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache ...\Acme\SuperApp\Cache\1.0 OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. """ if WINDOWS: # Get the base path path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) # When using Python 2, return paths as bytes on Windows like we do on # other operating systems. See helper function docs for more details. if PY2 and isinstance(path, text_type): path = _win_path_to_bytes(path) # Add our app name and Cache directory to it path = os.path.join(path, appname, "Cache") elif sys.platform == "darwin": # Get the base path path = expanduser("~/Library/Caches") # Add our app name to it path = os.path.join(path, appname) else: # Get the base path path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache")) # Add our app name to it path = os.path.join(path, appname) return path def user_data_dir(appname, roaming=False): """ Return full path to the user-specific data dir for this application. "appname" is the name of application. If None, just the system directory is returned. 
"roaming" (boolean, default False) can be set True to use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> for a discussion of issues. Typical user data directories are: macOS: ~/Library/Application Support/<AppName> Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\<username>\ ... ...Application Data\<AppName> Win XP (roaming): C:\Documents and Settings\<username>\Local ... ...Settings\Application Data\<AppName> Win 7 (not roaming): C:\\Users\<username>\AppData\Local\<AppName> Win 7 (roaming): C:\\Users\<username>\AppData\Roaming\<AppName> For Unix, we follow the XDG spec and support $XDG_DATA_HOME. That means, by default "~/.local/share/<AppName>". """ if WINDOWS: const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" path = os.path.join(os.path.normpath(_get_win_folder(const)), appname) elif sys.platform == "darwin": path = os.path.join( expanduser('~/Library/Application Support/'), appname, ) else: path = os.path.join( os.getenv('XDG_DATA_HOME', expanduser("~/.local/share")), appname, ) return path def user_config_dir(appname, roaming=True): """Return full path to the user-specific config dir for this application. "appname" is the name of application. If None, just the system directory is returned. "roaming" (boolean, default True) can be set False to not use the Windows roaming appdata directory. That means that for users on a Windows network setup for roaming profiles, this user data will be sync'd on login. See <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> for a discussion of issues. Typical user data directories are: macOS: same as user_data_dir Unix: ~/.config/<AppName> Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. 
That means, by default "~/.config/<AppName>". """ if WINDOWS: path = user_data_dir(appname, roaming=roaming) elif sys.platform == "darwin": path = user_data_dir(appname) else: path = os.getenv('XDG_CONFIG_HOME', expanduser("~/.config")) path = os.path.join(path, appname) return path # for the discussion regarding site_config_dirs locations # see <https://github.com/pypa/pip/issues/1733> def site_config_dirs(appname): """Return a list of potential user-shared config dirs for this application. "appname" is the name of application. Typical user config directories are: macOS: /Library/Application Support/<AppName>/ Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in $XDG_CONFIG_DIRS Win XP: C:\Documents and Settings\All Users\Application ... ...Data\<AppName>\ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) Win 7: Hidden, but writeable on Win 7: C:\ProgramData\<AppName>\ """ if WINDOWS: path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) pathlist = [os.path.join(path, appname)] elif sys.platform == 'darwin': pathlist = [os.path.join('/Library/Application Support', appname)] else: # try looking in $XDG_CONFIG_DIRS xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') if xdg_config_dirs: pathlist = [ os.path.join(expanduser(x), appname) for x in xdg_config_dirs.split(os.pathsep) ] else: pathlist = [] # always look in /etc directly as well pathlist.append('/etc') return pathlist # -- Windows support functions -- def _get_win_folder_from_registry(csidl_name): """ This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names. 
""" import _winreg shell_folder_name = { "CSIDL_APPDATA": "AppData", "CSIDL_COMMON_APPDATA": "Common AppData", "CSIDL_LOCAL_APPDATA": "Local AppData", }[csidl_name] key = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" ) directory, _type = _winreg.QueryValueEx(key, shell_folder_name) return directory def _get_win_folder_with_ctypes(csidl_name): csidl_const = { "CSIDL_APPDATA": 26, "CSIDL_COMMON_APPDATA": 35, "CSIDL_LOCAL_APPDATA": 28, }[csidl_name] buf = ctypes.create_unicode_buffer(1024) ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) # Downgrade to short path name if have highbit chars. See # <http://bugs.activestate.com/show_bug.cgi?id=85099>. has_high_char = False for c in buf: if ord(c) > 255: has_high_char = True break if has_high_char: buf2 = ctypes.create_unicode_buffer(1024) if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): buf = buf2 return buf.value if WINDOWS: try: import ctypes _get_win_folder = _get_win_folder_with_ctypes except ImportError: _get_win_folder = _get_win_folder_from_registry def _win_path_to_bytes(path): """Encode Windows paths to bytes. Only used on Python 2. Motivation is to be consistent with other operating systems where paths are also returned as bytes. This avoids problems mixing bytes and Unicode elsewhere in the codebase. For more details and discussion see <https://github.com/pypa/pip/issues/3463>. If encoding using ASCII and MBCS fails, return the original Unicode path. """ for encoding in ('ASCII', 'MBCS'): try: return path.encode(encoding) except (UnicodeEncodeError, LookupError): pass return path
gpl-3.0
j-carl/ansible
lib/ansible/utils/jsonrpc.py
58
3845
# (c) 2017, Peter Sprygada <psprygad@redhat.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json
import traceback

from ansible.module_utils._text import to_text
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.six.moves import cPickle
from ansible.utils.display import Display

display = Display()


class JsonRpcServer(object):
    """Dispatch JSON-RPC 2.0 requests to methods of registered objects."""

    # Objects whose public methods are callable via RPC.
    # NOTE(review): this is a class-level set, so registrations are shared
    # across all JsonRpcServer instances — confirm that is intended.
    _objects = set()

    def handle_request(self, request):
        """Handle a single JSON-RPC request.

        ``request`` is the raw request (bytes or text); the return value is
        always a JSON-encoded response string (a result or an error object).
        """
        request = json.loads(to_text(request, errors='surrogate_then_replace'))

        method = request.get('method')

        # Per the JSON-RPC spec, 'rpc.'-prefixed names are reserved; leading
        # underscores are treated as private and not remotely callable.
        if method.startswith('rpc.') or method.startswith('_'):
            error = self.invalid_request()
            return json.dumps(error)

        args, kwargs = request.get('params')
        # Stash the request id so header() can echo it into the response.
        setattr(self, '_identifier', request.get('id'))

        rpc_method = None
        for obj in self._objects:
            rpc_method = getattr(obj, method, None)
            if rpc_method:
                break

        if not rpc_method:
            error = self.method_not_found()
            response = json.dumps(error)
        else:
            try:
                result = rpc_method(*args, **kwargs)
            except ConnectionError as exc:
                display.vvv(traceback.format_exc())
                try:
                    error = self.error(code=exc.code, message=to_text(exc))
                except AttributeError:
                    # ConnectionError without a 'code' attribute.
                    error = self.internal_error(data=to_text(exc))
                response = json.dumps(error)
            except Exception as exc:
                display.vvv(traceback.format_exc())
                error = self.internal_error(data=to_text(exc, errors='surrogate_then_replace'))
                response = json.dumps(error)
            else:
                if isinstance(result, dict) and 'jsonrpc' in result:
                    # Handler already produced a complete JSON-RPC envelope.
                    response = result
                else:
                    response = self.response(result)

                try:
                    response = json.dumps(response)
                except Exception as exc:
                    display.vvv(traceback.format_exc())
                    error = self.internal_error(data=to_text(exc, errors='surrogate_then_replace'))
                    response = json.dumps(error)

        # Clear the per-request id before returning.
        delattr(self, '_identifier')

        return response

    def register(self, obj):
        """Expose ``obj``'s methods for RPC dispatch."""
        self._objects.add(obj)

    def header(self):
        """Return the common response envelope for the current request."""
        return {'jsonrpc': '2.0', 'id': self._identifier}

    def response(self, result=None):
        """Wrap a handler result in a JSON-RPC response object.

        Non-text results are pickled (protocol 0) and marked with
        ``result_type: pickle`` so the client side can restore them.
        """
        response = self.header()
        if isinstance(result, binary_type):
            result = to_text(result)
        if not isinstance(result, text_type):
            response["result_type"] = "pickle"
            result = to_text(cPickle.dumps(result, protocol=0))
        response['result'] = result
        return response

    def error(self, code, message, data=None):
        """Build a JSON-RPC error response with the given code and message."""
        response = self.header()
        error = {'code': code, 'message': message}
        if data:
            error['data'] = data
        response['error'] = error
        return response

    # json-rpc standard errors (-32768 .. -32000)
    def parse_error(self, data=None):
        return self.error(-32700, 'Parse error', data)

    def method_not_found(self, data=None):
        return self.error(-32601, 'Method not found', data)

    def invalid_request(self, data=None):
        return self.error(-32600, 'Invalid request', data)

    def invalid_params(self, data=None):
        return self.error(-32602, 'Invalid params', data)

    def internal_error(self, data=None):
        return self.error(-32603, 'Internal error', data)
gpl-3.0
Nic30/hwtHls
hwtHls/tests/all.py
1
1472
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Aggregated hwtHls test suite.

Runs all test cases serially, or in parallel across forked worker
processes when the optional ``concurrencytest`` package is installed.
"""
from unittest import TestLoader, TextTestRunner, TestSuite

from hwtHls.examples.alapAsapDiffExample import AlapAsapDiffExample_TC
from hwtHls.examples.bitonicSort import BitonicSorterHLS_TC,\
    BitonicSorterHLS_large_TC
from hwtHls.examples.hls_expr_tree3 import HlsExprTree3_example_TC
from hwtHls.examples.mac import HlsMAC_example_TC
from hwtHls.tests.connection import HlsSlicingTC
from hwtHls.scheduler.list_schedueling_test import ListSchedueling_TC


def testSuiteFromTCs(*tcs):
    """Collect the given TestCase classes into a single TestSuite.

    Each class is flagged as splittable so a concurrent runner may
    distribute its tests across worker processes.
    """
    loader = TestLoader()
    for case_cls in tcs:
        case_cls._multiprocess_can_split_ = True
    return TestSuite(loader.loadTestsFromTestCase(case_cls)
                     for case_cls in tcs)


suite = testSuiteFromTCs(
    HlsSlicingTC,
    HlsMAC_example_TC,
    BitonicSorterHLS_TC,
    BitonicSorterHLS_large_TC,
    HlsExprTree3_example_TC,
    AlapAsapDiffExample_TC,
    ListSchedueling_TC,
)

if __name__ == '__main__':
    runner = TextTestRunner(verbosity=2)

    try:
        # Optional dependency: forks worker processes for the suite.
        from concurrencytest import ConcurrentTestSuite, fork_for_tests
    except ImportError:
        # concurrencytest is not installed, use regular test runner
        runner.run(suite)
    else:
        runner.run(ConcurrentTestSuite(suite, fork_for_tests()))
mit
aolieman/pyspotlight
setup.py
1
1428
#!/usr/bin/env python
# coding: utf-8
"""Packaging script for pyspotlight."""
from setuptools import setup
from setuptools import find_packages
from io import open


def _read(path):
    """Return the UTF-8 decoded contents of *path*."""
    with open(path, 'r', encoding='utf-8') as handle:
        return handle.read()


setup(
    name='pyspotlight',
    version='0.7.2',
    license='BSD',
    url='https://github.com/aolieman/pyspotlight',
    author='Luis Nell',
    author_email='luis.nell@simpleloop.com',
    maintainer='Alex Olieman',
    maintainer_email='alex@olieman.net',
    packages=find_packages(),
    description='Python interface to the DBpedia Spotlight REST API',
    # Long description shown on PyPI: README followed by the changelog.
    long_description=_read('README.rst') + '\n\n' + _read('HISTORY.rst'),
    keywords=['dbpedia spotlight', 'semantic annotation', 'entity linking'],
    classifiers=[
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries',
        'Environment :: Web Environment',
        'License :: OSI Approved :: BSD License',
        'Development Status :: 5 - Production/Stable',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    install_requires=['requests~=2.10'],
    tests_require=['nose2~=0.6'],
    test_suite='nose2.collector.collector',
)
bsd-2-clause
40023256/2015cdag1man
static/Brython3.1.1-20150328-091302/Lib/multiprocessing/__init__.py
693
6866
#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads.  A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__version__ = '0.70a1'

__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
    'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Barrier', 'Queue', 'SimpleQueue', 'JoinableQueue', 'Pool',
    'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
    ]

__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'

#
# Imports
#

import os
import sys

from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING

#
# Exceptions
#

class ProcessError(Exception):
    # Base class for all exceptions raised by this package.
    pass

class BufferTooShort(ProcessError):
    # Raised when a supplied receive buffer is too small for the message.
    pass

class TimeoutError(ProcessError):
    # Raised by blocking methods when their timeout expires.
    pass

class AuthenticationError(ProcessError):
    # Raised on a connection authentication (digest) failure.
    pass

import _multiprocessing

#
# Definitions not depending on native semaphores
#
# NOTE: all of the factory functions below import their implementation
# modules lazily, so importing this package stays cheap and modules that
# need platform features (semaphores, forking, ...) are only pulled in
# when actually used.
#

def Manager():
    '''
    Returns a manager associated with a running server process

    The managers methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m

#brython fix me
#def Pipe(duplex=True):
#    '''
#    Returns two connection object connected by a pipe
#    '''
#    from multiprocessing.connection import Pipe
#    return Pipe(duplex)

def cpu_count():
    '''
    Returns the number of CPUs in the system
    '''
    if sys.platform == 'win32':
        try:
            num = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            num = 0
    elif 'bsd' in sys.platform or sys.platform == 'darwin':
        comm = '/sbin/sysctl -n hw.ncpu'
        if sys.platform == 'darwin':
            # On OS X sysctl lives at /usr/sbin/sysctl.
            comm = '/usr' + comm
        try:
            with os.popen(comm) as p:
                num = int(p.read())
        except ValueError:
            num = 0
    else:
        try:
            num = os.sysconf('SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            num = 0

    if num >= 1:
        return num
    else:
        raise NotImplementedError('cannot determine number of cpus')

def freeze_support():
    '''
    Check whether this is a fake forked process in a frozen executable.
    If so then run code specified by commandline and exit.
    '''
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
        from multiprocessing.forking import freeze_support
        freeze_support()

def get_logger():
    '''
    Return package logger -- if it does not already exist then it is created
    '''
    from multiprocessing.util import get_logger
    return get_logger()

def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    from multiprocessing.util import log_to_stderr
    return log_to_stderr(level)

#brython fix me
#def allow_connection_pickling():
#    '''
#    Install support for sending connections and sockets between processes
#    '''
#    # This is undocumented.  In previous versions of multiprocessing
#    # its only effect was to make socket objects inheritable on Windows.
#    import multiprocessing.connection

#
# Definitions depending on native semaphores
#

def Lock():
    '''
    Returns a non-recursive lock object
    '''
    from multiprocessing.synchronize import Lock
    return Lock()

def RLock():
    '''
    Returns a recursive lock object
    '''
    from multiprocessing.synchronize import RLock
    return RLock()

def Condition(lock=None):
    '''
    Returns a condition object
    '''
    from multiprocessing.synchronize import Condition
    return Condition(lock)

def Semaphore(value=1):
    '''
    Returns a semaphore object
    '''
    from multiprocessing.synchronize import Semaphore
    return Semaphore(value)

def BoundedSemaphore(value=1):
    '''
    Returns a bounded semaphore object
    '''
    from multiprocessing.synchronize import BoundedSemaphore
    return BoundedSemaphore(value)

def Event():
    '''
    Returns an event object
    '''
    from multiprocessing.synchronize import Event
    return Event()

def Barrier(parties, action=None, timeout=None):
    '''
    Returns a barrier object
    '''
    from multiprocessing.synchronize import Barrier
    return Barrier(parties, action, timeout)

def Queue(maxsize=0):
    '''
    Returns a queue object
    '''
    from multiprocessing.queues import Queue
    return Queue(maxsize)

def JoinableQueue(maxsize=0):
    '''
    Returns a queue object
    '''
    from multiprocessing.queues import JoinableQueue
    return JoinableQueue(maxsize)

def SimpleQueue():
    '''
    Returns a queue object
    '''
    from multiprocessing.queues import SimpleQueue
    return SimpleQueue()

def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
    '''
    Returns a process pool object
    '''
    from multiprocessing.pool import Pool
    return Pool(processes, initializer, initargs, maxtasksperchild)

def RawValue(typecode_or_type, *args):
    '''
    Returns a shared object
    '''
    from multiprocessing.sharedctypes import RawValue
    return RawValue(typecode_or_type, *args)

def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray
    return RawArray(typecode_or_type, size_or_initializer)

def Value(typecode_or_type, *args, lock=True):
    '''
    Returns a synchronized shared object
    '''
    from multiprocessing.sharedctypes import Value
    return Value(typecode_or_type, *args, lock=lock)

def Array(typecode_or_type, size_or_initializer, *, lock=True):
    '''
    Returns a synchronized shared array
    '''
    from multiprocessing.sharedctypes import Array
    return Array(typecode_or_type, size_or_initializer, lock=lock)

#
#
#

if sys.platform == 'win32':

    def set_executable(executable):
        '''
        Sets the path to a python.exe or pythonw.exe binary used to run
        child processes on Windows instead of sys.executable.
        Useful for people embedding Python.
        '''
        from multiprocessing.forking import set_executable
        set_executable(executable)

    __all__ += ['set_executable']
gpl-3.0
ph4r05/PJSIP
tests/pjsua/inc_cfg.py
19
3296
# $Id$
"""Shared configuration objects for the pjsua test scripts."""
import random
import config_site
import socket
import errno

DEFAULT_ECHO = True
DEFAULT_TRACE = True
DEFAULT_START_SIP_PORT = 50000

# Shared vars

# arguments containing script module & config
ARGS = []
HAS_SND_DEV = config_site.HAS_SND_DEV


def _alloc_sip_port():
    """Pick a random SIP port that is currently bindable.

    Tries up to ten random ports in [DEFAULT_START_SIP_PORT, 65534].
    A port that fails to bind with EADDRINUSE is retried; any other bind
    error keeps the candidate port (matching the historical behaviour).
    The socket is always closed exactly once, and the port may still be
    taken by another process before pjsua binds it.
    """
    port = 0
    for _ in range(10):
        port = random.randint(DEFAULT_START_SIP_PORT, 65534)
        s = socket.socket(socket.AF_INET)
        try:
            s.bind(("0.0.0.0", port))
        except socket.error as serr:
            if serr.errno == errno.EADDRINUSE:
                continue
            # Other errors: keep this port, as the original code did.
            break
        finally:
            # Fix: the old code could close the socket twice on the
            # non-EADDRINUSE error path.
            s.close()
        break
    return port


# Individual pjsua instance configuration class
class InstanceParam:
    """Configuration for one pjsua instance used by a test."""

    # Name to identify this pjsua instance (e.g. "caller", "callee", etc.)
    name = ""
    # pjsua command line arguments, concatenated in string
    arg = ""
    # Specify whether pjsua output should be echoed to stdout
    echo_enabled = DEFAULT_ECHO
    # Enable/disable test tracing
    trace_enabled = DEFAULT_TRACE
    # SIP URI to send request to this instance
    uri = ""
    # SIP port number, zero to automatically assign
    sip_port = 0
    # Does this have registration? If yes then the test function will
    # wait until the UA is registered before doing anything else
    have_reg = False
    # Does this have PUBLISH?
    have_publish = False
    # Enable stdout buffer?
    enable_buffer = False

    def __init__(self,
                 name,                  # Instance name
                 arg,                   # Cmd-line arguments
                 uri="",                # URI
                 uri_param="",          # Additional URI param
                 sip_port=0,            # SIP port
                 have_reg=False,        # Have registration?
                 have_publish=False,    # Have publish?
                 echo_enabled=DEFAULT_ECHO,
                 trace_enabled=DEFAULT_TRACE,
                 enable_buffer=False):
        # Instance name
        self.name = name
        # Give random sip_port if it's not specified, to avoid port
        # conflicts between concurrently running instances.
        if sip_port == 0:
            self.sip_port = _alloc_sip_port()
        else:
            self.sip_port = sip_port
        # Autogenerate URI if it's empty.
        self.uri = uri
        if self.uri == "":
            self.uri = "sip:pjsip@127.0.0.1:" + str(self.sip_port)
        # Add uri_param to the URI
        self.uri = self.uri + uri_param
        # Add bracket to the URI
        if self.uri[0] != "<":
            self.uri = "<" + self.uri + ">"
        # Add SIP local port to the argument
        self.arg = arg + " --local-port=" + str(self.sip_port)
        self.have_reg = have_reg
        self.have_publish = have_publish
        if have_publish and have_reg and not ("--publish" in self.arg):
            self.arg = self.arg + " --publish"
        self.echo_enabled = echo_enabled
        self.trace_enabled = trace_enabled
        self.enable_buffer = enable_buffer


############################################
# Test parameter class
class TestParam:
    """Parameters of a single test: title, instances, and hook functions."""

    title = ""
    # params is list containing InstanceParams objects
    inst_params = []
    # flag if this test should be skipped
    skip = None
    # list of Expect instances, to be filled at run-time by
    # the test program
    process = []
    # the function for test body
    test_func = None
    post_func = None

    def __init__(self,
                 title,         # Test title
                 inst_params,   # InstanceParam's as list
                 func=None,
                 skip=False,
                 post_func=None,
                 need_stdout_buffer=False):
        self.title = title
        self.inst_params = inst_params
        self.skip = skip
        self.test_func = func
        self.post_func = post_func
        # Fix: give each test its own process list.  Previously only the
        # class-level list existed, so every TestParam instance shared
        # (and accumulated) the same Expect processes.
        self.process = []


###################################
# TestError exception
class TestError(Exception):
    """Raised by test bodies to signal a test failure.

    Fix: inherits from Exception so instances can actually be raised under
    Python 3 (raising a plain class instance is a TypeError there).  The
    ``desc`` attribute is kept for existing handlers.
    """
    desc = ""

    def __init__(self, desc):
        Exception.__init__(self, desc)
        self.desc = desc
gpl-2.0
tinysun212/swift-windows
utils/swift_build_support/swift_build_support/host.py
41
2856
# swift_build_support/host.py ----------- Migrating build-script -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# This file contains routines for determining information about the host for
# use in utils/build-script.
#
# ----------------------------------------------------------------------------

from __future__ import absolute_import

import platform

from . import shell


# Utilities
def _return_none_fun():
    # Fallback accessor used when the host platform has no table entry.
    return None


def _return_none_fun_pair():
    # Pair-of-fallbacks counterpart for tables whose values are 2-tuples.
    return (_return_none_fun, _return_none_fun)


def _compute_system_key():
    # Key for the per-platform lookup tables, e.g. ('Darwin', 'x86_64').
    return (platform.system(), platform.machine())


# System Memory
def _darwin_system_memory():
    """Return the host's physical memory in bytes (Darwin only)."""
    # Output looks like "hw.memsize: \d+\n"
    return int(shell.capture(["sysctl", "hw.memsize"], dry_run=False,
                             echo=False,
                             optional=False).strip().split(" ")[1])


_PER_PLATFORM_SYSTEM_MEMORY = {
    ('Darwin', 'x86_64'): _darwin_system_memory
}


def system_memory():
    """Return physical memory in bytes, or None on unsupported platforms."""
    return _PER_PLATFORM_SYSTEM_MEMORY.get(_compute_system_key(),
                                           _return_none_fun)()


# Max Num CPU Threads for use with LTO
def _darwin_max_num_llvm_parallel_lto_link_jobs():
    """Heuristic limit of parallel LLVM LTO link jobs on Darwin."""
    # *WARNING! HEURISTIC!*
    #
    # Use the formula (GB Memory - 3)/6.0GB to get the number of
    # parallel link threads we can support. This gives the OS 3 GB of
    # room to work with.
    #
    # This is a bit conservative, but I have found that this number
    # prevents me from swapping on my test machine.
    return int((_darwin_system_memory() / 1000000000.0 - 3.0) / 6.0)


def _darwin_max_num_swift_parallel_lto_link_jobs():
    """Heuristic limit of parallel Swift LTO link jobs on Darwin."""
    # *WARNING! HEURISTIC!*
    #
    # Use the formula (GB Memory - 3)/8.0GB to get the number of
    # parallel link threads we can support. This gives the OS 3 GB of
    # room to work with.
    #
    # This is a bit conservative, but I have found that this number
    # prevents me from swapping on my test machine.
    return int((_darwin_system_memory() / 1000000000.0 - 3.0) / 8.0)


_PER_PLATFORM_MAX_PARALLEL_LTO_JOBS = {
    ('Darwin', 'x86_64'): (_darwin_max_num_llvm_parallel_lto_link_jobs,
                           _darwin_max_num_swift_parallel_lto_link_jobs)
}


def max_lto_link_job_counts():
    """Return {'llvm': n, 'swift': n} parallel LTO link job limits.

    Both values are None on platforms without a table entry.
    """
    key = _compute_system_key()
    info = _PER_PLATFORM_MAX_PARALLEL_LTO_JOBS.get(key,
                                                   _return_none_fun_pair())
    return {'llvm': info[0](), 'swift': info[1]()}
apache-2.0
iradul/qtwebkit
Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py
122
3391
# Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import os
import re

import unittest2 as unittest

from webkitpy.common.system import filesystem_mock
from webkitpy.common.system import filesystem_unittest


class MockFileSystemTest(unittest.TestCase, filesystem_unittest.GenericFileSystemTests):
    """Runs the shared GenericFileSystemTests against MockFileSystem and
    checks that its fast path helpers agree with the slow-but-correct ones."""

    def setUp(self):
        self.fs = filesystem_mock.MockFileSystem()
        self.setup_generic_test_dir()

    def tearDown(self):
        self.teardown_generic_test_dir()
        self.fs = None

    def quick_check(self, test_fn, good_fn, *tests):
        """Assert that test_fn and good_fn agree on every input.

        Each entry of *tests is either an argument tuple (detected via
        __iter__) or a single argument value.
        """
        for test in tests:
            if hasattr(test, '__iter__'):
                expected = good_fn(*test)
                actual = test_fn(*test)
            else:
                expected = good_fn(test)
                actual = test_fn(test)
            self.assertEqual(expected, actual,
                             'given %s, expected %s, got %s' % (repr(test), repr(expected), repr(actual)))

    def test_join(self):
        # Compare the optimized join against the reference implementation.
        self.quick_check(self.fs.join,
                         self.fs._slow_but_correct_join,
                         ('',),
                         ('', 'bar'),
                         ('foo',),
                         ('foo/',),
                         ('foo', ''),
                         ('foo/', ''),
                         ('foo', 'bar'),
                         ('foo', '/bar'),
                         )

    def test_normpath(self):
        # Compare the optimized normpath against the reference implementation.
        self.quick_check(self.fs.normpath,
                         self.fs._slow_but_correct_normpath,
                         '', '/', '.', '/.',
                         'foo', 'foo/', 'foo/.', 'foo/bar',
                         '/foo', 'foo/../bar', 'foo/../bar/baz', '../foo')
gpl-2.0
MycChiu/tensorflow
tensorflow/tools/common/traverse_test.py
116
2653
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python module traversal."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import sys

from tensorflow.python.platform import googletest
from tensorflow.tools.common import traverse


class TestVisitor(object):
  """Visitor that records every (path, parent, children) visit it receives."""

  def __init__(self):
    # One entry per visited object, appended in traversal order.
    self.call_log = []

  def __call__(self, path, parent, children):
    # Do not traverse googletest, it's very deep.
    # (Mutating `children` in place prunes the traversal.)
    for item in list(children):
      if item[1] is googletest:
        children.remove(item)

    self.call_log += [(path, parent, children)]


class TraverseTest(googletest.TestCase):

  def test_cycle(self):
    """Traversal over a self-referential class must terminate."""

    class Cyclist(object):
      pass
    Cyclist.cycle = Cyclist

    visitor = TestVisitor()
    traverse.traverse(Cyclist, visitor)
    # We simply want to make sure we terminate.

  def test_module(self):
    """Traversing this module visits its classes and imported modules."""
    visitor = TestVisitor()
    traverse.traverse(sys.modules[__name__], visitor)
    called = [parent for _, parent, _ in visitor.call_log]
    self.assertIn(TestVisitor, called)
    self.assertIn(TraverseTest, called)
    self.assertIn(traverse, called)

  def test_class(self):
    """Traversing a class visits the class first, with its members listed."""
    visitor = TestVisitor()
    traverse.traverse(TestVisitor, visitor)
    self.assertEqual(TestVisitor,
                     visitor.call_log[0][1])
    # There are a bunch of other members, but make sure that the ones we know
    # about are there.
    self.assertIn('__init__', [name for name, _ in visitor.call_log[0][2]])
    self.assertIn('__call__', [name for name, _ in visitor.call_log[0][2]])

    # There are more classes descended into, at least __class__ and
    # __class__.__base__, neither of which are interesting to us, and which may
    # change as part of Python version etc., so we don't test for them.

  def test_non_class(self):
    """Traversing a plain value visits nothing."""
    integer = 5
    visitor = TestVisitor()
    traverse.traverse(integer, visitor)
    self.assertEqual([], visitor.call_log)


if __name__ == '__main__':
  googletest.main()
apache-2.0
jacyn/blog-site
py/lib/python2.7/site-packages/pip-1.1-py2.7.egg/pip/commands/search.py
60
4523
import sys
import textwrap

import pkg_resources
import pip.download
from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from distutils.version import StrictVersion, LooseVersion


class SearchCommand(Command):
    """``pip search``: query PyPI for packages matching a term."""
    name = 'search'
    usage = '%prog QUERY'
    summary = 'Search PyPI'

    def __init__(self):
        super(SearchCommand, self).__init__()
        self.parser.add_option(
            '--index',
            dest='index',
            metavar='URL',
            default='http://pypi.python.org/pypi',
            help='Base URL of Python Package Index (default %default)')

    def run(self, options, args):
        """Search the index and print the hits.

        Returns SUCCESS when at least one package matched, otherwise
        NO_MATCHES_FOUND.  Raises CommandError when no query was given.
        """
        if not args:
            raise CommandError('Missing required argument (search query).')
        query = args
        index_url = options.index

        pypi_hits = self.search(query, index_url)
        hits = transform_hits(pypi_hits)

        # Only wrap output to the terminal width when stdout is a tty;
        # piped output is left unwrapped.
        terminal_width = None
        if sys.stdout.isatty():
            terminal_width = get_terminal_size()[0]

        print_results(hits, terminal_width=terminal_width)
        if pypi_hits:
            return SUCCESS
        return NO_MATCHES_FOUND

    def search(self, query, index_url):
        """Query the index's XML-RPC API, matching name OR summary."""
        pypi = xmlrpclib.ServerProxy(index_url,
                                     pip.download.xmlrpclib_transport)
        hits = pypi.search({'name': query, 'summary': query}, 'or')
        return hits


def transform_hits(hits):
    """
    The list from pypi is really a list of versions. We want a list of
    packages with the list of versions stored inline. This converts the
    list from pypi into one we can use.
    """
    packages = {}
    for hit in hits:
        name = hit['name']
        summary = hit['summary']
        version = hit['version']
        score = hit['_pypi_ordering']

        # Direct membership test; no need to materialize .keys().
        if name not in packages:
            packages[name] = {'name': name, 'summary': summary,
                              'versions': [version], 'score': score}
        else:
            packages[name]['versions'].append(version)

            # if this is the highest version, replace summary and score
            if version == highest_version(packages[name]['versions']):
                packages[name]['summary'] = summary
                packages[name]['score'] = score

    # each record has a unique name now, so we will convert the dict into a
    # list sorted by score
    package_list = sorted(packages.values(),
                          key=lambda x: x['score'],
                          reverse=True)
    return package_list


def print_results(hits, name_column_width=25, terminal_width=None):
    """Print one line per hit; flag packages that are installed locally."""
    installed_packages = [p.project_name for p in pkg_resources.working_set]
    for hit in hits:
        name = hit['name']
        summary = hit['summary'] or ''
        if terminal_width is not None:
            # wrap and indent summary to fit terminal
            summary = textwrap.wrap(summary,
                                    terminal_width - name_column_width - 5)
            summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
        line = '%s - %s' % (name.ljust(name_column_width), summary)
        try:
            logger.notify(line)
            if name in installed_packages:
                dist = pkg_resources.get_distribution(name)
                logger.indent += 2
                try:
                    latest = highest_version(hit['versions'])
                    if dist.version == latest:
                        logger.notify('INSTALLED: %s (latest)' % dist.version)
                    else:
                        logger.notify('INSTALLED: %s' % dist.version)
                        logger.notify('LATEST: %s' % latest)
                finally:
                    logger.indent -= 2
        except UnicodeEncodeError:
            # Some terminals cannot encode every package name/summary;
            # skip the offending line rather than abort the listing.
            pass


def compare_versions(version1, version2):
    """cmp()-style comparison of two version strings (-1, 0 or 1)."""
    try:
        return cmp(StrictVersion(version1), StrictVersion(version2))
    # in case of abnormal version number, fall back to LooseVersion
    except ValueError:
        pass
    try:
        return cmp(LooseVersion(version1), LooseVersion(version2))
    except TypeError:
        # certain LooseVersion comparisons raise due to unorderable types,
        # fallback to string comparison
        return cmp([str(v) for v in LooseVersion(version1).version],
                   [str(v) for v in LooseVersion(version2).version])


def highest_version(versions):
    """Return the highest of *versions* according to compare_versions()."""
    return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2),
                  versions)


# Instantiation registers the command with pip's command table.
SearchCommand()
apache-2.0
OCA/sale-workflow
sale_order_line_sequence/model/sale_order.py
1
2381
# Copyright 2017 Eficent Business and IT Consulting Services S.L. # Copyright 2017 Serpent Consulting Services Pvt. Ltd. # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html). from odoo import api, fields, models class SaleOrder(models.Model): _inherit = 'sale.order' @api.multi @api.depends('order_line') def _compute_max_line_sequence(self): """Allow to know the highest sequence entered in sale order lines. Then we add 1 to this value for the next sequence. This value is given to the context of the o2m field in the view. So when we create new sale order lines, the sequence is automatically added as : max_sequence + 1 """ for sale in self: sale.max_line_sequence = ( max(sale.mapped('order_line.sequence') or [0]) + 1) max_line_sequence = fields.Integer( string='Max sequence in lines', compute='_compute_max_line_sequence', store=True ) @api.multi def _reset_sequence(self): for rec in self: current_sequence = 1 for line in rec.order_line: line.sequence = current_sequence current_sequence += 1 @api.multi def write(self, line_values): res = super(SaleOrder, self).write(line_values) self._reset_sequence() return res @api.multi def copy(self, default=None): return super(SaleOrder, self.with_context(keep_line_sequence=True)).copy(default) class SaleOrderLine(models.Model): _inherit = 'sale.order.line' # re-defines the field to change the default sequence = fields.Integer( help="Gives the sequence of this line when displaying the sale order.", default=9999, string="Sequence" ) # displays sequence on the order line sequence2 = fields.Integer( help="Shows the sequence of this line in the sale order.", related='sequence', string="Line Number", readonly=True, store=True ) @api.model def create(self, values): line = super(SaleOrderLine, self).create(values) # We do not reset the sequence if we are copying a complete sale order if self.env.context.get('keep_line_sequence'): line.order_id._reset_sequence() return line
agpl-3.0
derekjchow/models
research/neural_gpu/neural_gpu.py
5
32528
# Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Neural GPU Model.""" import time import numpy as np from six.moves import xrange import tensorflow as tf from tensorflow.python.framework import function import data_utils as data do_jit = False # Gives more speed but experimental for now. jit_scope = tf.contrib.compiler.jit.experimental_jit_scope def conv_linear(args, kw, kh, nin, nout, rate, do_bias, bias_start, prefix): """Convolutional linear map.""" if not isinstance(args, (list, tuple)): args = [args] with tf.variable_scope(prefix): with tf.device("/cpu:0"): k = tf.get_variable("CvK", [kw, kh, nin, nout]) if len(args) == 1: arg = args[0] else: arg = tf.concat(axis=3, values=args) res = tf.nn.convolution(arg, k, dilation_rate=(rate, 1), padding="SAME") if not do_bias: return res with tf.device("/cpu:0"): bias_term = tf.get_variable( "CvB", [nout], initializer=tf.constant_initializer(bias_start)) bias_term = tf.reshape(bias_term, [1, 1, 1, nout]) return res + bias_term def sigmoid_cutoff(x, cutoff): """Sigmoid with cutoff, e.g., 1.2sigmoid(x) - 0.1.""" y = tf.sigmoid(x) if cutoff < 1.01: return y d = (cutoff - 1.0) / 2.0 return tf.minimum(1.0, tf.maximum(0.0, cutoff * y - d), name="cutoff_min") @function.Defun(tf.float32, noinline=True) def sigmoid_cutoff_12(x): """Sigmoid with cutoff 1.2, specialized for speed and memory use.""" y = 
tf.sigmoid(x) return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1), name="cutoff_min_12") @function.Defun(tf.float32, noinline=True) def sigmoid_hard(x): """Hard sigmoid.""" return tf.minimum(1.0, tf.maximum(0.0, 0.25 * x + 0.5)) def place_at14(decided, selected, it): """Place selected at it-th coordinate of decided, dim=1 of 4.""" slice1 = decided[:, :it, :, :] slice2 = decided[:, it + 1:, :, :] return tf.concat(axis=1, values=[slice1, selected, slice2]) def place_at13(decided, selected, it): """Place selected at it-th coordinate of decided, dim=1 of 3.""" slice1 = decided[:, :it, :] slice2 = decided[:, it + 1:, :] return tf.concat(axis=1, values=[slice1, selected, slice2]) def tanh_cutoff(x, cutoff): """Tanh with cutoff, e.g., 1.1tanh(x) cut to [-1. 1].""" y = tf.tanh(x) if cutoff < 1.01: return y d = (cutoff - 1.0) / 2.0 return tf.minimum(1.0, tf.maximum(-1.0, (1.0 + d) * y)) @function.Defun(tf.float32, noinline=True) def tanh_hard(x): """Hard tanh.""" return tf.minimum(1.0, tf.maximum(0.0, x)) def layer_norm(x, nmaps, prefix, epsilon=1e-5): """Layer normalize the 4D tensor x, averaging over the last dimension.""" with tf.variable_scope(prefix): scale = tf.get_variable("layer_norm_scale", [nmaps], initializer=tf.ones_initializer()) bias = tf.get_variable("layer_norm_bias", [nmaps], initializer=tf.zeros_initializer()) mean, variance = tf.nn.moments(x, [3], keep_dims=True) norm_x = (x - mean) / tf.sqrt(variance + epsilon) return norm_x * scale + bias def conv_gru(inpts, mem, kw, kh, nmaps, rate, cutoff, prefix, do_layer_norm, args_len=None): """Convolutional GRU.""" def conv_lin(args, suffix, bias_start): total_args_len = args_len or len(args) * nmaps res = conv_linear(args, kw, kh, total_args_len, nmaps, rate, True, bias_start, prefix + "/" + suffix) if do_layer_norm: return layer_norm(res, nmaps, prefix + "/" + suffix) else: return res if cutoff == 1.2: reset = sigmoid_cutoff_12(conv_lin(inpts + [mem], "r", 1.0)) gate = sigmoid_cutoff_12(conv_lin(inpts + [mem], 
"g", 1.0)) elif cutoff > 10: reset = sigmoid_hard(conv_lin(inpts + [mem], "r", 1.0)) gate = sigmoid_hard(conv_lin(inpts + [mem], "g", 1.0)) else: reset = sigmoid_cutoff(conv_lin(inpts + [mem], "r", 1.0), cutoff) gate = sigmoid_cutoff(conv_lin(inpts + [mem], "g", 1.0), cutoff) if cutoff > 10: candidate = tanh_hard(conv_lin(inpts + [reset * mem], "c", 0.0)) else: # candidate = tanh_cutoff(conv_lin(inpts + [reset * mem], "c", 0.0), cutoff) candidate = tf.tanh(conv_lin(inpts + [reset * mem], "c", 0.0)) return gate * mem + (1 - gate) * candidate CHOOSE_K = 256 def memory_call(q, l, nmaps, mem_size, vocab_size, num_gpus, update_mem): raise ValueError("Fill for experiments with additional memory structures.") def memory_run(step, nmaps, mem_size, batch_size, vocab_size, global_step, do_training, update_mem, decay_factor, num_gpus, target_emb_weights, output_w, gpu_targets_tn, it): """Run memory.""" q = step[:, 0, it, :] mlabels = gpu_targets_tn[:, it, 0] res, mask, mem_loss = memory_call( q, mlabels, nmaps, mem_size, vocab_size, num_gpus, update_mem) res = tf.gather(target_emb_weights, res) * tf.expand_dims(mask[:, 0], 1) # Mix gold and original in the first steps, 20% later. gold = tf.nn.dropout(tf.gather(target_emb_weights, mlabels), 0.7) use_gold = 1.0 - tf.cast(global_step, tf.float32) / (1000. 
* decay_factor) use_gold = tf.maximum(use_gold, 0.2) * do_training mem = tf.cond(tf.less(tf.random_uniform([]), use_gold), lambda: use_gold * gold + (1.0 - use_gold) * res, lambda: res) mem = tf.reshape(mem, [-1, 1, 1, nmaps]) return mem, mem_loss, update_mem @tf.RegisterGradient("CustomIdG") def _custom_id_grad(_, grads): return grads def quantize(t, quant_scale, max_value=1.0): """Quantize a tensor t with each element in [-max_value, max_value].""" t = tf.minimum(max_value, tf.maximum(t, -max_value)) big = quant_scale * (t + max_value) + 0.5 with tf.get_default_graph().gradient_override_map({"Floor": "CustomIdG"}): res = (tf.floor(big) / quant_scale) - max_value return res def quantize_weights_op(quant_scale, max_value): ops = [v.assign(quantize(v, quant_scale, float(max_value))) for v in tf.trainable_variables()] return tf.group(*ops) def autoenc_quantize(x, nbits, nmaps, do_training, layers=1): """Autoencoder into nbits vectors of bits, using noise and sigmoids.""" enc_x = tf.reshape(x, [-1, nmaps]) for i in xrange(layers - 1): enc_x = tf.layers.dense(enc_x, nmaps, name="autoenc_%d" % i) enc_x = tf.layers.dense(enc_x, nbits, name="autoenc_%d" % (layers - 1)) noise = tf.truncated_normal(tf.shape(enc_x), stddev=2.0) dec_x = sigmoid_cutoff_12(enc_x + noise * do_training) dec_x = tf.reshape(dec_x, [-1, nbits]) for i in xrange(layers): dec_x = tf.layers.dense(dec_x, nmaps, name="autodec_%d" % i) return tf.reshape(dec_x, tf.shape(x)) def make_dense(targets, noclass, low_param): """Move a batch of targets to a dense 1-hot representation.""" low = low_param / float(noclass - 1) high = 1.0 - low * (noclass - 1) targets = tf.cast(targets, tf.int64) return tf.one_hot(targets, depth=noclass, on_value=high, off_value=low) def reorder_beam(beam_size, batch_size, beam_val, output, is_first, tensors_to_reorder): """Reorder to minimize beam costs.""" # beam_val is [batch_size x beam_size]; let b = batch_size * beam_size # decided is len x b x a x b # output is b x out_size; 
step is b x len x a x b; outputs = tf.split(axis=0, num_or_size_splits=beam_size, value=tf.nn.log_softmax(output)) all_beam_vals, all_beam_idx = [], [] beam_range = 1 if is_first else beam_size for i in xrange(beam_range): top_out, top_out_idx = tf.nn.top_k(outputs[i], k=beam_size) cur_beam_val = beam_val[:, i] top_out = tf.Print(top_out, [top_out, top_out_idx, beam_val, i, cur_beam_val], "GREPO", summarize=8) all_beam_vals.append(top_out + tf.expand_dims(cur_beam_val, 1)) all_beam_idx.append(top_out_idx) all_beam_idx = tf.reshape(tf.transpose(tf.concat(axis=1, values=all_beam_idx), [1, 0]), [-1]) top_beam, top_beam_idx = tf.nn.top_k(tf.concat(axis=1, values=all_beam_vals), k=beam_size) top_beam_idx = tf.Print(top_beam_idx, [top_beam, top_beam_idx], "GREP", summarize=8) reordered = [[] for _ in xrange(len(tensors_to_reorder) + 1)] top_out_idx = [] for i in xrange(beam_size): which_idx = top_beam_idx[:, i] * batch_size + tf.range(batch_size) top_out_idx.append(tf.gather(all_beam_idx, which_idx)) which_beam = top_beam_idx[:, i] / beam_size # [batch] which_beam = which_beam * batch_size + tf.range(batch_size) reordered[0].append(tf.gather(output, which_beam)) for i, t in enumerate(tensors_to_reorder): reordered[i + 1].append(tf.gather(t, which_beam)) new_tensors = [tf.concat(axis=0, values=t) for t in reordered] top_out_idx = tf.concat(axis=0, values=top_out_idx) return (top_beam, new_tensors[0], top_out_idx, new_tensors[1:]) class NeuralGPU(object): """Neural GPU Model.""" def __init__(self, nmaps, vec_size, niclass, noclass, dropout, max_grad_norm, cutoff, nconvs, kw, kh, height, mem_size, learning_rate, min_length, num_gpus, num_replicas, grad_noise_scale, sampling_rate, act_noise=0.0, do_rnn=False, atrous=False, beam_size=1, backward=True, do_layer_norm=False, autoenc_decay=1.0): # Feeds for parameters and ops to update them. 
self.nmaps = nmaps if backward: self.global_step = tf.Variable(0, trainable=False, name="global_step") self.cur_length = tf.Variable(min_length, trainable=False) self.cur_length_incr_op = self.cur_length.assign_add(1) self.lr = tf.Variable(learning_rate, trainable=False) self.lr_decay_op = self.lr.assign(self.lr * 0.995) self.do_training = tf.placeholder(tf.float32, name="do_training") self.update_mem = tf.placeholder(tf.int32, name="update_mem") self.noise_param = tf.placeholder(tf.float32, name="noise_param") # Feeds for inputs, targets, outputs, losses, etc. self.input = tf.placeholder(tf.int32, name="inp") self.target = tf.placeholder(tf.int32, name="tgt") self.prev_step = tf.placeholder(tf.float32, name="prev_step") gpu_input = tf.split(axis=0, num_or_size_splits=num_gpus, value=self.input) gpu_target = tf.split(axis=0, num_or_size_splits=num_gpus, value=self.target) gpu_prev_step = tf.split(axis=0, num_or_size_splits=num_gpus, value=self.prev_step) batch_size = tf.shape(gpu_input[0])[0] if backward: adam_lr = 0.005 * self.lr adam = tf.train.AdamOptimizer(adam_lr, epsilon=1e-3) def adam_update(grads): return adam.apply_gradients(zip(grads, tf.trainable_variables()), global_step=self.global_step, name="adam_update") # When switching from Adam to SGD we perform reverse-decay. if backward: global_step_float = tf.cast(self.global_step, tf.float32) sampling_decay_exponent = global_step_float / 100000.0 sampling_decay = tf.maximum(0.05, tf.pow(0.5, sampling_decay_exponent)) self.sampling = sampling_rate * 0.05 / sampling_decay else: self.sampling = tf.constant(0.0) # Cache variables on cpu if needed. 
if num_replicas > 1 or num_gpus > 1: with tf.device("/cpu:0"): caching_const = tf.constant(0) tf.get_variable_scope().set_caching_device(caching_const.op.device) # partitioner = tf.variable_axis_size_partitioner(1024*256*4) # tf.get_variable_scope().set_partitioner(partitioner) def gpu_avg(l): if l[0] is None: for elem in l: assert elem is None return 0.0 if len(l) < 2: return l[0] return sum(l) / float(num_gpus) self.length_tensor = tf.placeholder(tf.int32, name="length") with tf.device("/cpu:0"): emb_weights = tf.get_variable( "embedding", [niclass, vec_size], initializer=tf.random_uniform_initializer(-1.7, 1.7)) if beam_size > 0: target_emb_weights = tf.get_variable( "target_embedding", [noclass, nmaps], initializer=tf.random_uniform_initializer(-1.7, 1.7)) e0 = tf.scatter_update(emb_weights, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros([1, vec_size])) output_w = tf.get_variable("output_w", [nmaps, noclass], tf.float32) def conv_rate(layer): if atrous: return 2**layer return 1 # pylint: disable=cell-var-from-loop def enc_step(step): """Encoder step.""" if autoenc_decay < 1.0: quant_step = autoenc_quantize(step, 16, nmaps, self.do_training) if backward: exp_glob = tf.train.exponential_decay(1.0, self.global_step - 10000, 1000, autoenc_decay) dec_factor = 1.0 - exp_glob # * self.do_training dec_factor = tf.cond(tf.less(self.global_step, 10500), lambda: tf.constant(0.05), lambda: dec_factor) else: dec_factor = 1.0 cur = tf.cond(tf.less(tf.random_uniform([]), dec_factor), lambda: quant_step, lambda: step) else: cur = step if dropout > 0.0001: cur = tf.nn.dropout(cur, keep_prob) if act_noise > 0.00001: cur += tf.truncated_normal(tf.shape(cur)) * act_noise_scale # Do nconvs-many CGRU steps. 
if do_jit and tf.get_variable_scope().reuse: with jit_scope(): for layer in xrange(nconvs): cur = conv_gru([], cur, kw, kh, nmaps, conv_rate(layer), cutoff, "ecgru_%d" % layer, do_layer_norm) else: for layer in xrange(nconvs): cur = conv_gru([], cur, kw, kh, nmaps, conv_rate(layer), cutoff, "ecgru_%d" % layer, do_layer_norm) return cur zero_tgt = tf.zeros([batch_size, nmaps, 1]) zero_tgt.set_shape([None, nmaps, 1]) def dec_substep(step, decided): """Decoder sub-step.""" cur = step if dropout > 0.0001: cur = tf.nn.dropout(cur, keep_prob) if act_noise > 0.00001: cur += tf.truncated_normal(tf.shape(cur)) * act_noise_scale # Do nconvs-many CGRU steps. if do_jit and tf.get_variable_scope().reuse: with jit_scope(): for layer in xrange(nconvs): cur = conv_gru([decided], cur, kw, kh, nmaps, conv_rate(layer), cutoff, "dcgru_%d" % layer, do_layer_norm) else: for layer in xrange(nconvs): cur = conv_gru([decided], cur, kw, kh, nmaps, conv_rate(layer), cutoff, "dcgru_%d" % layer, do_layer_norm) return cur # pylint: enable=cell-var-from-loop def dec_step(step, it, it_int, decided, output_ta, tgts, mloss, nupd_in, out_idx, beam_cost): """Decoder step.""" nupd, mem_loss = 0, 0.0 if mem_size > 0: it_incr = tf.minimum(it+1, length - 1) mem, mem_loss, nupd = memory_run( step, nmaps, mem_size, batch_size, noclass, self.global_step, self.do_training, self.update_mem, 10, num_gpus, target_emb_weights, output_w, gpu_targets_tn, it_incr) step = dec_substep(step, decided) output_l = tf.expand_dims(tf.expand_dims(step[:, it, 0, :], 1), 1) # Calculate argmax output. output = tf.reshape(output_l, [-1, nmaps]) # pylint: disable=cell-var-from-loop output = tf.matmul(output, output_w) if beam_size > 1: beam_cost, output, out, reordered = reorder_beam( beam_size, batch_size, beam_cost, output, it_int == 0, [output_l, out_idx, step, decided]) [output_l, out_idx, step, decided] = reordered else: # Scheduled sampling. 
out = tf.multinomial(tf.stop_gradient(output), 1) out = tf.to_int32(tf.squeeze(out, [1])) out_write = output_ta.write(it, output_l[:batch_size, :, :, :]) output = tf.gather(target_emb_weights, out) output = tf.reshape(output, [-1, 1, nmaps]) output = tf.concat(axis=1, values=[output] * height) tgt = tgts[it, :, :, :] selected = tf.cond(tf.less(tf.random_uniform([]), self.sampling), lambda: output, lambda: tgt) # pylint: enable=cell-var-from-loop dec_write = place_at14(decided, tf.expand_dims(selected, 1), it) out_idx = place_at13( out_idx, tf.reshape(out, [beam_size * batch_size, 1, 1]), it) if mem_size > 0: mem = tf.concat(axis=2, values=[mem] * height) dec_write = place_at14(dec_write, mem, it_incr) return (step, dec_write, out_write, mloss + mem_loss, nupd_in + nupd, out_idx, beam_cost) # Main model construction. gpu_outputs = [] gpu_losses = [] gpu_grad_norms = [] grads_list = [] gpu_out_idx = [] self.after_enc_step = [] for gpu in xrange(num_gpus): # Multi-GPU towers, average gradients later. length = self.length_tensor length_float = tf.cast(length, tf.float32) if gpu > 0: tf.get_variable_scope().reuse_variables() gpu_outputs.append([]) gpu_losses.append([]) gpu_grad_norms.append([]) with tf.name_scope("gpu%d" % gpu), tf.device("/gpu:%d" % gpu): # Main graph creation loop. data.print_out("Creating model.") start_time = time.time() # Embed inputs and calculate mask. with tf.device("/cpu:0"): tgt_shape = tf.shape(tf.squeeze(gpu_target[gpu], [1])) weights = tf.where(tf.squeeze(gpu_target[gpu], [1]) > 0, tf.ones(tgt_shape), tf.zeros(tgt_shape)) # Embed inputs and targets. 
with tf.control_dependencies([e0]): start = tf.gather(emb_weights, gpu_input[gpu]) # b x h x l x nmaps gpu_targets_tn = gpu_target[gpu] # b x 1 x len if beam_size > 0: embedded_targets_tn = tf.gather(target_emb_weights, gpu_targets_tn) embedded_targets_tn = tf.transpose( embedded_targets_tn, [2, 0, 1, 3]) # len x b x 1 x nmaps embedded_targets_tn = tf.concat(axis=2, values=[embedded_targets_tn] * height) # First image comes from start by applying convolution and adding 0s. start = tf.transpose(start, [0, 2, 1, 3]) # Now b x len x h x vec_s first = conv_linear(start, 1, 1, vec_size, nmaps, 1, True, 0.0, "input") first = layer_norm(first, nmaps, "input") # Computation steps. keep_prob = dropout * 3.0 / tf.sqrt(length_float) keep_prob = 1.0 - self.do_training * keep_prob act_noise_scale = act_noise * self.do_training # Start with a convolutional gate merging previous step. step = conv_gru([gpu_prev_step[gpu]], first, kw, kh, nmaps, 1, cutoff, "first", do_layer_norm) # This is just for running a baseline RNN seq2seq model. if do_rnn: self.after_enc_step.append(step) # Not meaningful here, but needed. def lstm_cell(): return tf.contrib.rnn.BasicLSTMCell(height * nmaps) cell = tf.contrib.rnn.MultiRNNCell( [lstm_cell() for _ in range(nconvs)]) with tf.variable_scope("encoder"): encoder_outputs, encoder_state = tf.nn.dynamic_rnn( cell, tf.reshape(step, [batch_size, length, height * nmaps]), dtype=tf.float32, time_major=False) # Attention. 
attn = tf.layers.dense( encoder_outputs, height * nmaps, name="attn1") # pylint: disable=cell-var-from-loop @function.Defun(noinline=True) def attention_query(query, attn_v): vecs = tf.tanh(attn + tf.expand_dims(query, 1)) mask = tf.reduce_sum(vecs * tf.reshape(attn_v, [1, 1, -1]), 2) mask = tf.nn.softmax(mask) return tf.reduce_sum(encoder_outputs * tf.expand_dims(mask, 2), 1) with tf.variable_scope("decoder"): def decoder_loop_fn(state__prev_cell_out__unused, cell_inp__cur_tgt): """Decoder loop function.""" state, prev_cell_out, _ = state__prev_cell_out__unused cell_inp, cur_tgt = cell_inp__cur_tgt attn_q = tf.layers.dense(prev_cell_out, height * nmaps, name="attn_query") attn_res = attention_query(attn_q, tf.get_variable( "attn_v", [height * nmaps], initializer=tf.random_uniform_initializer(-0.1, 0.1))) concatenated = tf.reshape(tf.concat(axis=1, values=[cell_inp, attn_res]), [batch_size, 2 * height * nmaps]) cell_inp = tf.layers.dense( concatenated, height * nmaps, name="attn_merge") output, new_state = cell(cell_inp, state) mem_loss = 0.0 if mem_size > 0: res, mask, mem_loss = memory_call( output, cur_tgt, height * nmaps, mem_size, noclass, num_gpus, self.update_mem) res = tf.gather(target_emb_weights, res) res *= tf.expand_dims(mask[:, 0], 1) output = tf.layers.dense( tf.concat(axis=1, values=[output, res]), height * nmaps, name="rnnmem") return new_state, output, mem_loss # pylint: enable=cell-var-from-loop gpu_targets = tf.squeeze(gpu_target[gpu], [1]) # b x len gpu_tgt_trans = tf.transpose(gpu_targets, [1, 0]) dec_zero = tf.zeros([batch_size, 1], dtype=tf.int32) dec_inp = tf.concat(axis=1, values=[dec_zero, gpu_targets]) dec_inp = dec_inp[:, :length] embedded_dec_inp = tf.gather(target_emb_weights, dec_inp) embedded_dec_inp_proj = tf.layers.dense( embedded_dec_inp, height * nmaps, name="dec_proj") embedded_dec_inp_proj = tf.transpose(embedded_dec_inp_proj, [1, 0, 2]) init_vals = (encoder_state, tf.zeros([batch_size, height * nmaps]), 0.0) _, dec_outputs, 
mem_losses = tf.scan( decoder_loop_fn, (embedded_dec_inp_proj, gpu_tgt_trans), initializer=init_vals) mem_loss = tf.reduce_mean(mem_losses) outputs = tf.layers.dense(dec_outputs, nmaps, name="out_proj") # Final convolution to get logits, list outputs. outputs = tf.matmul(tf.reshape(outputs, [-1, nmaps]), output_w) outputs = tf.reshape(outputs, [length, batch_size, noclass]) gpu_out_idx.append(tf.argmax(outputs, 2)) else: # Here we go with the Neural GPU. # Encoder. enc_length = length step = enc_step(step) # First step hard-coded. # pylint: disable=cell-var-from-loop i = tf.constant(1) c = lambda i, _s: tf.less(i, enc_length) def enc_step_lambda(i, step): with tf.variable_scope(tf.get_variable_scope(), reuse=True): new_step = enc_step(step) return (i + 1, new_step) _, step = tf.while_loop( c, enc_step_lambda, [i, step], parallel_iterations=1, swap_memory=True) # pylint: enable=cell-var-from-loop self.after_enc_step.append(step) # Decoder. if beam_size > 0: output_ta = tf.TensorArray( dtype=tf.float32, size=length, dynamic_size=False, infer_shape=False, name="outputs") out_idx = tf.zeros([beam_size * batch_size, length, 1], dtype=tf.int32) decided_t = tf.zeros([beam_size * batch_size, length, height, vec_size]) # Prepare for beam search. tgts = tf.concat(axis=1, values=[embedded_targets_tn] * beam_size) beam_cost = tf.zeros([batch_size, beam_size]) step = tf.concat(axis=0, values=[step] * beam_size) # First step hard-coded. 
step, decided_t, output_ta, mem_loss, nupd, oi, bc = dec_step( step, 0, 0, decided_t, output_ta, tgts, 0.0, 0, out_idx, beam_cost) tf.get_variable_scope().reuse_variables() # pylint: disable=cell-var-from-loop def step_lambda(i, step, dec_t, out_ta, ml, nu, oi, bc): with tf.variable_scope(tf.get_variable_scope(), reuse=True): s, d, t, nml, nu, oi, bc = dec_step( step, i, 1, dec_t, out_ta, tgts, ml, nu, oi, bc) return (i + 1, s, d, t, nml, nu, oi, bc) i = tf.constant(1) c = lambda i, _s, _d, _o, _ml, _nu, _oi, _bc: tf.less(i, length) _, step, _, output_ta, mem_loss, nupd, out_idx, _ = tf.while_loop( c, step_lambda, [i, step, decided_t, output_ta, mem_loss, nupd, oi, bc], parallel_iterations=1, swap_memory=True) # pylint: enable=cell-var-from-loop gpu_out_idx.append(tf.squeeze(out_idx, [2])) outputs = output_ta.stack() outputs = tf.squeeze(outputs, [2, 3]) # Now l x b x nmaps else: # If beam_size is 0 or less, we don't have a decoder. mem_loss = 0.0 outputs = tf.transpose(step[:, :, 1, :], [1, 0, 2]) gpu_out_idx.append(tf.argmax(outputs, 2)) # Final convolution to get logits, list outputs. outputs = tf.matmul(tf.reshape(outputs, [-1, nmaps]), output_w) outputs = tf.reshape(outputs, [length, batch_size, noclass]) gpu_outputs[gpu] = tf.nn.softmax(outputs) # Calculate cross-entropy loss and normalize it. 
targets_soft = make_dense(tf.squeeze(gpu_target[gpu], [1]), noclass, 0.1) targets_soft = tf.reshape(targets_soft, [-1, noclass]) targets_hard = make_dense(tf.squeeze(gpu_target[gpu], [1]), noclass, 0.0) targets_hard = tf.reshape(targets_hard, [-1, noclass]) output = tf.transpose(outputs, [1, 0, 2]) xent_soft = tf.reshape(tf.nn.softmax_cross_entropy_with_logits( logits=tf.reshape(output, [-1, noclass]), labels=targets_soft), [batch_size, length]) xent_hard = tf.reshape(tf.nn.softmax_cross_entropy_with_logits( logits=tf.reshape(output, [-1, noclass]), labels=targets_hard), [batch_size, length]) low, high = 0.1 / float(noclass - 1), 0.9 const = high * tf.log(high) + float(noclass - 1) * low * tf.log(low) weight_sum = tf.reduce_sum(weights) + 1e-20 true_perp = tf.reduce_sum(xent_hard * weights) / weight_sum soft_loss = tf.reduce_sum(xent_soft * weights) / weight_sum perp_loss = soft_loss + const # Final loss: cross-entropy + shared parameter relaxation part + extra. mem_loss = 0.5 * tf.reduce_mean(mem_loss) / length_float total_loss = perp_loss + mem_loss gpu_losses[gpu].append(true_perp) # Gradients. if backward: data.print_out("Creating backward pass for the model.") grads = tf.gradients( total_loss, tf.trainable_variables(), colocate_gradients_with_ops=True) for g_i, g in enumerate(grads): if isinstance(g, tf.IndexedSlices): grads[g_i] = tf.convert_to_tensor(g) grads, norm = tf.clip_by_global_norm(grads, max_grad_norm) gpu_grad_norms[gpu].append(norm) for g in grads: if grad_noise_scale > 0.001: g += tf.truncated_normal(tf.shape(g)) * self.noise_param grads_list.append(grads) else: gpu_grad_norms[gpu].append(0.0) data.print_out("Created model for gpu %d in %.2f s." % (gpu, time.time() - start_time)) self.updates = [] self.after_enc_step = tf.concat(axis=0, values=self.after_enc_step) # Concat GPUs. 
if backward: tf.get_variable_scope()._reuse = False tf.get_variable_scope().set_caching_device(None) grads = [gpu_avg([grads_list[g][i] for g in xrange(num_gpus)]) for i in xrange(len(grads_list[0]))] update = adam_update(grads) self.updates.append(update) else: self.updates.append(tf.no_op()) self.losses = [gpu_avg([gpu_losses[g][i] for g in xrange(num_gpus)]) for i in xrange(len(gpu_losses[0]))] self.out_idx = tf.concat(axis=0, values=gpu_out_idx) self.grad_norms = [gpu_avg([gpu_grad_norms[g][i] for g in xrange(num_gpus)]) for i in xrange(len(gpu_grad_norms[0]))] self.outputs = [tf.concat(axis=1, values=[gpu_outputs[g] for g in xrange(num_gpus)])] self.quantize_op = quantize_weights_op(512, 8) if backward: self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=10) def step(self, sess, inp, target, do_backward_in, noise_param=None, beam_size=2, eos_id=2, eos_cost=0.0, update_mem=None, state=None): """Run a step of the network.""" batch_size, height, length = inp.shape[0], inp.shape[1], inp.shape[2] do_backward = do_backward_in train_mode = True if do_backward_in is None: do_backward = False train_mode = False if update_mem is None: update_mem = do_backward feed_in = {} # print " feeding sequences of length %d" % length if state is None: state = np.zeros([batch_size, length, height, self.nmaps]) feed_in[self.prev_step.name] = state feed_in[self.length_tensor.name] = length feed_in[self.noise_param.name] = noise_param if noise_param else 0.0 feed_in[self.do_training.name] = 1.0 if do_backward else 0.0 feed_in[self.update_mem.name] = 1 if update_mem else 0 if do_backward_in is False: feed_in[self.sampling.name] = 0.0 index = 0 # We're dynamic now. 
feed_out = [] if do_backward: feed_out.append(self.updates[index]) feed_out.append(self.grad_norms[index]) if train_mode: feed_out.append(self.losses[index]) feed_in[self.input.name] = inp feed_in[self.target.name] = target feed_out.append(self.outputs[index]) if train_mode: # Make a full-sequence training step with one call to session.run. res = sess.run([self.after_enc_step] + feed_out, feed_in) after_enc_state, res = res[0], res[1:] else: # Make a full-sequence decoding step with one call to session.run. feed_in[self.sampling.name] = 1.1 # Sample every time. res = sess.run([self.after_enc_step, self.out_idx] + feed_out, feed_in) after_enc_state, out_idx = res[0], res[1] res = [res[2][l] for l in xrange(length)] outputs = [out_idx[:, i] for i in xrange(length)] cost = [0.0 for _ in xrange(beam_size * batch_size)] seen_eos = [0 for _ in xrange(beam_size * batch_size)] for idx, logit in enumerate(res): best = outputs[idx] for b in xrange(batch_size): if seen_eos[b] > 1: cost[b] -= eos_cost else: cost[b] += np.log(logit[b][best[b]]) if best[b] in [eos_id]: seen_eos[b] += 1 res = [[-c for c in cost]] + outputs # Collect and output results. offset = 0 norm = None if do_backward: offset = 2 norm = res[1] if train_mode: outputs = res[offset + 1] outputs = [outputs[l] for l in xrange(length)] return res[offset], outputs, norm, after_enc_state
apache-2.0
eagleamon/home-assistant
tests/components/sensor/test_darksky.py
6
3197
"""The tests for the Dark Sky platform.""" import re import unittest from unittest.mock import MagicMock, patch import forecastio from requests.exceptions import HTTPError import requests_mock from datetime import timedelta from homeassistant.components.sensor import darksky from homeassistant.bootstrap import setup_component from tests.common import load_fixture, get_test_home_assistant class TestDarkSkySetup(unittest.TestCase): """Test the Dark Sky platform.""" def add_entities(self, new_entities, update_before_add=False): """Mock add entities.""" if update_before_add: for entity in new_entities: entity.update() for entity in new_entities: self.entities.append(entity) def setUp(self): """Initialize values for this testcase class.""" self.hass = get_test_home_assistant() self.key = 'foo' self.config = { 'api_key': 'foo', 'forecast': [1, 2], 'monitored_conditions': ['summary', 'icon', 'temperature_max'], 'update_interval': timedelta(seconds=120), } self.lat = 37.8267 self.lon = -122.423 self.hass.config.latitude = self.lat self.hass.config.longitude = self.lon self.entities = [] def tearDown(self): # pylint: disable=invalid-name """Stop everything that was started.""" self.hass.stop() def test_setup_with_config(self): """Test the platform setup with configuration.""" self.assertTrue( setup_component(self.hass, 'sensor', {'darksky': self.config})) def test_setup_no_latitude(self): """Test that the component is not loaded without required config.""" self.hass.config.latitude = None self.assertFalse(darksky.setup_platform(self.hass, {}, MagicMock())) @patch('forecastio.api.get_forecast') def test_setup_bad_api_key(self, mock_get_forecast): """Test for handling a bad API key.""" # The Dark Sky API wrapper that we use raises an HTTP error # when you try to use a bad (or no) API key. 
url = 'https://api.darksky.net/forecast/{}/{},{}?units=auto'.format( self.key, str(self.lat), str(self.lon) ) msg = '400 Client Error: Bad Request for url: {}'.format(url) mock_get_forecast.side_effect = HTTPError(msg,) response = darksky.setup_platform(self.hass, self.config, MagicMock()) self.assertFalse(response) @requests_mock.Mocker() @patch('forecastio.api.get_forecast', wraps=forecastio.api.get_forecast) def test_setup(self, mock_req, mock_get_forecast): """Test for successfully setting up the forecast.io platform.""" uri = (r'https://api.(darksky.net|forecast.io)\/forecast\/(\w+)\/' r'(-?\d+\.?\d*),(-?\d+\.?\d*)') mock_req.get(re.compile(uri), text=load_fixture('darksky.json')) darksky.setup_platform(self.hass, self.config, self.add_entities) self.assertTrue(mock_get_forecast.called) self.assertEqual(mock_get_forecast.call_count, 1) self.assertEqual(len(self.entities), 7)
apache-2.0
Maqnai2234/Gram
node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py
2485
5536
# This file comes from # https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py # Do not edit! Edit the upstream one instead. """Python module for generating .ninja files. Note that this is emphatically not a required piece of Ninja; it's just a helpful utility for build-file-generation systems that already use Python. """ import textwrap import re def escape_path(word): return word.replace('$ ','$$ ').replace(' ','$ ').replace(':', '$:') class Writer(object): def __init__(self, output, width=78): self.output = output self.width = width def newline(self): self.output.write('\n') def comment(self, text): for line in textwrap.wrap(text, self.width - 2): self.output.write('# ' + line + '\n') def variable(self, key, value, indent=0): if value is None: return if isinstance(value, list): value = ' '.join(filter(None, value)) # Filter out empty strings. self._line('%s = %s' % (key, value), indent) def pool(self, name, depth): self._line('pool %s' % name) self.variable('depth', depth, indent=1) def rule(self, name, command, description=None, depfile=None, generator=False, pool=None, restat=False, rspfile=None, rspfile_content=None, deps=None): self._line('rule %s' % name) self.variable('command', command, indent=1) if description: self.variable('description', description, indent=1) if depfile: self.variable('depfile', depfile, indent=1) if generator: self.variable('generator', '1', indent=1) if pool: self.variable('pool', pool, indent=1) if restat: self.variable('restat', '1', indent=1) if rspfile: self.variable('rspfile', rspfile, indent=1) if rspfile_content: self.variable('rspfile_content', rspfile_content, indent=1) if deps: self.variable('deps', deps, indent=1) def build(self, outputs, rule, inputs=None, implicit=None, order_only=None, variables=None): outputs = self._as_list(outputs) all_inputs = self._as_list(inputs)[:] out_outputs = list(map(escape_path, outputs)) all_inputs = list(map(escape_path, all_inputs)) if implicit: implicit = map(escape_path, 
self._as_list(implicit)) all_inputs.append('|') all_inputs.extend(implicit) if order_only: order_only = map(escape_path, self._as_list(order_only)) all_inputs.append('||') all_inputs.extend(order_only) self._line('build %s: %s' % (' '.join(out_outputs), ' '.join([rule] + all_inputs))) if variables: if isinstance(variables, dict): iterator = iter(variables.items()) else: iterator = iter(variables) for key, val in iterator: self.variable(key, val, indent=1) return outputs def include(self, path): self._line('include %s' % path) def subninja(self, path): self._line('subninja %s' % path) def default(self, paths): self._line('default %s' % ' '.join(self._as_list(paths))) def _count_dollars_before_index(self, s, i): """Returns the number of '$' characters right in front of s[i].""" dollar_count = 0 dollar_index = i - 1 while dollar_index > 0 and s[dollar_index] == '$': dollar_count += 1 dollar_index -= 1 return dollar_count def _line(self, text, indent=0): """Write 'text' word-wrapped at self.width characters.""" leading_space = ' ' * indent while len(leading_space) + len(text) > self.width: # The text is too wide; wrap if possible. # Find the rightmost space that would obey our width constraint and # that's not an escaped space. available_space = self.width - len(leading_space) - len(' $') space = available_space while True: space = text.rfind(' ', 0, space) if space < 0 or \ self._count_dollars_before_index(text, space) % 2 == 0: break if space < 0: # No such space; just use the first unescaped space we can find. space = available_space - 1 while True: space = text.find(' ', space + 1) if space < 0 or \ self._count_dollars_before_index(text, space) % 2 == 0: break if space < 0: # Give up on breaking. break self.output.write(leading_space + text[0:space] + ' $\n') text = text[space+1:] # Subsequent lines are continuations, so indent them. 
leading_space = ' ' * (indent+2) self.output.write(leading_space + text + '\n') def _as_list(self, input): if input is None: return [] if isinstance(input, list): return input return [input] def escape(string): """Escape a string such that it can be embedded into a Ninja file without further interpretation.""" assert '\n' not in string, 'Ninja syntax does not allow newlines' # We only have one special metacharacter: '$'. return string.replace('$', '$$')
gpl-3.0
Theer108/invenio
invenio/modules/formatter/format_elements/bfe_field.py
13
6269
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """BibFormat element - Prints a custom field """ __revision__ = "$Id$" from six import iteritems from invenio.modules.formatter.utils import parse_tag def format_element(bfo, tag, limit, instances_separator=" ", subfields_separator=" ", extension="", output_pattern=""): """ Prints the given field of a record. If tag is in range [001, 010], this element assumes that it accesses a control field. Else it considers it accesses a data field. <p>For eg. consider the following metdata: <pre> 100__ $$aCalatroni, S$$uCERN 245__ $$aStatus of the EP Simulations and Facilities for the SPL 700__ $$aFerreira, L$$uCERN 700__ $$aMacatrao, M$$uCERN 700__ $$aSkala, A$$uCERN 700__ $$aSosin, M$$uCERN 700__ $$ade Waele, R$$uCERN 700__ $$aWithofs, Y$$uKHLim, Diepenbeek </pre> The following calls to bfe_field would print: <pre> &lt;BFE_FIELD tag="700" instances_separator="&lt;br/>" subfields_separator=" - "> Ferreira, L - CERN Macatrao, M - CERN Skala, A - CERN Sosin, M - CERN de Waele, R - CERN Withofs, Y - KHLim, Diepenbeek </pre> </p> <p>For more advanced formatting, the <code>output_pattern</code> parameter can be used to output the subfields of each instance in the specified way. For eg. 
consider the following metadata: <pre> 775__ $$b15. Aufl.$$c1995-1996$$nv.1$$pGrundlagen und Werkstoffe$$w317999 775__ $$b12. Aufl.$$c1963$$w278898 775__ $$b14. Aufl.$$c1983$$w107899 775__ $$b13. Aufl.$$c1974$$w99635 </pre> with the following <code>output_pattern</code>: <pre> &lt;a href="/record/%(w)s">%(b)s (%(c)s) %(n)s %(p)s&lt;/a> </pre> would print:<br/> <a href="/record/317999">15. Aufl. (1995-1996) v.1 Grundlagen und Werkstoffe</a><br/> <a href="/record/278898">12. Aufl. (1963) </a><br/> <a href="/record/107899">14. Aufl. (1983) </a><br/> <a href="/record/99635">13. Aufl. (1974) </a> <br/>(<code>instances_separator="&lt;br/>"</code> set for readability)<br/> The output pattern must follow <a href="http://docs.python.org/library/stdtypes.html#string-formatting-operations">Python string formatting</a> syntax. The format must use parenthesized notation to map to the subfield code. This currently restricts the support of <code>output_pattern</code> to non-repeatable subfields</p> @param tag: the tag code of the field that is to be printed @param instances_separator: a separator between instances of field @param subfields_separator: a separator between subfields of an instance @param limit: the maximum number of values to display. @param extension: a text printed at the end if 'limit' has been exceeded @param output_pattern: when specified, prints the subfields of each instance according to pattern specified as parameter (following Python string formatting convention) """ # Check if data or control field p_tag = parse_tag(tag) if p_tag[0].isdigit() and int(p_tag[0]) in range(0, 11): return bfo.control_field(tag) elif p_tag[0].isdigit(): # Get values without subcode. 
# We will filter unneeded subcode later if p_tag[1] == '': p_tag[1] = '_' if p_tag[2] == '': p_tag[2] = '_' values = bfo.fields(p_tag[0]+p_tag[1]+p_tag[2]) # Values will # always be a # list of # dicts else: return '' x = 0 instances_out = [] # Retain each instance output for instance in values: filtered_values = [value for (subcode, value) in iteritems(instance) if p_tag[3] == '' or p_tag[3] == '%' \ or p_tag[3] == subcode] if len(filtered_values) > 0: # We have found some corresponding subcode(s) if limit.isdigit() and x + len(filtered_values) >= int(limit): # We are going to exceed the limit filtered_values = filtered_values[:int(limit)-x] # Takes only needed one if len(filtered_values) > 0: # do not append empty list! if output_pattern: try: instances_out.append(output_pattern % DictNoKeyError(instance)) except: pass else: instances_out.append(subfields_separator.join(filtered_values)) x += len(filtered_values) # record that so we know limit has been exceeded break # No need to go further else: if output_pattern: try: instances_out.append(output_pattern % DictNoKeyError(instance)) except: pass else: instances_out.append(subfields_separator.join(filtered_values)) x += len(filtered_values) ext_out = '' if limit.isdigit() and x > int(limit): ext_out = extension return instances_separator.join(instances_out) + ext_out class DictNoKeyError(dict): def __getitem__(self, key): if dict.__contains__(self, key): val = dict.__getitem__(self, key) else: val = '' return val
gpl-2.0
sffjunkie/home-assistant
homeassistant/components/binary_sensor/demo.py
30
1257
""" Demo platform that has two fake binary sensors. For more details about this platform, please refer to the documentation https://home-assistant.io/components/demo/ """ from homeassistant.components.binary_sensor import BinarySensorDevice def setup_platform(hass, config, add_devices, discovery_info=None): """Setup the Demo binary sensor platform.""" add_devices([ DemoBinarySensor('Basement Floor Wet', False, 'moisture'), DemoBinarySensor('Movement Backyard', True, 'motion'), ]) class DemoBinarySensor(BinarySensorDevice): """A Demo binary sensor.""" def __init__(self, name, state, sensor_class): """Initialize the demo sensor.""" self._name = name self._state = state self._sensor_type = sensor_class @property def sensor_class(self): """Return the class of this sensor.""" return self._sensor_type @property def should_poll(self): """No polling needed for a demo binary sensor.""" return False @property def name(self): """Return the name of the binary sensor.""" return self._name @property def is_on(self): """Return true if the binary sensor is on.""" return self._state
mit
skyddv/neutron
neutron/agent/linux/external_process.py
16
10111
# Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import os.path import six import eventlet from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from neutron.agent.common import config as agent_cfg from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.common import utils as common_utils from neutron.i18n import _LE LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('external_pids', default='$state_path/external/pids', help=_('Location to store child pid files')), ] cfg.CONF.register_opts(OPTS) agent_cfg.register_process_monitor_opts(cfg.CONF) @six.add_metaclass(abc.ABCMeta) class MonitoredProcess(object): @abc.abstractproperty def active(self): """Boolean representing the running state of the process.""" @abc.abstractmethod def enable(self): """Enable the service, or respawn the process.""" class ProcessManager(MonitoredProcess): """An external process manager for Neutron spawned processes. Note: The manager expects uuid to be in cmdline. 
""" def __init__(self, conf, uuid, namespace=None, service=None, pids_path=None, default_cmd_callback=None, cmd_addl_env=None, pid_file=None, run_as_root=False): self.conf = conf self.uuid = uuid self.namespace = namespace self.default_cmd_callback = default_cmd_callback self.cmd_addl_env = cmd_addl_env self.pids_path = pids_path or self.conf.external_pids self.pid_file = pid_file self.run_as_root = run_as_root if service: self.service_pid_fname = 'pid.' + service self.service = service else: self.service_pid_fname = 'pid' self.service = 'default-service' common_utils.ensure_dir(os.path.dirname(self.get_pid_file_name())) def enable(self, cmd_callback=None, reload_cfg=False): if not self.active: if not cmd_callback: cmd_callback = self.default_cmd_callback cmd = cmd_callback(self.get_pid_file_name()) ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace) ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env, run_as_root=self.run_as_root) elif reload_cfg: self.reload_cfg() def reload_cfg(self): self.disable('HUP') def disable(self, sig='9', get_stop_command=None): pid = self.pid if self.active: if get_stop_command: cmd = get_stop_command(self.get_pid_file_name()) ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace) ip_wrapper.netns.execute(cmd, addl_env=self.cmd_addl_env) else: cmd = ['kill', '-%s' % (sig), pid] utils.execute(cmd, run_as_root=True) # In the case of shutting down, remove the pid file if sig == '9': fileutils.delete_if_exists(self.get_pid_file_name()) elif pid: LOG.debug('Process for %(uuid)s pid %(pid)d is stale, ignoring ' 'signal %(signal)s', {'uuid': self.uuid, 'pid': pid, 'signal': sig}) else: LOG.debug('No process started for %s', self.uuid) def get_pid_file_name(self): """Returns the file name for a given kind of config file.""" if self.pid_file: return self.pid_file else: return utils.get_conf_file_name(self.pids_path, self.uuid, self.service_pid_fname) @property def pid(self): """Last known pid for this external process spawned for this 
uuid.""" return utils.get_value_from_file(self.get_pid_file_name(), int) @property def active(self): pid = self.pid if pid is None: return False cmdline = '/proc/%s/cmdline' % pid try: with open(cmdline, "r") as f: return self.uuid in f.readline() except IOError: return False ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service']) class ProcessMonitor(object): def __init__(self, config, resource_type): """Handle multiple process managers and watch over all of them. :param config: oslo config object with the agent configuration. :type config: oslo_config.ConfigOpts :param resource_type: can be dhcp, router, load_balancer, etc. :type resource_type: str """ self._config = config self._resource_type = resource_type self._monitored_processes = {} if self._config.AGENT.check_child_processes_interval: self._spawn_checking_thread() def register(self, uuid, service_name, monitored_process): """Start monitoring a process. The given monitored_process will be tied to it's uuid+service_name replacing the old one if it existed already. The monitored_process should be enabled before registration, otherwise ProcessMonitor could try to enable the process itself, which could lead to double enable and if unlucky enough, two processes running, and also errors in the logs. :param uuid: An ID of the resource for which the process is running. :param service_name: A logical service name for this process monitor, so the same uuid provided via process manager can reference several different services. :param monitored_process: MonitoredProcess we want to monitor. """ service_id = ServiceId(uuid, service_name) self._monitored_processes[service_id] = monitored_process def unregister(self, uuid, service_name): """Stop monitoring a process. The uuid+service_name will be removed from the monitored processes. 
The service must be disabled **after** unregistering, otherwise if process monitor checks after you disable the process, and before you unregister it, the process will be respawned, and left orphaned into the system. :param uuid: An ID of the resource for which the process is running. :param service_name: A logical service name for this process monitor, so the same uuid provided via process manager can reference several different services. """ service_id = ServiceId(uuid, service_name) self._monitored_processes.pop(service_id, None) def stop(self): """Stop the process monitoring. This method will stop the monitoring thread, but no monitored process will be stopped. """ self._monitor_processes = False def _spawn_checking_thread(self): self._monitor_processes = True eventlet.spawn(self._periodic_checking_thread) @lockutils.synchronized("_check_child_processes") def _check_child_processes(self): # we build the list of keys before iterating in the loop to cover # the case where other threads add or remove items from the # dictionary which otherwise will cause a RuntimeError for service_id in list(self._monitored_processes): pm = self._monitored_processes.get(service_id) if pm and not pm.active: LOG.error(_LE("%(service)s for %(resource_type)s " "with uuid %(uuid)s not found. 
" "The process should not have died"), {'service': pm.service, 'resource_type': self._resource_type, 'uuid': service_id.uuid}) self._execute_action(service_id) eventlet.sleep(0) def _periodic_checking_thread(self): while self._monitor_processes: eventlet.sleep(self._config.AGENT.check_child_processes_interval) eventlet.spawn(self._check_child_processes) def _execute_action(self, service_id): action = self._config.AGENT.check_child_processes_action action_function = getattr(self, "_%s_action" % action) action_function(service_id) def _respawn_action(self, service_id): LOG.error(_LE("respawning %(service)s for uuid %(uuid)s"), {'service': service_id.service, 'uuid': service_id.uuid}) self._monitored_processes[service_id].enable() def _exit_action(self, service_id): LOG.error(_LE("Exiting agent as programmed in check_child_processes_" "actions")) self._exit_handler(service_id.uuid, service_id.service) def _exit_handler(self, uuid, service): """This is an exit handler for the ProcessMonitor. It will be called if the administrator configured the exit action in check_child_processes_actions, and one of our external processes die unexpectedly. """ LOG.error(_LE("Exiting agent because of a malfunction with the " "%(service)s process identified by uuid %(uuid)s"), {'service': service, 'uuid': uuid}) raise SystemExit(1)
apache-2.0
hellsgate1001/bookit
docs/env/Lib/site-packages/django/db/backends/postgresql_psycopg2/introspection.py
105
5005
from __future__ import unicode_literals from django.db.backends import BaseDatabaseIntrospection, FieldInfo from django.utils.encoding import force_text class DatabaseIntrospection(BaseDatabaseIntrospection): # Maps type codes to Django Field types. data_types_reverse = { 16: 'BooleanField', 17: 'BinaryField', 20: 'BigIntegerField', 21: 'SmallIntegerField', 23: 'IntegerField', 25: 'TextField', 700: 'FloatField', 701: 'FloatField', 869: 'GenericIPAddressField', 1042: 'CharField', # blank-padded 1043: 'CharField', 1082: 'DateField', 1083: 'TimeField', 1114: 'DateTimeField', 1184: 'DateTimeField', 1266: 'TimeField', 1700: 'DecimalField', } ignored_tables = [] def get_table_list(self, cursor): "Returns a list of table names in the current database." cursor.execute(""" SELECT c.relname FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('r', 'v', '') AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND pg_catalog.pg_table_is_visible(c.oid)""") return [row[0] for row in cursor.fetchall() if row[0] not in self.ignored_tables] def get_table_description(self, cursor, table_name): "Returns a description of the table, with the DB-API cursor.description interface." # As cursor.description does not return reliably the nullable property, # we have to query the information_schema (#7783) cursor.execute(""" SELECT column_name, is_nullable FROM information_schema.columns WHERE table_name = %s""", [table_name]) null_map = dict(cursor.fetchall()) cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)) return [FieldInfo(*((force_text(line[0]),) + line[1:6] + (null_map[force_text(line[0])]=='YES',))) for line in cursor.description] def get_relations(self, cursor, table_name): """ Returns a dictionary of {field_index: (field_index_other_table, other_table)} representing all relationships to the given table. Indexes are 0-based. 
""" cursor.execute(""" SELECT con.conkey, con.confkey, c2.relname FROM pg_constraint con, pg_class c1, pg_class c2 WHERE c1.oid = con.conrelid AND c2.oid = con.confrelid AND c1.relname = %s AND con.contype = 'f'""", [table_name]) relations = {} for row in cursor.fetchall(): # row[0] and row[1] are single-item lists, so grab the single item. relations[row[0][0] - 1] = (row[1][0] - 1, row[2]) return relations def get_key_columns(self, cursor, table_name): key_columns = [] cursor.execute(""" SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column FROM information_schema.constraint_column_usage ccu LEFT JOIN information_schema.key_column_usage kcu ON ccu.constraint_catalog = kcu.constraint_catalog AND ccu.constraint_schema = kcu.constraint_schema AND ccu.constraint_name = kcu.constraint_name LEFT JOIN information_schema.table_constraints tc ON ccu.constraint_catalog = tc.constraint_catalog AND ccu.constraint_schema = tc.constraint_schema AND ccu.constraint_name = tc.constraint_name WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""" , [table_name]) key_columns.extend(cursor.fetchall()) return key_columns def get_indexes(self, cursor, table_name): # This query retrieves each index on the given table, including the # first associated field name cursor.execute(""" SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index idx, pg_catalog.pg_attribute attr WHERE c.oid = idx.indrelid AND idx.indexrelid = c2.oid AND attr.attrelid = c.oid AND attr.attnum = idx.indkey[0] AND c.relname = %s""", [table_name]) indexes = {} for row in cursor.fetchall(): # row[1] (idx.indkey) is stored in the DB as an array. It comes out as # a string of space-separated integers. This designates the field # indexes (1-based) of the fields that have indexes on the table. # Here, we skip any indexes across multiple fields. 
if ' ' in row[1]: continue indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]} return indexes
mit
ycl2045/nova-master
nova/api/metadata/password.py
16
2438
# Copyright 2012 Nebula, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from webob import exc from nova import conductor from nova import context from nova.openstack.common.gettextutils import _ from nova import utils CHUNKS = 4 CHUNK_LENGTH = 255 MAX_SIZE = CHUNKS * CHUNK_LENGTH def extract_password(instance): result = '' sys_meta = utils.instance_sys_meta(instance) for key in sorted(sys_meta.keys()): if key.startswith('password_'): result += sys_meta[key] return result or None def convert_password(context, password): """Stores password as system_metadata items. Password is stored with the keys 'password_0' -> 'password_3'. """ password = password or '' meta = {} for i in xrange(CHUNKS): meta['password_%d' % i] = password[:CHUNK_LENGTH] password = password[CHUNK_LENGTH:] return meta def handle_password(req, meta_data): ctxt = context.get_admin_context() if req.method == 'GET': return meta_data.password elif req.method == 'POST': # NOTE(vish): The conflict will only happen once the metadata cache # updates, but it isn't a huge issue if it can be set for # a short window. 
if meta_data.password: raise exc.HTTPConflict() if (req.content_length > MAX_SIZE or len(req.body) > MAX_SIZE): msg = _("Request is too large.") raise exc.HTTPBadRequest(explanation=msg) conductor_api = conductor.API() instance = conductor_api.instance_get_by_uuid(ctxt, meta_data.uuid) sys_meta = utils.instance_sys_meta(instance) sys_meta.update(convert_password(ctxt, req.body)) conductor_api.instance_update(ctxt, meta_data.uuid, system_metadata=sys_meta) else: raise exc.HTTPBadRequest()
apache-2.0
tysonclugg/django
tests/annotations/models.py
90
2591
from django.db import models class Author(models.Model): name = models.CharField(max_length=100) age = models.IntegerField() friends = models.ManyToManyField('self', blank=True) def __str__(self): return self.name class Publisher(models.Model): name = models.CharField(max_length=255) num_awards = models.IntegerField() def __str__(self): return self.name class Book(models.Model): isbn = models.CharField(max_length=9) name = models.CharField(max_length=255) pages = models.IntegerField() rating = models.FloatField() price = models.DecimalField(decimal_places=2, max_digits=6) authors = models.ManyToManyField(Author) contact = models.ForeignKey(Author, models.CASCADE, related_name='book_contact_set') publisher = models.ForeignKey(Publisher, models.CASCADE) pubdate = models.DateField() def __str__(self): return self.name class Store(models.Model): name = models.CharField(max_length=255) books = models.ManyToManyField(Book) original_opening = models.DateTimeField() friday_night_closing = models.TimeField() def __str__(self): return self.name class DepartmentStore(Store): chain = models.CharField(max_length=255) def __str__(self): return '%s - %s ' % (self.chain, self.name) class Employee(models.Model): # The order of these fields matter, do not change. Certain backends # rely on field ordering to perform database conversions, and this # model helps to test that. 
first_name = models.CharField(max_length=20) manager = models.BooleanField(default=False) last_name = models.CharField(max_length=20) store = models.ForeignKey(Store, models.CASCADE) age = models.IntegerField() salary = models.DecimalField(max_digits=8, decimal_places=2) def __str__(self): return '%s %s' % (self.first_name, self.last_name) class Company(models.Model): name = models.CharField(max_length=200) motto = models.CharField(max_length=200, null=True, blank=True) ticker_name = models.CharField(max_length=10, null=True, blank=True) description = models.CharField(max_length=200, null=True, blank=True) def __str__(self): return 'Company(name=%s, motto=%s, ticker_name=%s, description=%s)' % ( self.name, self.motto, self.ticker_name, self.description, ) class Ticket(models.Model): active_at = models.DateTimeField() duration = models.DurationField() def __str__(self): return '{} - {}'.format(self.active_at, self.duration)
bsd-3-clause
stefanw/froide
froide/foirequest/tests/test_api.py
1
13744
from datetime import timedelta import json from urllib.parse import urlencode from django.test import TestCase from django.core import mail from django.conf import settings from django.contrib.auth import get_user_model from django.utils import timezone from django.urls import reverse from oauth2_provider.models import get_access_token_model, get_application_model from froide.publicbody.models import PublicBody from froide.foirequest.tests import factories from froide.foirequest.models import FoiRequest, FoiAttachment User = get_user_model() Application = get_application_model() AccessToken = get_access_token_model() class ApiTest(TestCase): def setUp(self): self.site = factories.make_world() def test_list(self): response = self.client.get('/api/v1/request/') self.assertEqual(response.status_code, 200) response = self.client.get('/api/v1/message/') self.assertEqual(response.status_code, 200) response = self.client.get('/api/v1/attachment/') self.assertEqual(response.status_code, 200) def test_detail(self): req = FoiRequest.objects.all()[0] response = self.client.get('/api/v1/request/%d/' % req.pk) self.assertEqual(response.status_code, 200) self.assertContains(response, req.title) self.assertNotContains(response, req.secret_address) req.user.private = True req.user.save() mes = factories.FoiMessageFactory.create( request=req, subject=req.user.get_full_name(), plaintext='Hallo %s,\n%s\n%s' % ( req.user.get_full_name(), req.secret_address, req.user.address ) ) response = self.client.get('/api/v1/message/%d/' % mes.pk) self.assertEqual(response.status_code, 200) self.assertNotContains(response, req.user.get_full_name()) self.assertNotContains(response, req.secret_address) self.assertNotContains(response, req.user.address) att = FoiAttachment.objects.all()[0] att.approved = True att.save() response = self.client.get('/api/v1/attachment/%d/' % att.pk) self.assertEqual(response.status_code, 200) def test_permissions(self): req = factories.FoiRequestFactory.create( 
visibility=FoiRequest.VISIBLE_TO_REQUESTER, site=self.site) response = self.client.get('/api/v1/request/%d/' % req.pk) self.assertEqual(response.status_code, 404) mes = factories.FoiMessageFactory.create(request=req) response = self.client.get('/api/v1/message/%d/' % mes.pk) self.assertEqual(response.status_code, 404) att = factories.FoiAttachmentFactory.create(belongs_to=mes) att.approved = True att.save() response = self.client.get('/api/v1/attachment/%d/' % att.pk) self.assertEqual(response.status_code, 404) def test_content_hidden(self): marker = 'TESTMARKER' mes = factories.FoiMessageFactory.create( content_hidden=True, plaintext=marker ) response = self.client.get('/api/v1/message/%d/' % mes.pk) self.assertEqual(response.status_code, 200) self.assertNotContains(response, marker) def test_username_hidden(self): user = factories.UserFactory.create( first_name='Reinhardt' ) user.private = True user.save() mes = factories.FoiMessageFactory.create( content_hidden=True, sender_user=user ) response = self.client.get('/api/v1/message/%d/' % mes.pk) self.assertEqual(response.status_code, 200) self.assertNotContains(response, user.username) self.assertNotContains(response, user.first_name) def test_search(self): response = self.client.get('/api/v1/request/search/?q=Number') self.assertEqual(response.status_code, 200) def test_search_similar(self): factories.delete_index() search_url = '/api/v1/request/search/' response = self.client.get(search_url) self.assertEqual(response.status_code, 200) self.assertContains(response, '"objects":[]') self.assertEqual(response['Content-Type'], 'application/json') req = FoiRequest.objects.all()[0] factories.rebuild_index() response = self.client.get('%s?%s' % ( search_url, urlencode({'q': req.title}) )) self.assertEqual(response.status_code, 200) self.assertContains(response, 'title') self.assertContains(response, 'description') class OAuthAPIMixin(): def setUp(self): factories.make_world() self.test_user = 
User.objects.get(username='dummy') self.dev_user = User.objects.create_user("dev@example.com", "dev_user", "123456") self.application = Application.objects.create( name="Test Application", redirect_uris="http://localhost http://example.com http://example.org", user=self.dev_user, client_type=Application.CLIENT_CONFIDENTIAL, authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE, ) self.access_token = AccessToken.objects.create( user=self.test_user, scope="read:user", expires=timezone.now() + timedelta(seconds=300), token="secret-access-token-key", application=self.application ) self.req = factories.FoiRequestFactory.create( visibility=FoiRequest.VISIBLE_TO_REQUESTER, user=self.test_user, title='permissions required' ) self.mes = factories.FoiMessageFactory.create( request=self.req ) self.att = factories.FoiAttachmentFactory.create( belongs_to=self.mes, approved=False ) self.att2 = factories.FoiAttachmentFactory.create( belongs_to=self.mes, approved=True ) factories.FoiRequestFactory.create( visibility=FoiRequest.VISIBLE_TO_REQUESTER, user=self.dev_user, title='never shown' ) self.pb = PublicBody.objects.all()[0] self.request_list_url = reverse('api:request-list') self.message_detail_url = reverse('api:message-detail', kwargs={'pk': self.mes.pk}) def _create_authorization_header(self, token): return "Bearer {0}".format(token) def api_get(self, url): auth = self._create_authorization_header(self.access_token.token) response = self.client.get(url, HTTP_AUTHORIZATION=auth) self.assertEqual(response.status_code, 200) return response, json.loads(response.content.decode('utf-8')) def api_post(self, url, data=''): auth = self._create_authorization_header(self.access_token.token) response = self.client.post(url, json.dumps(data), content_type="application/json", HTTP_AUTHORIZATION=auth) return response, json.loads(response.content.decode('utf-8')) def api_delete(self, url, data=''): auth = self._create_authorization_header(self.access_token.token) response = 
self.client.delete(url, json.dumps(data), content_type="application/json", HTTP_AUTHORIZATION=auth) result = None if response.content: result = json.loads(response.content.decode('utf-8')) return response, result class OAuthApiTest(OAuthAPIMixin, TestCase): def test_list_public_requests(self): self.assertEqual(FoiRequest.objects.all().count(), 3) response = self.client.get(self.request_list_url) self.assertEqual(response.status_code, 200) result = json.loads(response.content.decode('utf-8')) self.assertEqual(result['meta']['total_count'], 1) def test_list_private_requests_when_logged_in(self): self.client.login(email=self.test_user.email, password='froide') response = self.client.get(self.request_list_url) self.assertEqual(response.status_code, 200) result = json.loads(response.content.decode('utf-8')) self.assertEqual(result['meta']['total_count'], 2) def test_list_private_requests_without_scope(self): response, result = self.api_get(self.request_list_url) self.assertEqual(result['meta']['total_count'], 1) self.assertNotContains(response, 'permissions required') self.assertNotContains(response, 'never shown') def test_list_private_requests_with_scope(self): self.access_token.scope = "read:user read:request" self.access_token.save() response, result = self.api_get(self.request_list_url) self.assertEqual(result['meta']['total_count'], 2) self.assertContains(response, 'permissions required') self.assertNotContains(response, 'never shown') def test_filter_other_private_requests(self): self.access_token.scope = "read:user read:request" self.access_token.save() response, result = self.api_get(self.request_list_url + '?user=%s' % self.dev_user.pk) self.assertEqual(result['meta']['total_count'], 0) def test_filter_private_requests_without_scope(self): response, result = self.api_get(self.request_list_url + '?user=%s' % self.test_user.pk) self.assertEqual(result['meta']['total_count'], 0) def test_filter_private_requests_with_scope(self): self.access_token.scope = 
"read:user read:request" self.access_token.save() response, result = self.api_get(self.request_list_url + '?user=%s' % self.test_user.pk) self.assertEqual(result['meta']['total_count'], 1) def test_see_only_approved_attachments(self): self.req.visibility = FoiRequest.VISIBLE_TO_PUBLIC self.req.save() self.assertEqual(FoiAttachment.objects.all().count(), 4) response = self.client.get(self.message_detail_url) self.assertEqual(response.status_code, 200) result = json.loads(response.content.decode('utf-8')) self.assertEqual(len(result['attachments']), 1) def test_see_only_approved_attachments_loggedin(self): self.req.visibility = FoiRequest.VISIBLE_TO_PUBLIC self.req.save() self.client.login(email=self.test_user.email, password='froide') response = self.client.get(self.message_detail_url) self.assertEqual(response.status_code, 200) result = json.loads(response.content.decode('utf-8')) self.assertEqual(len(result['attachments']), 2) def test_see_only_approved_attachments_without_scope(self): self.req.visibility = FoiRequest.VISIBLE_TO_PUBLIC self.req.save() response, result = self.api_get(self.message_detail_url) self.assertEqual(len(result['attachments']), 1) def test_see_only_approved_attachments_with_scope(self): self.req.visibility = FoiRequest.VISIBLE_TO_PUBLIC self.req.save() self.access_token.scope = "read:user read:request" self.access_token.save() response, result = self.api_get(self.message_detail_url) self.assertEqual(len(result['attachments']), 2) def test_request_creation_not_loggedin(self): old_count = FoiRequest.objects.all().count() response = self.client.post(self.request_list_url, json.dumps({ 'subject': 'Test', 'body': 'Testing', 'publicbodies': [self.pb.pk] }), content_type="application/json") self.assertEqual(response.status_code, 401) new_count = FoiRequest.objects.all().count() self.assertEqual(old_count, new_count) self.assertEqual(len(mail.outbox), 0) def test_request_creation_without_scope(self): old_count = FoiRequest.objects.all().count() 
response, result = self.api_post(self.request_list_url, { 'subject': 'Test', 'body': 'Testing', 'publicbodies': [self.pb.pk] }) self.assertEqual(response.status_code, 403) new_count = FoiRequest.objects.all().count() self.assertEqual(old_count, new_count) self.assertEqual(len(mail.outbox), 0) def test_request_creation_with_scope(self): self.access_token.scope = "read:user make:request" self.access_token.save() old_count = FoiRequest.objects.all().count() mail.outbox = [] data = { 'subject': 'OAUth-Test', 'body': 'Testing', 'publicbodies': [self.pb.pk], 'tags': ['test1', 'test2'] } response, result = self.api_post(self.request_list_url, data) self.assertEqual(response.status_code, 201) new_count = FoiRequest.objects.all().count() self.assertEqual(old_count, new_count - 1) self.assertEqual(len(mail.outbox), 2) new_req = FoiRequest.objects.get(title='OAUth-Test') self.assertEqual(set([t.name for t in new_req.tags.all()]), set(data['tags'])) # Check throttling froide_config = dict(settings.FROIDE_CONFIG) froide_config['request_throttle'] = [(1, 60), (5, 60 * 60)] with self.settings(FROIDE_CONFIG=froide_config): response, result = self.api_post(self.request_list_url, { 'subject': 'Test', 'body': 'Testing', 'publicbodies': [self.pb.pk] }) self.assertEqual(response.status_code, 429)
mit
spragunr/echolocation
stereo/add_data.py
1
1566
import h5py import os import numpy as np from sys import argv from stereo_processing import align_audio, downsize path = '/Volumes/seagate/legit_data/' current_path = os.getcwd()+'/' print "opening main file" with h5py.File(current_path+argv[1], 'r') as main_data: main_audio = main_data['audio'].value main_depth = main_data['depth'].value new_audio = [main_audio] new_depth = [main_depth] num_new_samples = 0 old_audio_shape = main_audio.shape old_depth_shape = main_depth.shape for filename in argv[2:]: print "loading %s data" %filename with h5py.File(path+filename, 'r') as f: print "aligning audio" a = f['audio'].value aligned = align_audio(5000, a) new_audio.append(aligned) print "downsizing depth" d = f['depth'].value downsized = np.empty((aligned.shape[0],12,16)) counter = 0 for d_map in d: downsized[counter] = downsize(d_map) print "done with map", counter counter += 1 new_depth.append(downsized) num_new_samples += a.shape[0] audio_tuple = tuple(new_audio) depth_tuple = tuple(new_depth) print "audio concatenation" all_audio = np.concatenate(audio_tuple) print "depth concatenation" all_depth = np.concatenate(depth_tuple) print "\n\nold audio shape:", old_audio_shape print "old depth shape:", old_depth_shape print "total number of new samples added:",num_new_samples print "new audio shape:", all_audio.shape print "new depth shape:", all_depth.shape print "\n\nsaving new file" with h5py.File("data_100t.h5", 'w') as d: d.create_dataset('audio', data=all_audio) d.create_dataset('depth', data=all_depth)
mit
google-research/language
language/nql/nql/util_test.py
1
13749
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=g-error-prone-assert-raises """Tests for util.""" import os import tempfile import nql from nql import dataset from nql import util import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf def tabline(s): return "\t".join(s.split(" ")) + "\n" TRIPPY_KG_LINES = [ tabline("feature t1 purple"), tabline("feature t1 green"), tabline("feature t1 red"), tabline("feature t2 purple"), tabline("feature t2 red"), tabline("feature t3 red"), tabline("feature t3 black"), tabline("feature b1 black"), tabline("feature b1 tan"), tabline("feature b2 white"), tabline("feature b2 grey"), tabline("feature b3 black"), tabline("feature b3 white"), tabline("feature b3 tan"), tabline("feature u1 purple"), tabline("feature t1 green"), tabline("feature u2 green"), tabline("feature t2 red"), tabline("feature c1 black"), tabline("feature b1 grey"), tabline("feature c2 tan"), tabline("feature c2 grey") ] TRAIN_DATA_LINES = [ "t1|trippy", "t2|trippy", "t3|trippy", "b1|boring", "b2|boring", "b3|boring" ] TEST_DATA_LINES = ["u1|trippy", "u2|trippy", "c1|boring", "c2|boring"] def simple_tf_dataset(context, tuple_input, x_type, y_type, normalize_outputs=False, batch_size=1, shuffle_buffer_size=1000, feature_key=None, field_separator="\t"): """A dataset with just two columns, x and y. 
Args: context: a NeuralQueryContext tuple_input: passed to util.tuple_dataset x_type: type of entities x y_type: type of entities y1,...,yk normalize_outputs: make the encoding of {y1,...,yk} sum to 1 batch_size: size of minibatches shuffle_buffer_size: if zero, do not shuffle the dataset. Otherwise, this is passed in as argument to shuffle feature_key: if not None, wrap the x part of the minibatch in a dictionary with the given key field_separator: passed in to dataset.tuple_dataset Returns: a tf.data.Dataset formed by wrapping the generator """ dset = dataset.tuple_dataset( context, tuple_input, [x_type, y_type], normalize_outputs=normalize_outputs, field_separator=field_separator) if shuffle_buffer_size > 0: dset = dset.shuffle(shuffle_buffer_size) dset = dset.batch(batch_size) if feature_key is None: return dset else: wrap_x_in_dict = lambda x, y: ({feature_key: x}, y) return dset.map(wrap_x_in_dict) class TrippyBuilder(util.ModelBuilder): def config_context(self, context, params=None): context.declare_relation("feature", "instance_t", "feature_t") context.declare_relation( "indicates", "feature_t", "label_t", trainable=True) context.extend_type("label_t", ["trippy", "boring"]) context.load_kg(lines=TRIPPY_KG_LINES) context.set_initial_value( "indicates", np.ones(context.get_shape("indicates"), dtype="float32")) def config_model_prediction(self, model, feature_ph_dict, params=None): model.x = model.context.as_nql(feature_ph_dict["x"], "instance_t") model.score = model.x.feature().indicates() model.predicted_y = model.score.tf_op(nql.nonneg_softmax) model.predictions = {"y": model.predicted_y} def config_model_training(self, model, labels_ph, params=None): model.labels = model.context.as_tf(labels_ph) model.loss = nql.nonneg_crossentropy(model.predicted_y.tf, model.labels) optimizer = tf1.train.AdagradOptimizer(1.0) model.train_op = optimizer.minimize( loss=model.loss, global_step=tf1.train.get_global_step()) def config_model_evaluation(self, model, labels_ph, 
params=None): model.accuracy = tf1.metrics.accuracy( tf.argmax(input=model.labels, axis=1), tf.argmax(input=model.predicted_y.tf, axis=1)) model.top_labels = util.labels_of_top_ranked_predictions_in_batch( model.labels, model.predicted_y.tf) model.precision_at_one = tf1.metrics.mean(model.top_labels) model.evaluations = { "accuracy": model.accuracy, "precision@1": model.precision_at_one } class BaseTester(tf.test.TestCase): def setUp(self): super(BaseTester, self).setUp() self.tmp_dir = tempfile.mkdtemp() self.context = TrippyBuilder().build_context() def make_train_dset(self, num_epochs): # need to specify a non-default field separator # because tabs are disallowed in test input files return simple_tf_dataset( self.context, TRAIN_DATA_LINES, "instance_t", "label_t", feature_key="x", field_separator="|").repeat(num_epochs) def make_test_dset(self): return simple_tf_dataset( self.context, TEST_DATA_LINES, "instance_t", "label_t", shuffle_buffer_size=0, feature_key="x", field_separator="|") class TestModelBuilder(BaseTester): def setUp(self): super(TestModelBuilder, self).setUp() self.graph = tf.Graph() self.session = tf1.Session(graph=self.graph) def check_one_hot(self, m, i, typename): self.assertEqual(m.shape, (self.context.get_max_id(typename),)) self.assertEqual(np.sum(m), 1.0) self.assertEqual(m[i], 1.0) def test_tf_dataset(self): with self.graph.as_default(): dset1 = simple_tf_dataset( self.context, TRAIN_DATA_LINES, "instance_t", "label_t", shuffle_buffer_size=0, field_separator="|") x, y = self.session.run(tf1.data.make_one_shot_iterator(dset1).get_next()) self.check_batch(x, 0, "instance_t") self.check_batch(y, 0, "label_t") def check_batch(self, m, i, typename): self.assertEqual(m.shape, (1, self.context.get_max_id(typename))) self.assertEqual(np.sum(m), 1.0) self.assertEqual(m[0, i], 1.0) def test_tf_minibatch_dataset(self): with self.graph.as_default(): dset2 = simple_tf_dataset( self.context, TRAIN_DATA_LINES, "instance_t", "label_t", batch_size=2, 
shuffle_buffer_size=0, field_separator="|") x, y = self.session.run(tf1.data.make_one_shot_iterator(dset2).get_next()) # check that this is a minibatch containing the first two instances self.assertEqual(x.shape[0], 2) self.assertEqual(y.shape[0], 2) self.assertEqual(x.shape[1], self.context.get_max_id("instance_t")) self.assertEqual(y.shape[1], self.context.get_max_id("label_t")) self.assertEqual(np.sum(x), 2.0) self.assertEqual(np.sum(y), 2.0) self.assertEqual(x[0, 0], 1.0) self.assertEqual(x[1, 1], 1.0) # both of the first two instances are negative self.assertEqual(y[0, 0], 1.0) self.assertEqual(y[1, 0], 1.0) def test_ph_learn(self): with self.graph.as_default(): # build model feature_ph_dict = {"x": self.context.placeholder("x", "instance_t")} labels_ph = self.context.placeholder("y", "label_t") builder = TrippyBuilder() model = builder.build_model(feature_ph_dict, labels_ph) trainer = util.Trainer(self.session, model, feature_ph_dict, labels_ph) # train trainer.train(self.make_train_dset(7)) # check the model fits the train data evaluation = trainer.evaluate(self.make_train_dset(1)) self.assertEqual(evaluation["accuracy"], 1.0) self.assertEqual(evaluation["precision@1"], 1.0) # try running the model on something for inst_name in ["u1", "u2", "c1", "c2"]: x = model.context.one_hot_numpy_array(inst_name, "instance_t") x_ph = feature_ph_dict["x"] fd = {x_ph.name: x} y_dict = model.predicted_y.eval(self.session, feed_dict=fd) # the u's are class trippy if inst_name[0] == "u": self.assertGreater(y_dict["trippy"], y_dict["boring"]) # the c's are class boring but c1 is hard to get elif inst_name == "c2": self.assertLess(y_dict["trippy"], y_dict["boring"]) # test the model evaluation = trainer.evaluate(self.make_test_dset()) self.assertGreaterEqual(evaluation["accuracy"], 0.7) self.assertGreaterEqual(evaluation["precision@1"], 0.7) # test callback cb_model = builder.build_model(feature_ph_dict, labels_ph) cb_model.loss_history = [] def my_callback(fd, loss, secs): 
del fd, secs # unused cb_model.loss_history.append(loss) return cb_model.training_callback = my_callback with tf1.Session() as session: cb_trainer = util.Trainer(session, cb_model, feature_ph_dict, labels_ph) cb_trainer.train(self.make_train_dset(5)) self.assertEqual(len(cb_model.loss_history), 30) self.assertLess(cb_model.loss_history[-1], 0.06) def test_estimator_learn(self): def train_input_fn(): return self.make_train_dset(5) def test_input_fn(): return self.make_test_dset() estimator = TrippyBuilder().build_estimator() estimator.train(input_fn=train_input_fn) evaluation = estimator.evaluate(input_fn=train_input_fn) self.assertEqual(evaluation["accuracy"], 1.0) self.assertEqual(evaluation["global_step"], 30) evaluation = estimator.evaluate(input_fn=test_input_fn) self.assertGreater(evaluation["accuracy"], 0.7) self.assertGreaterEqual(evaluation["precision@1"], 0.7) class TestSaveRestore(BaseTester): def setUp(self): super(TestSaveRestore, self).setUp() tmp_dir = tempfile.mkdtemp("util_test") self.checkpoint_location_a = os.path.join(tmp_dir, "trippy.ckpt") self.checkpoint_location_b = os.path.join(tmp_dir, "trippy2.ckpt") def test_est(self): def train_input_fn(): return self.make_train_dset(5) def test_input_fn(): return self.make_test_dset() estimator = TrippyBuilder().build_estimator( model_dir=self.checkpoint_location_a) estimator.train(input_fn=train_input_fn) evaluation = estimator.evaluate(input_fn=test_input_fn) self.assertGreater(evaluation["accuracy"], 0.7) self.assertGreaterEqual(evaluation["precision@1"], 0.7) def test_ph(self): def try_model_on_test_instances(model, sess, feature_ph_dict): trial = {} for inst_name in ["u1", "u2", "c1", "c2"]: x = model.context.one_hot_numpy_array(inst_name, "instance_t") x_ph = feature_ph_dict["x"] fd = {x_ph.name: x} y_dict = model.predicted_y.eval(sess, feed_dict=fd) trial[inst_name] = y_dict["boring"] return trial # Train and save. 
with tf.Graph().as_default(): with tf1.Session() as sess1: builder1 = TrippyBuilder() context1 = builder1.build_context() feature_ph_dict1 = {"x": context1.placeholder("x", "instance_t")} labels_ph1 = context1.placeholder("y", "label_t") model1 = builder1.build_model(feature_ph_dict1, labels_ph1) trainer1 = util.Trainer(sess1, model1, feature_ph_dict1, labels_ph1) trainer1.train(self.make_train_dset(5)) trial1a = try_model_on_test_instances(model1, sess1, feature_ph_dict1) saver1 = tf1.train.Saver() saver1.save(sess1, self.checkpoint_location_a) # Restore, evaluate, train, and save. with tf.Graph().as_default(): with tf1.Session() as sess2: builder2 = TrippyBuilder() context2 = builder2.build_context() feature_ph_dict2 = {"x": context2.placeholder("x", "instance_t")} labels_ph2 = context2.placeholder("y", "label_t") model2 = builder2.build_model(feature_ph_dict2, labels_ph2) saver2 = tf1.train.Saver() trainer2 = util.Trainer(sess2, model2, feature_ph_dict2, labels_ph2) saver2.restore(sess2, self.checkpoint_location_a) trainer2.evaluate(self.make_test_dset()) trial2a = try_model_on_test_instances(model2, sess2, feature_ph_dict2) self.assertDictEqual(trial1a, trial2a) trainer2.train(self.make_train_dset(5)) saver2.save(sess2, self.checkpoint_location_b) trial2b = try_model_on_test_instances(model2, sess2, feature_ph_dict2) with self.assertRaises(tf.test.TestCase.failureException): self.assertDictEqual(trial2a, trial2b) # Restore and evaluate. 
with tf.Graph().as_default(): with tf1.Session() as sess3: builder3 = TrippyBuilder() context3 = builder3.build_context() feature_ph_dict3 = {"x": context3.placeholder("x", "instance_t")} labels_ph3 = context3.placeholder("y", "label_t") model3 = builder3.build_model(feature_ph_dict3, labels_ph3) saver3 = tf1.train.Saver() trainer3 = util.Trainer(sess3, model3, feature_ph_dict3, labels_ph3) saver3.restore(sess3, self.checkpoint_location_b) trainer3.evaluate(self.make_test_dset()) trial3b = try_model_on_test_instances(model3, sess3, feature_ph_dict3) self.assertDictEqual(trial2b, trial3b) if __name__ == "__main__": tf.test.main()
apache-2.0
nealzhang/util
FastBitSetTest.py
1
3557
#!/usr/bin/python # -*- coding: utf-8 -*- import time from FastBitSet import FastBitSet ISOTIMEFORMAT = '%Y-%m-%d %X' cap = 500000 size = 64 listSize = 8 indexes = [1, 555, 687987, 73521, 53821, 1287, 36746, 2165] print('start BitSet test.') print() print('cap:', cap, '; size:', size, '; bites:', cap * size) print() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'start create BitSet.') bs = FastBitSet(cap, size) print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'end create BitSet.') print() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'start setOne.') start = time.time() for i in range(cap * size): bs.setOne(i, False) end = time.time() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'end setOne.') print('totail time: ', end - start, ' seconds.') print((cap * size) / (end * 1000 - start * 1000) , ' times setOne per ms.') print() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'start setList.') start = time.time() for i in range(int((cap * size) / listSize)): bs.setList(indexes, False) end = time.time() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'end setList.') print('totail time: ', end - start, ' seconds.') print((cap * size) / (end * 1000 - start * 1000) , ' times setList per ms.') print() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'start set.') start = time.time() bs.set(0, cap * size) end = time.time() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'end set.') print('totail time: ', end - start, ' seconds.') print((cap * size) / (end * 1000 - start * 1000) , ' times set per ms.') print() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'start getOne.') start = time.time() for i in range(cap * size): bit = bs.getOne(i) end = time.time() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'end getOne.') print('totail time: ', end - start, ' seconds.') print((cap * size) / (end * 1000 - start * 1000) , ' times getOne per ms.') print() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 
'start getList.') start = time.time() for i in range(int((cap * size) / listSize)): bits = bs.getList(indexes) end = time.time() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'end getList.') print('totail time: ', end - start, ' seconds.') print((cap * size) / (end * 1000 - start * 1000) , ' times getList per ms.') print() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'start get.') start = time.time() bits = bs.get(0, cap * size) end = time.time() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'end get.') print('totail time: ', end - start, ' seconds.') print((cap * size) / (end * 1000 - start * 1000) , ' times get per ms.') print() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'start flipOne.') start = time.time() for i in range(cap * size): bit = bs.flipOne(i) end = time.time() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'end flipOne.') print('totail time: ', end - start, ' seconds.') print((cap * size) / (end * 1000 - start * 1000) , ' times flipOne per ms.') print() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'start flip.') start = time.time() bits = bs.flip(0, cap * size) end = time.time() print(time.strftime( ISOTIMEFORMAT, time.localtime()), 'end flip.') print('totail time: ', end - start, ' seconds.') print((cap * size) / (end * 1000 - start * 1000) , ' times flip per ms.') print() import cProfile cProfile.run("bs.setOne(0)")
lgpl-3.0
TheWylieStCoyote/gnuradio
gr-trellis/docs/test_tcm.py
10
3656
#!/usr/bin/env python from __future__ import print_function from __future__ import division from __future__ import unicode_literals from gnuradio import gr from gnuradio import audio from gnuradio import trellis, digital, blocks from gnuradio import eng_notation import math import sys import random import fsm_utils try: from gnuradio import analog except ImportError: sys.stderr.write("Error: Program requires gr-analog.\n") sys.exit(1) def run_test (f,Kb,bitspersymbol,K,dimensionality,constellation,N0,seed): tb = gr.top_block () # TX src = blocks.lfsr_32k_source_s() src_head = blocks.head (gr.sizeof_short,Kb / 16) # packet size in shorts s2fsmi = blocks.packed_to_unpacked_ss(bitspersymbol,gr.GR_MSB_FIRST) # unpack shorts to symbols compatible with the FSM input cardinality enc = trellis.encoder_ss(f,0) # initial state = 0 mod = digital.chunks_to_symbols_sf(constellation,dimensionality) # CHANNEL add = blocks.add_ff() noise = analog.noise_source_f(analog.GR_GAUSSIAN,math.sqrt(N0 / 2),seed) # RX metrics = trellis.metrics_f(f.O(),dimensionality,constellation,digital.TRELLIS_EUCLIDEAN) # data preprocessing to generate metrics for Viterbi va = trellis.viterbi_s(f,K,0,-1) # Put -1 if the Initial/Final states are not set. fsmi2s = blocks.unpacked_to_packed_ss(bitspersymbol,gr.GR_MSB_FIRST) # pack FSM input symbols to shorts dst = blocks.check_lfsr_32k_s(); tb.connect (src,src_head,s2fsmi,enc,mod) tb.connect (mod,(add,0)) tb.connect (noise,(add,1)) tb.connect (add,metrics) tb.connect (metrics,va,fsmi2s,dst) tb.run() # A bit of cheating: run the program once and print the # final encoder state. 
# Then put it as the last argument in the viterbi block #print "final state = " , enc.ST() ntotal = dst.ntotal () nright = dst.nright () runlength = dst.runlength () return (ntotal,ntotal-nright) def main(args): nargs = len (args) if nargs == 3: fname=args[0] esn0_db=float(args[1]) # Es/No in dB rep=int(args[2]) # number of times the experiment is run to collect enough errors else: sys.stderr.write ('usage: test_tcm.py fsm_fname Es/No_db repetitions\n') sys.exit (1) # system parameters f=trellis.fsm(fname) # get the FSM specification from a file Kb=1024*16 # packet size in bits (make it multiple of 16 so it can be packed in a short) bitspersymbol = int(round(math.log(f.I()) / math.log(2))) # bits per FSM input symbol K=Kb / bitspersymbol # packet size in trellis steps modulation = fsm_utils.psk4 # see fsm_utlis.py for available predefined modulations dimensionality = modulation[0] constellation = modulation[1] if len(constellation) / dimensionality != f.O(): sys.stderr.write ('Incompatible FSM output cardinality and modulation size.\n') sys.exit (1) # calculate average symbol energy Es = 0 for i in range(len(constellation)): Es = Es + constellation[i]**2 Es = Es / (len(constellation)//dimensionality) N0=Es / pow(10.0,esn0_db/10.0); # noise variance tot_s=0 terr_s=0 for i in range(rep): (s,e)=run_test(f,Kb,bitspersymbol,K,dimensionality,constellation,N0,-int(666+i)) # run experiment with different seed to get different noise realizations tot_s=tot_s+s terr_s=terr_s+e if (i%100==0): print(i,s,e,tot_s,terr_s, '%e' % ((1.0*terr_s) / tot_s)) # estimate of the (short) error rate print(tot_s,terr_s, '%e' % ((1.0*terr_s) / tot_s)) if __name__ == '__main__': main (sys.argv[1:])
gpl-3.0
jaywreddy/django
django/core/serializers/python.py
153
7692
"""
A Python "serializer". Doesn't do much serializing per se -- just converts to
and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for
other serializers.
"""
from __future__ import unicode_literals

from collections import OrderedDict

from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils import six
from django.utils.encoding import force_text, is_protected_type


class Serializer(base.Serializer):
    """
    Serializes a QuerySet to basic Python objects.

    The result is a list of dicts (one per object), each of the shape
    {'model': ..., 'pk': ..., 'fields': {...}} as built by get_dump_object().
    """
    # Marks this serializer as an internal building block (e.g. the JSON
    # serializer layers on top of it) rather than a user-facing format.
    internal_use_only = True

    def start_serialization(self):
        # _current accumulates the field dict of the object being serialized;
        # objects collects the finished per-object dicts.
        self._current = None
        self.objects = []

    def end_serialization(self):
        pass

    def start_object(self, obj):
        # OrderedDict keeps field order stable in the serialized output.
        self._current = OrderedDict()

    def end_object(self, obj):
        self.objects.append(self.get_dump_object(obj))
        self._current = None

    def get_dump_object(self, obj):
        """Build the outer {'model', 'pk', 'fields'} dict for one object."""
        # Deferred instances are dynamically-created proxies; report the
        # concrete model they proxy for, not the proxy class itself.
        model = obj._meta.proxy_for_model if obj._deferred else obj.__class__
        data = OrderedDict([('model', force_text(model._meta))])
        # The pk is omitted when natural primary keys are requested and the
        # object can actually provide a natural key.
        if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
            data["pk"] = force_text(obj._get_pk_val(), strings_only=True)
        data['fields'] = self._current
        return data

    def handle_field(self, obj, field):
        value = field.value_from_object(obj)
        # Protected types (i.e., primitives like None, numbers, dates,
        # and Decimals) are passed through as is. All other values are
        # converted to string first.
        if is_protected_type(value):
            self._current[field.name] = value
        else:
            self._current[field.name] = field.value_to_string(obj)

    def handle_fk_field(self, obj, field):
        # Prefer the related object's natural key when requested and
        # available; otherwise fall back to the raw FK attribute value.
        if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
            related = getattr(obj, field.name)
            if related:
                value = related.natural_key()
            else:
                value = None
        else:
            value = getattr(obj, field.get_attname())
            if not is_protected_type(value):
                value = field.value_to_string(obj)
        self._current[field.name] = value

    def handle_m2m_field(self, obj, field):
        # Only auto-created through tables are serialized here; explicit
        # through models are serialized as their own objects.
        if field.remote_field.through._meta.auto_created:
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                m2m_value = lambda value: value.natural_key()
            else:
                m2m_value = lambda value: force_text(value._get_pk_val(), strings_only=True)
            self._current[field.name] = [m2m_value(related)
                               for related in getattr(obj, field.name).iterator()]

    def getvalue(self):
        return self.objects


def Deserializer(object_list, **options):
    """
    Deserialize simple Python objects back into Django ORM instances.

    It's expected that you pass the Python objects themselves (instead of a
    stream or a string) to the constructor
    """
    db = options.pop('using', DEFAULT_DB_ALIAS)
    ignore = options.pop('ignorenonexistent', False)

    for d in object_list:
        # Look up the model and starting build a dict of data for it.
        try:
            Model = _get_model(d["model"])
        except base.DeserializationError:
            if ignore:
                continue
            else:
                raise
        data = {}
        if 'pk' in d:
            try:
                data[Model._meta.pk.attname] = Model._meta.pk.to_python(d.get('pk'))
            except Exception as e:
                # WithData decorates the error with model/pk context for the caller.
                raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), None)
        m2m_data = {}
        field_names = {f.name for f in Model._meta.get_fields()}

        # Handle each field
        for (field_name, field_value) in six.iteritems(d["fields"]):

            if ignore and field_name not in field_names:
                # skip fields no longer on model
                continue

            # NOTE(review): with unicode_literals, on Python 2 `str` is bytes
            # here, so this re-decodes byte strings using the configured
            # charset; on Python 3 it normalizes lazy/str subclasses.
            if isinstance(field_value, str):
                field_value = force_text(
                    field_value, options.get("encoding", settings.DEFAULT_CHARSET), strings_only=True
                )

            field = Model._meta.get_field(field_name)

            # Handle M2M relations
            if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
                model = field.remote_field.model
                if hasattr(model._default_manager, 'get_by_natural_key'):
                    def m2m_convert(value):
                        # A non-string iterable is a natural-key tuple; a
                        # scalar (or string) is a plain pk value.
                        if hasattr(value, '__iter__') and not isinstance(value, six.text_type):
                            return model._default_manager.db_manager(db).get_by_natural_key(*value).pk
                        else:
                            return force_text(model._meta.pk.to_python(value), strings_only=True)
                else:
                    m2m_convert = lambda v: force_text(model._meta.pk.to_python(v), strings_only=True)
                try:
                    m2m_data[field.name] = []
                    for pk in field_value:
                        m2m_data[field.name].append(m2m_convert(pk))
                except Exception as e:
                    # `pk` still holds the value that failed to convert.
                    raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), pk)

            # Handle FK fields
            elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
                model = field.remote_field.model
                if field_value is not None:
                    try:
                        default_manager = model._default_manager
                        field_name = field.remote_field.field_name
                        if hasattr(default_manager, 'get_by_natural_key'):
                            if hasattr(field_value, '__iter__') and not isinstance(field_value, six.text_type):
                                obj = default_manager.db_manager(db).get_by_natural_key(*field_value)
                                value = getattr(obj, field.remote_field.field_name)
                                # If this is a natural foreign key to an object that
                                # has a FK/O2O as the foreign key, use the FK value
                                if model._meta.pk.remote_field:
                                    value = value.pk
                            else:
                                value = model._meta.get_field(field_name).to_python(field_value)
                            data[field.attname] = value
                        else:
                            data[field.attname] = model._meta.get_field(field_name).to_python(field_value)
                    except Exception as e:
                        raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), field_value)
                else:
                    data[field.attname] = None

            # Handle all other fields
            else:
                try:
                    data[field.name] = field.to_python(field_value)
                except Exception as e:
                    raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), field_value)

        obj = base.build_instance(Model, data, db)
        # M2M values cannot be assigned until the instance is saved, so they
        # travel alongside the instance in the DeserializedObject wrapper.
        yield base.DeserializedObject(obj, m2m_data)


def _get_model(model_identifier):
    """
    Helper to look up a model from an "app_label.model_name" string.
    """
    try:
        return apps.get_model(model_identifier)
    except (LookupError, TypeError):
        raise base.DeserializationError("Invalid model identifier: '%s'" % model_identifier)
bsd-3-clause
lnielsen/invenio
invenio/legacy/bibauthorid/cluster_set.py
3
10579
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

# NOTE(review): this module is Python 2 only (itertools.izip, iterator
# .next(), and reliance on eager list-returning map()).

from itertools import chain, groupby, izip, cycle
from operator import itemgetter

from invenio.legacy.bibauthorid.matrix_optimization import maximized_mapping
from invenio.legacy.bibauthorid.backinterface import save_cluster
from invenio.legacy.bibauthorid.backinterface import get_confirmed_papers_of_authors
from invenio.legacy.bibauthorid.backinterface import get_bib10x, get_bib70x
from invenio.legacy.bibauthorid.backinterface import get_author_to_confirmed_names_mapping
from invenio.legacy.bibauthorid.backinterface import get_signatures_from_bibrefs
from invenio.legacy.bibauthorid.name_utils import generate_last_name_cluster_str
from invenio.legacy.bibauthorid.general_utils import bibauthor_print


class Blob(object):
    # A Blob gathers, for a single bibrefrec, the personids that claim it,
    # are assigned to it, or reject it, derived from the record flags.
    def __init__(self, personid_records):
        '''
        @param personid_records:
            A list of tuples: (personid, bibrefrec, flag).
            Notice that all bibrefrecs should be the same
            since the Blob represents only one bibrefrec.
        '''
        self.bib = personid_records[0][1]
        assert all(p[1] == self.bib for p in personid_records), \
            "All cluster sets should share the bibrefrec"
        self.claimed = set()
        self.assigned = set()
        self.rejected = set()
        # Flag semantics as encoded here: > 1 claimed, -1..1 assigned,
        # < -1 rejected.
        for pid, _, flag in personid_records:
            if flag > 1:
                self.claimed.add(pid)
            elif flag >= -1:
                self.assigned.add(pid)
            else:
                self.rejected.add(pid)


def create_blobs_by_pids(pids):
    '''
    Returs a list of blobs by a given set of personids.
    Blob is an object which describes all information
    for a bibrefrec in the personid table.

    @type pids: iterable of integers
    '''
    all_bibs = get_confirmed_papers_of_authors(pids)
    # Reshape rows to (personid, bibrefrec-tuple, flag) so rows can be
    # grouped per bibrefrec below.
    all_bibs = ((x[0], (int(x[1]), x[2], x[3]), x[4]) for x in all_bibs)
    bibs_dict = groupby(sorted(all_bibs, key=itemgetter(1)), key=itemgetter(1))
    blobs = [Blob(list(bibs)) for _, bibs in bibs_dict]
    return blobs


def group_blobs(blobs):
    '''
    Separates the blobs into two groups
    of objects - those with claims and
    those without.
    '''
    # created from blobs, which are claimed
    # [(bibrefrec, personid)]
    union = []

    # created from blobs, which are not claimed
    # [(bibrefrec, personid/None, [personid])]
    independent = []

    for blob in blobs:
        assert len(blob.claimed) + len(blob.assigned) == 1, \
            "Each blob must have exactly one associated signature"
        if len(blob.claimed) > 0:
            union.append((blob.bib, list(blob.claimed)[0]))
        else:
            independent.append((blob.bib, list(blob.assigned)[0], list(blob.rejected)))

    return (union, independent)


class ClusterSet(object):
    class Cluster(object):
        def __init__(self, bibs, hate=None):
            # hate is a symetrical relation
            self.bibs = set(bibs)
            if hate:
                self.hate = set(hate)
            else:
                self.hate = set(list())
            self.personid = None

        def hates(self, other):
            return other in self.hate

        def quarrel(self, cl2):
            # Record mutual incompatibility between two clusters.
            self.hate.add(cl2)
            cl2.hate.add(self)

        def _debug_test_hate_relation(self):
            # Verify that every hate edge from this cluster is mirrored.
            for cl2 in self.hate:
                if not self.hates(cl2) or not cl2.hates(self):
                    return False
            return True

    def __init__(self):
        self.clusters = []
        self.num_all_bibs = None
        self.last_name = None

    def update_bibs(self):
        '''Updates the number of bibrefrecs held by this clusterset'''
        self.num_all_bibs = sum(len(cl.bibs) for cl in self.clusters)

    def all_bibs(self):
        '''Chain all bibs contained in this clusterset'''
        return chain.from_iterable(cl.bibs for cl in self.clusters)

    def create_skeleton(self, personids, last_name):
        # Build a cluster set that honours existing claims: one cluster per
        # claiming personid (all mutually hating), plus one singleton cluster
        # per unclaimed signature, hating the clusters of its rejected pids.
        blobs = create_blobs_by_pids(personids)
        self.last_name = last_name

        union, independent = group_blobs(blobs)

        union_clusters = {}
        for uni in union:
            union_clusters[uni[1]] = union_clusters.get(uni[1], []) + [uni[0]]

        cluster_dict = dict((personid, self.Cluster(bibs)) for personid, bibs in union_clusters.items())
        self.clusters = cluster_dict.values()

        # Claimed clusters must never be merged with each other.
        for i, cl in enumerate(self.clusters):
            cl.hate = set(chain(self.clusters[:i], self.clusters[i + 1:]))

        for ind in independent:
            bad_clusters = [cluster_dict[i] for i in ind[2] if i in cluster_dict]
            cl = self.Cluster([ind[0]], bad_clusters)
            for bcl in bad_clusters:
                bcl.hate.add(cl)
            self.clusters.append(cl)

        self.update_bibs()
        return self

    # Creates a cluster set, ignoring the claims and the
    # rejected papers.
    def create_pure(self, personids, last_name):
        blobs = create_blobs_by_pids(personids)
        self.last_name = last_name

        self.clusters = [self.Cluster((blob.bib,)) for blob in blobs]
        self.update_bibs()
        return self

    def create_from_mark(self, bibrefs, last_name):
        # One singleton cluster per signature found in the MARC tables.
        bibrecrefs = get_signatures_from_bibrefs(bibrefs)
        self.clusters = [ClusterSet.Cluster([bib]) for bib in bibrecrefs]
        self.last_name = last_name
        self.update_bibs()
        return self

    # a *very* slow fucntion checking when the hate relation is no longer symetrical
    def _debug_test_hate_relation(self):
        for cl1 in self.clusters:
            if not cl1._debug_test_hate_relation():
                return False
        return True

    # similar to the function above
    def _debug_duplicated_recs(self, mapping=None):
        # Returns False if any cluster holds two signatures of the same
        # record (index 2 of the bibrefrec tuple).
        for cl in self.clusters:
            if mapping:
                setty = set(mapping[x][2] for x in cl.bibs)
            else:
                setty = set(x[2] for x in cl.bibs)

            if len(cl.bibs) != len(setty):
                return False
        return True

    # No longer used but it might be handy.
    @staticmethod
    def match_cluster_sets(cs1, cs2):
        """
        This functions tries to generate the best matching
        between cs1 and cs2 acoarding to the shared bibrefrecs.
        It returns a dictionary with keys, clsuters in cs1,
        and values, clusters in cs2.
        @param and type of cs1 and cs2: cluster_set
        @return: dictionary with the matching clusters.
        @return type: { cluster : cluster }
        """
        matr = [[len(cl1.bibs & cl2.bibs) for cl2 in cs2.clusters] for cl1 in cs1.clusters]
        mapping = maximized_mapping(matr)
        return dict((cs1.clusters[mappy[0]], cs2.clusters[mappy[1]]) for mappy in mapping)

    def store(self):
        '''
        Stores the cluster set in a special table.
        This is used to store the results of
        tortoise/wedge in a table and later merge them
        with personid.
        '''
        named_clusters = (("%s.%d" % (self.last_name, idx), cl) for idx, cl in enumerate(self.clusters))
        # NOTE(review): relies on Python 2's eager map() for its side
        # effects; under Python 3 this would be a no-op generator.
        map(save_cluster, named_clusters)


def delayed_create_from_mark(bibrefs, last_name):
    # Returns a zero-argument thunk so the (expensive) cluster-set build can
    # be deferred until the scheduler actually needs it.
    def ret():
        return ClusterSet().create_from_mark(bibrefs, last_name)
    return ret


def delayed_cluster_sets_from_marktables(limit_to_surnames=False):
    # { name -> [(table, bibref)] }
    bibauthor_print('Delayed_cluster_set_from_marktables limited to %s' % str(limit_to_surnames))

    name_buket = {}
    if limit_to_surnames:
        limit_to_surnames = set([generate_last_name_cluster_str(s) for s in limit_to_surnames])

    # Tag each bibref with its MARC table (100 = first author, 700 = other
    # authors) and bucket by normalized last name.
    for tab, ref, name in chain(izip(cycle((100,)), *izip(*get_bib10x())),
                                izip(cycle((700,)), *izip(*get_bib70x()))):
        name = generate_last_name_cluster_str(name)
        if limit_to_surnames and not name in limit_to_surnames:
            continue
        name_buket[name] = name_buket.get(name, []) + [(tab, ref)]

    bibauthor_print('Delayed_cluster_set_from_marktables going to get %s signatures....' % str(len(name_buket)))

    # Sort buckets by signature count so small jobs can be scheduled first.
    all_refs = ((name, refs, len(list(get_signatures_from_bibrefs(refs))))
                for name, refs in name_buket.items())
    all_refs = sorted(all_refs, key=itemgetter(2))
    return ([delayed_create_from_mark(set(refs), name) for name, refs, _ in all_refs],
            map(itemgetter(0), all_refs),
            map(itemgetter(2), all_refs))


def create_lastname_list_from_personid(last_modification):
    '''
    This function generates a dictionary from a last name
    to list of personids which have this lastname.
    '''
    # ((personid, [full Name1], Nbibs) ... )
    all_names = get_author_to_confirmed_names_mapping(last_modification)

    # ((personid, last_name, Nbibs) ... )
    all_names = ((row[0], generate_last_name_cluster_str(iter(row[1]).next()), row[2])
                 for row in all_names)

    # { (last_name, [(personid)... ], Nbibs) ... }
    all_names = groupby(sorted(all_names, key=itemgetter(1)), key=itemgetter(1))
    all_names = ((key, list(data)) for key, data in all_names)
    all_names = ((key, map(itemgetter(0), data), sum(x[2] for x in data))
                 for key, data in all_names)

    return all_names


def delayed_create(create_f, pids, lname):
    # Thunk factory: binds an unbound creation method to a fresh ClusterSet.
    def ret():
        return create_f(ClusterSet(), pids, lname)
    return ret


def delayed_cluster_sets_from_personid(pure, last_modification=None):
    # Returns (thunks, last_names, sizes), ordered by ascending size.
    names = create_lastname_list_from_personid(last_modification)
    names = sorted(names, key=itemgetter(2))

    if pure:
        create = ClusterSet.create_pure
    else:
        create = ClusterSet.create_skeleton

    return ([delayed_create(create, name[1], name[0]) for name in names],
            map(itemgetter(0), names),
            map(itemgetter(2), names))
gpl-2.0
krafczyk/root
bindings/pyroot/_pythonization.py
53
12056
"""
Pythonization API.

Factories in this module build "pythonizor" callables: objects invoked by the
bindings backend with (class_proxy, class_name) the first time a C++ class is
bound, so Python-side behavior can be grafted onto it.
"""

# TODO: externalize this (have PythonizationScope and UserPythonizations as
# globals here and picked up from this module
# TODO: set explicit export list
# TODO: move cast to cppyy.lowlevel or some sort
# TODO: remove all need for accessing _backend

def _set_backend( backend ):
    # Installs the bindings backend module; all public functions below
    # delegate to it through this module-level global.
    global _backend
    _backend = backend

def set_pythonization_scope(scope):
    # Selects the active scope and lazily creates its pythonizor list.
    _backend.PythonizationScope = scope
    if scope not in _backend.UserPythonizations:
        _backend.UserPythonizations[scope] = []

def add_pythonization(pythonizor):
    """Takes a callable that should take two arguments -- the class proxy,
    and its C++ name -- and which is called the first time the named class
    is bound."""
    scope = _backend.PythonizationScope
    #scope = _pythonization_scope
    if pythonizor and not callable(pythonizor):
        raise TypeError("given '%s' object is not callable" % str(pythonizor))
    if pythonizor:
    #    _pythonizations[scope]
        _backend.UserPythonizations[scope].append(pythonizor)

def pin_type(derived_type, base_type):
    # Forces objects of derived_type to be presented as base_type.
    _backend.SetTypePinning(derived_type, base_type)

def make_interface(base_type):
    # Pinning a type to itself makes it behave as an interface type.
    pin_type(base_type, base_type)

def ignore_type_pinning(some_type):
    _backend.IgnoreTypePinning(some_type)

def cast(some_object, new_type):
    return _backend.Cast(some_object, new_type)

def add_exception_mapping(cpp_exception, py_exception):
    # Maps a C++ exception type to the Python exception raised for it.
    _backend.UserExceptions[cpp_exception] = py_exception

#--- Pythonization factories --------------------------------------------
# Each factory returns a pythonizor object (see add_pythonization) that
# matches class names (and usually member names) by regex.

def set_gil_policy(match_class, match_method, release_gil=True):
    return set_method_property(match_class, match_method, '_threaded', int(release_gil))

def set_ownership_policy(match_class, match_method, python_owns_result):
    return set_method_property(match_class, match_method, '_creates', int(python_owns_result))

def set_smart_ptr_policy(match_class, match_method, manage_smart_ptr=False):
    return set_method_property(match_class, match_method, '_manage_smart_ptr', bool(manage_smart_ptr))

# NB: Ideally, we'd use the version commented out below, but for now, we
# make do with the hackier version here.
def rename_attribute(match_class, orig_attribute, new_attribute, keep_orig=False):
    # Exposes matching attributes under a new name via a property whose
    # getter/setter/deleter proxy to the original attribute.
    class attribute_pythonizor(object):
        class getter(object):
            def __init__(self, attr):
                self.attr = attr
            def __call__(self, obj):
                return getattr(obj, self.attr)

        class setter(object):
            def __init__(self, attr):
                self.attr = attr
            def __call__(self, obj, value):
                return setattr(obj, self.attr, value)

        class deleter(object):
            def __init__(self, attr):
                self.attr = attr
            def __call__(self, obj):
                return delattr(obj, self.attr)

        def __init__(self, match_class, orig_attribute, new_attribute, keep_orig):
            import re
            self.match_class = re.compile(match_class)
            self.match_attr = re.compile(orig_attribute)
            self.new_attr = new_attribute
            # NOTE(review): keep_orig is stored but unused in this version;
            # the delattr that would honor it is commented out below.
            self.keep_orig = keep_orig

        def __call__(self, obj, name):
            if not self.match_class.match(name):
                return
            for k in dir(obj): #.__dict__:
                if self.match_attr.match(k):
                    tmp = property(self.getter(k), self.setter(k), self.deleter(k))
                    setattr(obj, self.new_attr, tmp)
                    #if not self.keep_orig: delattr(obj, k)
    return attribute_pythonizor(match_class, orig_attribute, new_attribute, keep_orig)

# def rename_attribute(match_class, orig_attribute, new_attribute, keep_orig=False):
#     class method_pythonizor:
#         def __init__(self, match_class, orig_attribute, new_attribute, keep_orig):
#             import re
#             self.match_class = re.compile(match_class)
#             self.match_attr = re.compile(orig_attribute)
#             self.new_attr = new_attribute
#             self.keep_orig = keep_orig

#         def __call__(self, obj, name):
#             import sys
#             if not self.match_class.match(name):
#                 return
#             sys.stderr.write("%s %s %s %s" % ("!!!", obj, name, "\n"))
#             for k in dir(obj): #obj.__dict__:
#                 if not self.match_attr.match(k): continue
#                 try:
#                     tmp = getattr(obj, k)
#                 except Exception as e:
#                     continue
#                 setattr(obj, self.new_attr, tmp)
#                 if not self.keep_orig: delattr(obj, k)
#     return method_pythonizor(match_class, orig_attribute, new_attribute, keep_orig)

# Shared with PyPy:
def add_overload(match_class, match_method, overload):
    # Adds `overload` to every matching method that supports __add_overload__.
    class method_pythonizor(object):
        def __init__(self, match_class, match_method, overload):
            import re
            self.match_class = re.compile(match_class)
            self.match_method = re.compile(match_method)
            self.overload = overload

        def __call__(self, obj, name):
            if not self.match_class.match(name):
                return
            for k in dir(obj): #.__dict__:
                try:
                    tmp = getattr(obj, k)
                except:
                    continue
                if self.match_method.match(k):
                    try:
                        # NOTE(review): `overload` here is the enclosing
                        # function's parameter (closure), not self.overload;
                        # equivalent in value but easy to misread.
                        tmp.__add_overload__(overload)
                    except AttributeError:
                        pass
    return method_pythonizor(match_class, match_method, overload)

def compose_method(match_class, match_method, g):
    # Replaces each matching method f with h(self, *a) = g(self, f(self, *a)).
    class composition_pythonizor(object):
        def __init__(self, match_class, match_method, g):
            import re
            self.match_class = re.compile(match_class)
            self.match_method = re.compile(match_method)
            self.g = g

        def __call__(self, obj, name):
            if not self.match_class.match(name):
                return
            g = self.g
            for k in obj.__dict__:
                if not self.match_method.match(k):
                    continue
                try:
                    f = getattr(obj, k)
                except:
                    continue
                # make_fun binds f and g per iteration, avoiding the
                # late-binding-closure pitfall inside the loop.
                def make_fun(f, g):
                    def h(self, *args, **kwargs):
                        return g(self, f(self, *args, **kwargs))
                    return h
                h = make_fun(f, g)
                setattr(obj, k, h)
    return composition_pythonizor(match_class, match_method, g)

def set_method_property(match_class, match_method, prop, value):
    # Sets attribute `prop` to `value` on every matching method object
    # (used for backend flags such as _threaded and _creates).
    class method_pythonizor(object):
        def __init__(self, match_class, match_method, prop, value):
            import re
            self.match_class = re.compile(match_class)
            self.match_method = re.compile(match_method)
            self.prop = prop
            self.value = value

        def __call__(self, obj, name):
            if not self.match_class.match(name):
                return
            for k in dir(obj): #.__dict__:
                try:
                    tmp = getattr(obj, k)
                except:
                    continue
                if self.match_method.match(k):
                    setattr(tmp, self.prop, self.value)
    return method_pythonizor(match_class, match_method, prop, value)

def make_property(match_class, match_get, match_set=None, match_del=None, prop_name=None):
    # Turns getter/setter/deleter method triples into Python properties.
    # Two modes: "match many" (regex with one capture group naming each
    # property) or single property (no group; prop_name required).
    class property_pythonizor(object):
        def __init__(self, match_class, match_get, match_set, match_del, prop_name):
            import re
            self.match_class = re.compile(match_class)

            self.match_get = re.compile(match_get)
            match_many_getters = self.match_get.groups == 1

            if match_set:
                self.match_set = re.compile(match_set)
                match_many_setters = self.match_set.groups == 1
                if match_many_getters ^ match_many_setters:
                    raise ValueError('Must match getters and setters equally')
            else:
                self.match_set = None

            if match_del:
                self.match_del = re.compile(match_del)
                match_many_deleters = self.match_del.groups == 1
                if match_many_getters ^ match_many_deleters:
                    raise ValueError('Must match getters and deleters equally')
            else:
                self.match_del = None

            self.match_many = match_many_getters
            if not (self.match_many or prop_name):
                raise ValueError("If not matching properties by regex, need a property name with exactly one substitution field")
            if self.match_many and prop_name:
                # Probe .format() with a throwaway value: an unchanged result
                # means prop_name has no substitution field.
                if prop_name.format(').!:(') == prop_name:
                    raise ValueError("If matching properties by regex and providing a property name, the name needs exactly one substitution field")

            self.prop_name = prop_name

        def make_get_del_proxy(self, getter):
            # Proxy that calls the named zero-argument method on the instance.
            class proxy(object):
                def __init__(self, getter):
                    self.getter = getter

                def __call__(self, obj):
                    return getattr(obj, self.getter)()
            return proxy(getter)

        def make_set_proxy(self, setter):
            # Proxy that calls the named one-argument method on the instance.
            class proxy(object):
                def __init__(self, setter):
                    self.setter = setter

                def __call__(self, obj, arg):
                    return getattr(obj, self.setter)(arg)
            return proxy(setter)

        def __call__(self, obj, name):
            if not self.match_class.match(name):
                return

            names = []
            named_getters = {}
            named_setters = {}
            named_deleters = {}

            if not self.match_many:
                fget, fset, fdel = None, None, None

            # Pass 1: collect getters (all of them in match-many mode,
            # just the first match otherwise).
            for k in dir(obj): #.__dict__:
                match = self.match_get.match(k)
                try:
                    tmp = getattr(obj, k)
                except:
                    continue
                if match and hasattr(tmp, '__call__'):
                    if self.match_many:
                        name = match.group(1)
                        named_getters[name] = k
                    else:
                        fget = self.make_get_del_proxy(k)
                        break

            if self.match_set:
                for k in dir(obj): #.__dict__:
                    match = self.match_set.match(k)
                    try:
                        tmp = getattr(obj, k)
                    except:
                        continue
                    if match and hasattr(tmp, '__call__'):
                        if self.match_many:
                            name = match.group(1)
                            named_setters[name] = k
                        else:
                            fset = self.make_set_proxy(k)
                            break

            if self.match_del:
                for k in dir(obj): #.__dict__:
                    match = self.match_del.match(k)
                    try:
                        tmp = getattr(obj, k)
                    except:
                        continue
                    if match and hasattr(tmp, '__call__'):
                        if self.match_many:
                            name = match.group(1)
                            named_deleters[name] = k
                        else:
                            fdel = self.make_get_del_proxy(k)
                            break

            if not self.match_many:
                new_prop = property(fget, fset, fdel)
                setattr(obj, self.prop_name, new_prop)
                return

            # Match-many mode: build one property per captured name, from
            # whichever of getter/setter/deleter matched it.
            names += list(named_getters.keys())
            names += list(named_setters.keys())
            names += list(named_deleters.keys())
            names = set(names)

            # NOTE(review): `properties` is accumulated nowhere and unused.
            properties = []
            for name in names:
                if name in named_getters:
                    fget = self.make_get_del_proxy(named_getters[name])
                else:
                    fget = None

                if name in named_setters:
                    fset = self.make_set_proxy(named_setters[name])
                else:
                    fset = None

                if name in named_deleters:
                    fdel = self.make_get_del_proxy(named_deleters[name])
                else:
                    fdel = None

                new_prop = property(fget, fset, fdel)
                if self.prop_name:
                    prop_name = self.prop_name.format(name)
                else:
                    prop_name = name

                setattr(obj, prop_name, new_prop)

    return property_pythonizor(match_class, match_get, match_set, match_del, prop_name)
lgpl-2.1
thomasvs/pychecker
test_input/test68.py
11
2903
'test implicit returns'

# NOTE(review): this file is a pychecker TEST FIXTURE (Python 2 syntax).
# The "defects" here -- implicit returns, bare excepts, unreachable code --
# are deliberate inputs for the 'implicitreturns' check; each docstring
# states the expected checker verdict. Do not "fix" them.

__pychecker__ = 'implicitreturns'

def func1(x):
    'should not produce a warning'
    if x == 1:
        return 1
    return 0

def func2(x):
    'should produce a warning'
    if x == 1:
        return 1

def func3(x):
    'should not produce a warning'
    while 1:
        if x == 1:
            return 1
        x = x / 2
    # unreachable: the while 1 loop can only exit via return
    return 0

def func4(x):
    'should not produce a warning'
    while 1:
        if x == 1:
            return 1
        x = x / 2

def func5(x):
    'should not produce a warning'
    while 1:
        if x == 1:
            return 1
    return 0

def func6(x):
    'should produce a warning'
    while 1:
        if x == 1:
            return 1
        break

def func7(x):
    'should not produce a warning'
    try:
        print x
        return 2
    except:
        pass
    return 0

def func8(x):
    'should produce a warning'
    try:
        if x == 1:
            return 3
        if x == 2:
            return 6
    except:
        pass

def func9(x):
    'should not produce a warning'
    try:
        return x
    except:
        return 0

def func10(x):
    'should not produce a warning'
    if x:
        raise ValueError

def func11(x):
    'should not produce a warning'
    if x:
        raise ValueError
    return 5

def func12(x):
    'should not produce a warning'
    # Python 2 two-argument raise syntax
    raise ValueError, 'test'

def func13(x):
    'should not produce a warning'
    if x == 1:
        return 1
    else:
        return 0

def func14(x):
    'should not produce a warning'
    try:
        if x == 1:
            return 3
        return 6
    except:
        raise

def func15(x):
    'should not produce a warning'
    try:
        return x.j
    except AttributeError:
        return 0

def func16(x):
    'should not produce a warning'
    try:
        return x.j
    except AttributeError:
        raise

def func17(x):
    'should not produce a warning'
    try:
        return x.j
    except (AttributeError, KeyError, IndexError):
        return 0

def func18(x):
    # no docstring: exercises the checker on an undocumented function that
    # can fall off the end when x == 'j'
    if x == 'n':
        return x
    if x != 'j':
        raise AttributeError

def func19(x):
    'should not produce a warning'
    while 1:
        if x:
            x = x + 1
        return 1

def func20(x):
    'should produce a warning'
    while 1:
        if x:
            break
        return 1

def func21(x):
    'should not produce a warning'
    try:
        if x == 1:
            return 3
        return 6
    finally:
        print 'do nothing'

def func22(x):
    'should not produce a warning'
    while 1:
        for _ in range(10) :
            x = x / 2
        break
    return 1

def catchup(slave, image, inProgress):
    # exercises nested-function analysis; func1.bogus() is intentionally
    # a nonexistent attribute access
    d = func1.bogus()
    def next_func():
        defer = slave.call('', image.next())
        try:
            defer.add(d.errback)
        except:
            slave.call(inProgress)
    next_func()
    return d
bsd-3-clause
100star/h2o
py/testdir_single_jvm/test_GLM2_gamma_fail1.py
9
1515
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i

class Basic(unittest.TestCase):
    """Regression test: run GLM2 with a gamma family on covtype and sanity-check
    the result. Requires a live H2O cloud (started in setUpClass)."""

    def tearDown(self):
        # Fail the test if the cloud logged any errors during this case.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        global SEED
        SEED = h2o.setup_random_seed()
        h2o.init()

    @classmethod
    def tearDownClass(cls):
        # time.sleep(3600)
        h2o.tear_down_cloud()

    def test_GLM2_gamma_fail1(self):
        csvPathname = 'standard/covtype.data'
        parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put')
        # Repeat the same GLM run several times to catch intermittent failures.
        for trial in range(5):
            kwargs = {
                'standardize': 1,
                'family': 'gamma',
                'response': 54,
                'lambda': 0.0001,
                'alpha': 0.5,
                'max_iter': 25,
                'n_folds': 1,
            }
            start = time.time()
            glm = h2o_cmd.runGLM(timeoutSecs=120, parseResult=parseResult, **kwargs)
            print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
            # if we hit the max_iter, that means it probably didn't converge. should be 1-maxExpectedIter
            # h2o_glm.simpleCheckGLM(self, glm, None, maxExpectedIterations=kwargs['max_iter']-2, **kwargs)
            h2o_glm.simpleCheckGLM(self, glm, None, None, **kwargs)
            print "Trial #", trial, "completed\n"

if __name__ == '__main__':
    h2o.unit_main()
apache-2.0