repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
jjmachan/activityPointsApp | activitypoints/lib/python3.5/site-packages/django/contrib/postgres/fields/ranges.py | 109 | 6308 | import json
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range
from django.contrib.postgres import forms, lookups
from django.db import models
from django.utils import six
from .utils import AttributeSetter
__all__ = [
'RangeField', 'IntegerRangeField', 'BigIntegerRangeField',
'FloatRangeField', 'DateTimeRangeField', 'DateRangeField',
]
class RangeField(models.Field):
    """Base class for PostgreSQL range model fields.

    Subclasses must provide:
      - ``base_field``: the scalar model field class for the bounds,
      - ``range_type``: the psycopg2 range class (e.g. NumericRange),
      - ``form_field``: the form field used by formfield().
    """
    empty_strings_allowed = False
    def __init__(self, *args, **kwargs):
        # Initializing base_field here ensures that its model matches the model for self.
        if hasattr(self, 'base_field'):
            self.base_field = self.base_field()
        super(RangeField, self).__init__(*args, **kwargs)
    @property
    def model(self):
        # Mirrors Field.model, but implemented as a property so the setter
        # below can keep the inner base_field in sync.
        try:
            return self.__dict__['model']
        except KeyError:
            raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)
    @model.setter
    def model(self, model):
        self.__dict__['model'] = model
        # Propagate so the bound scalar field reports the same model.
        self.base_field.model = model
    def get_prep_value(self, value):
        """Coerce *value* to a psycopg2 Range (or None) for query use."""
        if value is None:
            return None
        elif isinstance(value, Range):
            return value
        elif isinstance(value, (list, tuple)):
            # A 2-sequence is interpreted as (lower, upper) bounds.
            return self.range_type(value[0], value[1])
        return value
    def to_python(self, value):
        """Convert serialized (JSON string) or sequence input to a range."""
        if isinstance(value, six.string_types):
            # Assume we're deserializing
            vals = json.loads(value)
            for end in ('lower', 'upper'):
                if end in vals:
                    vals[end] = self.base_field.to_python(vals[end])
            value = self.range_type(**vals)
        elif isinstance(value, (list, tuple)):
            value = self.range_type(value[0], value[1])
        return value
    def set_attributes_from_name(self, name):
        super(RangeField, self).set_attributes_from_name(name)
        # Keep the inner field's name/attname aligned with this field's.
        self.base_field.set_attributes_from_name(name)
    def value_to_string(self, obj):
        """Serialize this field's value on *obj* to a JSON string."""
        value = self.value_from_object(obj)
        if value is None:
            return None
        if value.isempty:
            return json.dumps({"empty": True})
        base_field = self.base_field
        result = {"bounds": value._bounds}
        for end in ('lower', 'upper'):
            val = getattr(value, end)
            if val is None:
                result[end] = None
            else:
                # AttributeSetter lets base_field.value_to_string read the
                # bound as if it were a model attribute.
                obj = AttributeSetter(base_field.attname, val)
                result[end] = base_field.value_to_string(obj)
        return json.dumps(result)
    def formfield(self, **kwargs):
        kwargs.setdefault('form_class', self.form_field)
        return super(RangeField, self).formfield(**kwargs)
class IntegerRangeField(RangeField):
    # PostgreSQL int4range: range of 32-bit integers.
    base_field = models.IntegerField
    range_type = NumericRange
    form_field = forms.IntegerRangeField
    def db_type(self, connection):
        return 'int4range'
class BigIntegerRangeField(RangeField):
    # PostgreSQL int8range: range of 64-bit integers.
    base_field = models.BigIntegerField
    range_type = NumericRange
    form_field = forms.IntegerRangeField
    def db_type(self, connection):
        return 'int8range'
class FloatRangeField(RangeField):
    # Stored as numrange (arbitrary-precision numeric) on the server side.
    base_field = models.FloatField
    range_type = NumericRange
    form_field = forms.FloatRangeField
    def db_type(self, connection):
        return 'numrange'
class DateTimeRangeField(RangeField):
    # tstzrange: range of timezone-aware timestamps.
    base_field = models.DateTimeField
    range_type = DateTimeTZRange
    form_field = forms.DateTimeRangeField
    def db_type(self, connection):
        return 'tstzrange'
class DateRangeField(RangeField):
    # daterange: range of calendar dates.
    base_field = models.DateField
    range_type = DateRange
    form_field = forms.DateRangeField
    def db_type(self, connection):
        return 'daterange'
# Containment/overlap lookups shared by every range field subclass.
RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)
class RangeContainedBy(models.Lookup):
    """``contained_by`` lookup for scalar fields: SQL ``value <@ range``."""
    lookup_name = 'contained_by'
    # Maps a scalar column's db_type to the PostgreSQL range type used to
    # cast the right-hand side.
    type_mapping = {
        'integer': 'int4range',
        'bigint': 'int8range',
        'double precision': 'numrange',
        'date': 'daterange',
        'timestamp with time zone': 'tstzrange',
    }
    def as_sql(self, qn, connection):
        field = self.lhs.output_field
        # str.format fills only the trailing {} with the range type; the two
        # %s placeholders are substituted with lhs/rhs SQL below.
        if isinstance(field, models.FloatField):
            # float columns must be cast to numeric to compare with numrange.
            sql = '%s::numeric <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        else:
            sql = '%s <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        return sql % (lhs, rhs), params
    def get_prep_lookup(self):
        # Reuse RangeField's coercion so lists/tuples become Range objects.
        return RangeField().get_prep_value(self.rhs)
# Make `contained_by` available on the plain scalar fields themselves.
models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.BigIntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)
# Range-vs-range comparison operators, registered on all range fields.
@RangeField.register_lookup
class FullyLessThan(lookups.PostgresSimpleLookup):
    # lhs range lies strictly below the rhs range.
    lookup_name = 'fully_lt'
    operator = '<<'
@RangeField.register_lookup
class FullGreaterThan(lookups.PostgresSimpleLookup):
    # lhs range lies strictly above the rhs range.
    lookup_name = 'fully_gt'
    operator = '>>'
@RangeField.register_lookup
class NotLessThan(lookups.PostgresSimpleLookup):
    # lhs range does not extend below the rhs range.
    lookup_name = 'not_lt'
    operator = '&>'
@RangeField.register_lookup
class NotGreaterThan(lookups.PostgresSimpleLookup):
    # lhs range does not extend above the rhs range.
    lookup_name = 'not_gt'
    operator = '&<'
@RangeField.register_lookup
class AdjacentToLookup(lookups.PostgresSimpleLookup):
    # Ranges touch without overlapping.
    lookup_name = 'adjacent_to'
    operator = '-|-'
@RangeField.register_lookup
class RangeStartsWith(models.Transform):
    # Exposes PostgreSQL lower(range) as the `startswith` transform.
    lookup_name = 'startswith'
    function = 'lower'
    @property
    def output_field(self):
        # The bound has the scalar type of the range's base field.
        return self.lhs.output_field.base_field
@RangeField.register_lookup
class RangeEndsWith(models.Transform):
    # Exposes PostgreSQL upper(range) as the `endswith` transform.
    lookup_name = 'endswith'
    function = 'upper'
    @property
    def output_field(self):
        return self.lhs.output_field.base_field
@RangeField.register_lookup
class IsEmpty(models.Transform):
    # Exposes PostgreSQL isempty(range) as a boolean transform.
    lookup_name = 'isempty'
    function = 'isempty'
    output_field = models.BooleanField()
| mit |
gdreich/geonode | geonode/contrib/metadataxsl/management/commands/addmissinglinks.py | 23 | 1771 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.core.management.base import BaseCommand
from geonode.base.models import Link, ResourceBase
from geonode.contrib.metadataxsl.models import add_xsl_link
class Command(BaseCommand):
    """Scan every resource and attach the XSL metadata link where the
    plain ISO metadata link already exists."""
    help = 'Add missing links to ISO XSL metadata service'

    def handle(self, *args, **options):
        for res in ResourceBase.objects.all():
            print('Checking resource with id {}'.format(res.id))
            # Only resources that already expose an ISO metadata link
            # can get the XSL-styled variant.
            iso_links = Link.objects.filter(
                resource_id=res.id, link_type='metadata', name='ISO')
            if not iso_links:
                print(' ISO link NOT found for resource {} "{}"'.format(res.id, res.title))
                continue
            print(' ISO link found for resource {} "{}"'.format(res.id, res.title))
            if add_xsl_link(res):
                print(' XSL link created')
| gpl-3.0 |
konstruktoid/ansible-upstream | lib/ansible/modules/cloud/cloudstack/cs_vpn_connection.py | 35 | 10273 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cs_vpn_connection
short_description: Manages site-to-site VPN connections on Apache CloudStack based clouds.
description:
- Create and remove VPN connections.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
vpc:
description:
- Name of the VPC the VPN connection is related to.
required: true
vpn_customer_gateway:
description:
- Name of the VPN customer gateway.
required: true
passive:
description:
- State of the VPN connection.
- Only considered when C(state=present).
default: no
type: bool
force:
description:
- Activate the VPN gateway if not already activated on C(state=present).
- Also see M(cs_vpn_gateway).
default: no
type: bool
state:
description:
- State of the VPN connection.
default: present
choices: [ present, absent ]
domain:
description:
- Domain the VPN connection is related to.
account:
description:
- Account the VPN connection is related to.
project:
description:
- Name of the project the VPN connection is related to.
poll_async:
description:
- Poll async jobs until job has finished.
default: yes
type: bool
extends_documentation_fragment: cloudstack
'''
EXAMPLES = r'''
- name: Create a VPN connection with activated VPN gateway
local_action:
module: cs_vpn_connection
vpn_customer_gateway: my vpn connection
vpc: my vpc
- name: Create a VPN connection and force VPN gateway activation
local_action:
module: cs_vpn_connection
vpn_customer_gateway: my vpn connection
vpc: my vpc
force: yes
- name: Remove a vpn connection
local_action:
module: cs_vpn_connection
vpn_customer_gateway: my vpn connection
vpc: my vpc
state: absent
'''
RETURN = r'''
---
id:
description: UUID of the VPN connection.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
vpn_gateway_id:
description: UUID of the VPN gateway.
returned: success
type: string
sample: 04589590-ac63-93f5-4ffc-b698b8ac38b6
domain:
description: Domain the VPN connection is related to.
returned: success
type: string
sample: example domain
account:
description: Account the VPN connection is related to.
returned: success
type: string
sample: example account
project:
description: Name of project the VPN connection is related to.
returned: success
type: string
sample: Production
created:
description: Date the connection was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
dpd:
  description: Whether dead peer detection is enabled or not.
returned: success
type: bool
sample: true
esp_lifetime:
description: Lifetime in seconds of phase 2 VPN connection.
returned: success
type: int
sample: 86400
esp_policy:
  description: ESP policy of the VPN connection.
returned: success
type: string
sample: aes256-sha1;modp1536
force_encap:
description: Whether encapsulation for NAT traversal is enforced or not.
returned: success
type: bool
sample: true
ike_lifetime:
description: Lifetime in seconds of phase 1 VPN connection.
returned: success
type: int
sample: 86400
ike_policy:
  description: IKE policy of the VPN connection.
returned: success
type: string
sample: aes256-sha1;modp1536
cidrs:
description: List of CIDRs of the customer gateway.
returned: success
type: list
sample: [ 10.10.10.0/24 ]
passive:
description: Whether the connection is passive or not.
returned: success
type: bool
sample: false
public_ip:
description: IP address of the VPN gateway.
returned: success
type: string
sample: 10.100.212.10
gateway:
description: IP address of the VPN customer gateway.
returned: success
type: string
sample: 10.101.214.10
state:
description: State of the VPN connection.
returned: success
type: string
sample: Connected
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackVpnConnection(AnsibleCloudStack):
    """CloudStack helper that creates/removes site-to-site VPN connections."""
    def __init__(self, module):
        super(AnsibleCloudStackVpnConnection, self).__init__(module)
        # Maps CloudStack API result keys to the module's return keys.
        self.returns = {
            'dpd': 'dpd',
            'esplifetime': 'esp_lifetime',
            'esppolicy': 'esp_policy',
            'gateway': 'gateway',
            'ikepolicy': 'ike_policy',
            'ikelifetime': 'ike_lifetime',
            'publicip': 'public_ip',
            'passive': 'passive',
            's2svpngatewayid': 'vpn_gateway_id',
        }
        # Cache for the resolved customer-gateway dict (see getter below).
        self.vpn_customer_gateway = None
    def get_vpn_customer_gateway(self, key=None, identifier=None, refresh=False):
        """Resolve the VPN customer gateway by UUID or name.

        Returns the cached result unless refresh is requested; fails the
        module when no matching gateway exists.
        """
        if not refresh and self.vpn_customer_gateway:
            return self._get_by_key(key, self.vpn_customer_gateway)
        args = {
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
            'fetch_list': True,
        }
        vpn_customer_gateway = identifier or self.module.params.get('vpn_customer_gateway')
        vcgws = self.query_api('listVpnCustomerGateways', **args)
        if vcgws:
            for vcgw in vcgws:
                # Match on either the UUID or the (case-insensitive) name.
                if vpn_customer_gateway.lower() in [vcgw['id'], vcgw['name'].lower()]:
                    self.vpn_customer_gateway = vcgw
                    return self._get_by_key(key, self.vpn_customer_gateway)
        self.fail_json(msg="VPN customer gateway not found: %s" % vpn_customer_gateway)
    def get_vpn_gateway(self, key=None):
        """Return the VPC's VPN gateway; create it when force=true.

        In check mode the create is skipped and an empty dict is returned.
        """
        args = {
            'vpcid': self.get_vpc(key='id'),
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
        }
        vpn_gateways = self.query_api('listVpnGateways', **args)
        if vpn_gateways:
            return self._get_by_key(key, vpn_gateways['vpngateway'][0])
        elif self.module.params.get('force'):
            if self.module.check_mode:
                return {}
            res = self.query_api('createVpnGateway', **args)
            vpn_gateway = self.poll_job(res, 'vpngateway')
            return self._get_by_key(key, vpn_gateway)
        self.fail_json(msg="VPN gateway not found and not forced to create one")
    def get_vpn_connection(self):
        """Return the existing connection for the customer gateway, or None."""
        args = {
            'vpcid': self.get_vpc(key='id'),
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
        }
        vpn_conns = self.query_api('listVpnConnections', **args)
        if vpn_conns:
            for vpn_conn in vpn_conns['vpnconnection']:
                if self.get_vpn_customer_gateway(key='id') == vpn_conn['s2scustomergatewayid']:
                    return vpn_conn
    def present_vpn_connection(self):
        """Ensure the VPN connection exists; create it when missing."""
        vpn_conn = self.get_vpn_connection()
        args = {
            's2scustomergatewayid': self.get_vpn_customer_gateway(key='id'),
            's2svpngatewayid': self.get_vpn_gateway(key='id'),
            'passive': self.module.params.get('passive'),
        }
        if not vpn_conn:
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('createVpnConnection', **args)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    vpn_conn = self.poll_job(res, 'vpnconnection')
        return vpn_conn
    def absent_vpn_connection(self):
        """Ensure the VPN connection is removed; return the old connection."""
        vpn_conn = self.get_vpn_connection()
        if vpn_conn:
            self.result['changed'] = True
            args = {
                'id': vpn_conn['id']
            }
            if not self.module.check_mode:
                res = self.query_api('deleteVpnConnection', **args)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'vpnconnection')
        return vpn_conn
    def get_result(self, vpn_conn):
        """Extend the common result dict with connection-specific values."""
        super(AnsibleCloudStackVpnConnection, self).get_result(vpn_conn)
        if vpn_conn:
            if 'cidrlist' in vpn_conn:
                # API returns a comma-separated string; expose it as a list.
                self.result['cidrs'] = vpn_conn['cidrlist'].split(',') or [vpn_conn['cidrlist']]
            # Ensure we return a bool
            self.result['force_encap'] = True if vpn_conn.get('forceencap') else False
            args = {
                'key': 'name',
                'identifier': vpn_conn['s2scustomergatewayid'],
                'refresh': True,
            }
            self.result['vpn_customer_gateway'] = self.get_vpn_customer_gateway(**args)
        return self.result
def main():
    """Module entry point: build the argument spec, apply the requested
    state and exit with the resulting facts."""
    spec = cs_argument_spec()
    spec.update(dict(
        vpn_customer_gateway=dict(required=True),
        vpc=dict(required=True),
        domain=dict(),
        account=dict(),
        project=dict(),
        zone=dict(),
        passive=dict(type='bool', default=False),
        force=dict(type='bool', default=False),
        state=dict(choices=['present', 'absent'], default='present'),
        poll_async=dict(type='bool', default=True),
    ))
    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )
    acs_vpn_conn = AnsibleCloudStackVpnConnection(module)
    # Dispatch on the requested state.
    if module.params.get('state') == "absent":
        vpn_conn = acs_vpn_conn.absent_vpn_connection()
    else:
        vpn_conn = acs_vpn_conn.present_vpn_connection()
    module.exit_json(**acs_vpn_conn.get_result(vpn_conn))
if __name__ == '__main__':
    main()
| gpl-3.0 |
thodrek/sampler | lib/gtest-1.7.0/test/gtest_uninitialized_test.py | 2901 | 2480 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
  """Raises AssertionError unless the given condition is truthy."""
  if condition:
    return
  raise AssertionError
def AssertEq(expected, actual):
  """Raises AssertionError when expected != actual, printing both values.

  Uses the parenthesized print form so the helper is valid syntax under
  both Python 2 and Python 3 (a single argument prints identically).
  """
  if expected != actual:
    print('Expected: %s' % (expected,))
    print(' Actual: %s' % (actual,))
    raise AssertionError
def TestExitCodeAndOutput(command):
  """Runs *command* and checks that it fails with the expected diagnostics."""
  proc = gtest_test_utils.Subprocess(command)
  # The binary must terminate on its own (not via a signal) ...
  Assert(proc.exited)
  # ... with exit code 1 ...
  AssertEq(1, proc.exit_code)
  # ... and point the user at InitGoogleTest().
  Assert('InitGoogleTest' in proc.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
  """Checks gtest's behavior when InitGoogleTest() was never called."""
  def testExitCodeAndOutput(self):
    TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
  gtest_test_utils.Main()
| apache-2.0 |
nanolearningllc/edx-platform-cypress | common/test/acceptance/pages/studio/html_component_editor.py | 115 | 1139 | from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from component_editor import ComponentEditorView
class HtmlComponentEditorView(ComponentEditorView):
    """
    Represents the rendered view of an HTML component editor.
    """
    def set_content(self, content):
        """
        Types content into the html component, leaving the component open.
        """
        # Switch the modal into the raw HTML editor mode first.
        self.q(css='.edit-xblock-modal .editor-modes .editor-button').click()
        editor = self.q(css=self._bounded_selector('.html-editor .mce-edit-area'))[0]
        # Select-all, then overwrite the selection with the new content.
        actions = ActionChains(self.browser).click(editor)
        actions = actions.send_keys([Keys.CONTROL, 'a']).key_up(Keys.CONTROL)
        actions.send_keys(content).perform()
    def set_content_and_save(self, content):
        """
        Types content into the html component and presses Save.
        """
        self.set_content(content)
        self.save()
    def set_content_and_cancel(self, content):
        """
        Types content into the html component and presses Cancel to abort the change.
        """
        self.set_content(content)
        self.cancel()
| agpl-3.0 |
atkinson/django-registration | registration/tests/__init__.py | 59 | 1347 | from django.test import TestCase
import registration
from registration.tests.backends import *
from registration.tests.forms import *
from registration.tests.models import *
from registration.tests.views import *
class RegistrationVersionInfoTests(TestCase):
    """
    Test django-registration's internal version-reporting
    infrastructure.
    """
    def setUp(self):
        # Remember the real version tuple so tests can overwrite it.
        self.version = registration.VERSION
    def tearDown(self):
        registration.VERSION = self.version
    def test_get_version(self):
        """
        Test the version-info reporting.
        """
        cases = [
            ((1, 0, 0, 'alpha', 0), "1.0 pre-alpha"),
            ((1, 0, 1, 'alpha', 1), "1.0.1 alpha 1"),
            ((1, 1, 0, 'beta', 2), "1.1 beta 2"),
            ((1, 2, 1, 'rc', 3), "1.2.1 rc 3"),
            ((1, 3, 0, 'final', 0), "1.3"),
            ((1, 4, 1, 'beta', 0), "1.4.1 beta"),
        ]
        for version, expected in cases:
            registration.VERSION = version
            self.assertEqual(registration.get_version(), expected)
| bsd-3-clause |
hengqujushi/shadowsocks | shadowsocks/crypto/table.py | 1044 | 8108 | # !/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import string
import struct
import hashlib
__all__ = ['ciphers']
# Per-key cache of [encrypt_table, decrypt_table] pairs.
cached_tables = {}

# Pick the bytes-translation helpers available on this Python version
# (string.maketrans/translate on Python 2, bytes methods on Python 3).
if hasattr(string, 'maketrans'):
    maketrans = string.maketrans
    translate = string.translate
else:
    maketrans = bytes.maketrans
    translate = bytes.translate


def get_table(key):
    """Derive the 256-entry substitution table for *key*.

    The first 64 bits of md5(key) seed a sequence of 1023 stable sorts of
    the identity byte table; returns a list of 256 single-byte values.
    """
    digest = hashlib.md5(key).digest()
    a, _unused = struct.unpack('<QQ', digest)
    identity = maketrans(b'', b'')
    table = [identity[i:i + 1] for i in range(len(identity))]
    for round_no in range(1, 1024):
        table.sort(key=lambda c: int(a % (ord(c) + round_no)))
    return table


def init_table(key):
    """Return (and cache) the [encrypt, decrypt] translation tables for key."""
    if key not in cached_tables:
        enc = b''.join(get_table(key))
        # The decrypt table is the inverse mapping back to the identity.
        dec = maketrans(enc, maketrans(b'', b''))
        cached_tables[key] = [enc, dec]
    return cached_tables[key]


class TableCipher(object):
    """Stateless substitution cipher; op=1 encrypts, op=0 decrypts."""

    def __init__(self, cipher_name, key, iv, op):
        self._encrypt_table, self._decrypt_table = init_table(key)
        self._op = op

    def update(self, data):
        table = self._encrypt_table if self._op else self._decrypt_table
        return translate(data, table)


ciphers = {
    'table': (0, 0, TableCipher)
}
def test_table_result():
    """Checks get_table() against fixed vectors for two known keys.

    target1/target2 hold the expected [encrypt_table, decrypt_table]
    byte values for keys b'foobar!' and b'barfoo!' respectively.
    """
    from shadowsocks.common import ord
    target1 = [
        [60, 53, 84, 138, 217, 94, 88, 23, 39, 242, 219, 35, 12, 157, 165, 181,
        255, 143, 83, 247, 162, 16, 31, 209, 190, 171, 115, 65, 38, 41, 21,
        245, 236, 46, 121, 62, 166, 233, 44, 154, 153, 145, 230, 49, 128, 216,
        173, 29, 241, 119, 64, 229, 194, 103, 131, 110, 26, 197, 218, 59, 204,
        56, 27, 34, 141, 221, 149, 239, 192, 195, 24, 155, 170, 183, 11, 254,
        213, 37, 137, 226, 75, 203, 55, 19, 72, 248, 22, 129, 33, 175, 178,
        10, 198, 71, 77, 36, 113, 167, 48, 2, 117, 140, 142, 66, 199, 232,
        243, 32, 123, 54, 51, 82, 57, 177, 87, 251, 150, 196, 133, 5, 253,
        130, 8, 184, 14, 152, 231, 3, 186, 159, 76, 89, 228, 205, 156, 96,
        163, 146, 18, 91, 132, 85, 80, 109, 172, 176, 105, 13, 50, 235, 127,
        0, 189, 95, 98, 136, 250, 200, 108, 179, 211, 214, 106, 168, 78, 79,
        74, 210, 30, 73, 201, 151, 208, 114, 101, 174, 92, 52, 120, 240, 15,
        169, 220, 182, 81, 224, 43, 185, 40, 99, 180, 17, 212, 158, 42, 90, 9,
        191, 45, 6, 25, 4, 222, 67, 126, 1, 116, 124, 206, 69, 61, 7, 68, 97,
        202, 63, 244, 20, 28, 58, 93, 134, 104, 144, 227, 147, 102, 118, 135,
        148, 47, 238, 86, 112, 122, 70, 107, 215, 100, 139, 223, 225, 164,
        237, 111, 125, 207, 160, 187, 246, 234, 161, 188, 193, 249, 252],
        [151, 205, 99, 127, 201, 119, 199, 211, 122, 196, 91, 74, 12, 147, 124,
        180, 21, 191, 138, 83, 217, 30, 86, 7, 70, 200, 56, 62, 218, 47, 168,
        22, 107, 88, 63, 11, 95, 77, 28, 8, 188, 29, 194, 186, 38, 198, 33,
        230, 98, 43, 148, 110, 177, 1, 109, 82, 61, 112, 219, 59, 0, 210, 35,
        215, 50, 27, 103, 203, 212, 209, 235, 93, 84, 169, 166, 80, 130, 94,
        164, 165, 142, 184, 111, 18, 2, 141, 232, 114, 6, 131, 195, 139, 176,
        220, 5, 153, 135, 213, 154, 189, 238, 174, 226, 53, 222, 146, 162,
        236, 158, 143, 55, 244, 233, 96, 173, 26, 206, 100, 227, 49, 178, 34,
        234, 108, 207, 245, 204, 150, 44, 87, 121, 54, 140, 118, 221, 228,
        155, 78, 3, 239, 101, 64, 102, 17, 223, 41, 137, 225, 229, 66, 116,
        171, 125, 40, 39, 71, 134, 13, 193, 129, 247, 251, 20, 136, 242, 14,
        36, 97, 163, 181, 72, 25, 144, 46, 175, 89, 145, 113, 90, 159, 190,
        15, 183, 73, 123, 187, 128, 248, 252, 152, 24, 197, 68, 253, 52, 69,
        117, 57, 92, 104, 157, 170, 214, 81, 60, 133, 208, 246, 172, 23, 167,
        160, 192, 76, 161, 237, 45, 4, 58, 10, 182, 65, 202, 240, 185, 241,
        79, 224, 132, 51, 42, 126, 105, 37, 250, 149, 32, 243, 231, 67, 179,
        48, 9, 106, 216, 31, 249, 19, 85, 254, 156, 115, 255, 120, 75, 16]]
    target2 = [
        [124, 30, 170, 247, 27, 127, 224, 59, 13, 22, 196, 76, 72, 154, 32,
        209, 4, 2, 131, 62, 101, 51, 230, 9, 166, 11, 99, 80, 208, 112, 36,
        248, 81, 102, 130, 88, 218, 38, 168, 15, 241, 228, 167, 117, 158, 41,
        10, 180, 194, 50, 204, 243, 246, 251, 29, 198, 219, 210, 195, 21, 54,
        91, 203, 221, 70, 57, 183, 17, 147, 49, 133, 65, 77, 55, 202, 122,
        162, 169, 188, 200, 190, 125, 63, 244, 96, 31, 107, 106, 74, 143, 116,
        148, 78, 46, 1, 137, 150, 110, 181, 56, 95, 139, 58, 3, 231, 66, 165,
        142, 242, 43, 192, 157, 89, 175, 109, 220, 128, 0, 178, 42, 255, 20,
        214, 185, 83, 160, 253, 7, 23, 92, 111, 153, 26, 226, 33, 176, 144,
        18, 216, 212, 28, 151, 71, 206, 222, 182, 8, 174, 205, 201, 152, 240,
        155, 108, 223, 104, 239, 98, 164, 211, 184, 34, 193, 14, 114, 187, 40,
        254, 12, 67, 93, 217, 6, 94, 16, 19, 82, 86, 245, 24, 197, 134, 132,
        138, 229, 121, 5, 235, 238, 85, 47, 103, 113, 179, 69, 250, 45, 135,
        156, 25, 61, 75, 44, 146, 189, 84, 207, 172, 119, 53, 123, 186, 120,
        171, 68, 227, 145, 136, 100, 90, 48, 79, 159, 149, 39, 213, 236, 126,
        52, 60, 225, 199, 105, 73, 233, 252, 118, 215, 35, 115, 64, 37, 97,
        129, 161, 177, 87, 237, 141, 173, 191, 163, 140, 234, 232, 249],
        [117, 94, 17, 103, 16, 186, 172, 127, 146, 23, 46, 25, 168, 8, 163, 39,
        174, 67, 137, 175, 121, 59, 9, 128, 179, 199, 132, 4, 140, 54, 1, 85,
        14, 134, 161, 238, 30, 241, 37, 224, 166, 45, 119, 109, 202, 196, 93,
        190, 220, 69, 49, 21, 228, 209, 60, 73, 99, 65, 102, 7, 229, 200, 19,
        82, 240, 71, 105, 169, 214, 194, 64, 142, 12, 233, 88, 201, 11, 72,
        92, 221, 27, 32, 176, 124, 205, 189, 177, 246, 35, 112, 219, 61, 129,
        170, 173, 100, 84, 242, 157, 26, 218, 20, 33, 191, 155, 232, 87, 86,
        153, 114, 97, 130, 29, 192, 164, 239, 90, 43, 236, 208, 212, 185, 75,
        210, 0, 81, 227, 5, 116, 243, 34, 18, 182, 70, 181, 197, 217, 95, 183,
        101, 252, 248, 107, 89, 136, 216, 203, 68, 91, 223, 96, 141, 150, 131,
        13, 152, 198, 111, 44, 222, 125, 244, 76, 251, 158, 106, 24, 42, 38,
        77, 2, 213, 207, 249, 147, 113, 135, 245, 118, 193, 47, 98, 145, 66,
        160, 123, 211, 165, 78, 204, 80, 250, 110, 162, 48, 58, 10, 180, 55,
        231, 79, 149, 74, 62, 50, 148, 143, 206, 28, 15, 57, 159, 139, 225,
        122, 237, 138, 171, 36, 56, 115, 63, 144, 154, 6, 230, 133, 215, 41,
        184, 22, 104, 254, 234, 253, 187, 226, 247, 188, 156, 151, 40, 108,
        51, 83, 178, 52, 3, 31, 255, 195, 53, 235, 126, 167, 120]]
    # Verify encrypt and decrypt tables for the first key.
    encrypt_table = b''.join(get_table(b'foobar!'))
    decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
    for i in range(0, 256):
        assert (target1[0][i] == ord(encrypt_table[i]))
        assert (target1[1][i] == ord(decrypt_table[i]))
    # And again for the second key.
    encrypt_table = b''.join(get_table(b'barfoo!'))
    decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
    for i in range(0, 256):
        assert (target2[0][i] == ord(encrypt_table[i]))
        assert (target2[1][i] == ord(decrypt_table[i]))
def test_encryption():
    """Round-trip check: encrypt then decrypt must restore the plaintext."""
    from shadowsocks.crypto import util
    cipher = TableCipher('table', b'test', b'', 1)
    decipher = TableCipher('table', b'test', b'', 0)
    util.run_cipher(cipher, decipher)
if __name__ == '__main__':
    test_table_result()
    test_encryption()
| apache-2.0 |
hvy/chainer | examples/imagenet/resnet50.py | 3 | 4807 | # Original author: yasunorikudo
# (https://github.com/yasunorikudo/chainer-ResNet)
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
class BottleNeckA(chainer.Chain):
    """Projection bottleneck block: 1x1 -> 3x3 -> 1x1 convolutions plus a
    strided 1x1 shortcut (conv4/bn4) that matches channels/resolution.
    """
    def __init__(self, in_size, ch, out_size, stride=2, groups=1):
        super(BottleNeckA, self).__init__()
        initialW = initializers.HeNormal()
        with self.init_scope():
            # Main branch: reduce to `ch`, 3x3 (optionally grouped), expand.
            self.conv1 = L.Convolution2D(
                in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
            self.bn1 = L.BatchNormalization(ch)
            self.conv2 = L.Convolution2D(
                ch, ch, 3, 1, 1, initialW=initialW, nobias=True,
                groups=groups)
            self.bn2 = L.BatchNormalization(ch)
            self.conv3 = L.Convolution2D(
                ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
            self.bn3 = L.BatchNormalization(out_size)
            # Shortcut branch: projection to out_size with the same stride.
            self.conv4 = L.Convolution2D(
                in_size, out_size, 1, stride, 0,
                initialW=initialW, nobias=True)
            self.bn4 = L.BatchNormalization(out_size)
    def forward(self, x):
        # Main branch.
        h1 = F.relu(self.bn1(self.conv1(x)))
        h1 = F.relu(self.bn2(self.conv2(h1)))
        h1 = self.bn3(self.conv3(h1))
        # Projection shortcut.
        h2 = self.bn4(self.conv4(x))
        return F.relu(h1 + h2)
class BottleNeckB(chainer.Chain):
    """Identity bottleneck block: 1x1 -> 3x3 -> 1x1 convolutions with a
    plain (unprojected) residual connection; channels stay at in_size.
    """
    def __init__(self, in_size, ch, groups=1):
        super(BottleNeckB, self).__init__()
        initialW = initializers.HeNormal()
        with self.init_scope():
            self.conv1 = L.Convolution2D(
                in_size, ch, 1, 1, 0, initialW=initialW, nobias=True)
            self.bn1 = L.BatchNormalization(ch)
            self.conv2 = L.Convolution2D(
                ch, ch, 3, 1, 1, initialW=initialW, nobias=True,
                groups=groups)
            self.bn2 = L.BatchNormalization(ch)
            self.conv3 = L.Convolution2D(
                ch, in_size, 1, 1, 0, initialW=initialW, nobias=True)
            self.bn3 = L.BatchNormalization(in_size)
    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))
        # Identity residual: input added back unchanged.
        return F.relu(h + x)
class Block(chainer.ChainList):
    """A ResNet stage: one projection block (BottleNeckA) followed by
    `layer - 1` identity blocks (BottleNeckB), applied sequentially.
    """
    def __init__(self, layer, in_size, ch, out_size, stride=2, groups=1):
        super(Block, self).__init__()
        self.add_link(BottleNeckA(in_size, ch, out_size, stride, groups))
        for i in range(layer - 1):
            self.add_link(BottleNeckB(out_size, ch, groups))
    def forward(self, x):
        for f in self.children():
            x = f(x)
        return x
class ResNet50(chainer.Chain):
    """ResNet-50 classifier for ImageNet (1000 classes).

    forward() returns the softmax cross-entropy loss and reports
    loss/accuracy via chainer.report.
    """
    # Expected input image side length.
    insize = 224
    def __init__(self):
        super(ResNet50, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(
                3, 64, 7, 2, 3, initialW=initializers.HeNormal())
            self.bn1 = L.BatchNormalization(64)
            # Stages res2..res5 with the standard ResNet-50 layer counts
            # (3, 4, 6, 3); res2 keeps stride 1 after the initial pooling.
            self.res2 = Block(3, 64, 64, 256, 1)
            self.res3 = Block(4, 256, 128, 512)
            self.res4 = Block(6, 512, 256, 1024)
            self.res5 = Block(3, 1024, 512, 2048)
            self.fc = L.Linear(2048, 1000)
    def forward(self, x, t):
        h = self.bn1(self.conv1(x))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h)
        h = self.res3(h)
        h = self.res4(h)
        h = self.res5(h)
        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.fc(h)
        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
class ResNet50_Nhwc(chainer.Chain):
    """ResNet-50 variant that runs the residual stages in channels-last
    (NHWC) memory layout via the cudnn_fast compute mode.
    """
    # Expected input image side length.
    insize = 224
    def __init__(self):
        super(ResNet50_Nhwc, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(
                3, 64, 7, 2, 3, initialW=initializers.HeNormal())
            self.bn1 = L.BatchNormalization(64)
            # Build the residual stages under cudnn_fast so their links are
            # configured for the channels-last fast path.
            with chainer.using_config('compute_mode', 'cudnn_fast'):
                self.res2 = Block(3, 64, 64, 256, 1)
                self.res3 = Block(4, 256, 128, 512)
                self.res4 = Block(6, 512, 256, 1024)
                self.res5 = Block(3, 1024, 512, 2048)
            self.fc = L.Linear(2048, 1000)
    def forward(self, x, t):
        h = self.bn1(self.conv1(x))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        # Switch to channels-last for the residual stages, back afterwards.
        h = h.as_layout(chainer.memory_layouts.CUDNN_CHANNEL_LAST_X)
        h = self.res2(h)
        h = self.res3(h)
        h = self.res4(h)
        h = self.res5(h)
        h = h.as_layout(None)
        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.fc(h)
        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
| mit |
7kbird/chrome | mojo/tools/generate_java_callback_interfaces.py | 32 | 1702 | """Generate the org.chromium.mojo.bindings.Callbacks interface"""
import argparse
import sys
# Template for one generated CallbackN interface. Filled with:
# (nb_args, javadoc @param lines, nb_args, type parameters, call parameters).
CALLBACK_TEMPLATE = ("""
    /**
     * A generic %d-argument callback.
     *
     * %s
     */
    interface Callback%d<%s> {
        /**
         * Call the callback.
         */
        public void call(%s);
    }
""")

# Outer file template; %s receives the concatenated CallbackN interfaces.
INTERFACE_TEMPLATE = (
    """// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file was generated using
//     mojo/tools/generate_java_callback_interfaces.py

package org.chromium.mojo.bindings;

/**
 * Contains a generic interface for callbacks.
 */
public interface Callbacks {
%s
}""")


def GenerateCallback(nb_args):
    """Return the Java source for a Callback<nb_args> interface.

    Builds the javadoc @param list, the generic type parameter list
    (T1, ..., Tn) and the call() parameter list (T1 arg1, ..., Tn argn),
    then substitutes them into CALLBACK_TEMPLATE.
    """
    params = '\n * '.join(
        ['@param <T%d> the type of argument %d.' % (i + 1, i + 1)
         # range() instead of the Python-2-only xrange(): identical
         # iteration behavior, and the script also runs under Python 3.
         for i in range(nb_args)])
    template_parameters = ', '.join(['T%d' % (i + 1) for i in range(nb_args)])
    callback_parameters = ', '.join(['T%d arg%d' % ((i + 1), (i + 1))
                                     for i in range(nb_args)])
    return CALLBACK_TEMPLATE % (nb_args, params, nb_args, template_parameters,
                                callback_parameters)
def main():
    """Print the generated Callbacks interface for 1..max_args arguments.

    Returns 0 on success; argparse exits directly on bad usage.
    """
    parser = argparse.ArgumentParser(
        description="Generate org.chromium.mojo.bindings.Callbacks")
    parser.add_argument("max_args", nargs=1, type=int,
        help="maximal number of arguments to generate callbacks for")
    args = parser.parse_args()
    max_args = args.max_args[0]
    # print(x) with a single argument behaves identically under Python 2
    # (a parenthesized expression) and Python 3 (a function call);
    # range() replaces the Python-2-only xrange() with the same semantics.
    print(INTERFACE_TEMPLATE % ''.join([GenerateCallback(i + 1)
                                        for i in range(max_args)]))
    return 0


if __name__ == "__main__":
    sys.exit(main())
| bsd-3-clause |
NINAnor/QGIS | python/plugins/processing/algs/qgis/TextToFloat.py | 10 | 3230 | # -*- coding: utf-8 -*-
"""
***************************************************************************
TextToFloat.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import QgsField
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class TextToFloat(GeoAlgorithm):
    """Convert a text attribute of a vector layer to a float field.

    Writes a copy of the input layer in which the chosen string field is
    replaced by a double field of the same name.  Values that cannot be
    parsed become NULL; percent-suffixed values ("42%") are converted to
    their fractional equivalent (0.42).
    """

    INPUT = 'INPUT'
    FIELD = 'FIELD'
    OUTPUT = 'OUTPUT'

    def defineCharacteristics(self):
        self.name, self.i18n_name = self.trAlgorithm('Text to float')
        self.group, self.i18n_group = self.trAlgorithm('Vector table tools')

        self.addParameter(ParameterVector(self.INPUT,
            self.tr('Input Layer'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterTableField(self.FIELD,
            self.tr('Text attribute to convert to float'),
            self.INPUT, ParameterTableField.DATA_TYPE_STRING))
        self.addOutput(OutputVector(self.OUTPUT, self.tr('Float from text')))

    def processAlgorithm(self, progress):
        layer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT))
        fieldName = self.getParameterValue(self.FIELD)
        idx = layer.fieldNameIndex(fieldName)

        fields = layer.pendingFields()
        # Replace the text field with a double field of the same name
        # (length 24 / precision 15 mirrors the QGIS defaults for doubles).
        fields[idx] = QgsField(fieldName, QVariant.Double, '', 24, 15)

        writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
            fields, layer.wkbType(), layer.crs())

        features = vector.features(layer)
        count = len(features)
        # BUG FIX: the original divided by zero when the layer was empty.
        total = 100.0 / float(count) if count else 0.0
        # Use a separate loop variable instead of re-using 'count' (the
        # original shadowed the feature count with the enumeration index).
        for current, f in enumerate(features):
            value = f[idx]
            try:
                if '%' in value:
                    # "42%" -> 0.42
                    f[idx] = float(value.replace('%', '')) / 100.0
                else:
                    f[idx] = float(value)
            except (ValueError, TypeError):
                # Narrowed from a bare except: unparseable (or NULL/non-string)
                # values become NULL in the output.
                f[idx] = None
            writer.addFeature(f)
            progress.setPercentage(int(current * total))

        del writer
| gpl-2.0 |
SebasSBM/django | tests/fixtures_model_package/tests.py | 312 | 2204 | from __future__ import unicode_literals
import warnings
from django.core import management
from django.test import TestCase
from .models import Article
class SampleTestCase(TestCase):
    # Fixtures loaded automatically by the TestCase machinery before each
    # test; fixture2 overwrites one object from fixture1 (3 articles total).
    fixtures = ['fixture1.json', 'fixture2.json']

    def testClassFixtures(self):
        "Test cases can load fixture objects into models defined in packages"
        self.assertEqual(Article.objects.count(), 3)
        # Ordering comes from the model's default ordering; compare by
        # headline only.
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline
        )
class FixtureTestCase(TestCase):
    """Exercises manual loaddata calls against models defined in packages."""

    def test_loaddata(self):
        "Fixtures can load data into models defined in packages"
        # Load fixture 1. Single JSON file, with two objects
        management.call_command("loaddata", "fixture1.json", verbosity=0)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Time to reform copyright",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline,
        )

        # Load fixture 2. JSON file imported by default. Overwrites some
        # existing objects
        management.call_command("loaddata", "fixture2.json", verbosity=0)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline,
        )

        # Load a fixture that doesn't exist
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            management.call_command("loaddata", "unknown.json", verbosity=0)
            self.assertEqual(len(w), 1)
            # BUG FIX: assertTrue(x, msg) treats the second argument as the
            # failure *message*, so the original line never compared the
            # warning text at all. Compare it explicitly.
            self.assertEqual(str(w[0].message),
                             "No fixture named 'unknown' found.")

        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline,
        )
| bsd-3-clause |
admetricks/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/w3c/test_converter_unittest.py | 115 | 11612 | #!/usr/bin/env python
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import re
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
from webkitpy.w3c.test_converter import W3CTestConverter
DUMMY_FILENAME = 'dummy.html'
class W3CTestConverterTest(unittest.TestCase):
    """Unit tests for W3CTestConverter, which rewrites imported W3C tests:
    fixes testharness.js/.css paths to be LayoutTests-relative and adds
    -webkit- prefixes to CSS properties that require them."""

    def fake_dir_path(self, converter, dirname):
        # Build a plausible LayoutTests/css/<dirname> path for relative-path
        # computations; the directory does not need to exist on disk.
        return converter.path_from_webkit_root("LayoutTests", "css", dirname)

    def test_read_prefixed_property_list(self):
        """ Tests that the current list of properties requiring the -webkit- prefix load correctly """

        # FIXME: We should be passing in a MockHost here ...
        converter = W3CTestConverter()
        prop_list = converter.prefixed_properties
        self.assertTrue(prop_list, 'No prefixed properties found')
        for prop in prop_list:
            self.assertTrue(prop.startswith('-webkit-'))

    def test_convert_for_webkit_nothing_to_convert(self):
        """ Tests convert_for_webkit() using a basic test that has nothing to convert """

        test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR"
href="mailto:EMAIL OR http://CONTACT_PAGE"/>
<link rel="help" href="RELEVANT_SPEC_SECTION"/>
<meta name="assert" content="TEST ASSERTION"/>
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
        converter = W3CTestConverter()

        # Capture stdout/stderr so converter logging does not pollute
        # the test output.
        oc = OutputCapture()
        oc.capture_output()
        try:
            converted = converter.convert_html('/nothing/to/convert', test_html, DUMMY_FILENAME)
        finally:
            oc.restore_output()

        self.verify_no_conversion_happened(converted)

    def test_convert_for_webkit_harness_only(self):
        """ Tests convert_for_webkit() using a basic JS test that uses testharness.js only and has no prefixed properties """

        test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
</head>
"""
        converter = W3CTestConverter()
        fake_dir_path = self.fake_dir_path(converter, "harnessonly")

        converted = converter.convert_html(fake_dir_path, test_html, DUMMY_FILENAME)

        self.verify_conversion_happened(converted)
        # Expect 1 converted src path and 1 converted href path.
        self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
        self.verify_prefixed_properties(converted, [])

    def test_convert_for_webkit_properties_only(self):
        """ Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """

        test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">

#block1 { @test0@: propvalue; }

</style>
</head>
<body>
<div id="elem1" style="@test1@: propvalue;"></div>
</body>
</html>
"""
        converter = W3CTestConverter()
        fake_dir_path = self.fake_dir_path(converter, 'harnessandprops')
        # Substitute real prefixed properties for the @testN@ tokens.
        test_content = self.generate_test_content(converter.prefixed_properties, 1, test_html)

        oc = OutputCapture()
        oc.capture_output()
        try:
            converted = converter.convert_html(fake_dir_path, test_content[1], DUMMY_FILENAME)
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)
        self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
        self.verify_prefixed_properties(converted, test_content[0])

    def test_convert_for_webkit_harness_and_properties(self):
        """ Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """

        test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">

#block1 { @test0@: propvalue; }
#block2 { @test1@: propvalue; }
#block3 { @test2@: propvalue; }

</style>
</head>
<body>
<div id="elem1" style="@test3@: propvalue;"></div>
</body>
</html>
"""
        converter = W3CTestConverter()
        fake_dir_path = self.fake_dir_path(converter, 'harnessandprops')

        oc = OutputCapture()
        oc.capture_output()
        try:
            # NOTE(review): the '2' here only requests 2 substituted
            # properties even though the docstring mentions 4 tokens;
            # the remaining tokens stay unreplaced — presumably intended.
            test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
            converted = converter.convert_html(fake_dir_path, test_content[1], DUMMY_FILENAME)
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)
        self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
        self.verify_prefixed_properties(converted, test_content[0])

    def test_convert_test_harness_paths(self):
        """ Tests convert_testharness_paths() with a test that uses all three testharness files """

        test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
</head>
"""
        converter = W3CTestConverter()
        fake_dir_path = self.fake_dir_path(converter, 'testharnesspaths')

        doc = BeautifulSoup(test_html)
        oc = OutputCapture()
        oc.capture_output()
        try:
            converted = converter.convert_testharness_paths(doc, fake_dir_path, DUMMY_FILENAME)
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)
        # 2 <script src=...> paths and 1 <link href=...> path converted.
        self.verify_test_harness_paths(converter, doc, fake_dir_path, 2, 1)

    def test_convert_prefixed_properties(self):
        """ Tests convert_prefixed_properties() file that has 20 properties requiring the -webkit- prefix:
        10 in one style block + 5 in another style
        block + 5 inline styles, including one with multiple prefixed properties.
        The properties in the test content are in all sorts of wack formatting.
        """

        test_html = """<html>
<style type="text/css"><![CDATA[

.block1 {
width: 300px;
height: 300px
}

.block2 {
@test0@: propvalue;
}

.block3{@test1@: propvalue;}

.block4 { @test2@:propvalue; }

.block5{ @test3@ :propvalue; }

#block6 { @test4@ : propvalue; }

#block7
{
@test5@: propvalue;
}

#block8 { @test6@: propvalue; }

#block9:pseudo
{

@test7@: propvalue;
@test8@: propvalue propvalue propvalue;;
}

]]></style>
</head>
<body>
<div id="elem1" style="@test9@: propvalue;"></div>
<div id="elem2" style="propname: propvalue; @test10@ : propvalue; propname:propvalue;"></div>
<div id="elem2" style="@test11@: propvalue; @test12@ : propvalue; @test13@ :propvalue;"></div>
<div id="elem3" style="@test14@:propvalue"></div>
</body>

<style type="text/css"><![CDATA[

.block10{ @test15@: propvalue; }
.block11{ @test16@: propvalue; }
.block12{ @test17@: propvalue; }

#block13:pseudo
{
@test18@: propvalue;
@test19@: propvalue;
}

]]></style>
</html>
"""
        converter = W3CTestConverter()

        test_content = self.generate_test_content(converter.prefixed_properties, 20, test_html)

        oc = OutputCapture()
        oc.capture_output()
        try:
            converted = converter.convert_prefixed_properties(BeautifulSoup(test_content[1]), DUMMY_FILENAME)
        finally:
            oc.restore_output()

        self.verify_conversion_happened(converted)
        self.verify_prefixed_properties(converted, test_content[0])

    def verify_conversion_happened(self, converted):
        # Converters return a falsy value when nothing needed conversion.
        self.assertTrue(converted, "conversion didn't happen")

    def verify_no_conversion_happened(self, converted):
        self.assertEqual(converted, None, 'test should not have been converted')

    def verify_test_harness_paths(self, converter, converted, test_path, num_src_paths, num_href_paths):
        """Assert that absolute /resources/testharness* paths were rewritten
        to paths relative to test_path, with the expected counts."""
        if isinstance(converted, basestring):
            converted = BeautifulSoup(converted)

        resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")

        # Verify the original paths are gone, and the new paths are present.
        orig_path_pattern = re.compile('\"/resources/testharness')
        self.assertEquals(len(converted.findAll(src=orig_path_pattern)), 0, 'testharness src path was not converted')
        self.assertEquals(len(converted.findAll(href=orig_path_pattern)), 0, 'testharness href path was not converted')

        new_relpath = os.path.relpath(resources_dir, test_path)
        relpath_pattern = re.compile(new_relpath)
        self.assertEquals(len(converted.findAll(src=relpath_pattern)), num_src_paths, 'testharness src relative path not correct')
        self.assertEquals(len(converted.findAll(href=relpath_pattern)), num_href_paths, 'testharness href relative path not correct')

    def verify_prefixed_properties(self, converted, test_properties):
        # converted is a (properties_converted, new_content) pair.
        self.assertEqual(len(converted[0]), len(test_properties), 'Incorrect number of properties converted')
        for test_prop in test_properties:
            self.assertTrue((test_prop in converted[1]), 'Property ' + test_prop + ' not found in converted doc')

    def generate_test_content(self, full_property_list, num_test_properties, html):
        """Inserts properties requiring a -webkit- prefix into the content, replacing \'@testXX@\' with a property."""
        test_properties = []
        count = 0
        while count < num_test_properties:
            test_properties.append(full_property_list[count])
            count += 1

        # Replace the tokens in the testhtml with the test properties. Walk backward
        # through the list to replace the double-digit tokens first
        index = len(test_properties) - 1
        while index >= 0:
            # Use the unprefixed version
            test_prop = test_properties[index].replace('-webkit-', '')
            # Replace the token
            html = html.replace('@test' + str(index) + '@', test_prop)
            index -= 1

        return (test_properties, html)
| bsd-3-clause |
CTSRD-SOAAP/chromium-42.0.2311.135 | third_party/pywebsocket/src/example/abort_wsh.py | 465 | 1776 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import handshake
def web_socket_do_extra_handshake(request):
    # Example handler: accept every connection, no extra handshake checks.
    pass
def web_socket_transfer_data(request):
    # Example handler that aborts immediately: AbortedByUserException tells
    # mod_pywebsocket to drop the connection without a closing handshake.
    raise handshake.AbortedByUserException(
        "Aborted in web_socket_transfer_data")
# vi:sts=4 sw=4 et
| bsd-3-clause |
ZdrowyGosciu/kernel_lge_d802_v30d | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Usage text printed when too many positional arguments are supplied.
usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None          # optional: restrict counting to this command name

default_interval = 3     # display refresh period, in seconds
interval = default_interval

# Hand-rolled parsing of the optional [comm] and [interval] arguments.
if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        # A single non-numeric argument is treated as the command name.
        for_comm = sys.argv[1]
        interval = default_interval

# Per-syscall-id hit counters (autodict: auto-vivifying dictionary).
syscalls = autodict()
def trace_begin():
    # Called once by perf before events arrive: start the display refresher
    # on a background thread so event handlers are never blocked by output.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Invoked by perf for every raw sys_enter tracepoint; counts hits per
    # syscall id, optionally filtered to a single command name.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this id: the autodict returned a fresh sub-dict,
        # which cannot be incremented, so initialize the counter instead.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Display loop run on a background thread: every `interval` seconds,
    # clear the terminal, print the counters sorted by count (descending),
    # then reset the counters for the next window.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",

        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
            "----------"),

        # Sort by (count, id) descending; Python-2-only tuple-unpacking
        # lambda syntax.
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
            reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # syscall_name() may fail for unknown ids; skip the row.
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
oinopion/django | django/core/serializers/__init__.py | 28 | 8194 | """
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv": "path.to.csv.serializer",
"txt": "path.to.txt.serializer",
}
"""
import importlib
from django.apps import apps
from django.conf import settings
from django.utils import six
from django.core.serializers.base import SerializerDoesNotExist
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml": "django.core.serializers.xml_serializer",
"python": "django.core.serializers.python",
"json": "django.core.serializers.json",
"yaml": "django.core.serializers.pyyaml",
}
_serializers = {}
class BadSerializer(object):
    """
    Placeholder serializer that re-raises a registration-time exception.

    Registration caches serializer modules; when importing one fails, the
    error is stored here and only raised when the serializer is actually
    used, so the original import failure reaches the caller at use time.
    """
    internal_use_only = False

    def __init__(self, exception):
        # The exception captured while importing the serializer module.
        self.exception = exception

    def __call__(self, *args, **kwargs):
        # Any attempt to use the serializer surfaces the deferred error.
        raise self.exception
def register_serializer(format, serializer_module, serializers=None):
    """Register a new serializer.

    ``serializer_module`` should be the fully qualified module name
    for the serializer.

    If ``serializers`` is provided, the registration will be added
    to the provided dictionary.

    If ``serializers`` is not provided, the registration will be made
    directly into the global register of serializers. Adding serializers
    directly is not a thread-safe operation.
    """
    if serializers is None and not _serializers:
        _load_serializers()
    try:
        module = importlib.import_module(serializer_module)
    except ImportError as exc:
        # Defer the import error: cache a stub module whose Serializer and
        # Deserializer re-raise it only when the format is actually used.
        bad_serializer = BadSerializer(exc)

        module = type('BadSerializerModule', (object,), {
            'Deserializer': bad_serializer,
            'Serializer': bad_serializer,
        })

    if serializers is None:
        _serializers[format] = module
    else:
        serializers[format] = module
def unregister_serializer(format):
    "Unregister a given serializer. This is not a thread-safe operation."
    if not _serializers:
        _load_serializers()
    if format in _serializers:
        del _serializers[format]
    else:
        raise SerializerDoesNotExist(format)
def get_serializer(format):
    """Return the Serializer class registered for ``format``."""
    if not _serializers:
        _load_serializers()
    if format in _serializers:
        return _serializers[format].Serializer
    raise SerializerDoesNotExist(format)
def get_serializer_formats():
    """Return the names of every registered serialization format."""
    if not _serializers:
        _load_serializers()
    return [fmt for fmt in _serializers]
def get_public_serializer_formats():
    """Return registered format names that are not internal-use-only."""
    if not _serializers:
        _load_serializers()
    formats = []
    for fmt, module in six.iteritems(_serializers):
        if not module.Serializer.internal_use_only:
            formats.append(fmt)
    return formats
def get_deserializer(format):
    """Return the Deserializer callable registered for ``format``."""
    if not _serializers:
        _load_serializers()
    if format in _serializers:
        return _serializers[format].Deserializer
    raise SerializerDoesNotExist(format)
def serialize(format, queryset, **options):
    """
    Serialize a queryset (or any iterator of database objects) with the
    serializer registered for ``format`` and return the serialized value.
    """
    serializer = get_serializer(format)()
    serializer.serialize(queryset, **options)
    return serializer.getvalue()
def deserialize(format, stream_or_string, **options):
    """
    Deserialize a stream or string with the deserializer registered for
    ``format``.

    Returns an iterator yielding ``(obj, m2m_relation_dict)`` pairs, where
    ``obj`` is an instantiated -- but *unsaved* -- model instance and
    ``m2m_relation_dict`` maps ``m2m_field_name`` to a list of related
    objects.
    """
    deserializer = get_deserializer(format)
    return deserializer(stream_or_string, **options)
def _load_serializers():
    """
    Register built-in and settings-defined serializers. This is done lazily so
    that user code has a chance to (e.g.) set up custom settings without
    needing to be careful of import order.
    """
    global _serializers
    # Build into a local dict first so the final swap of the module-level
    # registry is a single assignment.
    serializers = {}
    for format in BUILTIN_SERIALIZERS:
        register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
    # User-defined serializers override/extend the built-ins.
    if hasattr(settings, "SERIALIZATION_MODULES"):
        for format in settings.SERIALIZATION_MODULES:
            register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
    _serializers = serializers
def sort_dependencies(app_list):
    """Sort a list of (app_config, models) pairs into a single list of models.

    The single list of models is sorted so that any model with a natural key
    is serialized before a normal model, and any model with a natural key
    dependency has it's dependencies serialized first.

    Raises RuntimeError if the dependency graph contains a cycle.
    """
    # Process the list of models, and get the list of dependencies
    model_dependencies = []
    models = set()
    for app_config, model_list in app_list:
        if model_list is None:
            model_list = app_config.get_models()

        for model in model_list:
            models.add(model)

            # Add any explicitly defined dependencies
            if hasattr(model, 'natural_key'):
                deps = getattr(model.natural_key, 'dependencies', [])
                if deps:
                    deps = [apps.get_model(dep) for dep in deps]
            else:
                deps = []

            # Now add a dependency for any FK relation with a model that
            # defines a natural key
            for field in model._meta.fields:
                if field.remote_field:
                    rel_model = field.remote_field.model
                    if hasattr(rel_model, 'natural_key') and rel_model != model:
                        deps.append(rel_model)
            # Also add a dependency for any simple M2M relation with a model
            # that defines a natural key.  M2M relations with explicit through
            # models don't count as dependencies.
            for field in model._meta.many_to_many:
                if field.remote_field.through._meta.auto_created:
                    rel_model = field.remote_field.model
                    if hasattr(rel_model, 'natural_key') and rel_model != model:
                        deps.append(rel_model)
            model_dependencies.append((model, deps))

    model_dependencies.reverse()
    # Now sort the models to ensure that dependencies are met. This
    # is done by repeatedly iterating over the input list of models.
    # If all the dependencies of a given model are in the final list,
    # that model is promoted to the end of the final list. This process
    # continues until the input list is empty, or we do a full iteration
    # over the input models without promoting a model to the final list.
    # If we do a full iteration without a promotion, that means there are
    # circular dependencies in the list.
    model_list = []
    while model_dependencies:
        skipped = []
        changed = False
        while model_dependencies:
            model, deps = model_dependencies.pop()

            # If all of the models in the dependency list are either already
            # on the final model list, or not on the original serialization list,
            # then we've found another model with all it's dependencies satisfied.
            found = True
            for candidate in ((d not in models or d in model_list) for d in deps):
                if not candidate:
                    found = False
            if found:
                model_list.append(model)
                changed = True
            else:
                skipped.append((model, deps))
        if not changed:
            # No model could be promoted this pass: the remaining models form
            # at least one dependency cycle.
            raise RuntimeError("Can't resolve dependencies for %s in serialized app list." %
                ', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
                for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
            )
        model_dependencies = skipped

    return model_list
| bsd-3-clause |
jerpat/csmake | csmake/CsmakeModules/copyright.py | 2 | 2470 | # <copyright>
# (c) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# </copyright>
from Csmake.CsmakeModuleAllPhase import CsmakeModuleAllPhase
class copyright(CsmakeModuleAllPhase):
    """Purpose: Create a copyright clause for the software described in
                a csmakefile.
       Type: Module   Library: csmake (core)
       Phases: *any*
       Options:
           holder - Holder(s) of the license
           years - The years in which the copyrighted material has bee
                   updated.  E.g., 2014  or 2014-2017 or 2012,2015-2016
           license - (OPTIONAL) Describes a license (e.g., non-free, GPL2+)
               see https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/index.html#license-field
               and https://spdx.org/licenses/
           license-text - (OPTIONAL) The text for the full license to include
           disclaimer - (OPTIONAL) A statement about the copyright
               (for example, for Debian, why the copyright is non-free)
           comment - (OPTIONAL) Any apropos comment for the copyright
       Results: A dictionary containing the options provided.
       References:
           The most comprehensive copyright documentation for packaging
           comes from debian and is the inspiration for this module:
           https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/"""

    REQUIRED_OPTIONS = ['holder', 'years']

    def __repr__(self):
        return "<<copyright step definition>>"

    def __str__(self):
        return "<<copyright step definition>>"

    def default(self, options):
        # Pass every user-facing option through unchanged, dropping
        # csmake's internal bookkeeping keys (those prefixed with '**').
        result = dict(
            (key, value)
            for key, value in options.iteritems()
            if not key.startswith("**"))
        self.log.passed()
        return result
| gpl-3.0 |
amir343/ansible-modules-extras | network/f5/bigip_monitor_http.py | 77 | 15434 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, serge van Ginderachter <serge@vanginderachter.be>
# based on Matt Hite's bigip_pool module
# (c) 2013, Matt Hite <mhite@hotmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_monitor_http
short_description: "Manages F5 BIG-IP LTM http monitors"
description:
- "Manages F5 BIG-IP LTM monitors via iControl SOAP API"
version_added: "1.4"
author: "Serge van Ginderachter (@srvg)"
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
requirements:
- bigsuds
options:
server:
description:
- BIG-IP host
required: true
default: null
user:
description:
- BIG-IP username
required: true
default: null
password:
description:
- BIG-IP password
required: true
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 2.0
state:
description:
- Monitor state
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Monitor name
required: true
default: null
aliases: ['monitor']
partition:
description:
- Partition for the monitor
required: false
default: 'Common'
parent:
description:
- The parent template of this monitor template
required: false
default: 'http'
parent_partition:
description:
- Partition for the parent monitor
required: false
default: 'Common'
send:
description:
- The send string for the monitor call
required: true
default: none
receive:
description:
- The receive string for the monitor call
required: true
default: none
receive_disable:
description:
- The receive disable string for the monitor call
required: true
default: none
ip:
description:
- IP address part of the ipport definition. The default API setting
is "0.0.0.0".
required: false
default: none
port:
description:
- port address part op the ipport definition. The default API
setting is 0.
required: false
default: none
interval:
description:
- The interval specifying how frequently the monitor instance
of this template will run. By default, this interval is used for up and
down states. The default API setting is 5.
required: false
default: none
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. The default API setting
is 16.
required: false
default: none
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. The default API setting is 0.
required: false
default: none
'''
EXAMPLES = '''
- name: BIGIP F5 | Create HTTP Monitor
local_action:
module: bigip_monitor_http
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors
- name: BIGIP F5 | Remove HTTP Monitor
local_action:
module: bigip_monitor_http
state: absent
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ monitorname }}"
'''
TEMPLATE_TYPE = 'TTYPE_HTTP'
DEFAULT_PARENT_TYPE = 'http'
def check_monitor_exists(module, api, monitor, parent):
# hack to determine if monitor exists
result = False
try:
ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
if ttype == TEMPLATE_TYPE and parent == parent2:
result = True
else:
module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent))
except bigsuds.OperationFailed, e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def create_monitor(api, monitor, template_attributes):
try:
api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
except bigsuds.OperationFailed, e:
if "already exists" in str(e):
return False
else:
# genuine exception
raise
return True
def delete_monitor(api, monitor):
try:
api.LocalLB.Monitor.delete_template(template_names=[monitor])
except bigsuds.OperationFailed, e:
# maybe it was deleted since we checked
if "was not found" in str(e):
return False
else:
# genuine exception
raise
return True
def check_string_property(api, monitor, str_property):
try:
return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0]
except bigsuds.OperationFailed, e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
else:
# genuine exception
raise
def set_string_property(api, monitor, str_property):
api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property])
def check_integer_property(api, monitor, int_property):
try:
return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0]
except bigsuds.OperationFailed, e:
# happens in check mode if not created yet
if "was not found" in str(e):
return True
else:
# genuine exception
raise
def set_integer_property(api, monitor, int_property):
api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property])
def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
changed = False
for str_property in template_string_properties:
if str_property['value'] is not None and not check_string_property(api, monitor, str_property):
if not module.check_mode:
set_string_property(api, monitor, str_property)
changed = True
for int_property in template_integer_properties:
if int_property['value'] is not None and not check_integer_property(api, monitor, int_property):
if not module.check_mode:
set_integer_property(api, monitor, int_property)
changed = True
return changed
def get_ipport(api, monitor):
return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]
def set_ipport(api, monitor, ipport):
try:
api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport])
return True, ""
except bigsuds.OperationFailed, e:
if "Cannot modify the address type of monitor" in str(e):
return False, "Cannot modify the address type of monitor if already assigned to a pool."
else:
# genuine exception
raise
# ===========================================
# main loop
#
# writing a module for other monitor types should
# only need an updated main() (and monitor specific functions)
def main():
# begin monitor specific stuff
argument_spec=f5_argument_spec();
argument_spec.update( dict(
name = dict(required=True),
parent = dict(default=DEFAULT_PARENT_TYPE),
parent_partition = dict(default='Common'),
send = dict(required=False),
receive = dict(required=False),
receive_disable = dict(required=False),
ip = dict(required=False),
port = dict(required=False, type='int'),
interval = dict(required=False, type='int'),
timeout = dict(required=False, type='int'),
time_until_up = dict(required=False, type='int', default=0)
)
)
module = AnsibleModule(
argument_spec = argument_spec,
supports_check_mode=True
)
(server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
parent_partition = module.params['parent_partition']
name = module.params['name']
parent = fq_name(parent_partition, module.params['parent'])
monitor = fq_name(partition, name)
send = module.params['send']
receive = module.params['receive']
receive_disable = module.params['receive_disable']
ip = module.params['ip']
port = module.params['port']
interval = module.params['interval']
timeout = module.params['timeout']
time_until_up = module.params['time_until_up']
# end monitor specific stuff
api = bigip_api(server, user, password)
monitor_exists = check_monitor_exists(module, api, monitor, parent)
# ipport is a special setting
if monitor_exists: # make sure to not update current settings if not asked
cur_ipport = get_ipport(api, monitor)
if ip is None:
ip = cur_ipport['ipport']['address']
if port is None:
port = cur_ipport['ipport']['port']
else: # use API defaults if not defined to create it
if interval is None:
interval = 5
if timeout is None:
timeout = 16
if ip is None:
ip = '0.0.0.0'
if port is None:
port = 0
if send is None:
send = ''
if receive is None:
receive = ''
if receive_disable is None:
receive_disable = ''
# define and set address type
if ip == '0.0.0.0' and port == 0:
address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT'
elif ip == '0.0.0.0' and port != 0:
address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'
elif ip != '0.0.0.0' and port != 0:
address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
else:
address_type = 'ATYPE_UNSET'
ipport = {'address_type': address_type,
'ipport': {'address': ip,
'port': port}}
template_attributes = {'parent_template': parent,
'interval': interval,
'timeout': timeout,
'dest_ipport': ipport,
'is_read_only': False,
'is_directly_usable': True}
# monitor specific stuff
template_string_properties = [{'type': 'STYPE_SEND',
'value': send},
{'type': 'STYPE_RECEIVE',
'value': receive},
{'type': 'STYPE_RECEIVE_DRAIN',
'value': receive_disable}]
template_integer_properties = [{'type': 'ITYPE_INTERVAL',
'value': interval},
{'type': 'ITYPE_TIMEOUT',
'value': timeout},
{'type': 'ITYPE_TIME_UNTIL_UP',
'value': time_until_up}]
# main logic, monitor generic
try:
result = {'changed': False} # default
if state == 'absent':
if monitor_exists:
if not module.check_mode:
# possible race condition if same task
# on other node deleted it first
result['changed'] |= delete_monitor(api, monitor)
else:
result['changed'] |= True
else: # state present
## check for monitor itself
if not monitor_exists: # create it
if not module.check_mode:
# again, check changed status here b/c race conditions
# if other task already created it
result['changed'] |= create_monitor(api, monitor, template_attributes)
else:
result['changed'] |= True
## check for monitor parameters
# whether it already existed, or was just created, now update
# the update functions need to check for check mode but
# cannot update settings if it doesn't exist which happens in check mode
result['changed'] |= update_monitor_properties(api, module, monitor,
template_string_properties,
template_integer_properties)
# we just have to update the ipport if monitor already exists and it's different
if monitor_exists and cur_ipport != ipport:
set_ipport(api, monitor, ipport)
result['changed'] |= True
#else: monitor doesn't exist (check mode) or ipport is already ok
except Exception, e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
main()
| gpl-3.0 |
jocelynj/weboob | weboob/backends/aum/optim/queries_queue.py | 1 | 3223 | # -*- coding: utf-8 -*-
# Copyright(C) 2010 Romain Bignon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import with_statement
from weboob.tools.browser import BrowserUnavailable
from weboob.capabilities.dating import Optimization
from weboob.tools.log import getLogger
__all__ = ['QueriesQueue']
class QueriesQueue(Optimization):
def __init__(self, sched, storage, browser):
self.sched = sched
self.storage = storage
self.browser = browser
self.logger = getLogger('queriesqueue', browser.logger)
self.queue = storage.get('queries_queue', 'queue', default=[])
self.check_cron = None
def save(self):
self.storage.set('queries_queue', 'queue', self.queue)
self.storage.save()
def start(self):
self.check_cron = self.sched.repeat(3600, self.flush_queue)
return True
def stop(self):
self.sched.cancel(self.check_cron)
self.check_cron = None
return True
def is_running(self):
return self.check_cron is not None
def enqueue_query(self, id, priority=999):
self.queue.append((int(priority), int(id)))
self.save()
# Try to flush queue to send it now.
self.flush_queue()
# Check if the enqueued query has been sent
for p, i in self.queue:
if i == int(id):
return False
return True
def flush_queue(self):
self.queue.sort()
priority = 0
id = None
try:
try:
while len(self.queue) > 0:
priority, id = self.queue.pop()
if not id:
continue
with self.browser:
if self.browser.send_charm(id):
self.logger.info('Charm sent to %s' % id)
else:
self.queue.append((priority, id))
self.logger.info("Charm can't be send to %s" % id)
break
# As the charm has been correctly sent (no exception raised),
# we don't store anymore ID, because if nbAvailableCharms()
# fails, we don't want to re-queue this ID.
id = None
priority = 0
except BrowserUnavailable:
# We consider this profil hasn't been [correctly] analysed
if not id is None:
self.queue.append((priority, id))
finally:
self.save()
| gpl-3.0 |
lydiadwyer/sheparddb | data/website/modules/Excavations/controller.py | 1 | 6315 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""The Controller class for Excavations."""
import re
from flask import Blueprint, render_template, redirect, url_for, current_app, \
request, abort
from modules.Shared.database import db
from modules.Countries.model import Country
from modules.Regions.model import Region
from modules.Cities.model import City
from modules.Excavations.model import Excavation
# collection of URLs for the excavations section of the website
# setup the controller, use a local folder for templates
excavations = Blueprint(
'excavations',
__name__,
template_folder='templates',
static_folder='static',
url_prefix='/excavations'
)
@excavations.route('/')
def view_all_excavations():
""" homepage with all excavations listed """
return render_template('excavations/view_all.html', Excavations=Excavation)
@excavations.route('/view/<excavation_id>')
def view_one_excavation(excavation_id):
entry = Excavation.query.get(excavation_id)
return render_template('excavations/view.html', entry=entry)
@excavations.route('/add', methods=['GET', 'POST'])
def add_excavation():
""" add an excavation page function """
entry = Excavation() # creates a model.py instance, instance only has a name right now
error_msg = {}
form_is_valid = True
country_list = Country.query.all()
region_list = Region.query.all()
city_list = City.query.all()
if request.method == 'GET':
return render_template('excavations/add.html', entry=entry, \
country_list=country_list, \
region_list=region_list, \
city_list=city_list, \
error_msg=error_msg)
if request.method == 'POST':
# validate input
[entry, form_is_valid, error_msg] = form_validate_excavation(entry)
# check if the form is valid
if not form_is_valid:
# current_app.logger.info('invalid add excavation')
return render_template('excavations/add.html', entry=entry, \
country_list=country_list, \
region_list=region_list, \
city_list=city_list, \
error_msg=error_msg)
# if data is valid, commit
db.session.add(entry)
db.session.commit()
return redirect(url_for('excavations.view_one_excavation', \
excavation_id=entry.excavation_id))
@excavations.route('/edit/<excavation_id>', methods=['GET', 'POST'])
def edit_excavation(excavation_id):
""" edit excavation details """
# init variables
entry = Excavation.query.get(excavation_id)
error_msg = {}
form_is_valid = True
country_list = Country.query.all()
region_list = Region.query.all()
city_list = City.query.all()
if request.method == 'GET':
return render_template('excavations/edit.html', \
entry=entry, error_msg=error_msg, \
country_list=country_list, \
region_list=region_list, \
city_list=city_list)
if request.method == 'POST':
# validate input
[entry, form_is_valid, error_msg] = form_validate_excavation(entry)
# check if the form is valid
if not form_is_valid:
# current_app.logger.info('invalid edit excavation: ' + str(entry))
return render_template('excavations/edit.html', entry=entry, \
country_list=country_list, \
region_list=region_list, \
city_list=city_list, \
error_msg=error_msg)
# the data is valid, save it
db.session.commit()
return redirect(url_for('excavations.view_one_excavation', \
excavation_id=entry.excavation_id))
# current_app.logger.error("unsupported method")
def form_validate_excavation(entry):
""" validate Excavation form data """
# retrieve data from the global Request object
data = request.form
# get string, cast to ASCII, truncate to 128 chars, strip multi spaces
entry.excavation_name = \
re.sub(' +', ' ',
data['excavation_name'].encode('ascii', 'ignore')[:127])
# retrieve ids in the data var from the html form
entry.country_id = data['country_id']
entry.region_id = data['region_id']
entry.city_id = data['city_id']
# validate data
form_is_valid = True
error_msg = {}
# ensure the excavation_name is filled in
if not entry.excavation_name:
form_is_valid = False
error_msg['excavation_name'] = "Please fill in the excavation name."
# excavation name underflow check, 1 or less characters
if len(entry.excavation_name) < 2:
form_is_valid = False
error_msg['excavation_name'] = "Please fill in the excavation name completely."
# ensure the excavation name is alphanumeric
match = re.match('^[a-zA-Z0-9 ]*$', entry.excavation_name)
if not match:
form_is_valid = False
error_msg['excavation_name'] = "Please fill in a excavation name only with English letters and numbers."
else:
current_app.logger.info("match = " + str(match.group(0)))
# ensure country_id city_id region_id are chosen
if not entry.country_id:
form_is_valid = False
error_msg['country_id'] = "Please choose the country."
if not entry.region_id:
form_is_valid = False
error_msg['region_id'] = "Please choose the region."
if not entry.city_id:
form_is_valid = False
error_msg['city_id'] = "Please choose the city."
return [entry, form_is_valid, error_msg]
@excavations.route('/delete/<excavation_id>')
def delete_excavation(excavation_id):
""" delete an excavation """
entry = Excavation.query.get(Excavation_id)
# check something doesnt exist
if entry is None:
return abort(400, 'Entry does not exist.')
db.session.delete(entry)
db.session.commit()
return redirect(url_for('excavations.view_all_excavations'))
| apache-2.0 |
ccrook/Quantum-GIS | python/plugins/processing/algs/gdal/roughness.py | 7 | 4403 | # -*- coding: utf-8 -*-
"""
***************************************************************************
roughness.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.core import (QgsRasterFileWriter,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class roughness(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
OPTIONS = 'OPTIONS'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterBand(self.BAND,
self.tr('Band number'),
parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterBoolean(self.COMPUTE_EDGES,
self.tr('Compute edges'),
defaultValue=False))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation parameters'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Roughness')))
def name(self):
return 'roughness'
def displayName(self):
return self.tr('Roughness')
def group(self):
return self.tr('Raster analysis')
def groupId(self):
return 'rasteranalysis'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
arguments = ['roughness']
inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
arguments.append(inLayer.source())
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
arguments.append(out)
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
arguments.append('-b')
arguments.append(str(self.parameterAsInt(parameters, self.BAND, context)))
if self.parameterAsBool(parameters, self.COMPUTE_EDGES, context):
arguments.append('-compute_edges')
options = self.parameterAsString(parameters, self.OPTIONS, context)
if options:
arguments.append('-co')
arguments.append(options)
return ['gdaldem', GdalUtils.escapeAndJoin(arguments)]
| gpl-2.0 |
SoftwareHeritage/swh-storage | swh/storage/tests/test_tenacious.py | 1 | 13693 | # Copyright (C) 2020-2021 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from collections import Counter
from contextlib import contextmanager
from unittest.mock import patch
import attr
import pytest
from swh.model import model
from swh.model.tests.swh_model_data import TEST_OBJECTS
from swh.storage import get_storage
from swh.storage.in_memory import InMemoryStorage
from swh.storage.proxies.tenacious import TenaciousProxyStorage
from swh.storage.tests.storage_data import StorageData
from swh.storage.tests.storage_tests import (
TestStorageGeneratedData as _TestStorageGeneratedData,
)
from swh.storage.tests.storage_tests import TestStorage as _TestStorage # noqa
from swh.storage.utils import now
data = StorageData()
collections = {
"origin": data.origins,
"content": data.contents,
"skipped_content": data.skipped_contents,
"revision": data.revisions,
"directory": data.directories,
"release": data.releases,
"snapshot": data.snapshots,
}
# generic storage tests (using imported TestStorage* classes)
@pytest.fixture
def swh_storage_backend_config2():
yield {
"cls": "memory",
"journal_writer": {"cls": "memory",},
}
@pytest.fixture
def swh_storage():
storage_config = {
"cls": "pipeline",
"steps": [
{"cls": "tenacious"},
{"cls": "memory", "journal_writer": {"cls": "memory",}},
],
}
storage = get_storage(**storage_config)
storage.journal_writer = storage.storage.journal_writer
return storage
class TestTenaciousStorage(_TestStorage):
@pytest.mark.skip(
'The "person" table of the pgsql is a legacy thing, and not '
"supported by the cassandra/in-memory backend."
)
def test_person_fullname_unicity(self):
pass
@pytest.mark.skip(reason="No collision with the tenacious storage")
def test_content_add_collision(self, swh_storage, sample_data):
pass
@pytest.mark.skip(reason="No collision with the tenacious storage")
def test_content_add_metadata_collision(self, swh_storage, sample_data):
pass
@pytest.mark.skip("content_update is not implemented")
def test_content_update(self):
pass
@pytest.mark.skip("Not supported by Cassandra/InMemory storage")
def test_origin_count(self):
pass
class TestTenaciousStorageGeneratedData(_TestStorageGeneratedData):
@pytest.mark.skip("Not supported by Cassandra/InMemory")
def test_origin_count(self):
pass
@pytest.mark.skip("Not supported by Cassandra/InMemory")
def test_origin_count_with_visit_no_visits(self):
pass
@pytest.mark.skip("Not supported by Cassandra/InMemory")
def test_origin_count_with_visit_with_visits_and_snapshot(self):
pass
@pytest.mark.skip("Not supported by Cassandra/InMemory")
def test_origin_count_with_visit_with_visits_no_snapshot(self):
pass
# specific tests for the tenacious behavior
def get_tenacious_storage(**config):
storage_config = {
"cls": "pipeline",
"steps": [
{"cls": "validate"},
{"cls": "tenacious", **config},
{"cls": "memory"},
],
}
return get_storage(**storage_config)
@contextmanager
def disabled_validators():
attr.set_run_validators(False)
yield
attr.set_run_validators(True)
def popid(d):
d.pop("id")
return d
testdata = [
pytest.param(
"content",
"content_add",
list(TEST_OBJECTS["content"]),
attr.evolve(model.Content.from_data(data=b"too big"), length=1000),
attr.evolve(model.Content.from_data(data=b"to fail"), length=1000),
id="content",
),
pytest.param(
"content",
"content_add_metadata",
[attr.evolve(cnt, ctime=now()) for cnt in TEST_OBJECTS["content"]],
attr.evolve(model.Content.from_data(data=b"too big"), length=1000, ctime=now()),
attr.evolve(model.Content.from_data(data=b"to fail"), length=1000, ctime=now()),
id="content_metadata",
),
pytest.param(
"skipped_content",
"skipped_content_add",
list(TEST_OBJECTS["skipped_content"]),
attr.evolve(
model.SkippedContent.from_data(data=b"too big", reason="too big"),
length=1000,
),
attr.evolve(
model.SkippedContent.from_data(data=b"to fail", reason="to fail"),
length=1000,
),
id="skipped_content",
),
pytest.param(
"directory",
"directory_add",
list(TEST_OBJECTS["directory"]),
data.directory,
data.directory2,
id="directory",
),
pytest.param(
"revision",
"revision_add",
list(TEST_OBJECTS["revision"]),
data.revision,
data.revision2,
id="revision",
),
pytest.param(
"release",
"release_add",
list(TEST_OBJECTS["release"]),
data.release,
data.release2,
id="release",
),
pytest.param(
"snapshot",
"snapshot_add",
list(TEST_OBJECTS["snapshot"]),
data.snapshot,
data.complete_snapshot,
id="snapshot",
),
pytest.param(
"origin",
"origin_add",
list(TEST_OBJECTS["origin"]),
data.origin,
data.origin2,
id="origin",
),
]
class LimitedInMemoryStorage(InMemoryStorage):
# forbidden are 'bad1' and 'bad2' arguments of `testdata`
forbidden = [x[0][3] for x in testdata] + [x[0][4] for x in testdata]
def __init__(self, *args, **kw):
self.add_calls = Counter()
super().__init__(*args, **kw)
def reset(self):
super().reset()
self.add_calls.clear()
def content_add(self, contents):
return self._maybe_add(super().content_add, "content", contents)
def content_add_metadata(self, contents):
return self._maybe_add(super().content_add_metadata, "content", contents)
def skipped_content_add(self, skipped_contents):
return self._maybe_add(
super().skipped_content_add, "skipped_content", skipped_contents
)
def origin_add(self, origins):
return self._maybe_add(super().origin_add, "origin", origins)
def directory_add(self, directories):
return self._maybe_add(super().directory_add, "directory", directories)
def revision_add(self, revisions):
return self._maybe_add(super().revision_add, "revision", revisions)
def release_add(self, releases):
return self._maybe_add(super().release_add, "release", releases)
def snapshot_add(self, snapshots):
return self._maybe_add(super().snapshot_add, "snapshot", snapshots)
def _maybe_add(self, add_func, object_type, objects):
self.add_calls[object_type] += 1
if any(c in self.forbidden for c in objects):
raise ValueError(
f"{object_type} is forbidden",
[c.unique_key() for c in objects if c in self.forbidden],
)
return add_func(objects)
@patch("swh.storage.in_memory.InMemoryStorage", LimitedInMemoryStorage)
@pytest.mark.parametrize("object_type, add_func_name, objects, bad1, bad2", testdata)
def test_tenacious_proxy_storage(object_type, add_func_name, objects, bad1, bad2):
storage = get_tenacious_storage()
tenacious = storage.storage
in_memory = tenacious.storage
assert isinstance(tenacious, TenaciousProxyStorage)
assert isinstance(in_memory, LimitedInMemoryStorage)
size = len(objects)
add_func = getattr(storage, add_func_name)
# Note: when checking the LimitedInMemoryStorage.add_calls counter, it's
# hard to guess the exact number of calls in the end (depends on the size
# of batch and the position of bad objects in this batch). So we will only
# check a lower limit of the form (n + m), where n is the minimum expected
# number of additions (due to the batch begin split), and m is the fact
# that bad objects are tried (individually) several (3) times before giving
# up. So for one bad object, m is 3; for 2 bad objects, m is 6, etc.
s = add_func(objects)
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 0
assert storage.add_calls[object_type] == (1 + 0)
in_memory.reset()
tenacious.reset()
# bad1 is the last element
s = add_func(objects + [bad1])
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 1
assert storage.add_calls[object_type] >= (2 + 3)
in_memory.reset()
tenacious.reset()
# bad1 and bad2 are the last elements
s = add_func(objects + [bad1, bad2])
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 2
assert storage.add_calls[object_type] >= (3 + 6)
in_memory.reset()
tenacious.reset()
# bad1 is the first element
s = add_func([bad1] + objects)
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 1
assert storage.add_calls[object_type] >= (2 + 3)
in_memory.reset()
tenacious.reset()
# bad1 and bad2 are the first elements
s = add_func([bad1, bad2] + objects)
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 2
assert storage.add_calls[object_type] >= (3 + 6)
in_memory.reset()
tenacious.reset()
# bad1 is in the middle of the list of inserted elements
s = add_func(objects[: size // 2] + [bad1] + objects[size // 2 :])
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 1
assert storage.add_calls[object_type] >= (3 + 3)
in_memory.reset()
tenacious.reset()
# bad1 and bad2 are together in the middle of the list of inserted elements
s = add_func(objects[: size // 2] + [bad1, bad2] + objects[size // 2 :])
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 2
assert storage.add_calls[object_type] >= (3 + 6)
in_memory.reset()
tenacious.reset()
# bad1 and bad2 are spread in the middle of the list of inserted elements
s = add_func(
objects[: size // 3]
+ [bad1]
+ objects[size // 3 : 2 * (size // 3)]
+ [bad2]
+ objects[2 * (size // 3) :]
)
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 2
assert storage.add_calls[object_type] >= (3 + 6)
in_memory.reset()
tenacious.reset()
# bad1 is the only element
s = add_func([bad1])
assert s.get(f"{object_type}:add", 0) == 0
assert s.get(f"{object_type}:add:errors", 0) == 1
assert storage.add_calls[object_type] == (0 + 3)
in_memory.reset()
tenacious.reset()
# bad1 and bad2 are the only elements
s = add_func([bad1, bad2])
assert s.get(f"{object_type}:add", 0) == 0
assert s.get(f"{object_type}:add:errors", 0) == 2
assert storage.add_calls[object_type] == (1 + 6)
in_memory.reset()
tenacious.reset()
@patch("swh.storage.in_memory.InMemoryStorage", LimitedInMemoryStorage)
@pytest.mark.parametrize("object_type, add_func_name, objects, bad1, bad2", testdata)
def test_tenacious_proxy_storage_rate_limit(
object_type, add_func_name, objects, bad1, bad2
):
storage = get_tenacious_storage(error_rate_limit={"errors": 1, "window_size": 3})
tenacious = storage.storage
in_memory = tenacious.storage
assert isinstance(tenacious, TenaciousProxyStorage)
assert isinstance(in_memory, LimitedInMemoryStorage)
size = len(objects)
add_func = getattr(storage, add_func_name)
# with no insertion failure, no impact
s = add_func(objects)
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 0
in_memory.reset()
tenacious.reset()
# with one insertion failure, no impact
s = add_func([bad1] + objects)
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 1
in_memory.reset()
tenacious.reset()
s = add_func(objects[: size // 2] + [bad1] + objects[size // 2 :])
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 1
in_memory.reset()
tenacious.reset()
# with two consecutive insertion failures, exception is raised
with pytest.raises(RuntimeError, match="Too many insertion errors"):
add_func([bad1, bad2] + objects)
in_memory.reset()
tenacious.reset()
if size > 2:
# with two consecutive insertion failures, exception is raised
# (errors not at the beginning)
with pytest.raises(RuntimeError, match="Too many insertion errors"):
add_func(objects[: size // 2] + [bad1, bad2] + objects[size // 2 :])
in_memory.reset()
tenacious.reset()
# with two non-consecutive insertion failures, no impact
# (errors are far enough to not reach the rate limit)
s = add_func(
objects[: size // 3]
+ [bad1]
+ objects[size // 3 : 2 * (size // 3)]
+ [bad2]
+ objects[2 * (size // 3) :]
)
assert s.get(f"{object_type}:add", 0) == size
assert s.get(f"{object_type}:add:errors", 0) == 2
in_memory.reset()
tenacious.reset()
| gpl-3.0 |
yoelk/kivy | kivy/tools/stub-gl-debug.py | 75 | 13278 | from __future__ import print_function
a = '''cdef void glActiveTexture (cgl.GLenum texture)
cdef void glAttachShader (cgl.GLuint program, cgl.GLuint shader)
cdef void glBindAttribLocation (cgl.GLuint program, cgl.GLuint index, cgl.GLchar* name)
cdef void glBindBuffer (cgl.GLenum target, cgl.GLuint buffer)
cdef void glBindFramebuffer (cgl.GLenum target, cgl.GLuint framebuffer)
cdef void glBindRenderbuffer (cgl.GLenum target, cgl.GLuint renderbuffer)
cdef void glBindTexture (cgl.GLenum target, cgl.GLuint texture)
cdef void glBlendColor (cgl.GLclampf red, cgl.GLclampf green, cgl.GLclampf blue, cgl.GLclampf alpha)
cdef void glBlendEquation (cgl.GLenum mode)
cdef void glBlendEquationSeparate (cgl.GLenum modeRGB, cgl.GLenum modeAlpha)
cdef void glBlendFunc (cgl.GLenum sfactor, cgl.GLenum dfactor)
cdef void glBlendFuncSeparate (cgl.GLenum srcRGB, cgl.GLenum dstRGB, cgl.GLenum srcAlpha, cgl.GLenum dstAlpha)
cdef void glBufferData (cgl.GLenum target, cgl.GLsizeiptr size, cgl.GLvoid* data, cgl.GLenum usage)
cdef void glBufferSubData (cgl.GLenum target, cgl.GLintptr offset, cgl.GLsizeiptr size, cgl.GLvoid* data)
cdef cgl.GLenum glCheckFramebufferStatus (cgl.GLenum target)
cdef void glClear (cgl.GLbitfield mask)
cdef void glClearColor (cgl.GLclampf red, cgl.GLclampf green, cgl.GLclampf blue, cgl.GLclampf alpha)
cdef void glClearDepthf (cgl.GLclampf depth)
cdef void glClearStencil (cgl.GLint s)
cdef void glColorMask (cgl.GLboolean red, cgl.GLboolean green, cgl.GLboolean blue, cgl.GLboolean alpha)
cdef void glCompileShader (cgl.GLuint shader)
cdef void glCompressedTexImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLenum internalformat, cgl.GLsizei width, cgl.GLsizei height, cgl.GLint border, cgl.GLsizei imageSize, cgl.GLvoid* data)
cdef void glCompressedTexSubImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint xoffset, cgl.GLint yoffset, cgl.GLsizei width, cgl.GLsizei height, cgl.GLenum format, cgl.GLsizei imageSize, cgl.GLvoid* data)
cdef void glCopyTexImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLenum internalformat, cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height, cgl.GLint border)
cdef void glCopyTexSubImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint xoffset, cgl.GLint yoffset, cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height)
cdef cgl.GLuint glCreateProgram ()
cdef cgl.GLuint glCreateShader (cgl.GLenum type)
cdef void glCullFace (cgl.GLenum mode)
cdef void glDeleteBuffers (cgl.GLsizei n, cgl.GLuint* buffers)
cdef void glDeleteFramebuffers (cgl.GLsizei n, cgl.GLuint* framebuffers)
cdef void glDeleteProgram (cgl.GLuint program)
cdef void glDeleteRenderbuffers (cgl.GLsizei n, cgl.GLuint* renderbuffers)
cdef void glDeleteShader (cgl.GLuint shader)
cdef void glDeleteTextures (cgl.GLsizei n, cgl.GLuint* textures)
cdef void glDepthFunc (cgl.GLenum func)
cdef void glDepthMask (cgl.GLboolean flag)
cdef void glDepthRangef (cgl.GLclampf zNear, cgl.GLclampf zFar)
cdef void glDetachShader (cgl.GLuint program, cgl.GLuint shader)
cdef void glDisable (cgl.GLenum cap)
cdef void glDisableVertexAttribArray (cgl.GLuint index)
cdef void glDrawArrays (cgl.GLenum mode, cgl.GLint first, cgl.GLsizei count)
cdef void glDrawElements (cgl.GLenum mode, cgl.GLsizei count, cgl.GLenum type, cgl.GLvoid* indices)
cdef void glEnable (cgl.GLenum cap)
cdef void glEnableVertexAttribArray (cgl.GLuint index)
cdef void glFinish ()
cdef void glFlush ()
cdef void glFramebufferRenderbuffer (cgl.GLenum target, cgl.GLenum attachment, cgl.GLenum renderbuffertarget, cgl.GLuint renderbuffer)
cdef void glFramebufferTexture2D (cgl.GLenum target, cgl.GLenum attachment, cgl.GLenum textarget, cgl.GLuint texture, cgl.GLint level)
cdef void glFrontFace (cgl.GLenum mode)
cdef void glGenBuffers (cgl.GLsizei n, cgl.GLuint* buffers)
cdef void glGenerateMipmap (cgl.GLenum target)
cdef void glGenFramebuffers (cgl.GLsizei n, cgl.GLuint* framebuffers)
cdef void glGenRenderbuffers (cgl.GLsizei n, cgl.GLuint* renderbuffers)
cdef void glGenTextures (cgl.GLsizei n, cgl.GLuint* textures)
cdef void glGetActiveAttrib (cgl.GLuint program, cgl.GLuint index, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLint* size, cgl.GLenum* type, cgl.GLchar* name)
cdef void glGetActiveUniform (cgl.GLuint program, cgl.GLuint index, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLint* size, cgl.GLenum* type, cgl.GLchar* name)
cdef void glGetAttachedShaders (cgl.GLuint program, cgl.GLsizei maxcount, cgl.GLsizei* count, cgl.GLuint* shaders)
cdef int glGetAttribLocation (cgl.GLuint program, cgl.GLchar* name)
cdef void glGetBooleanv (cgl.GLenum pname, cgl.GLboolean* params)
cdef void glGetBufferParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef cgl.GLenum glGetError ()
cdef void glGetFloatv (cgl.GLenum pname, cgl.GLfloat* params)
cdef void glGetFramebufferAttachmentParameteriv (cgl.GLenum target, cgl.GLenum attachment, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetIntegerv (cgl.GLenum pname, cgl.GLint* params)
cdef void glGetProgramiv (cgl.GLuint program, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetProgramInfoLog (cgl.GLuint program, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLchar* infolog)
cdef void glGetRenderbufferParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetShaderiv (cgl.GLuint shader, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetShaderInfoLog (cgl.GLuint shader, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLchar* infolog)
#cdef void glGetShaderPrecisionFormat (cgl.GLenum shadertype, cgl.GLenum precisiontype, cgl.GLint* range, cgl.GLint* precision)
cdef void glGetShaderSource (cgl.GLuint shader, cgl.GLsizei bufsize, cgl.GLsizei* length, cgl.GLchar* source)
cdef cgl.GLubyte* glGetString (cgl.GLenum name)
cdef void glGetTexParameterfv (cgl.GLenum target, cgl.GLenum pname, cgl.GLfloat* params)
cdef void glGetTexParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetUniformfv (cgl.GLuint program, cgl.GLint location, cgl.GLfloat* params)
cdef void glGetUniformiv (cgl.GLuint program, cgl.GLint location, cgl.GLint* params)
cdef int glGetUniformLocation (cgl.GLuint program, cgl.GLchar* name)
cdef void glGetVertexAttribfv (cgl.GLuint index, cgl.GLenum pname, cgl.GLfloat* params)
cdef void glGetVertexAttribiv (cgl.GLuint index, cgl.GLenum pname, cgl.GLint* params)
cdef void glGetVertexAttribPointerv (cgl.GLuint index, cgl.GLenum pname, cgl.GLvoid** pointer)
cdef void glHint (cgl.GLenum target, cgl.GLenum mode)
cdef cgl.GLboolean glIsBuffer (cgl.GLuint buffer)
cdef cgl.GLboolean glIsEnabled (cgl.GLenum cap)
cdef cgl.GLboolean glIsFramebuffer (cgl.GLuint framebuffer)
cdef cgl.GLboolean glIsProgram (cgl.GLuint program)
cdef cgl.GLboolean glIsRenderbuffer (cgl.GLuint renderbuffer)
cdef cgl.GLboolean glIsShader (cgl.GLuint shader)
cdef cgl.GLboolean glIsTexture (cgl.GLuint texture)
cdef void glLineWidth (cgl.GLfloat width)
cdef void glLinkProgram (cgl.GLuint program)
cdef void glPixelStorei (cgl.GLenum pname, cgl.GLint param)
cdef void glPolygonOffset (cgl.GLfloat factor, cgl.GLfloat units)
cdef void glReadPixels (cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height, cgl.GLenum format, cgl.GLenum type, cgl.GLvoid* pixels)
#cdef void glReleaseShaderCompiler ()
cdef void glRenderbufferStorage (cgl.GLenum target, cgl.GLenum internalformat, cgl.GLsizei width, cgl.GLsizei height)
cdef void glSampleCoverage (cgl.GLclampf value, cgl.GLboolean invert)
cdef void glScissor (cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height)
#cdef void glShaderBinary (cgl.GLsizei n, cgl.GLuint* shaders, cgl.GLenum binaryformat, cgl.GLvoid* binary, cgl.GLsizei length)
cdef void glShaderSource (cgl.GLuint shader, cgl.GLsizei count, cgl.GLchar** string, cgl.GLint* length)
cdef void glStencilFunc (cgl.GLenum func, cgl.GLint ref, cgl.GLuint mask)
cdef void glStencilFuncSeparate (cgl.GLenum face, cgl.GLenum func, cgl.GLint ref, cgl.GLuint mask)
cdef void glStencilMask (cgl.GLuint mask)
cdef void glStencilMaskSeparate (cgl.GLenum face, cgl.GLuint mask)
cdef void glStencilOp (cgl.GLenum fail, cgl.GLenum zfail, cgl.GLenum zpass)
cdef void glStencilOpSeparate (cgl.GLenum face, cgl.GLenum fail, cgl.GLenum zfail, cgl.GLenum zpass)
cdef void glTexImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint internalformat, cgl.GLsizei width, cgl.GLsizei height, cgl.GLint border, cgl.GLenum format, cgl.GLenum type, cgl.GLvoid* pixels)
cdef void glTexParameterf (cgl.GLenum target, cgl.GLenum pname, cgl.GLfloat param)
cdef void glTexParameterfv (cgl.GLenum target, cgl.GLenum pname, cgl.GLfloat* params)
cdef void glTexParameteri (cgl.GLenum target, cgl.GLenum pname, cgl.GLint param)
cdef void glTexParameteriv (cgl.GLenum target, cgl.GLenum pname, cgl.GLint* params)
cdef void glTexSubImage2D (cgl.GLenum target, cgl.GLint level, cgl.GLint xoffset, cgl.GLint yoffset, cgl.GLsizei width, cgl.GLsizei height, cgl.GLenum format, cgl.GLenum type, cgl.GLvoid* pixels)
cdef void glUniform1f (cgl.GLint location, cgl.GLfloat x)
cdef void glUniform1fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform1i (cgl.GLint location, cgl.GLint x)
cdef void glUniform1iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniform2f (cgl.GLint location, cgl.GLfloat x, cgl.GLfloat y)
cdef void glUniform2fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform2i (cgl.GLint location, cgl.GLint x, cgl.GLint y)
cdef void glUniform2iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniform3f (cgl.GLint location, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z)
cdef void glUniform3fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform3i (cgl.GLint location, cgl.GLint x, cgl.GLint y, cgl.GLint z)
cdef void glUniform3iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniform4f (cgl.GLint location, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z, cgl.GLfloat w)
cdef void glUniform4fv (cgl.GLint location, cgl.GLsizei count, cgl.GLfloat* v)
cdef void glUniform4i (cgl.GLint location, cgl.GLint x, cgl.GLint y, cgl.GLint z, cgl.GLint w)
cdef void glUniform4iv (cgl.GLint location, cgl.GLsizei count, cgl.GLint* v)
cdef void glUniformMatrix2fv (cgl.GLint location, cgl.GLsizei count, cgl.GLboolean transpose, cgl.GLfloat* value)
cdef void glUniformMatrix3fv (cgl.GLint location, cgl.GLsizei count, cgl.GLboolean transpose, cgl.GLfloat* value)
cdef void glUniformMatrix4fv (cgl.GLint location, cgl.GLsizei count, cgl.GLboolean transpose, cgl.GLfloat* value)
cdef void glUseProgram (cgl.GLuint program)
cdef void glValidateProgram (cgl.GLuint program)
cdef void glVertexAttrib1f (cgl.GLuint indx, cgl.GLfloat x)
cdef void glVertexAttrib1fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttrib2f (cgl.GLuint indx, cgl.GLfloat x, cgl.GLfloat y)
cdef void glVertexAttrib2fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttrib3f (cgl.GLuint indx, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z)
cdef void glVertexAttrib3fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttrib4f (cgl.GLuint indx, cgl.GLfloat x, cgl.GLfloat y, cgl.GLfloat z, cgl.GLfloat w)
cdef void glVertexAttrib4fv (cgl.GLuint indx, cgl.GLfloat* values)
cdef void glVertexAttribPointer (cgl.GLuint indx, cgl.GLint size, cgl.GLenum type, cgl.GLboolean normalized, cgl.GLsizei stride, cgl.GLvoid* ptr)
cdef void glViewport (cgl.GLint x, cgl.GLint y, cgl.GLsizei width, cgl.GLsizei height)'''
def replace(s):
    """Yield the call-forwarding tokens for one stripped ``cdef`` declaration.

    Type names are dropped, the opening parenthesis (which arrives fused with
    the first argument's type) is emitted on its own, and the GL entry point
    is re-qualified as ``cgl.<name>`` — prefixed with ``return`` when the
    declaration's return type is not ``void``.
    """
    tokens = s.split(' ')
    return_type = tokens[1]
    for token in (t.strip() for t in tokens[2:]):
        if not token or token.startswith('GL'):
            # Empty fragments and bare type names contribute nothing.
            continue
        if token.startswith('(GL'):
            # '(GLenum' etc.: keep only the parenthesis.
            yield '('
        elif token.startswith('gl'):
            qualified = 'cgl.%s' % token
            yield qualified if return_type == 'void' else 'return ' + qualified
        else:
            # Argument names (possibly with trailing ')' / ',') pass through.
            yield token
# --- Generation driver ------------------------------------------------------
# Walks every declaration in `a` and prints, on stdout, a Cython debug
# wrapper that (1) prints the wrapper's arguments, (2) forwards the call to
# the real cgl.* function, then (3) checks glGetError().
print('''
# This file was automatically generated with kivy/tools/stub-gl-debug.py
cimport c_opengl as cgl
''')
lines = a.splitlines()
for x in lines:
    if x.startswith('#'):
        # There are some functions that either do not exist or break on OSX.
        # Just skip those.
        print('# Skipping generation of: "%s"' % x)
        continue
    # Strip the cgl. qualifier for the wrapper signature; replace() re-adds
    # it on the forwarded call.
    x = x.replace('cgl.', '')
    y = ' '.join(replace(x))
    print('%s with gil:' % x)
    s = x.split()
    # Emit the argument-dump line: pointer arguments are shown as hex
    # addresses, scalar arguments are printed by value.
    print(' print "GL %s(' % s[2], end=' ')
    pointer = 0
    for arg in s[3:]:
        arg = arg.strip()
        arg = arg.replace(',', '').replace(')', '')
        if 'GL' in arg or arg == '(':
            # Type token: remember its '*' count (pointer depth) for the
            # argument name that follows it.
            pointer = arg.count('*')
            continue
        pointer = '*' * pointer
        if pointer:
            print('%s%s=", repr(hex(<long> %s)), ",' % (arg, pointer, arg), end=' ')
        else:
            print('%s = ", %s, ",' % (arg, arg), end=' ')
        pointer = 0
    print(')"')
    # Forward the call, then the per-call error check.
    print(' %s' % y)
    print(' ret = glGetError()')
    print(' if ret: print("ERR {} / {}".format(ret, ret))')
| mit |
robinro/ansible | lib/ansible/modules/cloud/vmware/vmware_cluster.py | 33 | 9985 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Standard module metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_cluster
short_description: Create VMware vSphere Cluster
description:
- Create VMware vSphere Cluster
version_added: 2.0
author: Joseph Callen (@jcpowermac)
notes:
requirements:
- Tested on ESXi 5.5
- PyVmomi installed
options:
datacenter_name:
description:
- The name of the datacenter the cluster will be created in.
required: True
cluster_name:
description:
- The name of the cluster that will be created
required: True
enable_ha:
description:
- If set to True will enable HA when the cluster is created.
required: False
default: False
enable_drs:
description:
- If set to True will enable DRS when the cluster is created.
required: False
default: False
enable_vsan:
description:
- If set to True will enable vSAN when the cluster is created.
required: False
default: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_cluster command from Ansible Playbooks
- name: Create Cluster
local_action:
module: vmware_cluster
hostname: "{{ ansible_ssh_host }}"
username: root
password: vmware
datacenter_name: "datacenter"
cluster_name: "cluster"
enable_ha: True
enable_drs: True
enable_vsan: True
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
class VMwareCluster(object):
    """Drives a vSphere cluster toward the desired state (present/absent),
    configuring HA (DAS), DRS and vSAN as requested by the module params.
    Every terminal path reports back through module.exit_json/fail_json.
    """
    def __init__(self, module):
        # NOTE(review): connect_to_api, wait_for_task, find_* come from the
        # star-import of ansible.module_utils.vmware at the bottom of the file.
        self.module = module
        self.enable_ha = module.params['enable_ha']
        self.enable_drs = module.params['enable_drs']
        self.enable_vsan = module.params['enable_vsan']
        self.cluster_name = module.params['cluster_name']
        self.desired_state = module.params['state']
        # Populated lazily by check_cluster_configuration().
        self.datacenter = None
        self.cluster = None
        self.content = connect_to_api(module)
        self.datacenter_name = module.params['datacenter_name']
    def process_state(self):
        """Look up the current state and dispatch to the handler for the
        (desired_state, current_state) pair."""
        cluster_states = {
            'absent': {
                'present': self.state_destroy_cluster,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'update': self.state_update_cluster,
                'present': self.state_exit_unchanged,
                'absent': self.state_create_cluster,
            }
        }
        current_state = self.check_cluster_configuration()
        # Based on the desired_state and the current_state call
        # the appropriate method from the dictionary
        cluster_states[self.desired_state][current_state]()
    def configure_ha(self):
        """Build the HA (DAS) config spec; failover level is hard-coded to 2."""
        das_config = vim.cluster.DasConfigInfo()
        das_config.enabled = self.enable_ha
        das_config.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
        das_config.admissionControlPolicy.failoverLevel = 2
        return das_config
    def configure_drs(self):
        """Build the DRS config spec."""
        drs_config = vim.cluster.DrsConfigInfo()
        drs_config.enabled = self.enable_drs
        # Set to partially automated
        drs_config.vmotionRate = 3
        return drs_config
    def configure_vsan(self):
        """Build the vSAN config spec; automatic storage claiming is off."""
        vsan_config = vim.vsan.cluster.ConfigInfo()
        vsan_config.enabled = self.enable_vsan
        vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
        vsan_config.defaultConfig.autoClaimStorage = False
        return vsan_config
    def state_create_cluster(self):
        """Create the cluster in the datacenter found earlier.

        The vSAN spec is attached only when enabled; HA/DRS specs are always
        attached (disabled specs simply record enabled=False). Honors
        check mode by skipping the actual CreateClusterEx call.
        """
        try:
            cluster_config_spec = vim.cluster.ConfigSpecEx()
            cluster_config_spec.dasConfig = self.configure_ha()
            cluster_config_spec.drsConfig = self.configure_drs()
            if self.enable_vsan:
                cluster_config_spec.vsanConfig = self.configure_vsan()
            if not self.module.check_mode:
                self.datacenter.hostFolder.CreateClusterEx(self.cluster_name, cluster_config_spec)
            self.module.exit_json(changed=True)
        except vim.fault.DuplicateName:
            self.module.fail_json(msg="A cluster with the name %s already exists" % self.cluster_name)
        except vmodl.fault.InvalidArgument:
            self.module.fail_json(msg="Cluster configuration specification parameter is invalid")
        except vim.fault.InvalidName:
            self.module.fail_json(msg="%s is an invalid name for a cluster" % self.cluster_name)
        except vmodl.fault.NotSupported:
            # This should never happen
            self.module.fail_json(msg="Trying to create a cluster on an incorrect folder object")
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            # This should never happen either
            self.module.fail_json(msg=method_fault.msg)
    def state_destroy_cluster(self):
        """Destroy the existing cluster (skipped in check mode, where
        changed=True is reported without touching vCenter)."""
        changed = True
        result = None
        try:
            if not self.module.check_mode:
                task = self.cluster.Destroy_Task()
                changed, result = wait_for_task(task)
            self.module.exit_json(changed=changed, result=result)
        except vim.fault.VimFault as vim_fault:
            self.module.fail_json(msg=vim_fault.msg)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
    def state_exit_unchanged(self):
        """Current state already matches the desired state: report no change."""
        self.module.exit_json(changed=False)
    def state_update_cluster(self):
        """Reconfigure only the sub-features (HA/DRS/vSAN) whose current
        enabled flag differs from the requested one."""
        cluster_config_spec = vim.cluster.ConfigSpecEx()
        changed = True
        result = None
        if self.cluster.configurationEx.dasConfig.enabled != self.enable_ha:
            cluster_config_spec.dasConfig = self.configure_ha()
        if self.cluster.configurationEx.drsConfig.enabled != self.enable_drs:
            cluster_config_spec.drsConfig = self.configure_drs()
        if self.cluster.configurationEx.vsanConfigInfo.enabled != self.enable_vsan:
            cluster_config_spec.vsanConfig = self.configure_vsan()
        try:
            if not self.module.check_mode:
                # True == modify the cluster (vs. a dry-run spec validation).
                task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                changed, result = wait_for_task(task)
            self.module.exit_json(changed=changed, result=result)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except TaskError as task_e:
            self.module.fail_json(msg=str(task_e))
    def check_cluster_configuration(self):
        """Return the cluster's current state relative to the request:
        'absent' (no such cluster), 'present' (exists and HA/DRS/vSAN flags
        match), or 'update' (exists but at least one flag differs).
        Fails the module if the datacenter itself does not exist."""
        try:
            self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
            if self.datacenter is None:
                self.module.fail_json(msg="Datacenter %s does not exist, "
                                          "please create first with Ansible Module vmware_datacenter or manually."
                                          % self.datacenter_name)
            self.cluster = find_cluster_by_name_datacenter(self.datacenter, self.cluster_name)
            if self.cluster is None:
                return 'absent'
            else:
                desired_state = (self.enable_ha,
                                 self.enable_drs,
                                 self.enable_vsan)
                current_state = (self.cluster.configurationEx.dasConfig.enabled,
                                 self.cluster.configurationEx.drsConfig.enabled,
                                 self.cluster.configurationEx.vsanConfigInfo.enabled)
                if desired_state != current_state:
                    return 'update'
                else:
                    return 'present'
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
def main():
    """Module entry point: assemble the argument spec, verify pyVmomi is
    available, then run the cluster state machine."""
    spec = vmware_argument_spec()
    spec.update({
        'datacenter_name': dict(required=True, type='str'),
        'cluster_name': dict(required=True, type='str'),
        'enable_ha': dict(default=False, required=False, type='bool'),
        'enable_drs': dict(default=False, required=False, type='bool'),
        'enable_vsan': dict(default=False, required=False, type='bool'),
        'state': dict(default='present', choices=['present', 'absent'], type='str'),
    })
    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    VMwareCluster(module).process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
gylian/headphones | lib/html5lib/inputstream.py | 618 | 30855 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from six.moves import http_client
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
    """Buffering wrapper for streams that cannot seek on their own.

    Every chunk ever read from the wrapped stream is retained in a list, so
    seek() can move back into previously read data. A list of chunks is used
    because repeatedly joining strings would be O(n**2).
    """

    def __init__(self, stream):
        self.stream = stream
        self.buffer = []
        # [index of current chunk, offset within that chunk]
        self.position = [-1, 0]

    def tell(self):
        """Absolute offset of the current position from the stream start."""
        consumed = sum(len(c) for c in self.buffer[:self.position[0]])
        return consumed + self.position[1]

    def seek(self, pos):
        """Reposition within already-buffered data (seeking ahead of what
        has been read is not supported)."""
        assert pos <= self._bufferedBytes()
        remaining = pos
        idx = 0
        while len(self.buffer[idx]) < remaining:
            remaining -= len(self.buffer[idx])
            idx += 1
        self.position = [idx, remaining]

    def read(self, bytes):
        if not self.buffer:
            return self._readStream(bytes)
        at_buffer_end = (self.position[0] == len(self.buffer) and
                         self.position[1] == len(self.buffer[-1]))
        if at_buffer_end:
            return self._readStream(bytes)
        return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        return sum(len(chunk) for chunk in self.buffer)

    def _readStream(self, bytes):
        # Pull fresh data from the wrapped stream and retain it as a chunk.
        chunk = self.stream.read(bytes)
        self.buffer.append(chunk)
        self.position[0] += 1
        self.position[1] = len(chunk)
        return chunk

    def _readFromBuffer(self, bytes):
        # Serve as much as possible from buffered chunks, then fall back to
        # the wrapped stream for whatever is still missing.
        wanted = bytes
        pieces = []
        idx, offset = self.position
        while idx < len(self.buffer) and wanted != 0:
            assert wanted > 0
            chunk = self.buffer[idx]
            available = len(chunk) - offset
            if wanted <= available:
                take = wanted
                self.position = [idx, offset + take]
            else:
                take = available
                self.position = [idx, len(chunk)]
                idx += 1
            pieces.append(chunk[offset:offset + take])
            wanted -= take
            offset = 0
        if wanted:
            pieces.append(self._readStream(wanted))
        return b"".join(pieces)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
    """Factory: return a unicode or binary input stream for *source*,
    which may be a file-like object or a (byte/unicode) string."""
    if isinstance(source, http_client.HTTPResponse):
        # Work around Python bug #20007: read(0) closes the connection.
        # http://bugs.python.org/issue20007
        is_text = False
    elif hasattr(source, "read"):
        is_text = isinstance(source.read(0), text_type)
    else:
        is_text = isinstance(source, text_type)
    if not is_text:
        return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
    if encoding is not None:
        raise TypeError("Cannot explicitly set an encoding with a unicode string")
    return HTMLUnicodeInputStream(source)
class HTMLUnicodeInputStream(object):
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    # Number of characters requested from the underlying stream per chunk.
    _defaultChunkSize = 10240
    def __init__(self, source):
        """Initialises the HTMLUnicodeInputStream.

        source can be either a file-like object or a unicode string; it is
        wrapped by openStream() and consumed chunk by chunk.
        """
        # Detect wide (UCS4) vs narrow (UCS2) unicode builds: on a wide build
        # a non-BMP literal has length 1, and lone surrogates are simply
        # invalid; on a narrow build surrogate pairs must be handled.
        if len("\U0010FFFF") == 1:
            self.reportCharacterErrors = self.characterErrorsUCS4
            self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
        else:
            self.reportCharacterErrors = self.characterErrorsUCS2
            self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
        # List of where new lines occur
        self.newLines = [0]
        self.charEncoding = ("utf-8", "certain")
        self.dataStream = self.openStream(source)
        self.reset()
    def reset(self):
        """Reset all chunk/position bookkeeping to the start-of-stream state."""
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []
        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0
        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = StringIO(source)
        return stream
    def _position(self, offset):
        """Return (line, column) for *offset* within the current chunk,
        counting from the totals accumulated over previous chunks."""
        chunk = self.chunk
        nLines = chunk.count('\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind('\n', 0, offset)
        if lastLinePos == -1:
            # No newline in this chunk before offset: continue the previous
            # chunk's last line.
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)
    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        # Lines are reported 1-based, columns 0-based.
        return (line + 1, col)
    def char(self):
        """ Read one character from the stream or queue if available. Return
        EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF
        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1
        return char
    def readChunk(self, chunkSize=None):
        """Read and normalize the next chunk of data.

        Returns False at end of stream. Normalization: invalid code points
        are reported and replaced with U+FFFD, and CR LF / CR become LF.
        A trailing CR or high surrogate is held back in _bufferedCharacter
        so sequences split across chunk boundaries are handled correctly.
        """
        if chunkSize is None:
            chunkSize = self._defaultChunkSize
        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        data = self.dataStream.read(chunkSize)
        # Deal with CR LF and surrogates broken across chunks
        if self._bufferedCharacter:
            data = self._bufferedCharacter + data
            self._bufferedCharacter = None
        elif not data:
            # We have no more data, bye-bye stream
            return False
        if len(data) > 1:
            lastv = ord(data[-1])
            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
                # Hold back a trailing CR / high surrogate for the next chunk.
                self._bufferedCharacter = data[-1]
                data = data[:-1]
        self.reportCharacterErrors(data)
        # Replace invalid characters
        # Note U+0000 is dealt with in the tokenizer
        data = self.replaceCharactersRegexp.sub("\ufffd", data)
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        self.chunk = data
        self.chunkSize = len(data)
        return True
    def characterErrorsUCS4(self, data):
        """Record one parse error per invalid code point (wide builds)."""
        for i in range(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")
    def characterErrorsUCS2(self, data):
        """Record parse errors for invalid code points on narrow builds,
        where non-BMP characters appear as surrogate pairs."""
        # Someone picked the wrong compile option
        # You lose
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            # Pretty sure there should be endianness issues here
            if utils.isSurrogatePair(data[pos:pos + 2]):
                # We have a surrogate pair!
                char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                # Skip the low surrogate of this pair on the next match.
                skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Lone high surrogate at the end of the data.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")
    def charsUntil(self, characters, opposite=False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """
        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                # Negate the character class: match chars NOT in the set.
                regex = "^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
        rv = []
        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break
        r = "".join(rv)
        return r
    def unget(self, char):
        """Push one character back so the next char() returns it again."""
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not None:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    It wraps a raw *byte* stream and layers encoding detection (BOM, <meta>
    prescan, optional chardet guess) plus an incremental decoder on top of
    the unicode base class.
    """

    def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding. If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        parseMeta - Look for a <meta> element containing encoding information
        chardet - Fall back to the charade/chardet library to guess the
            encoding when neither BOM nor <meta> yields one.
        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)
        HTMLUnicodeInputStream.__init__(self, self.rawStream)
        # (codec name, confidence) pair; confidence is "certain" only when
        # the caller supplied an explicit encoding (or a BOM was found).
        self.charEncoding = (codecName(encoding), "certain")
        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 512
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Encoding to use if no other information can be found
        self.defaultEncoding = "windows-1252"
        # Detect encoding iff no explicit "transport level" encoding is supplied
        if (self.charEncoding[0] is None):
            self.charEncoding = self.detectEncoding(parseMeta, chardet)
        # Call superclass
        self.reset()

    def reset(self):
        # Rebuild the decoding reader over the raw byte stream using the
        # currently selected encoding; undecodable byte sequences become
        # U+FFFD ('replace') instead of raising.
        self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
                                                                 'replace')
        HTMLUnicodeInputStream.reset(self)

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.
        """
        # NOTE(review): despite the docstring, a non-file ``source`` is
        # wrapped in BytesIO (so it must be a bytes object), not opened as
        # a filename -- confirm against upstream intent.
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)
        # Broad except is deliberate: any stream that cannot seek (for any
        # reason) is wrapped in a buffering adapter that emulates seeking.
        try:
            stream.seek(stream.tell())
        except:
            stream = BufferedStream(stream)
        return stream

    def detectEncoding(self, parseMeta=True, chardet=True):
        """Return an (encoding, confidence) pair using BOM, meta prescan,
        chardet guess, then the windows-1252 default, in that order."""
        # First look for a BOM
        # This will also read past the BOM if present
        encoding = self.detectBOM()
        confidence = "certain"
        # If there is no BOM need to look for meta elements with encoding
        # information
        if encoding is None and parseMeta:
            encoding = self.detectEncodingMeta()
            confidence = "tentative"
        # Guess with chardet, if available
        if encoding is None and chardet:
            confidence = "tentative"
            try:
                # charade is preferred when installed; chardet is the
                # fallback. The outer try turns a missing library into a
                # no-op via the ImportError handler below.
                try:
                    from charade.universaldetector import UniversalDetector
                except ImportError:
                    from chardet.universaldetector import UniversalDetector
                # NOTE(review): ``buffers`` accumulates the read chunks but
                # is never consumed afterwards -- appears to be dead state.
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = detector.result['encoding']
                # Rewind so the real parse starts at byte 0 again.
                self.rawStream.seek(0)
            except ImportError:
                pass
        # If all else fails use the default encoding
        if encoding is None:
            confidence = "tentative"
            encoding = self.defaultEncoding
        # Substitute for equivalent encodings:
        encodingSub = {"iso-8859-1": "windows-1252"}
        if encoding.lower() in encodingSub:
            encoding = encodingSub[encoding.lower()]
        return encoding, confidence

    def changeEncoding(self, newEncoding):
        # Only legal while the current encoding is still a guess.
        assert self.charEncoding[1] != "certain"
        newEncoding = codecName(newEncoding)
        if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
            newEncoding = "utf-8"
        if newEncoding is None:
            return
        elif newEncoding == self.charEncoding[0]:
            # Same encoding as before: just upgrade the confidence.
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            self.rawStream.seek(0)
            self.reset()
            self.charEncoding = (newEncoding, "certain")
            # NOTE(review): self.charEncoding is reassigned *before* this
            # message is built, so "from %s" reports the new encoding, not
            # the old one; reset() above also still uses the old encoding.
            raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))

    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
            codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
        }
        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)
        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])  # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16 (a UTF-32-LE BOM begins
            # with the two bytes of a UTF-16-LE BOM).
            encoding = bomDict.get(string)  # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2
        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        self.rawStream.seek(encoding and seek or 0)
        return encoding

    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        # Prescan only the first numBytesMeta (512) bytes, then rewind so
        # the real parse is unaffected.
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()
        # A meta-declared UTF-16 cannot be what the ASCII prescan just read;
        # override it with utf-8.
        if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
            encoding = "utf-8"
        return encoding
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""

    def __new__(self, value):
        # NOTE: the first parameter is conventionally named ``cls`` -- it is
        # the class here, not an instance. The stored bytes are lowercased
        # once so all later matching is effectively case-insensitive.
        assert isinstance(value, bytes)
        return bytes.__new__(self, value.lower())

    def __init__(self, value):
        # Cursor starts at -1 so the first __next__() lands on index 0.
        self._position = -1

    def __iter__(self):
        return self

    def __next__(self):
        # Advance the cursor and return the byte at the new position as a
        # length-1 bytes object (uniform across Py2/Py3 slicing semantics).
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p:p + 1]

    def next(self):
        # Py2 compat
        return self.__next__()

    def previous(self):
        # Step the cursor back one byte and return the byte at the new
        # (decremented) position; StopIteration if the cursor had already
        # run past the end of the data.
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]

    def setPosition(self, position):
        # NOTE(review): validates the *current* position, not the one being
        # assigned -- an already-exhausted cursor cannot be repositioned.
        if self._position >= len(self):
            raise StopIteration
        self._position = position

    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            # Before the first __next__() there is no valid position yet.
            return None

    position = property(getPosition, setPosition)

    def getCurrentByte(self):
        return self[self.position:self.position + 1]

    currentByte = property(getCurrentByte)

    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                # Leave the cursor ON the first non-skipped byte.
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def skipUntil(self, chars):
        # Inverse of skip(): advance until a byte *in* ``chars`` is found,
        # returning it, or None if the data runs out first.
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        # NOTE(review): the parameter shadows the builtin ``bytes`` type
        # inside this method.
        p = self.position
        data = self[p:p + len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv

    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes) - 1)
            return True
        else:
            # Callers (EncodingParser handlers) treat StopIteration as
            # "stop scanning".
            raise StopIteration
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""
    # The numbered "Step N" comments in getAttribute() follow the
    # attribute-parsing steps of the HTML encoding-sniffing (meta prescan)
    # algorithm this parser implements.

    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        # Result slot: set by handleMeta() when a usable charset is found.
        self.encoding = None

    def getEncoding(self):
        # Prefix dispatch table: the first matching byte prefix at the
        # current position wins. A handler returns False (or raises
        # StopIteration, converted below) to terminate the scan.
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for byte in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        keepParsing = method()
                        break
                    except StopIteration:
                        keepParsing = False
                        break
            if not keepParsing:
                break
        return self.encoding

    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")

    def handleMeta(self):
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    # A content= charset seen earlier only becomes valid
                    # once the content-type pragma is confirmed.
                    if hasPragma and pendingEncoding is not None:
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = codecName(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                # Hold it until/unless a matching
                                # http-equiv="content-type" shows up.
                                pendingEncoding = codec

    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)

    def handlePossibleEndTag(self):
        # Consume the second byte of "</" before the shared tag handling.
        next(self.data)
        return self.handlePossibleTag(True)

    def handlePossibleTag(self, endTag):
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True
        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes (values are discarded; this only moves
            # the cursor past the tag).
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True

    def handleOther(self):
        return self.data.jumpTo(b">")

    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                # Attribute names are ASCII-lowercased on the fly.
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11: unquoted value, terminated by whitespace or an angle
        # bracket.
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extract the charset declared inside a <meta content="..."> value,
    i.e. the ``charset=<value>`` fragment (quoted or unquoted)."""

    def __init__(self, data):
        # ``data`` is expected to be an EncodingBytes instance (a bytes
        # subclass), so this assert holds for the normal call path.
        assert isinstance(data, bytes)
        self.data = data

    def parse(self):
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            # jumpTo()/position access raise StopIteration to signal "ran
            # out of data"; treat that as "no charset found".
            return None
def codecName(encoding):
    """Map an encoding label to its canonical Python codec name.

    Returns None when the label is empty/None, is a bytes object that is
    not ASCII-decodable, or does not appear in the ``encodings`` table.
    """
    label = encoding
    if isinstance(label, bytes):
        try:
            label = label.decode("ascii")
        except UnicodeDecodeError:
            return None
    if not label:
        return None
    # Strip ASCII punctuation and lowercase before the table lookup.
    normalized = ascii_punctuation_re.sub("", label).lower()
    return encodings.get(normalized, None)
| gpl-3.0 |
andixlm/android_kernel_samsung_galaxys2plus-common | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    # Monitor task lifecycle events (fork/comm) system-wide through the
    # perf python binding and print one line per received sample.
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, sample_period = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    # NOTE(review): perf.SAMPLE_TID appears twice in the mask above -- the
    # second occurrence is redundant; possibly SAMPLE_TIME was intended.
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Block until any mmap'd ring buffer has data, then drain each CPU.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            # Trailing comma keeps the event repr on the same output line
            # (Python 2 print statement).
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event

if __name__ == '__main__':
    main()
| gpl-2.0 |
fjorba/invenio | modules/bibrank/web/admin/bibrankadmin.py | 25 | 7810 | ## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio BibRank Administrator Interface."""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
import invenio.bibrankadminlib as brc
#reload(brc)
from invenio.webpage import page, error_page
from invenio.config import CFG_SITE_URL, CFG_SITE_LANG, CFG_SITE_NAME
from invenio.webuser import getUid, page_not_authorized
def index(req, ln=CFG_SITE_LANG):
    """Serve the main BibRank admin page.

    @param req: mod_python request object
    @param ln: interface language code
    @return: rendered admin page, or the not-authorized page
    """
    navtrail_previous_links = brc.getnavtrail() # + """> <a class="navtrail" href="%s/admin/bibrank/bibrankadmin.py">BibRank Admin Interface</a> """ % (CFG_SITE_URL)
    try:
        uid = getUid(req)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any real failure still renders the
        # generic error page as before.
        return error_page('Error', req)
    auth = brc.check_user(req, 'cfgbibrank')
    if not auth[0]:
        return page(title="BibRank Admin Interface",
                    body=brc.perform_index(ln),
                    uid=uid,
                    language=ln,
                    navtrail=navtrail_previous_links,
                    lastupdated=__lastupdated__,
                    req=req)
    else:
        # auth[1] carries the explanatory authorization-failure message.
        return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def addrankarea(req, ln=CFG_SITE_LANG, rnkcode='', template='', confirm=-1):
    # Serve the "add new rank method" admin page; ``confirm`` tracks the
    # multi-step form workflow (-1 = initial display).
    navtrail_previous_links = brc.getnavtrail() + """> <a class="navtrail" href="%s/admin/bibrank/bibrankadmin.py/">BibRank Admin Interface</a> """ % (CFG_SITE_URL)
    try:
        uid = getUid(req)
    except:
        # Broad by (legacy) design: any failure resolving the user id
        # renders the generic error page.
        return error_page('Error', req)
    auth = brc.check_user(req,'cfgbibrank')
    if not auth[0]:
        return page(title="Add new rank method",
                    body=brc.perform_addrankarea(rnkcode=rnkcode,
                                                 ln=ln,
                                                 template=template,
                                                 confirm=confirm),
                    uid=uid,
                    language=ln,
                    navtrail = navtrail_previous_links,
                    req=req,
                    lastupdated=__lastupdated__)
    else:
        return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def modifytranslations(req, rnkID='', ln=CFG_SITE_LANG, sel_type='', trans=None, confirm=0):
    """Serve the 'modify translations' page for a rank method.

    @param req: mod_python request object
    @param rnkID: id of the rank method being edited
    @param sel_type: selected translation type
    @param trans: list of submitted translations (defaults to an empty list)
    @param confirm: confirmation step of the form workflow
    """
    # Default changed from a mutable ``trans=[]`` (shared across calls) to
    # None; normalise to a fresh list so downstream code sees the same
    # value as before.
    if trans is None:
        trans = []
    navtrail_previous_links = brc.getnavtrail() + """> <a class="navtrail" href="%s/admin/bibrank/bibrankadmin.py/">BibRank Admin Interface</a> """ % (CFG_SITE_URL)
    try:
        uid = getUid(req)
    except Exception:
        # Narrowed from a bare ``except:`` (which also caught SystemExit).
        return error_page('Error', req)
    auth = brc.check_user(req, 'cfgbibrank')
    if not auth[0]:
        return page(title="Modify translations",
                    body=brc.perform_modifytranslations(rnkID=rnkID,
                                                        ln=ln,
                                                        sel_type=sel_type,
                                                        trans=trans,
                                                        confirm=confirm),
                    uid=uid,
                    language=ln,
                    req=req,
                    navtrail=navtrail_previous_links,
                    lastupdated=__lastupdated__)
    else:
        return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def modifycollection(req, ln=CFG_SITE_LANG, rnkID='', func='', colID='', confirm=0):
    # Toggle a rank method's visibility toward a collection; ``func`` names
    # the operation and ``colID`` the target collection.
    navtrail_previous_links = brc.getnavtrail() + """> <a class="navtrail" href="%s/admin/bibrank/bibrankadmin.py/">BibRank Admin Interface</a> """ % (CFG_SITE_URL)
    try:
        uid = getUid(req)
    except:
        # Broad by (legacy) design: any failure resolving the user id
        # renders the generic error page.
        return error_page('Error', req)
    auth = brc.check_user(req,'cfgbibrank')
    if not auth[0]:
        return page(title="Modify visibility toward collections",
                    body=brc.perform_modifycollection(rnkID=rnkID,
                                                      ln=ln,
                                                      func=func,
                                                      colID=colID,
                                                      confirm=confirm),
                    uid=uid,
                    language=ln,
                    req=req,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__)
    else:
        return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def deleterank(req, ln=CFG_SITE_LANG, rnkID='', confirm=0):
    """Render the confirmation/result page for deleting a rank method."""
    breadcrumbs = brc.getnavtrail() + """> <a class="navtrail" href="%s/admin/bibrank/bibrankadmin.py/">BibRank Admin Interface</a> """ % (CFG_SITE_URL)
    try:
        uid = getUid(req)
    except:
        return error_page('Error', req)
    auth = brc.check_user(req, 'cfgbibrank')
    if auth[0]:
        # Authorization failed: auth[1] carries the explanatory message.
        return page_not_authorized(req=req, text=auth[1], navtrail=breadcrumbs)
    body = brc.perform_deleterank(rnkID=rnkID, ln=ln, confirm=confirm)
    return page(title="Delete rank method",
                body=body,
                uid=uid,
                language=ln,
                req=req,
                navtrail=breadcrumbs,
                lastupdated=__lastupdated__)
def modifyrank(req, ln=CFG_SITE_LANG, rnkID='', rnkcode='', template='', cfgfile='', confirm=0):
    # Edit an existing rank method: its code, template and configuration
    # file contents; ``confirm`` tracks the form workflow step.
    navtrail_previous_links = brc.getnavtrail() + """> <a class="navtrail" href="%s/admin/bibrank/bibrankadmin.py/">BibRank Admin Interface</a> """ % (CFG_SITE_URL)
    try:
        uid = getUid(req)
    except:
        # Broad by (legacy) design: any failure resolving the user id
        # renders the generic error page.
        return error_page('Error', req)
    auth = brc.check_user(req,'cfgbibrank')
    if not auth[0]:
        return page(title="Modify rank method",
                    body=brc.perform_modifyrank(rnkID=rnkID,
                                                ln=ln,
                                                rnkcode=rnkcode,
                                                cfgfile=cfgfile,
                                                template=template,
                                                confirm=confirm),
                    uid=uid,
                    language=ln,
                    req=req,
                    navtrail = navtrail_previous_links,
                    lastupdated=__lastupdated__)
    else:
        return page_not_authorized(req=req, text=auth[1], navtrail=navtrail_previous_links)
def showrankdetails(req, ln=CFG_SITE_LANG, rnkID=''):
    """Render the read-only details page for a single rank method."""
    breadcrumbs = brc.getnavtrail() + """> <a class="navtrail" href="%s/admin/bibrank/bibrankadmin.py/">BibRank Admin Interface</a> """ % (CFG_SITE_URL)
    try:
        uid = getUid(req)
    except:
        return error_page('Error', req)
    auth = brc.check_user(req, 'cfgbibrank')
    if auth[0]:
        # Authorization failed: auth[1] carries the explanatory message.
        return page_not_authorized(req=req, text=auth[1], navtrail=breadcrumbs)
    return page(title="Rank method details",
                body=brc.perform_showrankdetails(rnkID=rnkID, ln=ln),
                uid=uid,
                language=ln,
                req=req,
                navtrail=breadcrumbs,
                lastupdated=__lastupdated__)
| gpl-2.0 |
aam-at/tensorflow | tensorflow/python/keras/tests/model_architectures_test.py | 7 | 3745 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Tests for saving/loading function for keras Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import optimizer_v1
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.tests import model_architectures
from tensorflow.python.platform import test
@keras_parameterized.run_with_all_saved_model_formats
class TestModelArchitectures(keras_parameterized.TestCase):
  # Round-trips every architecture in model_architectures.ALL_MODELS through
  # save/load (once per saved-model format, via the class decorator) and
  # checks that predictions are unchanged after reloading.

  def _save_model_dir(self, dirname='saved_model'):
    # Fresh temp directory per test; scheduled for removal on cleanup.
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    return os.path.join(temp_dir, dirname)

  def get_test_data(self, input_shape, target_shape):
    """Generate test dataset for testing."""
    # A list of shapes means a multi-input (or multi-output) model: build
    # one random batch of size 2 per input/output, dropping the leading
    # (batch) dimension of each declared shape.
    if isinstance(input_shape, list):
      x = [
          np.random.random((2,) + input_shape[i][1:])
          for i in range(len(input_shape))
      ]
    else:
      x = np.random.random((2,) + input_shape[1:])
    if isinstance(target_shape, list):
      y = [
          np.random.random((2,) + target_shape[i][1:])
          for i in range(len(target_shape))
      ]
    else:
      y = np.random.random((2,) + target_shape[1:])
    return x, y

  def get_custom_objects(self):
    """Define custom_objects passed to load_model for deserialization."""

    class CustomOpt(optimizer_v1.SGD):
      pass

    def custom_loss(y_true, y_pred):
      return keras.losses.mse(y_true, y_pred)

    return {'CustomOpt': CustomOpt,
            'custom_loss': custom_loss}

  @parameterized.named_parameters(*model_architectures.ALL_MODELS)
  def test_basic_saving_and_loading(self, model_fn):
    save_format = testing_utils.get_save_format()
    custom_objects = self.get_custom_objects()
    if 'subclassed_in_functional' in model_fn.__name__:
      # The nested subclassed layer must be resolvable by name on load.
      subclass_custom_objects = {
          'MySubclassModel':
              model_architectures.MySubclassModel,
      }
      custom_objects.update(subclass_custom_objects)
    elif ('subclassed' in model_fn.__name__ and save_format == 'h5'):
      # Pure subclassed models cannot be serialized to HDF5.
      self.skipTest('Saving the model to HDF5 format requires the model to be '
                    'a Functional model or a Sequential model.')
    saved_model_dir = self._save_model_dir()
    model_data = model_fn()
    model = model_data.model
    x_test, y_test = self.get_test_data(
        model_data.input_shape, model_data.target_shape)
    # One training step so optimizer/weight state is non-trivial.
    model.compile('rmsprop', 'mse')
    model.train_on_batch(x_test, y_test)
    # Save model.
    out1 = model.predict(x_test)
    keras.models.save_model(model, saved_model_dir, save_format=save_format)
    # Load model.
    loaded_model = keras.models.load_model(
        saved_model_dir,
        custom_objects=custom_objects)
    out2 = loaded_model.predict(x_test)
    self.assertAllClose(out1, out2, atol=1e-05)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
webgeodatavore/django | django/contrib/gis/db/backends/postgis/models.py | 396 | 2158 | """
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class PostGISGeometryColumns(models.Model):
    """
    The 'geometry_columns' table from the PostGIS. See the PostGIS
    documentation at Ch. 4.3.2.
    On PostGIS 2, this is a view.
    """
    f_table_catalog = models.CharField(max_length=256)
    f_table_schema = models.CharField(max_length=256)
    f_table_name = models.CharField(max_length=256)
    f_geometry_column = models.CharField(max_length=256)
    coord_dimension = models.IntegerField()
    srid = models.IntegerField(primary_key=True)
    type = models.CharField(max_length=30)

    class Meta:
        app_label = 'gis'
        db_table = 'geometry_columns'
        # Maps onto an existing PostGIS system table/view; Django must not
        # create or migrate it.
        managed = False

    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the feature table
        name.
        """
        return 'f_table_name'

    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the feature
        geometry column.
        """
        return 'f_geometry_column'

    def __str__(self):
        # e.g. "my_table.geom - 2D POINT field (SRID: 4326)"
        return "%s.%s - %dD %s field (SRID: %d)" % \
               (self.f_table_name, self.f_geometry_column,
                self.coord_dimension, self.type, self.srid)
class PostGISSpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    The 'spatial_ref_sys' table from PostGIS. See the PostGIS
    documentation at Ch. 4.2.1.
    """
    srid = models.IntegerField(primary_key=True)
    auth_name = models.CharField(max_length=256)
    auth_srid = models.IntegerField()
    srtext = models.CharField(max_length=2048)
    proj4text = models.CharField(max_length=2048)

    class Meta:
        app_label = 'gis'
        db_table = 'spatial_ref_sys'
        # PostGIS system table; not managed by Django migrations.
        managed = False

    @property
    def wkt(self):
        # WKT representation expected by SpatialRefSysMixin.
        return self.srtext

    @classmethod
    def wkt_col(cls):
        # Name of the column holding the WKT string.
        return 'srtext'
| bsd-3-clause |
kvar/ansible | lib/ansible/modules/cloud/centurylink/clc_firewall_policy.py | 21 | 21405 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_firewall_policy
short_description: Create/delete/update firewall policies
description:
- Create or delete or update firewall policies on Centurylink Cloud
version_added: "2.0"
options:
location:
description:
- Target datacenter for the firewall policy
required: True
state:
description:
- Whether to create or delete the firewall policy
default: present
choices: ['present', 'absent']
source:
description:
- The list of source addresses for traffic on the originating firewall.
This is required when state is 'present'
destination:
description:
- The list of destination addresses for traffic on the terminating firewall.
This is required when state is 'present'
ports:
description:
- The list of ports associated with the policy.
TCP and UDP can take in single ports or port ranges.
choices: ['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456']
firewall_policy_id:
description:
- Id of the firewall policy. This is required to update or delete an existing firewall policy
source_account_alias:
description:
- CLC alias for the source account
required: True
destination_account_alias:
description:
- CLC alias for the destination account
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
type: bool
default: 'yes'
enabled:
description:
- Whether the firewall policy is enabled or disabled
choices: [True, False]
default: 'yes'
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
---
- name: Create Firewall Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create / Verify an Firewall Policy at CenturyLink Cloud
clc_firewall:
source_account_alias: WFAD
location: VA1
state: present
source: 10.128.216.0/24
destination: 10.128.216.0/24
ports: Any
destination_account_alias: WFAD
---
- name: Delete Firewall Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete an Firewall Policy at CenturyLink Cloud
clc_firewall:
source_account_alias: WFAD
location: VA1
state: absent
firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
'''
RETURN = '''
firewall_policy_id:
description: The fire wall policy id
returned: success
type: str
sample: fc36f1bfd47242e488a9c44346438c05
firewall_policy:
description: The fire wall policy information
returned: success
type: dict
sample:
{
"destination":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"destinationAccount":"wfad",
"enabled":true,
"id":"fc36f1bfd47242e488a9c44346438c05",
"links":[
{
"href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
"rel":"self",
"verbs":[
"GET",
"PUT",
"DELETE"
]
}
],
"ports":[
"any"
],
"source":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"status":"active"
}
'''
__version__ = '${version}'
import os
import traceback
from ansible.module_utils.six.moves.urllib.parse import urlparse
from time import sleep
from distutils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcFirewallPolicy:
clc = None
    def __init__(self, module):
        """
        Construct module

        :param module: the AnsibleModule driving this run; used for
            parameter access and fail_json error reporting.
        """
        self.clc = clc_sdk
        self.module = module
        self.firewall_dict = {}
        # Fail early when the optional third-party SDKs could not be
        # imported at module load time (tracebacks were captured alongside
        # the CLC_FOUND/REQUESTS_FOUND flags).
        if not CLC_FOUND:
            self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
        if not REQUESTS_FOUND:
            self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
        if requests.__version__ and LooseVersion(
                requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')
        # Tag outgoing SDK requests with this module's user agent
        # (_set_user_agent is defined elsewhere in this class).
        self._set_user_agent(self.clc)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
location=dict(required=True),
source_account_alias=dict(required=True, default=None),
destination_account_alias=dict(default=None),
firewall_policy_id=dict(default=None),
ports=dict(default=None, type='list'),
source=dict(default=None, type='list'),
destination=dict(default=None, type='list'),
wait=dict(default=True),
state=dict(default='present', choices=['present', 'absent']),
enabled=dict(default=True, choices=[True, False])
)
return argument_spec
    def process_request(self):
        """
        Execute the main code path, and handle the request
        :return: none
        """
        changed = False
        firewall_policy = None
        # Pull every module parameter into locals; they are re-packed into
        # firewall_dict below for the ensure_* helpers.
        location = self.module.params.get('location')
        source_account_alias = self.module.params.get('source_account_alias')
        destination_account_alias = self.module.params.get(
            'destination_account_alias')
        firewall_policy_id = self.module.params.get('firewall_policy_id')
        ports = self.module.params.get('ports')
        source = self.module.params.get('source')
        destination = self.module.params.get('destination')
        wait = self.module.params.get('wait')
        state = self.module.params.get('state')
        enabled = self.module.params.get('enabled')
        self.firewall_dict = {
            'location': location,
            'source_account_alias': source_account_alias,
            'destination_account_alias': destination_account_alias,
            'firewall_policy_id': firewall_policy_id,
            'ports': ports,
            'source': source,
            'destination': destination,
            'wait': wait,
            'state': state,
            'enabled': enabled}
        # Credentials come from environment variables; fails the module
        # run if none are available.
        self._set_clc_credentials_from_env()
        if state == 'absent':
            changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
                source_account_alias, location, self.firewall_dict)
        elif state == 'present':
            changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
                source_account_alias, location, self.firewall_dict)
        # exit_json terminates the module run with the result payload.
        return self.module.exit_json(
            changed=changed,
            firewall_policy_id=firewall_policy_id,
            firewall_policy=firewall_policy)
@staticmethod
def _get_policy_id_from_response(response):
"""
Method to parse out the policy id from creation response
:param response: response from firewall creation API call
:return: policy_id: firewall policy id from creation call
"""
url = response.get('links')[0]['href']
path = urlparse(url).path
path_list = os.path.split(path)
policy_id = path_list[-1]
return policy_id
    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables

        Precedence: an explicit API token plus account alias wins over a
        username/password pair; if neither combination is fully supplied
        the module run is failed.
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)
        if api_url:
            # Allow pointing at a non-production CLC endpoint.
            self.clc.defaults.ENDPOINT_URL_V2 = api_url
        if v2_api_token and clc_alias:
            # Pre-authenticated path: install the token directly on the SDK
            # (private attributes of the clc-sdk package).
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                    "environment variables")
def _ensure_firewall_policy_is_present(
        self,
        source_account_alias,
        location,
        firewall_dict):
    """
    Ensures that a given firewall policy is present
    :param source_account_alias: the source account alias for the firewall policy
    :param location: datacenter of the firewall policy
    :param firewall_dict: dictionary of request parameters for firewall policy
    :return: (changed, firewall_policy_id, firewall_policy)
        changed: flag for if a change occurred
        firewall_policy_id: the firewall policy id that was created/updated
        firewall_policy: The firewall_policy object
    """
    firewall_policy = None
    firewall_policy_id = firewall_dict.get('firewall_policy_id')
    if firewall_policy_id is None:
        # No id supplied: this is a create. In check mode we skip the
        # API call but still report a change.
        if not self.module.check_mode:
            response = self._create_firewall_policy(
                source_account_alias,
                location,
                firewall_dict)
            firewall_policy_id = self._get_policy_id_from_response(
                response)
        changed = True
    else:
        # An id was supplied: fetch the existing policy and update it
        # only when the playbook parameters differ from it.
        firewall_policy = self._get_firewall_policy(
            source_account_alias, location, firewall_policy_id)
        if not firewall_policy:
            return self.module.fail_json(
                msg='Unable to find the firewall policy id : {0}'.format(
                    firewall_policy_id))
        changed = self._compare_get_request_with_dict(
            firewall_policy,
            firewall_dict)
        if not self.module.check_mode and changed:
            self._update_firewall_policy(
                source_account_alias,
                location,
                firewall_policy_id,
                firewall_dict)
    # Re-read (and optionally wait on) the policy so the caller gets its
    # post-change state; skipped in create-under-check-mode since there
    # is no id yet.
    if changed and firewall_policy_id:
        firewall_policy = self._wait_for_requests_to_complete(
            source_account_alias,
            location,
            firewall_policy_id)
    return changed, firewall_policy_id, firewall_policy
def _ensure_firewall_policy_is_absent(
self,
source_account_alias,
location,
firewall_dict):
"""
Ensures that a given firewall policy is removed if present
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: firewall policy to delete
:return: (changed, firewall_policy_id, response)
changed: flag for if a change occurred
firewall_policy_id: the firewall policy id that was deleted
response: response from CLC API call
"""
changed = False
response = []
firewall_policy_id = firewall_dict.get('firewall_policy_id')
result = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
if result:
if not self.module.check_mode:
response = self._delete_firewall_policy(
source_account_alias,
location,
firewall_policy_id)
changed = True
return changed, firewall_policy_id, response
def _create_firewall_policy(
self,
source_account_alias,
location,
firewall_dict):
"""
Creates the firewall policy for the given account alias
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: dictionary of request parameters for firewall policy
:return: response from CLC API call
"""
payload = {
'destinationAccount': firewall_dict.get('destination_account_alias'),
'source': firewall_dict.get('source'),
'destination': firewall_dict.get('destination'),
'ports': firewall_dict.get('ports')}
try:
response = self.clc.v2.API.Call(
'POST', '/v2-experimental/firewallPolicies/%s/%s' %
(source_account_alias, location), payload)
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to create firewall policy. %s" %
str(e.response_text))
return response
def _delete_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id):
"""
Deletes a given firewall policy for an account alias in a datacenter
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: firewall policy id to delete
:return: response: response from CLC API call
"""
try:
response = self.clc.v2.API.Call(
'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias, location, firewall_policy_id))
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to delete the firewall policy id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
def _update_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id,
firewall_dict):
"""
Updates a firewall policy for a given datacenter and account alias
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: firewall policy id to update
:param firewall_dict: dictionary of request parameters for firewall policy
:return: response: response from CLC API call
"""
try:
response = self.clc.v2.API.Call(
'PUT',
'/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias,
location,
firewall_policy_id),
firewall_dict)
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to update the firewall policy id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
@staticmethod
def _compare_get_request_with_dict(response, firewall_dict):
"""
Helper method to compare the json response for getting the firewall policy with the request parameters
:param response: response from the get method
:param firewall_dict: dictionary of request parameters for firewall policy
:return: changed: Boolean that returns true if there are differences between
the response parameters and the playbook parameters
"""
changed = False
response_dest_account_alias = response.get('destinationAccount')
response_enabled = response.get('enabled')
response_source = response.get('source')
response_dest = response.get('destination')
response_ports = response.get('ports')
request_dest_account_alias = firewall_dict.get(
'destination_account_alias')
request_enabled = firewall_dict.get('enabled')
if request_enabled is None:
request_enabled = True
request_source = firewall_dict.get('source')
request_dest = firewall_dict.get('destination')
request_ports = firewall_dict.get('ports')
if (
response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
response_enabled != request_enabled) or (
response_source and response_source != request_source) or (
response_dest and response_dest != request_dest) or (
response_ports and response_ports != request_ports):
changed = True
return changed
def _get_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id):
"""
Get back details for a particular firewall policy
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: id of the firewall policy to get
:return: response - The response from CLC API call
"""
response = None
try:
response = self.clc.v2.API.Call(
'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias, location, firewall_policy_id))
except APIFailedResponse as e:
if e.response_status_code != 404:
self.module.fail_json(
msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
def _wait_for_requests_to_complete(
self,
source_account_alias,
location,
firewall_policy_id,
wait_limit=50):
"""
Waits until the CLC requests are complete if the wait argument is True
:param source_account_alias: The source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: The firewall policy id
:param wait_limit: The number of times to check the status for completion
:return: the firewall_policy object
"""
wait = self.module.params.get('wait')
count = 0
firewall_policy = None
while wait:
count += 1
firewall_policy = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
status = firewall_policy.get('status')
if status == 'active' or count > wait_limit:
wait = False
else:
# wait for 2 seconds
sleep(2)
return firewall_policy
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
    """
    The main function. Instantiates the module and calls process_request.
    :return: none
    """
    # Build the Ansible module from the class's argument spec; check
    # mode is supported by the request-processing methods.
    module = AnsibleModule(
        argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
        supports_check_mode=True)
    clc_firewall = ClcFirewallPolicy(module)
    clc_firewall.process_request()


if __name__ == '__main__':
    main()
| gpl-3.0 |
burtwalsh/mongo-connector | mongo-connector/mongo_connector.py | 7 | 18958 | # Copyright 2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file will be used with PyPi in order to package and distribute the final
# product.
"""Discovers the mongo cluster and starts the connector.
"""
import inspect
import logging
import oplog_manager
import optparse
import os
import pymongo
import re
import shutil
import subprocess
import sys
import threading
import time
import util
import imp
from locking_dict import LockingDict
try:
import simplejson as json
except:
import json
class Connector(threading.Thread):
    """Checks the cluster for shards to tail.

    Runs as a daemon-style thread: discovers whether the target mongo
    deployment is a replica set or a sharded cluster and spawns one
    OplogThread per primary, periodically persisting oplog progress.
    """

    def __init__(self, address, oplog_checkpoint, target_url, ns_set,
                 u_key, auth_key, doc_manager=None, auth_username=None):
        file = inspect.getfile(inspect.currentframe())
        cmd_folder = os.path.realpath(os.path.abspath(os.path.split(file)[0]))

        # A doc manager path loads a custom implementation; otherwise
        # fall back to the bundled DocManager.
        if doc_manager is not None:
            doc_manager = imp.load_source('DocManager', doc_manager)
        else:
            from doc_manager import DocManager
        time.sleep(1)
        super(Connector, self).__init__()

        #can_run is set to false when we join the thread
        self.can_run = True

        #The name of the file that stores the progress of the OplogThreads
        self.oplog_checkpoint = oplog_checkpoint

        #main address - either mongos for sharded setups or a primary otherwise
        self.address = address

        #The URL of the target system
        self.target_url = target_url

        #The set of relevant namespaces to consider
        self.ns_set = ns_set

        #The key that is a unique document identifier for the target system.
        #Not necessarily the mongo unique key.
        self.u_key = u_key

        #Password for authentication
        self.auth_key = auth_key

        #Username for authentication
        self.auth_username = auth_username

        #The set of OplogThreads created
        self.shard_set = {}

        #Dict of OplogThread/timestamp pairs to record progress
        self.oplog_progress = LockingDict()

        try:
            if target_url is None:
                if doc_manager is None:  # imported using from... import
                    self.doc_manager = DocManager(unique_key=u_key)
                else:  # imported using load source
                    self.doc_manager = doc_manager.DocManager(unique_key=u_key)
            else:
                if doc_manager is None:
                    self.doc_manager = DocManager(self.target_url,
                                                  unique_key=u_key)
                else:
                    self.doc_manager = doc_manager.DocManager(self.target_url,
                                                              unique_key=u_key)
        except SystemError:
            logging.critical("MongoConnector: Bad target system URL!")
            self.can_run = False
            return

        if self.oplog_checkpoint is not None:
            if not os.path.exists(self.oplog_checkpoint):
                # NOTE(review): stray backtick in this message looks like a
                # typo ("MongoC`onnector") — confirm intended.
                info_str = "MongoC`onnector: Can't find OplogProgress file!"
                logging.critical(info_str)
                self.doc_manager.stop()
                self.can_run = False

    def join(self):
        """ Joins thread, stops it from running
        """
        self.can_run = False
        self.doc_manager.stop()
        threading.Thread.join(self)

    def write_oplog_progress(self):
        """ Writes oplog progress to file provided by user
        """

        if self.oplog_checkpoint is None:
            return None

        # write to temp file
        backup_file = self.oplog_checkpoint + '.backup'
        os.rename(self.oplog_checkpoint, backup_file)

        # for each of the threads write to file
        with open(self.oplog_checkpoint, 'w') as dest:
            with self.oplog_progress as oplog_prog:

                oplog_dict = oplog_prog.get_dict()
                for oplog, ts in oplog_dict.items():
                    oplog_str = str(oplog)
                    timestamp = util.bson_ts_to_long(ts)
                    json_str = json.dumps([oplog_str, timestamp])
                    try:
                        dest.write(json_str)
                    except IOError:
                        # Basically wipe the file, copy from backup
                        dest.truncate()
                        with open(backup_file, 'r') as backup:
                            shutil.copyfile(backup, dest)
                        break

        os.remove(self.oplog_checkpoint + '.backup')

    def read_oplog_progress(self):
        """Reads oplog progress from file provided by user.
        This method is only called once before any threads are spawned.
        """

        if self.oplog_checkpoint is None:
            return None

        # Check for empty file
        try:
            if os.stat(self.oplog_checkpoint).st_size == 0:
                logging.info("MongoConnector: Empty oplog progress file.")
                return None
        except OSError:
            return None

        source = open(self.oplog_checkpoint, 'r')
        try:
            data = json.load(source)
        except ValueError:  # empty file
            err_msg = "MongoConnector: Can't read oplog progress file."
            reason = "It may be empty or corrupt."
            logging.info("%s %s" % (err_msg, reason))
            source.close()
            return None

        source.close()

        # The file holds a flat [oplog_str, ts, oplog_str, ts, ...] list;
        # walk it in pairs.
        count = 0
        oplog_dict = self.oplog_progress.get_dict()
        for count in range(0, len(data), 2):
            oplog_str = data[count]
            ts = data[count + 1]
            oplog_dict[oplog_str] = util.long_to_bson_ts(ts)
            #stored as bson_ts

    def run(self):
        """Discovers the mongo cluster and creates a thread for each primary.
        """
        main_conn = pymongo.Connection(self.address)
        if self.auth_key is not None:
            main_conn['admin'].authenticate(self.auth_username, self.auth_key)
        self.read_oplog_progress()
        conn_type = None

        # "isdbgrid" only succeeds against a mongos; a failure means we
        # are talking to a plain replica set.
        try:
            main_conn.admin.command("isdbgrid")
        except pymongo.errors.OperationFailure:
            conn_type = "REPLSET"

        if conn_type == "REPLSET":
            #non sharded configuration
            oplog_coll = main_conn['local']['oplog.rs']

            prim_admin = main_conn.admin
            repl_set = prim_admin.command("replSetGetStatus")['set']
            host = main_conn.host
            port = main_conn.port
            address = host + ":" + str(port)

            oplog = oplog_manager.OplogThread(main_conn, address, oplog_coll,
                                              False, self.doc_manager,
                                              self.oplog_progress,
                                              self.ns_set, self.auth_key,
                                              self.auth_username,
                                              repl_set=repl_set)
            self.shard_set[0] = oplog
            logging.info('MongoConnector: Starting connection thread %s' %
                         main_conn)
            oplog.start()

            # Babysit the single oplog thread, flushing progress once a
            # second until asked to stop.
            while self.can_run:
                if not self.shard_set[0].running:
                    err_msg = "MongoConnector: OplogThread"
                    set = str(self.shard_set[0])
                    effect = "unexpectedly stopped! Shutting down."
                    logging.error("%s %s %s" % (err_msg, set, effect))
                    self.oplog_thread_join()
                    self.doc_manager.stop()
                    return

                self.write_oplog_progress()
                time.sleep(1)

        else:  # sharded cluster
            while self.can_run is True:

                shard_coll = main_conn['config']['shards']
                shard_cursor = shard_coll.find()

                for shard_doc in shard_cursor:
                    shard_id = shard_doc['_id']
                    if shard_id in self.shard_set:
                        # Shard already tracked: just health-check its
                        # thread and persist progress.
                        if not self.shard_set[shard_id].running:
                            err_msg = "MongoConnector: OplogThread"
                            set = str(self.shard_set[shard_id])
                            effect = "unexpectedly stopped! Shutting down"
                            logging.error("%s %s %s" % (err_msg, set, effect))
                            self.oplog_thread_join()
                            self.doc_manager.stop()
                            return

                        self.write_oplog_progress()
                        time.sleep(1)
                        continue
                    # New shard: its host string must be "replSet/host1,..."
                    try:
                        repl_set, hosts = shard_doc['host'].split('/')
                    except ValueError:
                        cause = "The system only uses replica sets!"
                        logging.error("MongoConnector: %s", cause)
                        self.oplog_thread_join()
                        self.doc_manager.stop()
                        return

                    shard_conn = pymongo.Connection(hosts, replicaset=repl_set)
                    oplog_coll = shard_conn['local']['oplog.rs']
                    oplog = oplog_manager.OplogThread(shard_conn, self.address,
                                                      oplog_coll, True,
                                                      self.doc_manager,
                                                      self.oplog_progress,
                                                      self.ns_set,
                                                      self.auth_key,
                                                      self.auth_username)
                    self.shard_set[shard_id] = oplog
                    msg = "Starting connection thread"
                    logging.info("MongoConnector: %s %s" % (msg, shard_conn))
                    oplog.start()

        self.oplog_thread_join()

    def oplog_thread_join(self):
        """Stops all the OplogThreads
        """
        logging.info('MongoConnector: Stopping all OplogThreads')
        for thread in self.shard_set.values():
            thread.join()
if __name__ == '__main__':
    # Runs mongo connector: parse options, configure logging, start the
    # Connector thread and babysit it until it exits or is interrupted.

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logger.info('Beginning Mongo Connector')

    parser = optparse.OptionParser()

    #-m is for the main address, which is a host:port pair, ideally of the
    #mongos. For non sharded clusters, it can be the primary.
    parser.add_option("-m", "--main", action="store", type="string",
                      dest="main_addr", default="localhost:27217",
                      help="""Specify the main address, which is a"""
                      """ host:port pair. For sharded clusters, this"""
                      """ should be the mongos address. For individual"""
                      """ replica sets, supply the address of the"""
                      """ primary. For example, `-m localhost:27217`"""
                      """ would be a valid argument to `-m`. Don't use"""
                      """ quotes around the address""")

    #-o is to specify the oplog-config file. This file is used by the system
    #to store the last timestamp read on a specific oplog. This allows for
    #quick recovery from failure.
    parser.add_option("-o", "--oplog-ts", action="store", type="string",
                      dest="oplog_config", default="config.txt",
                      help="""Specify the name of the file that stores the"""
                      """oplog progress timestamps. """
                      """This file is used by the system to store the last"""
                      """timestamp read on a specific oplog. This allows"""
                      """ for quick recovery from failure. By default this"""
                      """ is `config.txt`, which starts off empty. An empty"""
                      """ file causes the system to go through all the mongo"""
                      """ oplog and sync all the documents. Whenever the """
                      """cluster is restarted, it is essential that the """
                      """oplog-timestamp config file be emptied - otherwise"""
                      """ the connector will miss some documents and behave"""
                      """incorrectly.""")

    #-t is to specify the URL to the target system being used.
    parser.add_option("-t", "--target-url", action="store", type="string",
                      dest="url", default=None,
                      help="""Specify the URL to the target system being """
                      """used. For example, if you were using Solr out of """
                      """the box, you could use '-t """
                      """ http://localhost:8080/solr' with the """
                      """ SolrDocManager to establish a proper connection."""
                      """ Don't use quotes around address."""
                      """If target system doesn't need URL, don't specify""")

    #-n is to specify the namespaces we want to consider. The default
    #considers all the namespaces
    parser.add_option("-n", "--namespace-set", action="store", type="string",
                      dest="ns_set", default=None, help=
                      """Used to specify the namespaces we want to """
                      """ consider. For example, if we wished to store all """
                      """ documents from the test.test and alpha.foo """
                      """ namespaces, we could use `-n test.test,alpha.foo`."""
                      """ The default is to consider all the namespaces, """
                      """ excluding the system and config databases, and """
                      """ also ignoring the "system.indexes" collection in """
                      """any database.""")

    #-u is to specify the mongoDB field that will serve as the unique key
    #for the target system,
    parser.add_option("-u", "--unique-key", action="store", type="string",
                      dest="u_key", default="_id", help=
                      """Used to specify the mongoDB field that will serve"""
                      """as the unique key for the target system"""
                      """The default is "_id", which can be noted by """
                      """ '-u _id'""")

    #-f is to specify the authentication key file. This file is used by mongos
    #to authenticate connections to the shards, and we'll use it in the oplog
    #threads.
    parser.add_option("-f", "--password-file", action="store", type="string",
                      dest="auth_file", default=None, help=
                      """ Used to store the password for authentication."""
                      """ Use this option if you wish to specify a"""
                      """ username and password but don't want to"""
                      """ type in the password. The contents of this"""
                      """ file should be the password for the admin user.""")

    #-p is to specify the password used for authentication.
    parser.add_option("-p", "--password", action="store", type="string",
                      dest="password", default=None, help=
                      """ Used to specify the password."""
                      """ This is used by mongos to authenticate"""
                      """ connections to the shards, and in the"""
                      """ oplog threads. If authentication is not used, then"""
                      """ this field can be left empty as the default """)

    #-a is to specify the username for authentication.
    parser.add_option("-a", "--admin-username", action="store", type="string",
                      dest="admin_name", default="__system", help=
                      """Used to specify the username of an admin user to"""
                      """authenticate with. To use authentication, the user"""
                      """must specify both an admin username and a keyFile."""
                      """The default username is '__system'""")

    #-d is to specify the doc manager file.
    parser.add_option("-d", "--docManager", action="store", type="string",
                      dest="doc_manager", default=None, help=
                      """Used to specify the doc manager file that"""
                      """ is going to be used. You should send the"""
                      """ path of the file you want to be used."""
                      """ By default, it will use the """
                      """ doc_manager_simulator.py file. It is"""
                      """ recommended that all doc manager files be"""
                      """ kept in the doc_managers folder in"""
                      """ mongo-connector. For more information"""
                      """ about making your own doc manager,"""
                      """ see Doc Manager section.""")

    (options, args) = parser.parse_args()

    if options.doc_manager is None:
        logger.info('No doc manager specified, using simulator.')

    try:
        if options.ns_set is None:
            ns_set = []
        else:
            ns_set = options.ns_set.split(',')
    except Exception:
        logger.error('Namespaces must be separated by commas!')
        sys.exit(1)

    key = None
    if options.auth_file is not None:
        try:
            # Close the keyfile handle deterministically, and actually
            # assign the stripped result: re.sub returns a new string
            # (strings are immutable), so the previous code discarded
            # the whitespace-stripped key.
            with open(options.auth_file) as pwd_file:
                key = pwd_file.read()
            key = re.sub(r'\s', '', key)
        except Exception:
            logger.error('Could not parse password authentication file!')
            sys.exit(1)

    if options.password is not None:
        key = options.password

    if key is None and options.admin_name != "__system":
        logger.error("Admin username specified without password!")
        sys.exit(1)

    ct = Connector(options.main_addr, options.oplog_config, options.url,
                   ns_set, options.u_key, key, options.doc_manager,
                   auth_username=options.admin_name)
    ct.start()

    # Poll the connector thread; a KeyboardInterrupt triggers a clean
    # shutdown via Connector.join().
    while True:
        try:
            time.sleep(3)
            if not ct.is_alive():
                break
        except KeyboardInterrupt:
            logging.info("Caught keyboard interrupt, exiting!")
            ct.join()
            break
kimoonkim/spark | examples/src/main/python/ml/estimator_transformer_param_example.py | 123 | 3952 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Estimator Transformer Param Example.
"""
from __future__ import print_function
# $example on$
from pyspark.ml.linalg import Vectors
from pyspark.ml.classification import LogisticRegression
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("EstimatorTransformerParamExample")\
.getOrCreate()
# $example on$
# Prepare training data from a list of (label, features) tuples.
training = spark.createDataFrame([
(1.0, Vectors.dense([0.0, 1.1, 0.1])),
(0.0, Vectors.dense([2.0, 1.0, -1.0])),
(0.0, Vectors.dense([2.0, 1.3, 1.0])),
(1.0, Vectors.dense([0.0, 1.2, -0.5]))], ["label", "features"])
# Create a LogisticRegression instance. This instance is an Estimator.
lr = LogisticRegression(maxIter=10, regParam=0.01)
# Print out the parameters, documentation, and any default values.
print("LogisticRegression parameters:\n" + lr.explainParams() + "\n")
# Learn a LogisticRegression model. This uses the parameters stored in lr.
model1 = lr.fit(training)
# Since model1 is a Model (i.e., a transformer produced by an Estimator),
# we can view the parameters it used during fit().
# This prints the parameter (name: value) pairs, where names are unique IDs for this
# LogisticRegression instance.
print("Model 1 was fit using parameters: ")
print(model1.extractParamMap())
# We may alternatively specify parameters using a Python dictionary as a paramMap
paramMap = {lr.maxIter: 20}
paramMap[lr.maxIter] = 30 # Specify 1 Param, overwriting the original maxIter.
paramMap.update({lr.regParam: 0.1, lr.threshold: 0.55}) # Specify multiple Params.
# You can combine paramMaps, which are python dictionaries.
paramMap2 = {lr.probabilityCol: "myProbability"} # Change output column name
paramMapCombined = paramMap.copy()
paramMapCombined.update(paramMap2)
# Now learn a new model using the paramMapCombined parameters.
# paramMapCombined overrides all parameters set earlier via lr.set* methods.
model2 = lr.fit(training, paramMapCombined)
print("Model 2 was fit using parameters: ")
print(model2.extractParamMap())
# Prepare test data
test = spark.createDataFrame([
(1.0, Vectors.dense([-1.0, 1.5, 1.3])),
(0.0, Vectors.dense([3.0, 2.0, -0.1])),
(1.0, Vectors.dense([0.0, 2.2, -1.5]))], ["label", "features"])
# Make predictions on test data using the Transformer.transform() method.
# LogisticRegression.transform will only use the 'features' column.
# Note that model2.transform() outputs a "myProbability" column instead of the usual
# 'probability' column since we renamed the lr.probabilityCol parameter previously.
prediction = model2.transform(test)
result = prediction.select("features", "label", "myProbability", "prediction") \
.collect()
for row in result:
print("features=%s, label=%s -> prob=%s, prediction=%s"
% (row.features, row.label, row.myProbability, row.prediction))
# $example off$
spark.stop()
| apache-2.0 |
patilav/citydash | server/parcel/migrations/0001_initial.py | 3 | 1719 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
    """Initial migration: declare the Parcel model.

    With ``managed = False`` Django creates no table for it — the model
    maps onto the pre-existing ``parcel`` table.
    """

    # First migration of the app: no prior migrations to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Parcel',
            fields=[
                ('gid', models.AutoField(serialize=False, primary_key=True)),
                ('shape_leng', models.DecimalField(null=True, decimal_places=65535, blank=True, max_digits=65535)),
                ('shape_area', models.DecimalField(null=True, decimal_places=65535, blank=True, max_digits=65535)),
                ('map_par_id', models.CharField(null=True, blank=True, max_length=26)),
                ('loc_id', models.CharField(null=True, blank=True, max_length=18)),
                ('poly_type', models.CharField(null=True, blank=True, max_length=15)),
                ('map_no', models.CharField(null=True, blank=True, max_length=4)),
                ('source', models.CharField(null=True, blank=True, max_length=15)),
                ('plan_id', models.CharField(null=True, blank=True, max_length=40)),
                ('last_edit', models.IntegerField(null=True, blank=True)),
                ('bnd_chk', models.CharField(null=True, blank=True, max_length=2)),
                ('no_match', models.CharField(null=True, blank=True, max_length=1)),
                ('town_id', models.SmallIntegerField(null=True, blank=True)),
                # NOTE(review): srid=97406 is a non-EPSG (custom) spatial
                # reference id — confirm it exists in the database's
                # spatial_ref_sys table.
                ('shape', django.contrib.gis.db.models.fields.MultiPolygonField(null=True, srid=97406, blank=True)),
            ],
            options={
                'db_table': 'parcel',
                'managed': False,
            },
        ),
    ]
| mit |
dparlevliet/zelenka-report-storage | server-local/twisted/trial/_dist/worker.py | 35 | 9030 | # -*- test-case-name: twisted.trial._dist.test.test_worker -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module implements the worker classes.
@since: 12.3
"""
import os
from zope.interface import implements
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.interfaces import ITransport, IAddress
from twisted.internet.defer import Deferred
from twisted.protocols.amp import AMP
from twisted.python.failure import Failure
from twisted.python.reflect import namedObject
from twisted.trial.unittest import Todo
from twisted.trial.runner import TrialSuite, TestLoader
from twisted.trial._dist import workercommands, managercommands
from twisted.trial._dist import _WORKER_AMP_STDIN, _WORKER_AMP_STDOUT
from twisted.trial._dist.workerreporter import WorkerReporter
class WorkerProtocol(AMP):
    """
    The worker-side trial distributed protocol.
    """

    def __init__(self, forceGarbageCollection=False):
        self._loader = TestLoader()
        self._result = WorkerReporter(self)
        self._forceGarbageCollection = forceGarbageCollection

    def run(self, testCase):
        """
        Run a test case by name.

        @param testCase: fully-qualified name of the test to load and run.
        @return: a C{{'success': True}} AMP response dictionary.
        """
        case = self._loader.loadByName(testCase)
        suite = TrialSuite([case], self._forceGarbageCollection)
        suite.run(self._result)
        return {'success': True}

    # Register run() as the responder for the Run AMP command at class
    # definition time.
    workercommands.Run.responder(run)

    def start(self, directory):
        """
        Set up the worker, moving into given directory for tests to run in
        them.
        """
        os.chdir(directory)
        return {'success': True}

    # Register start() as the responder for the Start AMP command.
    workercommands.Start.responder(start)
class LocalWorkerAMP(AMP):
    """
    Local implementation of the manager commands.

    Receives result notifications from a remote worker and forwards
    them to the local reporter (C{self._result}) for the test case
    currently being run (C{self._testCase}).
    """

    def addSuccess(self, testName):
        """
        Add a success to the reporter.
        """
        self._result.addSuccess(self._testCase)
        return {'success': True}

    managercommands.AddSuccess.responder(addSuccess)

    def _buildFailure(self, error, errorClass, frames):
        """
        Helper to build a C{Failure} with some traceback.

        @param error: An C{Exception} instance.

        @param errorClass: The class name of the C{error} class.

        @param frames: A flat list of strings representing the information
            needed to approximately rebuild C{Failure} frames.

        @return: A L{Failure} instance with enough information about a test
           error.
        """
        errorType = namedObject(errorClass)
        failure = Failure(error, errorType)
        # frames is flattened as [name, file, line, name, file, line, ...];
        # rebuild (name, file, line, locals, globals) tuples in threes.
        for i in range(0, len(frames), 3):
            failure.frames.append(
                (frames[i], frames[i + 1], int(frames[i + 2]), [], []))
        return failure

    def addError(self, testName, error, errorClass, frames):
        """
        Add an error to the reporter.
        """
        failure = self._buildFailure(error, errorClass, frames)
        self._result.addError(self._testCase, failure)
        return {'success': True}

    managercommands.AddError.responder(addError)

    def addFailure(self, testName, fail, failClass, frames):
        """
        Add a failure to the reporter.
        """
        failure = self._buildFailure(fail, failClass, frames)
        self._result.addFailure(self._testCase, failure)
        return {'success': True}

    managercommands.AddFailure.responder(addFailure)

    def addSkip(self, testName, reason):
        """
        Add a skip to the reporter.
        """
        self._result.addSkip(self._testCase, reason)
        return {'success': True}

    managercommands.AddSkip.responder(addSkip)

    def addExpectedFailure(self, testName, error, todo):
        """
        Add an expected failure to the reporter.
        """
        _todo = Todo(todo)
        self._result.addExpectedFailure(self._testCase, error, _todo)
        return {'success': True}

    managercommands.AddExpectedFailure.responder(addExpectedFailure)

    def addUnexpectedSuccess(self, testName, todo):
        """
        Add an unexpected success to the reporter.
        """
        self._result.addUnexpectedSuccess(self._testCase, todo)
        return {'success': True}

    managercommands.AddUnexpectedSuccess.responder(addUnexpectedSuccess)

    def testWrite(self, out):
        """
        Print test output from the worker.
        """
        self._testStream.write(out + '\n')
        self._testStream.flush()
        return {'success': True}

    managercommands.TestWrite.responder(testWrite)

    def _stopTest(self, result):
        """
        Stop the current running test case, forwarding the result.
        """
        self._result.stopTest(self._testCase)
        return result

    def run(self, testCase, result):
        """
        Run a test.

        @return: a C{Deferred} firing with the remote Run response once
            the test has been reported as stopped locally.
        """
        self._testCase = testCase
        self._result = result
        self._result.startTest(testCase)
        d = self.callRemote(workercommands.Run, testCase=testCase.id())
        return d.addCallback(self._stopTest)

    def setTestStream(self, stream):
        """
        Set the stream used to log output from tests.
        """
        self._testStream = stream
class LocalWorkerAddress(object):
    """
    A L{IAddress} implementation meant to provide stub addresses for
    L{ITransport.getPeer} and L{ITransport.getHost}.
    """
    # No attributes: a child process has no meaningful network address,
    # so an empty marker object satisfies the interface.
    implements(IAddress)
class LocalWorkerTransport(object):
    """
    A stub transport implementation used to support L{AMP} over a
    L{ProcessProtocol} transport.
    """
    implements(ITransport)

    def __init__(self, transport):
        # The wrapped process transport; AMP bytes are routed to the
        # child's dedicated stdin descriptor.
        self._transport = transport

    def write(self, data):
        """
        Forward data to transport.
        """
        self._transport.writeToChild(_WORKER_AMP_STDIN, data)

    def writeSequence(self, sequence):
        """
        Emulate C{writeSequence} by iterating data in the C{sequence}.
        """
        for data in sequence:
            self._transport.writeToChild(_WORKER_AMP_STDIN, data)

    def loseConnection(self):
        """
        Closes the transport.
        """
        self._transport.loseConnection()

    def getHost(self):
        """
        Return a L{LocalWorkerAddress} instance.
        """
        return LocalWorkerAddress()

    def getPeer(self):
        """
        Return a L{LocalWorkerAddress} instance.
        """
        return LocalWorkerAddress()
class LocalWorker(ProcessProtocol):
    """
    Local process worker protocol. This worker runs as a local process and
    communicates via stdin/out.

    @ivar _ampProtocol: The L{AMP} protocol instance used to communicate with
        the worker.
    @ivar _logDirectory: The directory where logs will reside.
    @ivar _logFile: The name of the main log file for tests output.
    """

    def __init__(self, ampProtocol, logDirectory, logFile):
        self._ampProtocol = ampProtocol
        self._logDirectory = logDirectory
        self._logFile = logFile
        # Fires with the termination reason once the worker process ends.
        self.endDeferred = Deferred()

    def connectionMade(self):
        """
        When connection is made, create the AMP protocol instance and open
        the log files capturing the worker's stdout, stderr and test output.
        """
        self._ampProtocol.makeConnection(LocalWorkerTransport(self.transport))
        if not os.path.exists(self._logDirectory):
            os.makedirs(self._logDirectory)
        # open() instead of the file() builtin: file() was a non-idiomatic
        # alias removed in Python 3; open() behaves identically here.
        self._outLog = open(os.path.join(self._logDirectory, 'out.log'), 'w')
        self._errLog = open(os.path.join(self._logDirectory, 'err.log'), 'w')
        testLog = open(os.path.join(self._logDirectory, self._logFile), 'w')
        self._ampProtocol.setTestStream(testLog)
        d = self._ampProtocol.callRemote(workercommands.Start,
                                         directory=self._logDirectory)
        # Ignore the potential errors, the test suite will fail properly and it
        # would just print garbage.
        d.addErrback(lambda x: None)

    def connectionLost(self, reason):
        """
        On connection lost, close the log files capturing the worker's
        stdout and stderr.
        """
        self._outLog.close()
        self._errLog.close()

    def processEnded(self, reason):
        """
        When the process closes, call C{connectionLost} for cleanup purposes
        and forward the information to the C{_ampProtocol}.
        """
        self.connectionLost(reason)
        self._ampProtocol.connectionLost(reason)
        self.endDeferred.callback(reason)

    def outReceived(self, data):
        """
        Send data received from stdout to log.
        """
        self._outLog.write(data)

    def errReceived(self, data):
        """
        Write error data to log.
        """
        self._errLog.write(data)

    def childDataReceived(self, childFD, data):
        """
        Handle data received on the dedicated AMP pipe; anything else falls
        through to the base class handling.
        """
        if childFD == _WORKER_AMP_STDOUT:
            self._ampProtocol.dataReceived(data)
        else:
            ProcessProtocol.childDataReceived(self, childFD, data)
| lgpl-3.0 |
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import os
# NOTE: This has only been used to parse
# core/page/RuntimeEnabledFeatures.in and may not be capable
# of parsing other .in files correctly.
# .in file format is:
# // comment
# name1 arg=value, arg2=value2, arg2=value3
#
# InFile must be passed a dictionary of default values
# with which to validate arguments against known names.
# Sequence types as default values will produce sequences
# as parse results.
# Bare arguments (no '=') are treated as names with value True.
# The first field will always be labeled 'name'.
#
# InFile.load_from_files(['file.in'], {'arg': None, 'arg2': []})
#
# Parsing produces an array of dictionaries:
# [ { 'name' : 'name1', 'arg' :' value', arg2=['value2', 'value3'] }
def _is_comment(line):
return line.startswith("//") or line.startswith("#")
class InFile(object):
    """
    Parser for Blink-style .in files.

    Input begins with an optional parameter section (one "name" or
    "name=value" line each), then a blank line, then one entry per line of
    the form::

        name1 arg=value, arg2=value2, arg2=value3

    ``defaults`` supplies the known argument names; a sequence-typed default
    collects repeated values into a list, while any other default is simply
    overwritten. Entries repeating the same name are merged.
    """

    def __init__(self, lines, defaults, valid_values=None, default_parameters=None):
        # Parsed entries, one dict per unique name, in first-seen order.
        self.name_dictionaries = []
        self.parameters = copy.deepcopy(default_parameters if default_parameters else {})
        self._defaults = defaults
        self._valid_values = copy.deepcopy(valid_values if valid_values else {})
        self._parse(map(str.strip, lines))

    @classmethod
    def load_from_files(cls, file_paths, defaults, valid_values, default_parameters):
        """Concatenate the given .in files and parse them as a single input."""
        lines = []
        for path in file_paths:
            assert path.endswith(".in")
            with open(os.path.abspath(path)) as in_file:
                lines += in_file.readlines()
        # Construct via cls so subclasses get instances of themselves.
        return cls(lines, defaults, valid_values, default_parameters)

    def _is_sequence(self, arg):
        # List-like (item access or iterable) but not string-like; such
        # defaults accumulate repeated arguments instead of overwriting.
        return (not hasattr(arg, "strip")
                and hasattr(arg, "__getitem__")
                or hasattr(arg, "__iter__"))

    def _parse(self, lines):
        """Split the stripped lines into the parameter section and entries."""
        parsing_parameters = True
        indices = {}  # entry name -> position in self.name_dictionaries
        for line in lines:
            if _is_comment(line):
                continue
            if not line:
                # The first blank line ends the parameter section.
                parsing_parameters = False
                continue
            if parsing_parameters:
                self._parse_parameter(line)
            else:
                entry = self._parse_line(line)
                name = entry['name']
                if name in indices:
                    entry = self._merge_entries(entry, self.name_dictionaries[indices[name]])
                    # Merging would have combined the two equal names into a
                    # list; restore the scalar name.
                    entry['name'] = name
                    self.name_dictionaries[indices[name]] = entry
                else:
                    indices[name] = len(self.name_dictionaries)
                    self.name_dictionaries.append(entry)

    def _merge_entries(self, one, two):
        """Merge two entries with the same name, combining values into lists."""
        merged = {}
        for key in one:
            if key not in two:
                self._fatal("Expected key '%s' not found in entry: %s" % (key, two))
            if one[key] and two[key]:
                val_one = one[key]
                val_two = two[key]
                if isinstance(val_one, list) and isinstance(val_two, list):
                    val = val_one + val_two
                elif isinstance(val_one, list):
                    val = val_one + [val_two]
                elif isinstance(val_two, list):
                    val = [val_one] + val_two
                else:
                    val = [val_one, val_two]
                merged[key] = val
            elif one[key]:
                merged[key] = one[key]
            else:
                merged[key] = two[key]
        return merged

    def _parse_parameter(self, line):
        """Parse one "name" or "name=value" parameter line."""
        if '=' in line:
            name, value = line.split('=')
        else:
            name, value = line, True
        if name not in self.parameters:
            self._fatal("Unknown parameter: '%s' in line:\n%s\nKnown parameters: %s" % (name, line, self.parameters.keys()))
        self.parameters[name] = value

    def _parse_line(self, line):
        """Parse one entry line into a dict seeded from the defaults."""
        args = copy.deepcopy(self._defaults)
        parts = line.split(' ')
        args['name'] = parts[0]
        # re-join the rest of the line and split on ','
        args_list = ' '.join(parts[1:]).strip().split(',')
        for arg_string in args_list:
            arg_string = arg_string.strip()
            if not arg_string:  # Ignore empty args
                continue
            if '=' in arg_string:
                arg_name, arg_value = arg_string.split('=')
            else:
                arg_name, arg_value = arg_string, True
            if arg_name not in self._defaults:
                self._fatal("Unknown argument: '%s' in line:\n%s\nKnown arguments: %s" % (arg_name, line, self._defaults.keys()))
            valid_values = self._valid_values.get(arg_name)
            if valid_values and arg_value not in valid_values:
                self._fatal("Unknown value: '%s' in line:\n%s\nKnown values: %s" % (arg_value, line, valid_values))
            if self._is_sequence(args[arg_name]):
                args[arg_name].append(arg_value)
            else:
                args[arg_name] = arg_value
        return args

    def _fatal(self, message):
        # FIXME: This should probably raise instead of exit(1)
        # print(message) is equivalent to the old 'print message' under
        # Python 2 for a single argument and keeps the module importable
        # under Python 3.
        print(message)
        exit(1)
| bsd-3-clause |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from chrome_remote_control import browser_options
class BrowserOptionsTest(unittest.TestCase):
  """Tests for BrowserOptions' optparse integration.

  Uses assertEqual throughout: assertEquals is a deprecated alias that was
  removed in Python 3.12; behavior is otherwise identical.
  """

  def testDefaults(self):
    # A parser-added default shows up as an attribute on the options object.
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='store', default=3)
    parser.parse_args(['--browser', 'any'])
    self.assertEqual(options.x, 3)  # pylint: disable=E1101

  def testDefaultsPlusOverride(self):
    # A command-line value overrides the parser default.
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='store', default=3)
    parser.parse_args(['--browser', 'any', '-x', 10])
    self.assertEqual(options.x, 10)  # pylint: disable=E1101

  def testDefaultsDontClobberPresetValue(self):
    # A value set on the options object before parsing survives parsing.
    options = browser_options.BrowserOptions()
    setattr(options, 'x', 7)
    parser = options.CreateParser()
    parser.add_option('-x', action='store', default=3)
    parser.parse_args(['--browser', 'any'])
    self.assertEqual(options.x, 7)  # pylint: disable=E1101

  def testCount0(self):
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='count', dest='v')
    parser.parse_args(['--browser', 'any'])
    self.assertEqual(options.v, None)  # pylint: disable=E1101

  def testCount2(self):
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', action='count', dest='v')
    parser.parse_args(['--browser', 'any', '-xx'])
    self.assertEqual(options.v, 2)  # pylint: disable=E1101

  def testOptparseMutabilityWhenSpecified(self):
    # parse_args returns the very same options object it mutated.
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', dest='verbosity', action='store_true')
    options_ret, _ = parser.parse_args(['--browser', 'any', '-x'])
    self.assertEqual(options_ret, options)
    self.assertTrue(options.verbosity)

  def testOptparseMutabilityWhenNotSpecified(self):
    options = browser_options.BrowserOptions()
    parser = options.CreateParser()
    parser.add_option('-x', dest='verbosity', action='store_true')
    options_ret, _ = parser.parse_args(['--browser', 'any'])
    self.assertEqual(options_ret, options)
    self.assertFalse(options.verbosity)
| bsd-3-clause |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: make CheckIn.date auto_now and unique per
    (student, date).

    Note: the trailing ``| agpl-3.0 |`` junk that had been fused onto the
    ``complete_apps`` line (a dataset/extraction artifact) is removed here;
    the migration content itself is unchanged.
    """

    def forwards(self, orm):
        """Apply: auto_now on CheckIn.date plus a (student, date) unique key."""
        # Changing field 'CheckIn.date'
        db.alter_column(u'students_checkin', 'date', self.gf('django.db.models.fields.DateField')(auto_now=True))
        # Adding unique constraint on 'CheckIn', fields ['student', 'date']
        db.create_unique(u'students_checkin', ['student_id', 'date'])

    def backwards(self, orm):
        """Revert: drop the unique key, restore a plain DateField."""
        # Removing unique constraint on 'CheckIn', fields ['student', 'date']
        db.delete_unique(u'students_checkin', ['student_id', 'date'])
        # Changing field 'CheckIn.date'
        db.alter_column(u'students_checkin', 'date', self.gf('django.db.models.fields.DateField')())

    # Frozen ORM snapshot used by South while running this migration.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'courses.course': {
            'Meta': {'object_name': 'Course'},
            'SEO_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'SEO_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'application_until': ('django.db.models.fields.DateField', [], {}),
            'applications_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'description': ('tinymce.models.HTMLField', [], {}),
            'git_repository': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'next_season_mail_list': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'partner': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['courses.Partner']", 'null': 'True', 'blank': 'True'}),
            'short_description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'show_on_index': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'url': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80'})
        },
        u'courses.partner': {
            'Meta': {'object_name': 'Partner'},
            'description': ('tinymce.models.HTMLField', [], {}),
            'facebook': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'twitter': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        u'students.checkin': {
            'Meta': {'unique_together': "(('student', 'date'),)", 'object_name': 'CheckIn'},
            'date': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mac': ('django.db.models.fields.CharField', [], {'max_length': '17'}),
            'student': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.User']", 'null': 'True', 'blank': 'True'})
        },
        u'students.courseassignment': {
            'Meta': {'unique_together': "(('user', 'course'),)", 'object_name': 'CourseAssignment'},
            'course': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['courses.Course']"}),
            'group_time': ('django.db.models.fields.SmallIntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'points': ('django.db.models.fields.PositiveIntegerField', [], {'default': "'0'"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.User']"})
        },
        u'students.user': {
            'Meta': {'object_name': 'User'},
            'avatar': ('django_resized.forms.ResizedImageField', [], {'max_length': '100', 'max_width': '200', 'blank': 'True'}),
            'courses': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['courses.Course']", 'through': u"orm['students.CourseAssignment']", 'symmetrical': 'False'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'github_account': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'linkedin_account': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'mac': ('django.db.models.fields.CharField', [], {'max_length': '17', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
            'works_at': ('django.db.models.fields.CharField', [], {'max_length': "'40'", 'null': 'True', 'blank': 'True'})
        },
        u'students.usernote': {
            'Meta': {'object_name': 'UserNote'},
            'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.CourseAssignment']"}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.User']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'post_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        }
    }

    complete_apps = ['students']
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConstantOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.util import compat
# TODO(josh11b): add tests with lists/tuples, Shape.
# TODO(ashankar): Collapse with tests in constant_op_test.py and use something
# like the test_util.run_in_graph_and_eager_modes decorator to confirm
# equivalence between graph and eager execution.
class ConstantTest(test.TestCase):
  """Eager-mode tests for tf.constant / convert_to_tensor across dtypes.

  Uses the builtin complex() where the original used np.complex, a
  deprecated alias for the builtin that was removed in NumPy 1.24;
  the computed values are identical.
  """

  def _testCpu(self, x):
    # Compare the eager-converted tensor against the NumPy reference on CPU.
    np_ans = np.array(x)
    with context.device("/device:CPU:0"):
      tf_ans = ops.convert_to_tensor(x).numpy()
    if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
      self.assertAllClose(np_ans, tf_ans)
    else:
      self.assertAllEqual(np_ans, tf_ans)

  def _testGpu(self, x):
    # Same comparison on GPU, skipped silently when no GPU is available.
    device = test_util.gpu_device_name()
    if device:
      np_ans = np.array(x)
      with context.device(device):
        tf_ans = ops.convert_to_tensor(x).numpy()
      if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]:
        self.assertAllClose(np_ans, tf_ans)
      else:
        self.assertAllEqual(np_ans, tf_ans)

  def _testAll(self, x):
    self._testCpu(x)
    self._testGpu(x)

  def testFloat(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
    self._testAll(
        np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32))
    self._testAll(np.empty((2, 0, 5)).astype(np.float32))

    orig = [-1.0, 2.0, 0.0]
    tf_ans = constant_op.constant(orig)
    self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())

    # Mix floats and ints
    orig = [-1.5, 2, 0]
    tf_ans = constant_op.constant(orig)
    self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())

    orig = [-5, 2.5, 0]
    tf_ans = constant_op.constant(orig)
    self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())

    # Mix floats and ints that don't fit in int32
    orig = [1, 2**42, 0.5]
    tf_ans = constant_op.constant(orig)
    self.assertEqual(dtypes_lib.float32, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())

  def testDouble(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64))
    self._testAll(
        np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64))
    self._testAll(np.empty((2, 0, 5)).astype(np.float64))

    orig = [-5, 2.5, 0]
    tf_ans = constant_op.constant(orig, dtypes_lib.float64)
    self.assertEqual(dtypes_lib.float64, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())

    # This integer is not exactly representable as a double, gets rounded.
    tf_ans = constant_op.constant(2**54 + 1, dtypes_lib.float64)
    self.assertEqual(2**54, tf_ans.numpy())

    # This integer is larger than all non-infinite numbers representable
    # by a double, raises an exception.
    with self.assertRaisesRegexp(ValueError, "out-of-range integer"):
      constant_op.constant(10**310, dtypes_lib.float64)

  def testInt32(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32))
    self._testAll(
        (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32))
    self._testAll(np.empty((2, 0, 5)).astype(np.int32))
    self._testAll([-1, 2])

  def testInt64(self):
    self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64))
    self._testAll(
        (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64))
    self._testAll(np.empty((2, 0, 5)).astype(np.int64))

    # Should detect out of range for int32 and use int64 instead.
    orig = [2, 2**48, -2**48]
    tf_ans = constant_op.constant(orig)
    self.assertEqual(dtypes_lib.int64, tf_ans.dtype)
    self.assertAllClose(np.array(orig), tf_ans.numpy())

    # Out of range for an int64
    with self.assertRaisesRegexp(ValueError, "out-of-range integer"):
      constant_op.constant([2**72])

  def testComplex64(self):
    # complex(1, 2) replaces the removed NumPy alias np.complex(1, 2).
    self._testAll(
        complex(1, 2) *
        np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64))
    self._testAll(
        complex(1, 2) *
        np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64))
    self._testAll(np.empty((2, 0, 5)).astype(np.complex64))

  def testComplex128(self):
    # complex(1, 2) replaces the removed NumPy alias np.complex(1, 2).
    self._testAll(
        complex(1, 2) *
        np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex128))
    self._testAll(
        complex(1, 2) *
        np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex128))
    self._testAll(np.empty((2, 0, 5)).astype(np.complex128))

  def testString(self):
    val = [compat.as_bytes(str(x)) for x in np.arange(-15, 15)]
    self._testCpu(np.array(val).reshape([2, 3, 5]))
    self._testCpu(np.empty((2, 0, 5)).astype(np.str_))

  def testStringWithNulls(self):
    val = ops.convert_to_tensor(b"\0\0\0\0").numpy()
    self.assertEqual(len(val), 4)
    self.assertEqual(val, b"\0\0\0\0")

    val = ops.convert_to_tensor(b"xx\0xx").numpy()
    self.assertEqual(len(val), 5)
    self.assertAllEqual(val, b"xx\0xx")

    nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]]
    val = ops.convert_to_tensor(nested).numpy()
    # NOTE(mrry): Do not use assertAllEqual, because it converts nested to a
    # numpy array, which loses the null terminators.
    self.assertEqual(val.tolist(), nested)

  def testExplicitShapeNumPy(self):
    c = constant_op.constant(
        np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
        shape=[2, 3, 5])
    self.assertEqual(c.get_shape(), [2, 3, 5])

  def testImplicitShapeNumPy(self):
    c = constant_op.constant(
        np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32))
    self.assertEqual(c.get_shape(), [2, 3, 5])

  def testExplicitShapeList(self):
    c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7])
    self.assertEqual(c.get_shape(), [7])

  def testExplicitShapeFill(self):
    c = constant_op.constant(12, shape=[7])
    self.assertEqual(c.get_shape(), [7])
    self.assertAllEqual([12, 12, 12, 12, 12, 12, 12], c.numpy())

  def testExplicitShapeReshape(self):
    c = constant_op.constant(
        np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32),
        shape=[5, 2, 3])
    self.assertEqual(c.get_shape(), [5, 2, 3])

  def testImplicitShapeList(self):
    c = constant_op.constant([1, 2, 3, 4, 5, 6, 7])
    self.assertEqual(c.get_shape(), [7])

  def testExplicitShapeNumber(self):
    c = constant_op.constant(1, shape=[1])
    self.assertEqual(c.get_shape(), [1])

  def testImplicitShapeNumber(self):
    c = constant_op.constant(1)
    self.assertEqual(c.get_shape(), [])

  def testShapeTooBig(self):
    with self.assertRaises(TypeError):
      constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])

  def testShapeTooSmall(self):
    with self.assertRaises(TypeError):
      constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])

  def testShapeWrong(self):
    with self.assertRaisesRegexp(TypeError, None):
      constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])

  def testShape(self):
    self._testAll(constant_op.constant([1]).get_shape())

  def testDimension(self):
    x = constant_op.constant([1]).shape[0]
    self._testAll(x)

  def testDimensionList(self):
    x = [constant_op.constant([1]).shape[0]]
    self._testAll(x)

    # Mixing with regular integers is fine too
    self._testAll([1] + x)
    self._testAll(x + [1])

  def testDimensionTuple(self):
    x = constant_op.constant([1]).shape[0]
    self._testAll((x,))
    self._testAll((1, x))
    self._testAll((x, 1))

  def testInvalidLength(self):

    class BadList(list):

      def __init__(self):
        super(BadList, self).__init__([1, 2, 3])  # pylint: disable=invalid-length-returned

      def __len__(self):
        return -1

    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
      constant_op.constant([BadList()])
    with self.assertRaisesRegexp(ValueError, "mixed types"):
      constant_op.constant([1, 2, BadList()])
    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
      constant_op.constant(BadList())
    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
      constant_op.constant([[BadList(), 2], 3])
    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
      constant_op.constant([BadList(), [1, 2, 3]])
    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
      constant_op.constant([BadList(), []])

    # TODO(allenl, josh11b): These cases should return exceptions rather than
    # working (currently shape checking only checks the first element of each
    # sequence recursively). Maybe the first one is fine, but the second one
    # silently truncating is rather bad.

    # with self.assertRaisesRegexp(ValueError, "should return >= 0"):
    #   constant_op.constant([[3, 2, 1], BadList()])
    # with self.assertRaisesRegexp(ValueError, "should return >= 0"):
    #   constant_op.constant([[], BadList()])

  def testSparseValuesRaiseErrors(self):
    with self.assertRaisesRegexp(ValueError, "non-rectangular Python sequence"):
      constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)

    with self.assertRaisesRegexp(ValueError, None):
      constant_op.constant([[1, 2], [3]])

    with self.assertRaisesRegexp(ValueError, None):
      constant_op.constant([[1, 2], [3], [4, 5]])

  # TODO(ashankar): This test fails with graph construction since
  # tensor_util.make_tensor_proto (invoked from constant_op.constant)
  # does not handle iterables (it relies on numpy conversion).
  # For consistency, should graph construction handle Python objects
  # that implement the sequence protocol (but not numpy conversion),
  # or should eager execution fail on such sequences?
  def testCustomSequence(self):

    # This is inspired by how many objects in pandas are implemented:
    # - They implement the Python sequence protocol
    # - But may raise a KeyError on __getitem__(self, 0)
    # See https://github.com/tensorflow/tensorflow/issues/20347
    class MySeq(object):

      def __getitem__(self, key):
        if key != 1 and key != 3:
          raise KeyError(key)
        return key

      def __len__(self):
        return 2

      def __iter__(self):
        l = list([1, 3])
        return l.__iter__()

    self.assertAllEqual([1, 3], self.evaluate(constant_op.constant(MySeq())))
class AsTensorTest(test.TestCase):
  """Checks convert_to_tensor's identity and wrapping behavior."""

  def testAsTensorForTensorInput(self):
    # An existing Tensor must be passed through as the very same object.
    tensor = constant_op.constant(10.0)
    converted = ops.convert_to_tensor(tensor)
    self.assertIs(tensor, converted)

  def testAsTensorForNonTensorInput(self):
    # A plain Python float must come back wrapped in an EagerTensor.
    converted = ops.convert_to_tensor(10.0)
    self.assertTrue(isinstance(converted, ops.EagerTensor))
class ZerosTest(test.TestCase):
  """Exercises tf.zeros for shapes, dynamic sizes and element types."""

  def _Zeros(self, shape):
    # Build a zeros tensor, verify its static shape, return its value.
    zeros_t = array_ops.zeros(shape)
    self.assertEqual(shape, zeros_t.get_shape())
    return zeros_t.numpy()

  def testConst(self):
    expected = np.array([[0] * 3] * 2)
    self.assertTrue(np.array_equal(self._Zeros([2, 3]), expected))

  def testScalar(self):
    self.assertEqual(0, self._Zeros([]))
    self.assertEqual(0, self._Zeros(()))
    scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
    self.assertEqual(0, scalar.numpy())

  def testDynamicSizes(self):
    expected = np.array([[0] * 3] * 2)
    # Build a 2 x 3 tensor, then zeros shaped like it via a dynamic shape.
    filled = array_ops.fill([2, 3], 12., name="fill")
    dynamic_zeros = array_ops.zeros(array_ops.shape(filled))
    actual = dynamic_zeros.numpy()
    self.assertAllEqual(expected, actual)
    self.assertShapeEqual(expected, filled)
    self.assertShapeEqual(expected, dynamic_zeros)

  def testDtype(self):
    filled = array_ops.fill([2, 3], 12., name="fill")
    self.assertEqual(filled.get_shape(), [2, 3])
    # Default element type, first with a static shape...
    zeros_t = array_ops.zeros([2, 3])
    self.assertEqual(zeros_t.dtype, dtypes_lib.float32)
    self.assertEqual([2, 3], zeros_t.get_shape())
    self.assertAllEqual(zeros_t.numpy(), np.zeros([2, 3]))
    # ...then with a dynamically computed shape.
    zeros_t = array_ops.zeros(array_ops.shape(filled))
    self.assertEqual(zeros_t.dtype, dtypes_lib.float32)
    self.assertEqual([2, 3], zeros_t.get_shape())
    self.assertAllEqual(zeros_t.numpy(), np.zeros([2, 3]))
    # Explicitly requested element types, static then dynamic shape.
    for dtype in (
        dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
        dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
        dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
        dtypes_lib.bool,
        # TODO(josh11b): Support string type here.
        # dtypes_lib.string
    ):
      for shape_arg in ([2, 3], array_ops.shape(filled)):
        zeros_t = array_ops.zeros(shape_arg, dtype=dtype)
        self.assertEqual(zeros_t.dtype, dtype)
        self.assertEqual([2, 3], zeros_t.get_shape())
        value = zeros_t.numpy()
        self.assertFalse(np.any(value))
        self.assertEqual((2, 3), value.shape)
class ZerosLikeTest(test.TestCase):
  """Tests for tf.zeros_like across dtypes and devices."""

  def _compareZeros(self, dtype, use_gpu):
    # Creates a tensor of non-zero values with shape 2 x 3.
    # NOTE(kearnes): The default numpy dtype associated with tf.string is
    # np.object (and can't be changed without breaking a lot things), which
    # causes a TypeError in constant_op.constant below. Here we catch the
    # special case of tf.string and set the numpy dtype appropriately.
    if dtype == dtypes_lib.string:
      # np.bytes_ replaces np.string_, a deprecated alias for the same
      # scalar type that was removed in NumPy 2.0.
      numpy_dtype = np.bytes_
    else:
      numpy_dtype = dtype.as_numpy_dtype
    d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
    # Constructs a tensor of zeros of the same dimensions and type as "d".
    z_var = array_ops.zeros_like(d)
    # Test that the type is correct
    self.assertEqual(z_var.dtype, dtype)
    # Test that the shape is correct
    self.assertEqual([2, 3], z_var.get_shape())
    # Test that the value is correct
    z_value = z_var.numpy()
    self.assertFalse(np.any(z_value))
    self.assertEqual((2, 3), z_value.shape)

  def testZerosLikeCPU(self):
    for dtype in [
        dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
        dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
        dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
        # TODO(josh11b): Support string type here.
        # dtypes_lib.string
    ]:
      self._compareZeros(dtype, use_gpu=False)

  def testZerosLikeGPU(self):
    for dtype in [
        dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
        dtypes_lib.bool, dtypes_lib.int64,
        # TODO(josh11b): Support string type here.
        # dtypes_lib.string
    ]:
      self._compareZeros(dtype, use_gpu=True)

  def testZerosLikeDtype(self):
    # Make sure zeros_like works even for dtypes that cannot be cast between
    shape = (3, 5)
    dtypes = np.float32, np.complex64
    for in_type in dtypes:
      x = np.arange(15).astype(in_type).reshape(*shape)
      for out_type in dtypes:
        y = array_ops.zeros_like(x, dtype=out_type).numpy()
        self.assertEqual(y.dtype, out_type)
        self.assertEqual(y.shape, shape)
        self.assertAllEqual(y, np.zeros(shape, dtype=out_type))
class OnesTest(test.TestCase):
  """Tests for array_ops.ones (constant and dynamic shapes, dtypes)."""

  def _Ones(self, shape):
    """Build a ones tensor of `shape`, check its static shape, return numpy."""
    ret = array_ops.ones(shape)
    self.assertEqual(shape, ret.get_shape())
    return ret.numpy()

  def testConst(self):
    """A statically-shaped ones tensor equals the corresponding numpy array."""
    self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))

  def testScalar(self):
    """ones() with an empty shape (list, tuple, or empty tensor) is scalar 1."""
    self.assertEqual(1, self._Ones([]))
    self.assertEqual(1, self._Ones(()))
    scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
    self.assertEqual(1, scalar.numpy())

  def testDynamicSizes(self):
    """ones() accepts a shape computed at runtime via array_ops.shape."""
    np_ans = np.array([[1] * 3] * 2)
    # Creates a tensor of 2 x 3.
    d = array_ops.fill([2, 3], 12., name="fill")
    # Constructs a tensor of ones of the same dimensions as "d".
    z = array_ops.ones(array_ops.shape(d))
    out = z.numpy()
    self.assertAllEqual(np_ans, out)
    self.assertShapeEqual(np_ans, d)
    self.assertShapeEqual(np_ans, z)

  def testDtype(self):
    """ones() defaults to float32 and honors an explicit dtype argument."""
    d = array_ops.fill([2, 3], 12., name="fill")
    self.assertEqual(d.get_shape(), [2, 3])
    # Test default type for both constant size and dynamic size
    z = array_ops.ones([2, 3])
    self.assertEqual(z.dtype, dtypes_lib.float32)
    self.assertEqual([2, 3], z.get_shape())
    self.assertAllEqual(z.numpy(), np.ones([2, 3]))
    z = array_ops.ones(array_ops.shape(d))
    self.assertEqual(z.dtype, dtypes_lib.float32)
    self.assertEqual([2, 3], z.get_shape())
    self.assertAllEqual(z.numpy(), np.ones([2, 3]))
    # Test explicit type control
    for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
                  dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
                  dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
                  dtypes_lib.bool):
      z = array_ops.ones([2, 3], dtype=dtype)
      self.assertEqual(z.dtype, dtype)
      self.assertEqual([2, 3], z.get_shape())
      self.assertAllEqual(z.numpy(), np.ones([2, 3]))
      z = array_ops.ones(array_ops.shape(d), dtype=dtype)
      self.assertEqual(z.dtype, dtype)
      self.assertEqual([2, 3], z.get_shape())
      self.assertAllEqual(z.numpy(), np.ones([2, 3]))
class OnesLikeTest(test.TestCase):
  """Tests for array_ops.ones_like over a range of dtypes."""

  def testOnesLike(self):
    """ones_like of a 2x3 tensor preserves dtype/shape and fills with 1."""
    for dtype in [
        dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
        dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
        dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64
    ]:
      numpy_dtype = dtype.as_numpy_dtype
      # Creates a tensor of non-zero values with shape 2 x 3.
      d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
      # Constructs a tensor of ones of the same dimensions and type as "d".
      z_var = array_ops.ones_like(d)
      # Test that the type is correct
      self.assertEqual(z_var.dtype, dtype)
      z_value = z_var.numpy()
      # Test that the value is correct
      self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
      self.assertEqual([2, 3], z_var.get_shape())
class FillTest(test.TestCase):
  """Tests for array_ops.fill across dtypes, devices, and error cases."""

  def _compare(self, dims, val, np_ans, use_gpu):
    """Fill a tensor of shape `dims` with `val` and compare against `np_ans`.

    Placed on GPU:0 when `use_gpu` is set and a GPU is actually available,
    otherwise on CPU:0.
    """
    ctx = context.context()
    device = "GPU:0" if (use_gpu and ctx.num_gpus()) else "CPU:0"
    with ops.device(device):
      tf_ans = array_ops.fill(dims, val, name="fill")
      out = tf_ans.numpy()
    self.assertAllClose(np_ans, out)

  def _compareAll(self, dims, val, np_ans):
    """Run _compare on both CPU and (if available) GPU."""
    self._compare(dims, val, np_ans, False)
    self._compare(dims, val, np_ans, True)

  def testFillFloat(self):
    np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32)
    self._compareAll([2, 3], np_ans[0][0], np_ans)

  def testFillDouble(self):
    np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64)
    self._compareAll([2, 3], np_ans[0][0], np_ans)

  def testFillInt32(self):
    np_ans = np.array([[42] * 3] * 2).astype(np.int32)
    self._compareAll([2, 3], np_ans[0][0], np_ans)

  def testFillInt64(self):
    np_ans = np.array([[-42] * 3] * 2).astype(np.int64)
    self._compareAll([2, 3], np_ans[0][0], np_ans)

  def testFillComplex64(self):
    # Complex fill has no GPU kernel, so only compare on CPU.
    np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64)
    self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)

  def testFillComplex128(self):
    np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128)
    self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False)

  def testFillString(self):
    np_ans = np.array([[b"yolo"] * 3] * 2)
    tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").numpy()
    self.assertAllEqual(np_ans, tf_ans)

  def testFillNegative(self):
    """fill rejects shapes that contain a negative dimension.

    Bug fix: the last two shapes were written as `(-2)` and `(-3)`, which
    are plain parenthesized ints, not one-element tuples -- fill() then
    received a *scalar* dims argument and the test exercised the
    "dims must be a vector" error path instead of the negative-dimension
    one.  They are now proper one-element tuples.
    """
    for shape in (-1,), (2, -1), (-1, 2), (-2,), (-3,):
      with self.assertRaises(errors_impl.InvalidArgumentError):
        array_ops.fill(shape, 7)

  def testShapeFunctionEdgeCases(self):
    # Non-vector dimensions.
    with self.assertRaises(errors_impl.InvalidArgumentError):
      array_ops.fill([[0, 1], [2, 3]], 1.0)
    # Non-scalar value.
    with self.assertRaises(errors_impl.InvalidArgumentError):
      array_ops.fill([3, 2], [1.0, 2.0])
# Standard test-runner entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
Gravecorp/Gap | Gap/Lib/distutils/spawn.py | 72 | 6467 | """distutils.spawn
Provides the 'spawn()' function, a front-end to various platform-
specific functions for launching another program in a sub-process.
Also provides the 'find_executable()' to search the path for a given
executable name.
"""
__revision__ = "$Id$"
import sys
import os
from distutils.errors import DistutilsPlatformError, DistutilsExecError
from distutils import log
def spawn(cmd, search_path=1, verbose=0, dry_run=0):
    """Run another program, specified as a command list 'cmd', in a new process.

    'cmd' is just the argument list for the new process, ie.
    cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
    There is no way to run a program with a name different from that of its
    executable.

    If 'search_path' is true (the default), the system's executable
    search path will be used to find the program; otherwise, cmd[0]
    must be the exact path to the executable.  If 'dry_run' is true,
    the command will not actually be run.

    Raise DistutilsExecError if running the program fails in any way; just
    return on success.
    """
    # Dispatch on the host OS.  'verbose' is accepted for interface
    # compatibility but not used by the per-platform helpers.
    if os.name == 'posix':
        _spawn_posix(cmd, search_path, dry_run=dry_run)
    elif os.name == 'nt':
        _spawn_nt(cmd, search_path, dry_run=dry_run)
    elif os.name == 'os2':
        _spawn_os2(cmd, search_path, dry_run=dry_run)
    else:
        # NOTE: Python 2 'raise Class, args' syntax -- this module targets
        # Python 2 only.
        raise DistutilsPlatformError, \
              "don't know how to spawn programs on platform '%s'" % os.name
def _nt_quote_args(args):
"""Quote command-line arguments for DOS/Windows conventions.
Just wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# XXX this doesn't seem very robust to me -- but if the Windows guys
# say it'll work, I guess I'll have to accept it. (What if an arg
# contains quotes? What other magic characters, other than spaces,
# have to be escaped? Is there an escaping mechanism other than
# quoting?)
for i, arg in enumerate(args):
if ' ' in arg:
args[i] = '"%s"' % arg
return args
def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0):
    """Run 'cmd' on Windows via os.spawnv.

    Raises DistutilsExecError if the program cannot be started or exits
    with a non-zero status.  (Python 2 'except X, e' syntax throughout.)
    """
    executable = cmd[0]
    cmd = _nt_quote_args(cmd)
    if search_path:
        # either we find one or it stays the same
        executable = find_executable(executable) or executable
    log.info(' '.join([executable] + cmd[1:]))
    if not dry_run:
        # spawn for NT requires a full path to the .exe
        try:
            rc = os.spawnv(os.P_WAIT, executable, cmd)
        except OSError, exc:
            # this seems to happen when the command isn't found
            raise DistutilsExecError, \
                  "command '%s' failed: %s" % (cmd[0], exc[-1])
        if rc != 0:
            # and this reflects the command running but failing
            raise DistutilsExecError, \
                  "command '%s' failed with exit status %d" % (cmd[0], rc)
def _spawn_os2(cmd, search_path=1, verbose=0, dry_run=0):
    """Run 'cmd' on OS/2 (EMX) via os.spawnv.

    Same contract as _spawn_nt, but without argument quoting, and a
    failing exit status is additionally logged at debug level.
    """
    executable = cmd[0]
    if search_path:
        # either we find one or it stays the same
        executable = find_executable(executable) or executable
    log.info(' '.join([executable] + cmd[1:]))
    if not dry_run:
        # spawnv for OS/2 EMX requires a full path to the .exe
        try:
            rc = os.spawnv(os.P_WAIT, executable, cmd)
        except OSError, exc:
            # this seems to happen when the command isn't found
            raise DistutilsExecError, \
                  "command '%s' failed: %s" % (cmd[0], exc[-1])
        if rc != 0:
            # and this reflects the command running but failing
            log.debug("command '%s' failed with exit status %d" % (cmd[0], rc))
            raise DistutilsExecError, \
                  "command '%s' failed with exit status %d" % (cmd[0], rc)
def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
    """Run 'cmd' on POSIX via fork/exec and wait for it to finish.

    In the child, exec replaces the process (os.execvp when 'search_path'
    is true, else os.execv); the parent loops on os.waitpid until the
    child exits or is killed by a signal, translating failures into
    DistutilsExecError.
    """
    log.info(' '.join(cmd))
    if dry_run:
        return
    # execvp searches PATH for cmd[0]; execv requires an exact path.
    exec_fn = search_path and os.execvp or os.execv
    pid = os.fork()
    if pid == 0: # in the child
        try:
            exec_fn(cmd[0], cmd)
        except OSError, e:
            sys.stderr.write("unable to execute %s: %s\n" %
                             (cmd[0], e.strerror))
            # _exit, not sys.exit: never run the parent's cleanup handlers
            # in the forked child.
            os._exit(1)
        # exec only returns on failure; this line is a safety net.
        sys.stderr.write("unable to execute %s for unknown reasons" % cmd[0])
        os._exit(1)
    else: # in the parent
        # Loop until the child either exits or is terminated by a signal
        # (ie. keep waiting if it's merely stopped)
        while 1:
            try:
                pid, status = os.waitpid(pid, 0)
            except OSError, exc:
                import errno
                # waitpid may be interrupted by a signal; just retry.
                if exc.errno == errno.EINTR:
                    continue
                raise DistutilsExecError, \
                      "command '%s' failed: %s" % (cmd[0], exc[-1])
            if os.WIFSIGNALED(status):
                raise DistutilsExecError, \
                      "command '%s' terminated by signal %d" % \
                      (cmd[0], os.WTERMSIG(status))
            elif os.WIFEXITED(status):
                exit_status = os.WEXITSTATUS(status)
                if exit_status == 0:
                    return   # hey, it succeeded!
                else:
                    raise DistutilsExecError, \
                          "command '%s' failed with exit status %d" % \
                          (cmd[0], exit_status)
            elif os.WIFSTOPPED(status):
                continue
            else:
                raise DistutilsExecError, \
                      "unknown error executing '%s': termination status %d" % \
                      (cmd[0], status)
def find_executable(executable, path=None):
    """Try to find 'executable' in the directories listed in 'path'.

    'path' is a string of directories separated by os.pathsep and defaults
    to os.environ['PATH'].  Returns the complete filename, or None if the
    executable was not found.
    """
    if path is None:
        path = os.environ['PATH']
    base, ext = os.path.splitext(executable)
    # Windows and OS/2 executables carry a '.exe' suffix; append it when
    # the caller omitted one.
    if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
        executable = executable + '.exe'
    if os.path.isfile(executable):
        return executable
    for directory in path.split(os.pathsep):
        candidate = os.path.join(directory, executable)
        if os.path.isfile(candidate):
            # the file exists, we have a shot at spawn working
            return candidate
    return None
| mpl-2.0 |
wfxiang08/Nuitka | nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Scanner/IDL.py | 6 | 1807 | """SCons.Scanner.IDL
This module implements the depenency scanner for IDL (Interface
Definition Language) files.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/IDL.py 2014/07/05 09:42:21 garyo"
import SCons.Node.FS
import SCons.Scanner
def IDLScan():
    """Return a prototype Scanner instance for scanning IDL source files"""
    # A classic C-preprocessor-style scanner: matches both '#include' and
    # bare 'import' directives, capturing the bracket style and the path.
    return SCons.Scanner.ClassicCPP(
        "IDLScan",
        "$IDLSUFFIXES",
        "CPPPATH",
        '^[ \t]*(?:#[ \t]*include|[ \t]*import)[ \t]+(<|")([^>"]+)(>|")')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
zverevalexei/trex-http-proxy | trex_client/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/eventloop/minitornado/util.py | 29 | 6226 | """Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import sys
def import_object(name):
    """Imports an object by dotted name.

    import_object('x') is equivalent to 'import x'.
    import_object('x.y.z') is equivalent to 'from x.y import z'.

    Raises ImportError when the final attribute cannot be found on the
    imported module.
    """
    if '.' not in name:
        return __import__(name, None, None)
    module_name, _, attr_name = name.rpartition('.')
    module = __import__(module_name, None, None, [attr_name], 0)
    try:
        return getattr(module, attr_name)
    except AttributeError:
        raise ImportError("No module named %s" % attr_name)
# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
# literal strings, and alternative solutions like "from __future__ import
# unicode_literals" have other problems (see PEP 414). u() can be applied
# to ascii strings that include \u escapes (but they must not contain
# literal non-ascii characters).
if type('') is not type(b''):
    # Python 3: '' is str, b'' is bytes -- str is already unicode.
    def u(s):
        return s
    bytes_type = bytes
    unicode_type = str
    basestring_type = str
else:
    # Python 2: '' and b'' are both str; decode escapes to get unicode.
    def u(s):
        return s.decode('unicode_escape')
    bytes_type = str
    unicode_type = unicode
    basestring_type = basestring
# Define version-specific helpers via exec() because each branch contains
# syntax that is a SyntaxError when merely *parsed* under the other major
# version ('raise a, b, c' and 'exec code in glob, loc' under Python 3).
if sys.version_info > (3,):
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[1].with_traceback(exc_info[2])
def exec_in(code, glob, loc=None):
    if isinstance(code, str):
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec(code, glob, loc)
""")
else:
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[0], exc_info[1], exc_info[2]
def exec_in(code, glob, loc=None):
    if isinstance(code, basestring):
        # exec(string) inherits the caller's future imports; compile
        # the string first to prevent that.
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec code in glob, loc
""")
class Configurable(object):
    """Base class for configurable interfaces.

    A configurable interface is an (abstract) class whose constructor
    acts as a factory function for one of its implementation subclasses.
    The implementation subclass as well as optional keyword arguments to
    its initializer can be set globally at runtime with `configure`.

    By using the constructor as the factory method, the interface
    looks like a normal class, `isinstance` works as usual, etc.  This
    pattern is most useful when the choice of implementation is likely
    to be a global decision (e.g. when `~select.epoll` is available,
    always use it instead of `~select.select`), or when a
    previously-monolithic class has been split into specialized
    subclasses.

    Configurable subclasses must define the class methods
    `configurable_base` and `configurable_default`, and use the instance
    method `initialize` instead of ``__init__``.
    """
    # The double leading underscore name-mangles these to
    # _Configurable__impl_class / _Configurable__impl_kwargs, so every
    # access in this class body resolves to the same mangled attributes
    # no matter which subclass they are set on.
    __impl_class = None
    __impl_kwargs = None

    def __new__(cls, **kwargs):
        # Acts as a factory: instantiating the hierarchy's base class
        # produces the currently configured implementation subclass.
        base = cls.configurable_base()
        args = {}
        if cls is base:
            impl = cls.configured_class()
            if base.__impl_kwargs:
                args.update(base.__impl_kwargs)
        else:
            impl = cls
        args.update(kwargs)
        instance = super(Configurable, cls).__new__(impl)
        # initialize vs __init__ chosen for compatiblity with AsyncHTTPClient
        # singleton magic. If we get rid of that we can switch to __init__
        # here too.
        instance.initialize(**args)
        return instance

    @classmethod
    def configurable_base(cls):
        """Returns the base class of a configurable hierarchy.

        This will normally return the class in which it is defined.
        (which is *not* necessarily the same as the cls classmethod parameter).
        """
        raise NotImplementedError()

    @classmethod
    def configurable_default(cls):
        """Returns the implementation class to be used if none is configured."""
        raise NotImplementedError()

    def initialize(self):
        """Initialize a `Configurable` subclass instance.

        Configurable classes should use `initialize` instead of ``__init__``.
        """

    @classmethod
    def configure(cls, impl, **kwargs):
        """Sets the class to use when the base class is instantiated.

        Keyword arguments will be saved and added to the arguments passed
        to the constructor.  This can be used to set global defaults for
        some parameters.
        """
        base = cls.configurable_base()
        if isinstance(impl, (unicode_type, bytes_type)):
            # Accept a dotted name as well as a class object.
            impl = import_object(impl)
        if impl is not None and not issubclass(impl, cls):
            raise ValueError("Invalid subclass of %s" % cls)
        base.__impl_class = impl
        base.__impl_kwargs = kwargs

    @classmethod
    def configured_class(cls):
        """Returns the currently configured class."""
        base = cls.configurable_base()
        if cls.__impl_class is None:
            # Lazily fall back to the default implementation.
            base.__impl_class = cls.configurable_default()
        return base.__impl_class

    @classmethod
    def _save_configuration(cls):
        # Snapshot the current (class, kwargs) pair, e.g. for tests.
        base = cls.configurable_base()
        return (base.__impl_class, base.__impl_kwargs)

    @classmethod
    def _restore_configuration(cls, saved):
        # Restore a snapshot made by _save_configuration.
        base = cls.configurable_base()
        base.__impl_class = saved[0]
        base.__impl_kwargs = saved[1]
| mit |
Godiyos/python-for-android | python-modules/twisted/twisted/test/test_paths.py | 49 | 47338 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases covering L{twisted.python.filepath} and L{twisted.python.zippath}.
"""
import os, time, pickle, errno, zipfile, stat
from twisted.python.compat import set
from twisted.python.win32 import WindowsError, ERROR_DIRECTORY
from twisted.python import filepath
from twisted.python.zippath import ZipArchive
from twisted.python.runtime import platform
from twisted.trial import unittest
class AbstractFilePathTestCase(unittest.TestCase):
    """
    Shared tests for path implementations (L{FilePath} and L{ZipPath});
    C{setUp} builds a small on-disk hierarchy and subclasses may re-point
    C{self.path} at an equivalent structure.
    """
    # Contents written to the two fixture files created in setUp.
    f1content = "file 1"
    f2content = "file 2"

    def _mkpath(self, *p):
        """Build an absolute path under self.cmn, record it in self.all."""
        x = os.path.abspath(os.path.join(self.cmn, *p))
        self.all.append(x)
        return x

    def subdir(self, *dirname):
        """Create (and record) a subdirectory of the fixture root."""
        os.mkdir(self._mkpath(*dirname))

    def subfile(self, *dirname):
        """Open (and record) a new file under the fixture root for writing."""
        return open(self._mkpath(*dirname), "wb")

    def setUp(self):
        """Create the fixture hierarchy: file1, sub1/file2, sub3/file3.ext{1,2,3}."""
        self.now = time.time()
        cmn = self.cmn = os.path.abspath(self.mktemp())
        self.all = [cmn]
        os.mkdir(cmn)
        self.subdir("sub1")
        f = self.subfile("file1")
        f.write(self.f1content)
        f.close()
        f = self.subfile("sub1", "file2")
        f.write(self.f2content)
        f.close()
        self.subdir('sub3')
        f = self.subfile("sub3", "file3.ext1")
        f.close()
        f = self.subfile("sub3", "file3.ext2")
        f.close()
        f = self.subfile("sub3", "file3.ext3")
        f.close()
        self.path = filepath.FilePath(cmn)
        self.root = filepath.FilePath("/")

    def test_segmentsFromPositive(self):
        """
        Verify that the segments between two paths are correctly identified.
        """
        self.assertEquals(
            self.path.child("a").child("b").child("c").segmentsFrom(self.path),
            ["a", "b", "c"])

    def test_segmentsFromNegative(self):
        """Verify that segmentsFrom notices when the ancestor isn't an ancestor.
        """
        self.assertRaises(
            ValueError,
            self.path.child("a").child("b").child("c").segmentsFrom,
            self.path.child("d").child("c").child("e"))

    def test_walk(self):
        """
        Verify that walking the path gives the same result as the known file
        hierarchy.
        """
        x = [foo.path for foo in self.path.walk()]
        self.assertEquals(set(x), set(self.all))

    def test_parents(self):
        """
        L{FilePath.parents()} should return an iterator of every ancestor of
        the L{FilePath} in question.
        """
        # Build the expected ancestor list by repeatedly applying dirname
        # until the filesystem root is reached.
        L = []
        pathobj = self.path.child("a").child("b").child("c")
        fullpath = pathobj.path
        lastpath = fullpath
        thispath = os.path.dirname(fullpath)
        while lastpath != self.root.path:
            L.append(thispath)
            lastpath = thispath
            thispath = os.path.dirname(thispath)
        self.assertEquals([x.path for x in pathobj.parents()], L)

    def test_validSubdir(self):
        """Verify that a valid subdirectory will show up as a directory, but not as a
        file, not as a symlink, and be listable.
        """
        sub1 = self.path.child('sub1')
        self.failUnless(sub1.exists(),
                        "This directory does exist.")
        self.failUnless(sub1.isdir(),
                        "It's a directory.")
        self.failUnless(not sub1.isfile(),
                        "It's a directory.")
        self.failUnless(not sub1.islink(),
                        "It's a directory.")
        self.failUnlessEqual(sub1.listdir(),
                             ['file2'])

    def test_invalidSubdir(self):
        """
        Verify that a subdirectory that doesn't exist is reported as such.
        """
        sub2 = self.path.child('sub2')
        self.failIf(sub2.exists(),
                    "This directory does not exist.")

    def test_validFiles(self):
        """
        Make sure that we can read existent non-empty files.
        """
        f1 = self.path.child('file1')
        self.failUnlessEqual(f1.open().read(), self.f1content)
        f2 = self.path.child('sub1').child('file2')
        self.failUnlessEqual(f2.open().read(), self.f2content)

    def test_multipleChildSegments(self):
        """
        C{fp.descendant([a, b, c])} returns the same L{FilePath} as is returned
        by C{fp.child(a).child(b).child(c)}.
        """
        multiple = self.path.descendant(['a', 'b', 'c'])
        single = self.path.child('a').child('b').child('c')
        self.assertEquals(multiple, single)

    def test_dictionaryKeys(self):
        """
        Verify that path instances are usable as dictionary keys.
        """
        f1 = self.path.child('file1')
        f1prime = self.path.child('file1')
        f2 = self.path.child('file2')
        dictoid = {}
        dictoid[f1] = 3
        dictoid[f1prime] = 4
        # f1 and f1prime are equal, so the second assignment overwrites the
        # first; the surviving key object is the one inserted last.
        self.assertEquals(dictoid[f1], 4)
        self.assertEquals(dictoid.keys(), [f1])
        self.assertIdentical(dictoid.keys()[0], f1)
        self.assertNotIdentical(dictoid.keys()[0], f1prime) # sanity check
        dictoid[f2] = 5
        self.assertEquals(dictoid[f2], 5)
        self.assertEquals(len(dictoid), 2)

    def test_dictionaryKeyWithString(self):
        """
        Verify that path instances are usable as dictionary keys which do not clash
        with their string counterparts.
        """
        f1 = self.path.child('file1')
        dictoid = {f1: 'hello'}
        dictoid[f1.path] = 'goodbye'
        self.assertEquals(len(dictoid), 2)

    def test_childrenNonexistentError(self):
        """
        Verify that children raises the appropriate exception for non-existent
        directories.
        """
        self.assertRaises(filepath.UnlistableError,
                          self.path.child('not real').children)

    def test_childrenNotDirectoryError(self):
        """
        Verify that listdir raises the appropriate exception for attempting to list
        a file rather than a directory.
        """
        self.assertRaises(filepath.UnlistableError,
                          self.path.child('file1').children)

    def test_newTimesAreFloats(self):
        """
        Verify that all times returned from the various new time functions are ints
        (and hopefully therefore 'high precision').
        """
        for p in self.path, self.path.child('file1'):
            self.failUnlessEqual(type(p.getAccessTime()), float)
            self.failUnlessEqual(type(p.getModificationTime()), float)
            self.failUnlessEqual(type(p.getStatusChangeTime()), float)

    def test_oldTimesAreInts(self):
        """
        Verify that all times returned from the various time functions are
        integers, for compatibility.
        """
        for p in self.path, self.path.child('file1'):
            self.failUnlessEqual(type(p.getatime()), int)
            self.failUnlessEqual(type(p.getmtime()), int)
            self.failUnlessEqual(type(p.getctime()), int)
class FakeWindowsPath(filepath.FilePath):
    """
    A test version of FilePath which overrides listdir to raise L{WindowsError}.
    """

    def listdir(self):
        """
        @raise WindowsError: always.
        """
        # ERROR_DIRECTORY is the Windows error code raised when a
        # directory listing fails because the name is not a directory.
        raise WindowsError(
            ERROR_DIRECTORY,
            "A directory's validness was called into question")
class ListingCompatibilityTests(unittest.TestCase):
    """
    These tests verify compatibility with legacy behavior of directory listing.
    """

    def test_windowsErrorExcept(self):
        """
        Verify that when a WindowsError is raised from listdir, catching
        WindowsError works.
        """
        fwp = FakeWindowsPath(self.mktemp())
        # children() must raise something catchable both as the new
        # UnlistableError and as the legacy WindowsError.
        self.assertRaises(filepath.UnlistableError, fwp.children)
        self.assertRaises(WindowsError, fwp.children)

    def test_alwaysCatchOSError(self):
        """
        Verify that in the normal case where a directory does not exist, we will
        get an OSError.
        """
        fp = filepath.FilePath(self.mktemp())
        self.assertRaises(OSError, fp.children)

    def test_keepOriginalAttributes(self):
        """
        Verify that the Unlistable exception raised will preserve the attributes of
        the previously-raised exception.
        """
        fp = filepath.FilePath(self.mktemp())
        ose = self.assertRaises(OSError, fp.children)
        # The wrapper exception carries everything the wrapped OSError had,
        # plus an extra 'originalException' attribute; compare the rest.
        d1 = ose.__dict__.keys()
        d1.remove('originalException')
        d2 = ose.originalException.__dict__.keys()
        d1.sort()
        d2.sort()
        self.assertEquals(d1, d2)
def zipit(dirname, zfname):
    """
    Create a zipfile on zfname, containing the contents of dirname'

    Archive member names are the paths relative to C{dirname}.

    @param dirname: path to the directory tree to archive.
    @param zfname: path at which to create the zip file.
    """
    zf = zipfile.ZipFile(zfname, "w")
    # try/finally guarantees the zip file handle is closed (and the archive
    # finalized) even if one of the write() calls below raises.
    try:
        for root, ignored, files in os.walk(dirname):
            for fname in files:
                fspath = os.path.join(root, fname)
                # Strip the dirname prefix (and its trailing separator) to
                # get the in-archive name.
                arcpath = fspath[len(dirname) + 1:]
                # print fspath, '=>', arcpath
                zf.write(fspath, arcpath)
    finally:
        zf.close()
class ZipFilePathTestCase(AbstractFilePathTestCase):
    """
    Test various L{ZipPath} path manipulations as well as reprs for L{ZipPath}
    and L{ZipArchive}.
    """

    def setUp(self):
        """Build the on-disk fixture, then zip it and re-point self.path at
        the archive so the inherited tests run against ZipPath objects."""
        AbstractFilePathTestCase.setUp(self)
        zipit(self.cmn, self.cmn + '.zip')
        self.path = ZipArchive(self.cmn + '.zip')
        self.root = self.path
        self.all = [x.replace(self.cmn, self.cmn + '.zip') for x in self.all]

    def test_zipPathRepr(self):
        """
        Make sure that invoking ZipPath's repr prints the correct class name
        and an absolute path to the zip file.
        """
        child = self.path.child("foo")
        pathRepr = "ZipPath(%r)" % (
            os.path.abspath(self.cmn + ".zip" + os.sep + 'foo'),)
        # Check for an absolute path
        self.assertEquals(repr(child), pathRepr)
        # Create a path to the file rooted in the current working directory
        relativeCommon = self.cmn.replace(os.getcwd() + os.sep, "", 1) + ".zip"
        relpath = ZipArchive(relativeCommon)
        child = relpath.child("foo")
        # Check using a path without the cwd prepended
        self.assertEquals(repr(child), pathRepr)

    def test_zipPathReprParentDirSegment(self):
        """
        The repr of a ZipPath with C{".."} in the internal part of its path
        includes the C{".."} rather than applying the usual parent directory
        meaning.
        """
        child = self.path.child("foo").child("..").child("bar")
        pathRepr = "ZipPath(%r)" % (
            self.cmn + ".zip" + os.sep.join(["", "foo", "..", "bar"]))
        self.assertEquals(repr(child), pathRepr)

    def test_zipPathReprEscaping(self):
        """
        Bytes in the ZipPath path which have special meaning in Python
        string literals are escaped in the ZipPath repr.
        """
        child = self.path.child("'")
        path = self.cmn + ".zip" + os.sep.join(["", "'"])
        # NOTE: the 'string-escape' codec exists only on Python 2.
        pathRepr = "ZipPath('%s')" % (path.encode('string-escape'),)
        self.assertEquals(repr(child), pathRepr)

    def test_zipArchiveRepr(self):
        """
        Make sure that invoking ZipArchive's repr prints the correct class
        name and an absolute path to the zip file.
        """
        pathRepr = 'ZipArchive(%r)' % (os.path.abspath(self.cmn + '.zip'),)
        # Check for an absolute path
        self.assertEquals(repr(self.path), pathRepr)
        # Create a path to the file rooted in the current working directory
        relativeCommon = self.cmn.replace(os.getcwd() + os.sep, "", 1) + ".zip"
        relpath = ZipArchive(relativeCommon)
        # Check using a path without the cwd prepended
        self.assertEquals(repr(relpath), pathRepr)
class ExplodingFile:
    """
    A C{file}-alike whose I/O methods always fail, while recording whether
    it has been closed.

    @ivar closed: C{False} until C{close} is called, then C{True}.
    """
    closed = False

    def read(self, n=0):
        """
        Fail unconditionally.

        @raise IOError: Always raised.
        """
        raise IOError()

    def write(self, what):
        """
        Fail unconditionally.

        @raise IOError: Always raised.
        """
        raise IOError()

    def close(self):
        """
        Record that the file has been closed.
        """
        self.closed = True
class TrackingFilePath(filepath.FilePath):
    """
    A subclass of L{filepath.FilePath} which maintains a list of all other paths
    created by clonePath.

    @ivar trackingList: A list of all paths created by this path via
        C{clonePath} (which also includes paths created by methods like
        C{parent}, C{sibling}, C{child}, etc (and all paths subsequently created
        by those paths, etc).
    @type trackingList: C{list} of L{TrackingFilePath}

    @ivar openedFiles: A list of all file objects opened by this
        L{TrackingFilePath} or any other L{TrackingFilePath} in C{trackingList}.
    @type openedFiles: C{list} of C{file}
    """

    def __init__(self, path, alwaysCreate=False, trackingList=None):
        filepath.FilePath.__init__(self, path, alwaysCreate)
        if trackingList is None:
            trackingList = []
        self.trackingList = trackingList
        self.openedFiles = []

    def open(self, *a, **k):
        """
        Override 'open' to track all files opened by this path.
        """
        f = filepath.FilePath.open(self, *a, **k)
        self.openedFiles.append(f)
        return f

    def openedPaths(self):
        """
        Return a list of all L{TrackingFilePath}s associated with this
        L{TrackingFilePath} that have had their C{open()} method called.
        """
        return [path for path in self.trackingList if path.openedFiles]

    def clonePath(self, name):
        """
        Override L{filepath.FilePath.clonePath} to give the new path a reference
        to the same tracking list.
        """
        # All clones share one trackingList, so any member can enumerate
        # every path in the family.
        clone = TrackingFilePath(name, trackingList=self.trackingList)
        self.trackingList.append(clone)
        return clone
class ExplodingFilePath(filepath.FilePath):
    """
    A specialized L{FilePath} which always returns an instance of
    L{ExplodingFile} from its C{open} method.

    @ivar fp: The L{ExplodingFile} instance most recently returned from the
        C{open} method.
    """

    def __init__(self, pathName, originalExploder=None):
        """
        Initialize an L{ExplodingFilePath} with a name and a reference to the
        L{ExplodingFilePath} whose C{fp} attribute should record opened files.

        @param pathName: The path name as passed to L{filepath.FilePath}.
        @type pathName: C{str}

        @param originalExploder: The L{ExplodingFilePath} to associate opened
            files with; defaults to this instance itself.
        @type originalExploder: L{ExplodingFilePath}
        """
        filepath.FilePath.__init__(self, pathName)
        if originalExploder is None:
            originalExploder = self
        self._originalExploder = originalExploder

    def open(self, mode=None):
        """
        Create, save, and return a new C{ExplodingFile}.

        @param mode: Present for signature compatibility.  Ignored.

        @return: A new C{ExplodingFile}.
        """
        # The file is recorded on the *original* exploder so clones all
        # report through a single instance's C{fp}.
        f = self._originalExploder.fp = ExplodingFile()
        return f

    def clonePath(self, name):
        """Create a clone sharing this path's original exploder."""
        return ExplodingFilePath(name, self._originalExploder)
class FilePathTestCase(AbstractFilePathTestCase):
"""
Test various L{FilePath} path manipulations.
"""
    def test_chmod(self):
        """
        Make sure that calling L{FilePath.chmod} modifies the permissions of
        the passed file as expected (using C{os.stat} to check). We use some
        basic modes that should work everywhere (even on Windows).
        """
        # 0555/0777 are Python 2 octal literals (Python 3 spells them 0o555).
        for mode in (0555, 0777):
            self.path.child("sub1").chmod(mode)
            self.assertEquals(
                stat.S_IMODE(os.stat(self.path.child("sub1").path).st_mode),
                mode)
def symlink(self, target, name):
"""
Create a symbolic link named C{name} pointing at C{target}.
@type target: C{str}
@type name: C{str}
@raise SkipTest: raised if symbolic links are not supported on the
host platform.
"""
if getattr(os, 'symlink', None) is None:
raise unittest.SkipTest(
"Platform does not support symbolic links.")
os.symlink(target, name)
def createLinks(self):
"""
Create several symbolic links to files and directories.
"""
subdir = self.path.child("sub1")
self.symlink(subdir.path, self._mkpath("sub1.link"))
self.symlink(subdir.child("file2").path, self._mkpath("file2.link"))
self.symlink(subdir.child("file2").path,
self._mkpath("sub1", "sub1.file2.link"))
def test_realpathSymlink(self):
"""
L{FilePath.realpath} returns the path of the ultimate target of a
symlink.
"""
self.createLinks()
self.symlink(self.path.child("file2.link").path,
self.path.child("link.link").path)
self.assertEquals(self.path.child("link.link").realpath(),
self.path.child("sub1").child("file2"))
def test_realpathCyclicalSymlink(self):
"""
L{FilePath.realpath} raises L{filepath.LinkError} if the path is a
symbolic link which is part of a cycle.
"""
self.symlink(self.path.child("link1").path, self.path.child("link2").path)
self.symlink(self.path.child("link2").path, self.path.child("link1").path)
self.assertRaises(filepath.LinkError,
self.path.child("link2").realpath)
def test_realpathNoSymlink(self):
"""
L{FilePath.realpath} returns the path itself if the path is not a
symbolic link.
"""
self.assertEquals(self.path.child("sub1").realpath(),
self.path.child("sub1"))
def test_walkCyclicalSymlink(self):
"""
Verify that walking a path with a cyclical symlink raises an error
"""
self.createLinks()
self.symlink(self.path.child("sub1").path,
self.path.child("sub1").child("sub1.loopylink").path)
def iterateOverPath():
return [foo.path for foo in self.path.walk()]
self.assertRaises(filepath.LinkError, iterateOverPath)
def test_walkObeysDescendWithCyclicalSymlinks(self):
"""
Verify that, after making a path with cyclical symlinks, when the
supplied C{descend} predicate returns C{False}, the target is not
traversed, as if it was a simple symlink.
"""
self.createLinks()
# we create cyclical symlinks
self.symlink(self.path.child("sub1").path,
self.path.child("sub1").child("sub1.loopylink").path)
def noSymLinks(path):
return not path.islink()
def iterateOverPath():
return [foo.path for foo in self.path.walk(descend=noSymLinks)]
self.assertTrue(iterateOverPath())
def test_walkObeysDescend(self):
"""
Verify that when the supplied C{descend} predicate returns C{False},
the target is not traversed.
"""
self.createLinks()
def noSymLinks(path):
return not path.islink()
x = [foo.path for foo in self.path.walk(descend=noSymLinks)]
self.assertEquals(set(x), set(self.all))
def test_getAndSet(self):
content = 'newcontent'
self.path.child('new').setContent(content)
newcontent = self.path.child('new').getContent()
self.failUnlessEqual(content, newcontent)
content = 'content'
self.path.child('new').setContent(content, '.tmp')
newcontent = self.path.child('new').getContent()
self.failUnlessEqual(content, newcontent)
def test_getContentFileClosing(self):
"""
If reading from the underlying file raises an exception,
L{FilePath.getContent} raises that exception after closing the file.
"""
fp = ExplodingFilePath("")
self.assertRaises(IOError, fp.getContent)
self.assertTrue(fp.fp.closed)
def test_setContentFileClosing(self):
"""
If writing to the underlying file raises an exception,
L{FilePath.setContent} raises that exception after closing the file.
"""
fp = ExplodingFilePath("")
self.assertRaises(IOError, fp.setContent, "blah")
self.assertTrue(fp.fp.closed)
def test_setContentNameCollision(self):
"""
L{FilePath.setContent} will use a different temporary filename on each
invocation, so that multiple processes, threads, or reentrant
invocations will not collide with each other.
"""
fp = TrackingFilePath(self.mktemp())
fp.setContent("alpha")
fp.setContent("beta")
# Sanity check: setContent should only open one derivative path each
# time to store the temporary file.
openedSiblings = fp.openedPaths()
self.assertEquals(len(openedSiblings), 2)
self.assertNotEquals(openedSiblings[0], openedSiblings[1])
def test_setContentExtension(self):
"""
L{FilePath.setContent} creates temporary files with a user-supplied
extension, so that if it is somehow interrupted while writing them, the
file that it leaves behind will be identifiable.
"""
fp = TrackingFilePath(self.mktemp())
fp.setContent("hello")
opened = fp.openedPaths()
self.assertEquals(len(opened), 1)
self.assertTrue(opened[0].basename().endswith(".new"),
"%s does not end with default '.new' extension" % (
opened[0].basename()))
fp.setContent("goodbye", "-something-else")
opened = fp.openedPaths()
self.assertEquals(len(opened), 2)
self.assertTrue(opened[1].basename().endswith("-something-else"),
"%s does not end with -something-else extension" % (
opened[1].basename()))
def test_symbolicLink(self):
"""
Verify the behavior of the C{isLink} method against links and
non-links. Also check that the symbolic link shares the directory
property with its target.
"""
s4 = self.path.child("sub4")
s3 = self.path.child("sub3")
self.symlink(s3.path, s4.path)
self.assertTrue(s4.islink())
self.assertFalse(s3.islink())
self.assertTrue(s4.isdir())
self.assertTrue(s3.isdir())
def test_linkTo(self):
"""
Verify that symlink creates a valid symlink that is both a link and a
file if its target is a file, or a directory if its target is a
directory.
"""
targetLinks = [
(self.path.child("sub2"), self.path.child("sub2.link")),
(self.path.child("sub2").child("file3.ext1"),
self.path.child("file3.ext1.link"))
]
for target, link in targetLinks:
target.linkTo(link)
self.assertTrue(link.islink(), "This is a link")
self.assertEquals(target.isdir(), link.isdir())
self.assertEquals(target.isfile(), link.isfile())
def test_linkToErrors(self):
"""
Verify C{linkTo} fails in the following case:
- the target is in a directory that doesn't exist
- the target already exists
"""
self.assertRaises(OSError, self.path.child("file1").linkTo,
self.path.child('nosub').child('file1'))
self.assertRaises(OSError, self.path.child("file1").linkTo,
self.path.child('sub1').child('file2'))
if not getattr(os, "symlink", None):
skipMsg = "Your platform does not support symbolic links."
test_symbolicLink.skip = skipMsg
test_linkTo.skip = skipMsg
test_linkToErrors.skip = skipMsg
def testMultiExt(self):
f3 = self.path.child('sub3').child('file3')
exts = '.foo','.bar', 'ext1','ext2','ext3'
self.failIf(f3.siblingExtensionSearch(*exts))
f3e = f3.siblingExtension(".foo")
f3e.touch()
self.failIf(not f3.siblingExtensionSearch(*exts).exists())
self.failIf(not f3.siblingExtensionSearch('*').exists())
f3e.remove()
self.failIf(f3.siblingExtensionSearch(*exts))
def testPreauthChild(self):
fp = filepath.FilePath('.')
fp.preauthChild('foo/bar')
self.assertRaises(filepath.InsecurePath, fp.child, '/foo')
def testStatCache(self):
p = self.path.child('stattest')
p.touch()
self.failUnlessEqual(p.getsize(), 0)
self.failUnlessEqual(abs(p.getmtime() - time.time()) // 20, 0)
self.failUnlessEqual(abs(p.getctime() - time.time()) // 20, 0)
self.failUnlessEqual(abs(p.getatime() - time.time()) // 20, 0)
self.failUnlessEqual(p.exists(), True)
self.failUnlessEqual(p.exists(), True)
# OOB removal: FilePath.remove() will automatically restat
os.remove(p.path)
# test caching
self.failUnlessEqual(p.exists(), True)
p.restat(reraise=False)
self.failUnlessEqual(p.exists(), False)
self.failUnlessEqual(p.islink(), False)
self.failUnlessEqual(p.isdir(), False)
self.failUnlessEqual(p.isfile(), False)
def testPersist(self):
newpath = pickle.loads(pickle.dumps(self.path))
self.failUnlessEqual(self.path.__class__, newpath.__class__)
self.failUnlessEqual(self.path.path, newpath.path)
def testInsecureUNIX(self):
self.assertRaises(filepath.InsecurePath, self.path.child, "..")
self.assertRaises(filepath.InsecurePath, self.path.child, "/etc")
self.assertRaises(filepath.InsecurePath, self.path.child, "../..")
def testInsecureWin32(self):
self.assertRaises(filepath.InsecurePath, self.path.child, r"..\..")
self.assertRaises(filepath.InsecurePath, self.path.child, r"C:randomfile")
if platform.getType() != 'win32':
testInsecureWin32.skip = "Consider yourself lucky."
def testInsecureWin32Whacky(self):
"""Windows has 'special' filenames like NUL and CON and COM1 and LPR
and PRN and ... god knows what else. They can be located anywhere in
the filesystem. For obvious reasons, we do not wish to normally permit
access to these.
"""
self.assertRaises(filepath.InsecurePath, self.path.child, "CON")
self.assertRaises(filepath.InsecurePath, self.path.child, "C:CON")
self.assertRaises(filepath.InsecurePath, self.path.child, r"C:\CON")
if platform.getType() != 'win32':
testInsecureWin32Whacky.skip = "Consider yourself lucky."
def testComparison(self):
self.assertEquals(filepath.FilePath('a'),
filepath.FilePath('a'))
self.failUnless(filepath.FilePath('z') >
filepath.FilePath('a'))
self.failUnless(filepath.FilePath('z') >=
filepath.FilePath('a'))
self.failUnless(filepath.FilePath('a') >=
filepath.FilePath('a'))
self.failUnless(filepath.FilePath('a') <=
filepath.FilePath('a'))
self.failUnless(filepath.FilePath('a') <
filepath.FilePath('z'))
self.failUnless(filepath.FilePath('a') <=
filepath.FilePath('z'))
self.failUnless(filepath.FilePath('a') !=
filepath.FilePath('z'))
self.failUnless(filepath.FilePath('z') !=
filepath.FilePath('a'))
self.failIf(filepath.FilePath('z') !=
filepath.FilePath('z'))
def test_descendantOnly(self):
"""
If C{".."} is in the sequence passed to L{FilePath.descendant},
L{InsecurePath} is raised.
"""
self.assertRaises(
filepath.InsecurePath, self.path.descendant, ['a', '..'])
def testSibling(self):
p = self.path.child('sibling_start')
ts = p.sibling('sibling_test')
self.assertEquals(ts.dirname(), p.dirname())
self.assertEquals(ts.basename(), 'sibling_test')
ts.createDirectory()
self.assertIn(ts, self.path.children())
def testTemporarySibling(self):
ts = self.path.temporarySibling()
self.assertEquals(ts.dirname(), self.path.dirname())
self.assertNotIn(ts.basename(), self.path.listdir())
ts.createDirectory()
self.assertIn(ts, self.path.parent().children())
def test_temporarySiblingExtension(self):
"""
If L{FilePath.temporarySibling} is given an extension argument, it will
produce path objects with that extension appended to their names.
"""
testExtension = ".test-extension"
ts = self.path.temporarySibling(testExtension)
self.assertTrue(ts.basename().endswith(testExtension),
"%s does not end with %s" % (
ts.basename(), testExtension))
def test_removeDirectory(self):
"""
L{FilePath.remove} on a L{FilePath} that refers to a directory will
recursively delete its contents.
"""
self.path.remove()
self.failIf(self.path.exists())
def test_removeWithSymlink(self):
"""
For a path which is a symbolic link, L{FilePath.remove} just deletes
the link, not the target.
"""
link = self.path.child("sub1.link")
# setUp creates the sub1 child
self.symlink(self.path.child("sub1").path, link.path)
link.remove()
self.assertFalse(link.exists())
self.assertTrue(self.path.child("sub1").exists())
def test_copyToDirectory(self):
"""
L{FilePath.copyTo} makes a copy of all the contents of the directory
named by that L{FilePath} if it is able to do so.
"""
oldPaths = list(self.path.walk()) # Record initial state
fp = filepath.FilePath(self.mktemp())
self.path.copyTo(fp)
self.path.remove()
fp.copyTo(self.path)
newPaths = list(self.path.walk()) # Record double-copy state
newPaths.sort()
oldPaths.sort()
self.assertEquals(newPaths, oldPaths)
def test_copyToMissingDestFileClosing(self):
"""
If an exception is raised while L{FilePath.copyTo} is trying to open
source file to read from, the destination file is closed and the
exception is raised to the caller of L{FilePath.copyTo}.
"""
nosuch = self.path.child("nothere")
# Make it look like something to copy, even though it doesn't exist.
# This could happen if the file is deleted between the isfile check and
# the file actually being opened.
nosuch.isfile = lambda: True
# We won't get as far as writing to this file, but it's still useful for
# tracking whether we closed it.
destination = ExplodingFilePath(self.mktemp())
self.assertRaises(IOError, nosuch.copyTo, destination)
self.assertTrue(destination.fp.closed)
def test_copyToFileClosing(self):
"""
If an exception is raised while L{FilePath.copyTo} is copying bytes
between two regular files, the source and destination files are closed
and the exception propagates to the caller of L{FilePath.copyTo}.
"""
destination = ExplodingFilePath(self.mktemp())
source = ExplodingFilePath(__file__)
self.assertRaises(IOError, source.copyTo, destination)
self.assertTrue(source.fp.closed)
self.assertTrue(destination.fp.closed)
def test_copyToDirectoryItself(self):
"""
L{FilePath.copyTo} fails with an OSError or IOError (depending on
platform, as it propagates errors from open() and write()) when
attempting to copy a directory to a child of itself.
"""
self.assertRaises((OSError, IOError),
self.path.copyTo, self.path.child('file1'))
def test_copyToWithSymlink(self):
"""
Verify that copying with followLinks=True copies symlink targets
instead of symlinks
"""
self.symlink(self.path.child("sub1").path,
self.path.child("link1").path)
fp = filepath.FilePath(self.mktemp())
self.path.copyTo(fp)
self.assertFalse(fp.child("link1").islink())
self.assertEquals([x.basename() for x in fp.child("sub1").children()],
[x.basename() for x in fp.child("link1").children()])
def test_copyToWithoutSymlink(self):
"""
Verify that copying with followLinks=False copies symlinks as symlinks
"""
self.symlink("sub1", self.path.child("link1").path)
fp = filepath.FilePath(self.mktemp())
self.path.copyTo(fp, followLinks=False)
self.assertTrue(fp.child("link1").islink())
self.assertEquals(os.readlink(self.path.child("link1").path),
os.readlink(fp.child("link1").path))
def test_moveTo(self):
"""
Verify that moving an entire directory results into another directory
with the same content.
"""
oldPaths = list(self.path.walk()) # Record initial state
fp = filepath.FilePath(self.mktemp())
self.path.moveTo(fp)
fp.moveTo(self.path)
newPaths = list(self.path.walk()) # Record double-move state
newPaths.sort()
oldPaths.sort()
self.assertEquals(newPaths, oldPaths)
def test_moveToExistsCache(self):
"""
A L{FilePath} that has been moved aside with L{FilePath.moveTo} no
longer registers as existing. Its previously non-existent target
exists, though, as it was created by the call to C{moveTo}.
"""
fp = filepath.FilePath(self.mktemp())
fp2 = filepath.FilePath(self.mktemp())
fp.touch()
# Both a sanity check (make sure the file status looks right) and an
# enticement for stat-caching logic to kick in and remember that these
# exist / don't exist.
self.assertEquals(fp.exists(), True)
self.assertEquals(fp2.exists(), False)
fp.moveTo(fp2)
self.assertEqual(fp.exists(), False)
self.assertEqual(fp2.exists(), True)
def test_moveToExistsCacheCrossMount(self):
"""
The assertion of test_moveToExistsCache should hold in the case of a
cross-mount move.
"""
self.setUpFaultyRename()
self.test_moveToExistsCache()
def test_moveToSizeCache(self, hook=lambda : None):
"""
L{FilePath.moveTo} clears its destination's status cache, such that
calls to L{FilePath.getsize} after the call to C{moveTo} will report the
new size, not the old one.
This is a separate test from C{test_moveToExistsCache} because it is
intended to cover the fact that the destination's cache is dropped;
test_moveToExistsCache doesn't cover this case because (currently) a
file that doesn't exist yet does not cache the fact of its non-
existence.
"""
fp = filepath.FilePath(self.mktemp())
fp2 = filepath.FilePath(self.mktemp())
fp.setContent("1234")
fp2.setContent("1234567890")
hook()
# Sanity check / kick off caching.
self.assertEqual(fp.getsize(), 4)
self.assertEqual(fp2.getsize(), 10)
# Actually attempting to replace a file on Windows would fail with
# ERROR_ALREADY_EXISTS, but we don't need to test that, just the cached
# metadata, so, delete the file ...
os.remove(fp2.path)
# ... but don't clear the status cache, as fp2.remove() would.
self.assertEqual(fp2.getsize(), 10)
fp.moveTo(fp2)
self.assertEqual(fp2.getsize(), 4)
def test_moveToSizeCacheCrossMount(self):
"""
The assertion of test_moveToSizeCache should hold in the case of a
cross-mount move.
"""
self.test_moveToSizeCache(hook=self.setUpFaultyRename)
def test_moveToError(self):
"""
Verify error behavior of moveTo: it should raises one of OSError or
IOError if you want to move a path into one of its child. It's simply
the error raised by the underlying rename system call.
"""
self.assertRaises((OSError, IOError), self.path.moveTo, self.path.child('file1'))
def setUpFaultyRename(self):
"""
Set up a C{os.rename} that will fail with L{errno.EXDEV} on first call.
This is used to simulate a cross-device rename failure.
@return: a list of pair (src, dest) of calls to C{os.rename}
@rtype: C{list} of C{tuple}
"""
invokedWith = []
def faultyRename(src, dest):
invokedWith.append((src, dest))
if len(invokedWith) == 1:
raise OSError(errno.EXDEV, 'Test-induced failure simulating '
'cross-device rename failure')
return originalRename(src, dest)
originalRename = os.rename
self.patch(os, "rename", faultyRename)
return invokedWith
def test_crossMountMoveTo(self):
"""
C{moveTo} should be able to handle C{EXDEV} error raised by
C{os.rename} when trying to move a file on a different mounted
filesystem.
"""
invokedWith = self.setUpFaultyRename()
# Bit of a whitebox test - force os.rename, which moveTo tries
# before falling back to a slower method, to fail, forcing moveTo to
# use the slower behavior.
self.test_moveTo()
# A bit of a sanity check for this whitebox test - if our rename
# was never invoked, the test has probably fallen into disrepair!
self.assertTrue(invokedWith)
def test_crossMountMoveToWithSymlink(self):
"""
By default, when moving a symlink, it should follow the link and
actually copy the content of the linked node.
"""
invokedWith = self.setUpFaultyRename()
f2 = self.path.child('file2')
f3 = self.path.child('file3')
self.symlink(self.path.child('file1').path, f2.path)
f2.moveTo(f3)
self.assertFalse(f3.islink())
self.assertEquals(f3.getContent(), 'file 1')
self.assertTrue(invokedWith)
def test_crossMountMoveToWithoutSymlink(self):
"""
Verify that moveTo called with followLinks=False actually create
another symlink.
"""
invokedWith = self.setUpFaultyRename()
f2 = self.path.child('file2')
f3 = self.path.child('file3')
self.symlink(self.path.child('file1').path, f2.path)
f2.moveTo(f3, followLinks=False)
self.assertTrue(f3.islink())
self.assertEquals(f3.getContent(), 'file 1')
self.assertTrue(invokedWith)
def test_createBinaryMode(self):
"""
L{FilePath.create} should always open (and write to) files in binary
mode; line-feed octets should be unmodified.
(While this test should pass on all platforms, it is only really
interesting on platforms which have the concept of binary mode, i.e.
Windows platforms.)
"""
path = filepath.FilePath(self.mktemp())
f = path.create()
self.failUnless("b" in f.mode)
f.write("\n")
f.close()
read = open(path.path, "rb").read()
self.assertEqual(read, "\n")
def testOpen(self):
# Opening a file for reading when it does not already exist is an error
nonexistent = self.path.child('nonexistent')
e = self.assertRaises(IOError, nonexistent.open)
self.assertEquals(e.errno, errno.ENOENT)
# Opening a file for writing when it does not exist is okay
writer = self.path.child('writer')
f = writer.open('w')
f.write('abc\ndef')
f.close()
# Make sure those bytes ended up there - and test opening a file for
# reading when it does exist at the same time
f = writer.open()
self.assertEquals(f.read(), 'abc\ndef')
f.close()
# Re-opening that file in write mode should erase whatever was there.
f = writer.open('w')
f.close()
f = writer.open()
self.assertEquals(f.read(), '')
f.close()
# Put some bytes in a file so we can test that appending does not
# destroy them.
appender = self.path.child('appender')
f = appender.open('w')
f.write('abc')
f.close()
f = appender.open('a')
f.write('def')
f.close()
f = appender.open('r')
self.assertEquals(f.read(), 'abcdef')
f.close()
# read/write should let us do both without erasing those bytes
f = appender.open('r+')
self.assertEquals(f.read(), 'abcdef')
# ANSI C *requires* an fseek or an fgetpos between an fread and an
# fwrite or an fwrite and a fread. We can't reliable get Python to
# invoke fgetpos, so we seek to a 0 byte offset from the current
# position instead. Also, Python sucks for making this seek
# relative to 1 instead of a symbolic constant representing the
# current file position.
f.seek(0, 1)
# Put in some new bytes for us to test for later.
f.write('ghi')
f.close()
# Make sure those new bytes really showed up
f = appender.open('r')
self.assertEquals(f.read(), 'abcdefghi')
f.close()
# write/read should let us do both, but erase anything that's there
# already.
f = appender.open('w+')
self.assertEquals(f.read(), '')
f.seek(0, 1) # Don't forget this!
f.write('123')
f.close()
# super append mode should let us read and write and also position the
# cursor at the end of the file, without erasing everything.
f = appender.open('a+')
# The order of these lines may seem surprising, but it is necessary.
# The cursor is not at the end of the file until after the first write.
f.write('456')
f.seek(0, 1) # Asinine.
self.assertEquals(f.read(), '')
f.seek(0, 0)
self.assertEquals(f.read(), '123456')
f.close()
# Opening a file exclusively must fail if that file exists already.
nonexistent.requireCreate(True)
nonexistent.open('w').close()
existent = nonexistent
del nonexistent
self.assertRaises((OSError, IOError), existent.open)
def test_openWithExplicitBinaryMode(self):
"""
Due to a bug in Python 2.7 on Windows including multiple 'b'
characters in the mode passed to the built-in open() will cause an
error. FilePath.open() ensures that only a single 'b' character is
included in the mode passed to the built-in open().
See http://bugs.python.org/issue7686 for details about the bug.
"""
writer = self.path.child('explicit-binary')
file = writer.open('wb')
file.write('abc\ndef')
file.close()
self.assertTrue(writer.exists)
def test_openWithRedundantExplicitBinaryModes(self):
"""
Due to a bug in Python 2.7 on Windows including multiple 'b'
characters in the mode passed to the built-in open() will cause an
error. No matter how many 'b' modes are specified, FilePath.open()
ensures that only a single 'b' character is included in the mode
passed to the built-in open().
See http://bugs.python.org/issue7686 for details about the bug.
"""
writer = self.path.child('multiple-binary')
file = writer.open('wbb')
file.write('abc\ndef')
file.close()
self.assertTrue(writer.exists)
def test_existsCache(self):
"""
Check that C{filepath.FilePath.exists} correctly restat the object if
an operation has occurred in the mean time.
"""
fp = filepath.FilePath(self.mktemp())
self.assertEquals(fp.exists(), False)
fp.makedirs()
self.assertEquals(fp.exists(), True)
def test_changed(self):
"""
L{FilePath.changed} indicates that the L{FilePath} has changed, but does
not re-read the status information from the filesystem until it is
queried again via another method, such as C{getsize}.
"""
fp = filepath.FilePath(self.mktemp())
fp.setContent("12345")
self.assertEquals(fp.getsize(), 5)
# Someone else comes along and changes the file.
fObj = open(fp.path, 'wb')
fObj.write("12345678")
fObj.close()
# Sanity check for caching: size should still be 5.
self.assertEquals(fp.getsize(), 5)
fp.changed()
# This path should look like we don't know what status it's in, not that
# we know that it didn't exist when last we checked.
self.assertEqual(fp.statinfo, None)
self.assertEquals(fp.getsize(), 8)
from twisted.python import urlpath
class URLPathTestCase(unittest.TestCase):
def setUp(self):
self.path = urlpath.URLPath.fromString("http://example.com/foo/bar?yes=no&no=yes#footer")
def testStringConversion(self):
self.assertEquals(str(self.path), "http://example.com/foo/bar?yes=no&no=yes#footer")
def testChildString(self):
self.assertEquals(str(self.path.child('hello')), "http://example.com/foo/bar/hello")
self.assertEquals(str(self.path.child('hello').child('')), "http://example.com/foo/bar/hello/")
def testSiblingString(self):
self.assertEquals(str(self.path.sibling('baz')), 'http://example.com/foo/baz')
# The sibling of http://example.com/foo/bar/
# is http://example.comf/foo/bar/baz
# because really we are constructing a sibling of
# http://example.com/foo/bar/index.html
self.assertEquals(str(self.path.child('').sibling('baz')), 'http://example.com/foo/bar/baz')
def testParentString(self):
# parent should be equivalent to '..'
# 'foo' is the current directory, '/' is the parent directory
self.assertEquals(str(self.path.parent()), 'http://example.com/')
self.assertEquals(str(self.path.child('').parent()), 'http://example.com/foo/')
self.assertEquals(str(self.path.child('baz').parent()), 'http://example.com/foo/')
self.assertEquals(str(self.path.parent().parent().parent().parent().parent()), 'http://example.com/')
def testHereString(self):
# here should be equivalent to '.'
self.assertEquals(str(self.path.here()), 'http://example.com/foo/')
self.assertEquals(str(self.path.child('').here()), 'http://example.com/foo/bar/')
| apache-2.0 |
Elektropippo/kernel_852i | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
lmazuel/ansible | lib/ansible/modules/source_control/github_issue.py | 41 | 3583 | #!/usr/bin/python
# (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: github_issue
short_description: View GitHub issue.
description:
- View GitHub issue for a given repository.
version_added: "2.4"
options:
repo:
description:
- Name of repository from which issue needs to be retrieved.
required: true
default: none
organization:
description:
- Name of the GitHub organization in which the repository is hosted.
required: true
default: none
issue:
description:
- Issue number for which information is required.
default: none
required: true
action:
description:
- Get various details about issue depending upon action specified.
default: 'get_status'
required: false
choices:
- ['get_status']
author:
- Abhijeet Kasurde (@akasurde)
requirements:
- "github3.py >= 1.0.0a4"
'''
RETURN = '''
get_status:
description: State of the GitHub issue
type: string
returned: success
sample: open, closed
'''
EXAMPLES = '''
- name: Check if GitHub issue is closed or not
github_issue:
organization: ansible
repo: ansible
issue: 23642
action: get_status
register: r
- name: Take action depending upon issue status
debug:
msg: Do something when issue 23642 is open
when: r.issue_status == 'open'
'''
try:
import github3
HAS_GITHUB_PACKAGE = True
except ImportError:
HAS_GITHUB_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
organization=dict(required=True),
repo=dict(required=True),
issue=dict(required=True),
action=dict(required=False, choices=['get_status']),
),
supports_check_mode=True,
)
if not HAS_GITHUB_PACKAGE:
module.fail_json(msg="Missing required github3 module. (check docs or "
"install with: pip install github3.py==1.0.0a4)")
organization = module.params['organization']
repo = module.params['repo']
issue = module.params['issue']
action = module.params['action']
result = dict()
gh_obj = github3.issue(organization, repo, issue)
if isinstance(gh_obj, github3.null.NullObject):
module.fail_json(msg="Failed to get details about issue specified. "
"Please check organization, repo and issue "
"details and try again.")
if action == 'get_status' or action is None:
if module.check_mode:
result.update(changed=True)
else:
result.update(changed=True, issue_status=gh_obj.state)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
p0psicles/SickGear | lib/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
    """Drop start/end tags that the HTML5 spec marks as optional.

    Operates on the html5lib token stream: a token is suppressed when the
    HTML5 "optional tags" serialization rules allow its omission in the
    current context; all other tokens pass through unchanged.
    """

    def slider(self):
        """Yield (previous, current, next) triples over the token stream.

        The final triple has ``None`` as the next token.
        """
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        yield previous2, previous1, None

    def __iter__(self):
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # A start tag with attributes can never be omitted.
                if (token["data"] or
                        not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token

    def is_optional_start(self, tagname, previous, next):
        """Return True if *tagname*'s start tag may be omitted here."""
        type = next and next["type"] or None
        # Fix: the original tested ``tagname in 'html'``, a substring check
        # that also matched e.g. 'h', 'l' or 'ht'; the spec rule applies
        # only to the html element itself.
        if tagname == 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceeded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceeded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                        previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False

    def is_optional_end(self, tagname, next):
        """Return True if *tagname*'s end tag may be omitted here."""
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li element's end tag may be omitted if the li element is
            # immediately followed by another li element or if there is
            # no more content in the parent element.
            # An optgroup element's end tag may be omitted if the optgroup
            # element is immediately followed by another optgroup element,
            # or if there is no more content in the parent element.
            # A tr element's end tag may be omitted if the tr element is
            # immediately followed by another tr element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by an address, article, aside,
            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
            # nav, ol, p, pre, section, table, or ul, element, or if
            # there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an <code>optgroup</code>
            # element, or if there is no more content in the parent
            # element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt element's end tag may be omitted if the rt element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            # An rp element's end tag may be omitted if the rp element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td element's end tag may be omitted if the td element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            # A th element's end tag may be omitted if the th element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
| gpl-3.0 |
shssoichiro/servo | tests/wpt/harness/wptrunner/update/sync.py | 40 | 6328 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import shutil
import sys
import uuid
from .. import testloader
from base import Step, StepRunner
from tree import Commit
here = os.path.abspath(os.path.split(__file__)[0])
bsd_license = """W3C 3-clause BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of works must retain the original copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the W3C nor the names of its contributors may be
used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
def copy_wpt_tree(tree, dest):
    """Copy the working copy of a Tree to a destination directory.

    :param tree: The Tree to copy.
    :param dest: The destination directory"""
    # Start from a clean destination directory.
    if os.path.exists(dest):
        assert os.path.isdir(dest)
        shutil.rmtree(dest)
    os.mkdir(dest)

    for rel_path in tree.paths():
        source_path = os.path.join(tree.root, rel_path)
        if os.path.isdir(source_path):
            continue
        dest_path = os.path.join(dest, rel_path)
        dest_dir = os.path.dirname(dest_path)
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        shutil.copy2(source_path, dest_path)

    # Harness support files shipped next to this module.
    extra_files = [("testharness_runner.html", ""),
                   ("testharnessreport.js", "resources/")]
    for filename, subdir in extra_files:
        source_path = os.path.join(here, os.pardir, filename)
        dest_path = os.path.join(dest, subdir, os.path.split(filename)[1])
        shutil.copy2(source_path, dest_path)

    add_license(dest)
def add_license(dest):
    """Write the bsd license string to a LICENSE file.

    :param dest: Directory in which to place the LICENSE file."""
    license_path = os.path.join(dest, "LICENSE")
    with open(license_path, "w") as license_file:
        license_file.write(bsd_license)
class UpdateCheckout(Step):
    """Pull changes from upstream into the local sync tree."""

    provides = ["local_branch"]

    def create(self, state):
        # Fetch into a uniquely named local branch so repeated syncs
        # never collide with an earlier branch.
        sync_tree = state.sync_tree
        state.local_branch = uuid.uuid4().hex
        sync_tree.update(state.sync["remote_url"],
                         state.sync["branch"],
                         state.local_branch)
        sync_path = os.path.abspath(sync_tree.root)
        # Idiom fix: use "not in" rather than "not x in y".
        if sync_path not in sys.path:
            from update import setup_paths
            setup_paths(sync_path)

    def restore(self, state):
        # When restoring, the sync tree must already be on sys.path.
        assert os.path.abspath(state.sync_tree.root) in sys.path
        Step.restore(self, state)
class GetSyncTargetCommit(Step):
    """Find the commit that we will sync to."""

    provides = ["sync_commit"]

    def create(self, state):
        if state.target_rev is None:
            # Use upstream branch HEAD as the base commit
            state.sync_commit = state.sync_tree.get_remote_sha1(state.sync["remote_url"],
                                                                state.sync["branch"])
        else:
            # Fix: use the explicitly requested revision. The original read
            # state.rev here even though the guard tests state.target_rev,
            # which looks like a stale attribute name.
            state.sync_commit = Commit(state.sync_tree, state.target_rev)

        state.sync_tree.checkout(state.sync_commit.sha1, state.local_branch, force=True)
        self.logger.debug("New base commit is %s" % state.sync_commit.sha1)
class LoadManifest(Step):
    """Load the test manifest"""

    provides = ["manifest_path", "test_manifest"]

    def create(self, state):
        # Deferred import: the manifest package lives in the sync tree,
        # which is only added to sys.path once the checkout step has run.
        from manifest import manifest
        state.manifest_path = os.path.join(state.metadata_path, "MANIFEST.json")
        # "/" is presumably the url base for manifest entries — confirm
        # against manifest.Manifest's signature.
        state.test_manifest = manifest.Manifest("/")
class UpdateManifest(Step):
    """Update the manifest to match the tests in the sync tree checkout"""

    def create(self, state):
        # Deferred import: 'manifest' only becomes importable after the
        # sync tree has been placed on sys.path.
        from manifest import manifest, update
        update.update(state.sync["path"], state.test_manifest)
        manifest.write(state.test_manifest, state.manifest_path)
class CopyWorkTree(Step):
    """Copy the sync tree over to the destination in the local tree"""

    def create(self, state):
        # copy_wpt_tree removes any existing destination first, so this
        # replaces state.tests_path wholesale with the synced checkout.
        copy_wpt_tree(state.sync_tree,
                      state.tests_path)
class CreateSyncPatch(Step):
    """Add the updated test files to a commit/patch in the local tree."""

    def create(self, state):
        if state.no_patch:
            return

        tree = state.local_tree
        rev = state.sync_tree.rev

        patch_name = "web-platform-tests_update_%s" % rev
        patch_message = "Update %s to revision %s" % (state.suite_name, rev)
        tree.create_patch(patch_name, patch_message)

        tree.add_new(os.path.relpath(state.tests_path, tree.root))
        had_changes = tree.update_patch(include=[state.tests_path,
                                                 state.metadata_path])
        tree.commit_patch()

        if not had_changes:
            self.logger.info("Nothing to sync")
class SyncFromUpstreamRunner(StepRunner):
    """(Sub)Runner for doing an upstream sync"""

    # Steps are executed in order; each step's `provides` attributes are
    # stored on the shared state object for later steps to consume.
    steps = [UpdateCheckout,
             GetSyncTargetCommit,
             LoadManifest,
             UpdateManifest,
             CopyWorkTree,
             CreateSyncPatch]
| mpl-2.0 |
m1093782566/openstack_org_ceilometer | ceilometer/tests/alarm/test_rpc.py | 6 | 9121 | #
# Copyright 2013-2014 eNovance <licensing@enovance.com>
#
# Authors: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from ceilometerclient.v2 import alarms
import eventlet
from oslo.config import fixture as fixture_config
from oslo.utils import timeutils
import six
from ceilometer.alarm import rpc as rpc_alarm
from ceilometer.alarm.storage import models
from ceilometer import messaging
from ceilometer.tests import base as tests_base
class FakeNotifier(object):
    """Test double that records payloads sent to the alarm_notifier topic.

    The embedded RPC server stops itself once the expected number of
    notifications has arrived.
    """

    def __init__(self, transport):
        self.rpc = messaging.get_rpc_server(
            transport, "alarm_notifier", self)
        self.notified = []

    def start(self, expected_length):
        """Begin serving; shut down after *expected_length* notifications."""
        self.expected_length = expected_length
        self.rpc.start()

    def notify_alarm(self, context, data):
        """RPC endpoint: record *data*, stopping the server when done."""
        self.notified.append(data)
        received_all = len(self.notified) == self.expected_length
        if received_all:
            self.rpc.stop()
class TestRPCAlarmNotifier(tests_base.BaseTestCase):
    """Exercise RPCAlarmNotifier end-to-end against a FakeNotifier server."""

    def setUp(self):
        """Wire up messaging, the fake server, and two sample alarms."""
        super(TestRPCAlarmNotifier, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf
        self.setup_messaging(self.CONF)
        self.notifier_server = FakeNotifier(self.transport)
        self.notifier = rpc_alarm.RPCAlarmNotifier()
        self.alarms = [
            alarms.Alarm(None, info={
                'name': 'instance_running_hot',
                'meter_name': 'cpu_util',
                'comparison_operator': 'gt',
                'threshold': 80.0,
                'evaluation_periods': 5,
                'statistic': 'avg',
                'state': 'ok',
                'ok_actions': ['http://host:8080/path'],
                'user_id': 'foobar',
                'project_id': 'snafu',
                'period': 60,
                'alarm_id': str(uuid.uuid4()),
                'matching_metadata': {'resource_id':
                                      'my_instance'}
            }),
            alarms.Alarm(None, info={
                'name': 'group_running_idle',
                'meter_name': 'cpu_util',
                'comparison_operator': 'le',
                'threshold': 10.0,
                'statistic': 'max',
                'evaluation_periods': 4,
                'state': 'insufficient data',
                'insufficient_data_actions': ['http://other_host/path'],
                'user_id': 'foobar',
                'project_id': 'snafu',
                'period': 300,
                'alarm_id': str(uuid.uuid4()),
                'matching_metadata': {'metadata.user_metadata.AS':
                                      'my_group'}
            }),
        ]

    def test_rpc_target(self):
        """The notifier client must publish on the alarm_notifier topic."""
        topic = self.notifier.client.target.topic
        self.assertEqual('alarm_notifier', topic)

    def test_notify_alarm(self):
        """Each notify() call is delivered with all fields intact."""
        self.notifier_server.start(2)
        previous = ['alarm', 'ok']
        for i, a in enumerate(self.alarms):
            self.notifier.notify(a, previous[i], "what? %d" % i,
                                 {'fire': '%d' % i})
        # wait() blocks until FakeNotifier has received both messages.
        self.notifier_server.rpc.wait()
        self.assertEqual(2, len(self.notifier_server.notified))
        for i, a in enumerate(self.alarms):
            actions = getattr(a, models.Alarm.ALARM_ACTIONS_MAP[a.state])
            self.assertEqual(self.alarms[i].alarm_id,
                             self.notifier_server.notified[i]["alarm_id"])
            self.assertEqual(actions,
                             self.notifier_server.notified[i]["actions"])
            self.assertEqual(previous[i],
                             self.notifier_server.notified[i]["previous"])
            self.assertEqual(self.alarms[i].state,
                             self.notifier_server.notified[i]["current"])
            self.assertEqual("what? %d" % i,
                             self.notifier_server.notified[i]["reason"])
            self.assertEqual({'fire': '%d' % i},
                             self.notifier_server.notified[i]["reason_data"])

    def test_notify_non_string_reason(self):
        """A non-string reason is coerced to text before delivery."""
        self.notifier_server.start(1)
        self.notifier.notify(self.alarms[0], 'ok', 42, {})
        self.notifier_server.rpc.wait()
        reason = self.notifier_server.notified[0]['reason']
        self.assertIsInstance(reason, six.string_types)

    def test_notify_no_actions(self):
        """An alarm with no actions for its state produces no notification."""
        alarm = alarms.Alarm(None, info={
            'name': 'instance_running_hot',
            'meter_name': 'cpu_util',
            'comparison_operator': 'gt',
            'threshold': 80.0,
            'evaluation_periods': 5,
            'statistic': 'avg',
            'state': 'ok',
            'user_id': 'foobar',
            'project_id': 'snafu',
            'period': 60,
            'ok_actions': [],
            'alarm_id': str(uuid.uuid4()),
            'matching_metadata': {'resource_id':
                                  'my_instance'}
        })
        self.notifier.notify(alarm, 'alarm', "what?", {})
        self.assertEqual(0, len(self.notifier_server.notified))
class FakeCoordinator(object):
    """Test double recording partition-coordination RPC endpoint calls."""

    def __init__(self, transport):
        self.rpc = messaging.get_rpc_server(
            transport, "alarm_partition_coordination", self)
        self.notified = []

    def presence(self, context, data):
        """RPC endpoint for presence announcements."""
        self._record('presence', data)

    def allocate(self, context, data):
        """RPC endpoint for alarm allocation."""
        self._record('allocate', data)

    def assign(self, context, data):
        """RPC endpoint for alarm assignment."""
        self._record('assign', data)

    def _record(self, method, data):
        """Log a (method, payload) pair and stop the server."""
        entry = (method, data)
        self.notified.append(entry)
        self.rpc.stop()
class TestRPCAlarmPartitionCoordination(tests_base.BaseTestCase):
    """Exercise RPCAlarmPartitionCoordination against a FakeCoordinator."""

    def setUp(self):
        """Start the fake coordination server and build two sample alarms."""
        super(TestRPCAlarmPartitionCoordination, self).setUp()
        self.CONF = self.useFixture(fixture_config.Config()).conf
        self.setup_messaging(self.CONF)
        self.coordinator_server = FakeCoordinator(self.transport)
        self.coordinator_server.rpc.start()
        eventlet.sleep()  # must be sure that fanout queue is created
        self.coordination = rpc_alarm.RPCAlarmPartitionCoordination()
        self.alarms = [
            alarms.Alarm(None, info={
                'name': 'instance_running_hot',
                'meter_name': 'cpu_util',
                'comparison_operator': 'gt',
                'threshold': 80.0,
                'evaluation_periods': 5,
                'statistic': 'avg',
                'state': 'ok',
                'ok_actions': ['http://host:8080/path'],
                'user_id': 'foobar',
                'project_id': 'snafu',
                'period': 60,
                'alarm_id': str(uuid.uuid4()),
                'matching_metadata': {'resource_id':
                                      'my_instance'}
            }),
            alarms.Alarm(None, info={
                'name': 'group_running_idle',
                'meter_name': 'cpu_util',
                'comparison_operator': 'le',
                'threshold': 10.0,
                'statistic': 'max',
                'evaluation_periods': 4,
                'state': 'insufficient data',
                'insufficient_data_actions': ['http://other_host/path'],
                'user_id': 'foobar',
                'project_id': 'snafu',
                'period': 300,
                'alarm_id': str(uuid.uuid4()),
                'matching_metadata': {'metadata.user_metadata.AS':
                                      'my_group'}
            }),
        ]

    def test_coordination_presence(self):
        """presence() forwards uuid and priority to the coordinator."""
        id = str(uuid.uuid4())
        priority = float(timeutils.utcnow().strftime('%s.%f'))
        self.coordination.presence(id, priority)
        self.coordinator_server.rpc.wait()
        method, args = self.coordinator_server.notified[0]
        self.assertEqual(id, args['uuid'])
        self.assertEqual(priority, args['priority'])
        self.assertEqual('presence', method)

    def test_coordination_assign(self):
        """assign() forwards uuid and the full alarm list."""
        id = str(uuid.uuid4())
        self.coordination.assign(id, self.alarms)
        self.coordinator_server.rpc.wait()
        method, args = self.coordinator_server.notified[0]
        self.assertEqual(id, args['uuid'])
        self.assertEqual(2, len(args['alarms']))
        self.assertEqual('assign', method)

    def test_coordination_allocate(self):
        """allocate() forwards uuid and the full alarm list."""
        id = str(uuid.uuid4())
        self.coordination.allocate(id, self.alarms)
        self.coordinator_server.rpc.wait()
        method, args = self.coordinator_server.notified[0]
        self.assertEqual(id, args['uuid'])
        self.assertEqual(2, len(args['alarms']))
        self.assertEqual('allocate', method)
| apache-2.0 |
hryamzik/ansible | test/units/parsing/vault/test_vault_editor.py | 59 | 21955 | # (c) 2014, James Tanner <tanner.jc@gmail.com>
# (c) 2014, James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
import pytest
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible import errors
from ansible.parsing import vault
from ansible.parsing.vault import VaultLib, VaultEditor, match_encrypt_secret
from ansible.module_utils._text import to_bytes, to_text
from units.mock.vault_helper import TextVaultSecret
v10_data = """$ANSIBLE_VAULT;1.0;AES
53616c7465645f5fd0026926a2d415a28a2622116273fbc90e377225c12a347e1daf4456d36a77f9
9ad98d59f61d06a4b66718d855f16fb7bdfe54d1ec8aeaa4d06c2dc1fa630ae1846a029877f0eeb1
83c62ffb04c2512995e815de4b4d29ed"""
v11_data = """$ANSIBLE_VAULT;1.1;AES256
62303130653266653331306264616235333735323636616539316433666463323964623162386137
3961616263373033353631316333623566303532663065310a393036623466376263393961326530
64336561613965383835646464623865663966323464653236343638373165343863623638316664
3631633031323837340a396530313963373030343933616133393566366137363761373930663833
3739"""
@pytest.mark.skipif(not vault.HAS_CRYPTOGRAPHY,
reason="Skipping cryptography tests because cryptography is not installed")
class TestVaultEditor(unittest.TestCase):
def setUp(self):
    """Create the shared vault password and secrets used by every test."""
    self._test_dir = None
    self.vault_password = "test-vault-password"
    vault_secret = TextVaultSecret(self.vault_password)
    # The same secret is registered under two ids so both named and
    # default lookups resolve during the tests.
    self.vault_secrets = [('vault_secret', vault_secret),
                          ('default', vault_secret)]
@property
def vault_secret(self):
    """The secret that match_encrypt_secret would pick for encryption."""
    _vault_id, secret = match_encrypt_secret(self.vault_secrets)
    return secret
def tearDown(self):
    """Remove the per-test temporary directory.

    The original left shutil.rmtree commented out, so every test leaked
    one temp directory; clean up best-effort instead.
    """
    if self._test_dir:
        import shutil
        shutil.rmtree(self._test_dir, ignore_errors=True)
    self._test_dir = None
def _secrets(self, password):
    """Build a single-entry ('default', secret) vault secrets list."""
    vault_secret = TextVaultSecret(password)
    vault_secrets = [('default', vault_secret)]
    return vault_secrets
def test_methods_exist(self):
    """The expected public VaultEditor API surface must be present."""
    v = vault.VaultEditor(None)
    slots = ['create_file',
             'decrypt_file',
             'edit_file',
             'encrypt_file',
             'rekey_file',
             'read_data',
             'write_data']
    for slot in slots:
        # Fix: the failure message previously blamed VaultLib, but the
        # object under test is VaultEditor.
        assert hasattr(v, slot), "VaultEditor is missing the %s method" % slot
def _create_test_dir(self):
    """Make a uniquely-suffixed temporary directory for this test class."""
    return tempfile.mkdtemp(
        suffix='_ansible_unit_test_%s_' % self.__class__.__name__)
def _create_file(self, test_dir, name, content=None, symlink=False):
    """Create *name* under *test_dir*, optionally writing bytes *content*.

    Returns the path of the new file. (``symlink`` is accepted for
    interface compatibility but unused, as in the original.)
    """
    file_path = os.path.join(test_dir, name)
    # 'with' guarantees the handle is closed even if write() raises;
    # the original leaked the handle on error.
    with open(file_path, 'wb') as opened_file:
        if content:
            opened_file.write(content)
    return file_path
def _vault_editor(self, vault_secrets=None):
    """Build a VaultEditor, defaulting to this class's test password."""
    if vault_secrets is None:
        vault_secrets = self._secrets(self.vault_password)
    return VaultEditor(VaultLib(vault_secrets))
@patch('ansible.parsing.vault.subprocess.call')
def test_edit_file_helper_empty_target(self, mock_sp_call):
    """_edit_file_helper must encrypt even when the editor changes nothing."""
    self._test_dir = self._create_test_dir()
    src_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)

    # Editor stub that leaves the tmp file untouched.
    mock_sp_call.side_effect = self._faux_command
    ve = self._vault_editor()
    b_ciphertext = ve._edit_file_helper(src_file_path, self.vault_secret)

    self.assertNotEqual(src_contents, b_ciphertext)
@patch('ansible.parsing.vault.subprocess.call')
def test_edit_file_helper_call_exception(self, mock_sp_call):
    """Errors raised while invoking the editor must propagate unchanged."""
    self._test_dir = self._create_test_dir()
    src_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)

    error_txt = 'calling editor raised an exception'
    mock_sp_call.side_effect = errors.AnsibleError(error_txt)

    ve = self._vault_editor()

    self.assertRaisesRegexp(errors.AnsibleError,
                            error_txt,
                            ve._edit_file_helper,
                            src_file_path,
                            self.vault_secret)
@patch('ansible.parsing.vault.subprocess.call')
def test_edit_file_helper_symlink_target(self, mock_sp_call):
    """Editing through a symlink must still produce encrypted output."""
    self._test_dir = self._create_test_dir()
    src_file_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)

    src_file_link_path = os.path.join(self._test_dir, 'a_link_to_dest_file')
    os.symlink(src_file_path, src_file_link_path)

    mock_sp_call.side_effect = self._faux_command
    ve = self._vault_editor()

    b_ciphertext = ve._edit_file_helper(src_file_link_path, self.vault_secret)

    self.assertNotEqual(src_file_contents, b_ciphertext,
                        'b_ciphertext should be encrypted and not equal to src_contents')
def _faux_editor(self, editor_args, new_src_contents=None):
    """Stand-in for the external editor: overwrite the tmp file in place.

    'shred' invocations are ignored (VaultEditor shells out to shred for
    secure deletion — presumably; confirm against VaultEditor internals).
    """
    if editor_args[0] == 'shred':
        return

    tmp_path = editor_args[-1]

    # simulate the tmp file being edited; 'with' closes the handle
    # reliably where the original leaked it on a write error
    with open(tmp_path, 'wb') as tmp_file:
        if new_src_contents:
            tmp_file.write(new_src_contents)
def _faux_command(self, tmp_path):
    """Editor stub that makes no changes at all."""
    pass
@patch('ansible.parsing.vault.subprocess.call')
def test_edit_file_helper_no_change(self, mock_sp_call):
    """If the editor leaves the plaintext as-is the file stays unchanged."""
    self._test_dir = self._create_test_dir()
    src_file_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)

    # editor invocation doesn't change anything
    def faux_editor(editor_args):
        self._faux_editor(editor_args, src_file_contents)

    mock_sp_call.side_effect = faux_editor
    ve = self._vault_editor()
    ve._edit_file_helper(src_file_path, self.vault_secret, existing_data=src_file_contents)

    # read back with a context manager; the original leaked the handle
    with open(src_file_path, 'rb') as new_target_file:
        new_target_file_contents = new_target_file.read()
    self.assertEqual(src_file_contents, new_target_file_contents)
def _assert_file_is_encrypted(self, vault_editor, src_file_path, src_contents):
    """Assert the on-disk file is vault-encrypted and decrypts to *src_contents*."""
    # context manager closes the handle; the original leaked it
    with open(src_file_path, 'rb') as new_src_file:
        new_src_file_contents = new_src_file.read()

    self.assertTrue(vault.is_encrypted(new_src_file_contents))

    src_file_plaintext = vault_editor.vault.decrypt(new_src_file_contents)

    # the plaintext should not be encrypted
    self.assertFalse(vault.is_encrypted(src_file_plaintext))

    # and the new plaintext should match the original
    self.assertEqual(src_file_plaintext, src_contents)
def _assert_file_is_link(self, src_file_link_path, src_file_path):
    """Assert that *src_file_link_path* is (still) a symlink."""
    self.assertTrue(os.path.islink(src_file_link_path),
                    'The dest path (%s) should be a symlink to (%s) but is not' % (src_file_link_path, src_file_path))
def test_rekey_file(self):
    """rekey_file re-encrypts so only the new password can decrypt."""
    self._test_dir = self._create_test_dir()
    src_file_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)

    ve = self._vault_editor()
    ve.encrypt_file(src_file_path, self.vault_secret)

    # FIXME: update to just set self._secrets or just a new vault secret id
    new_password = 'password2:electricbugaloo'
    new_vault_secret = TextVaultSecret(new_password)
    new_vault_secrets = [('default', new_vault_secret)]
    ve.rekey_file(src_file_path, vault.match_encrypt_secret(new_vault_secrets)[1])

    # FIXME: can just update self._secrets here
    new_ve = vault.VaultEditor(VaultLib(new_vault_secrets))
    self._assert_file_is_encrypted(new_ve, src_file_path, src_file_contents)
def test_rekey_file_no_new_password(self):
    """rekey_file without a new password must raise AnsibleError."""
    self._test_dir = self._create_test_dir()
    src_file_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)

    ve = self._vault_editor()
    ve.encrypt_file(src_file_path, self.vault_secret)

    self.assertRaisesRegexp(errors.AnsibleError,
                            'The value for the new_password to rekey',
                            ve.rekey_file,
                            src_file_path,
                            None)
def test_rekey_file_not_encrypted(self):
    """rekey_file on a plaintext file must raise AnsibleError."""
    self._test_dir = self._create_test_dir()
    src_file_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)

    ve = self._vault_editor()

    new_password = 'password2:electricbugaloo'
    self.assertRaisesRegexp(errors.AnsibleError,
                            'input is not vault encrypted data',
                            ve.rekey_file,
                            src_file_path, new_password)
def test_plaintext(self):
    """plaintext() returns the decrypted contents of an encrypted file."""
    self._test_dir = self._create_test_dir()
    src_file_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)

    ve = self._vault_editor()
    ve.encrypt_file(src_file_path, self.vault_secret)

    res = ve.plaintext(src_file_path)
    self.assertEqual(src_file_contents, res)
def test_plaintext_not_encrypted(self):
    """plaintext() on a non-encrypted file must raise AnsibleError."""
    self._test_dir = self._create_test_dir()
    src_file_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)

    ve = self._vault_editor()
    self.assertRaisesRegexp(errors.AnsibleError,
                            'input is not vault encrypted data',
                            ve.plaintext,
                            src_file_path)
def test_encrypt_file(self):
    """encrypt_file must leave a decryptable vault file on disk."""
    self._test_dir = self._create_test_dir()
    src_file_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)

    ve = self._vault_editor()
    ve.encrypt_file(src_file_path, self.vault_secret)

    self._assert_file_is_encrypted(ve, src_file_path, src_file_contents)
def test_encrypt_file_symlink(self):
    """Encrypting via a symlink encrypts the target and keeps the link."""
    self._test_dir = self._create_test_dir()
    src_file_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_file_contents)

    src_file_link_path = os.path.join(self._test_dir, 'a_link_to_dest_file')
    os.symlink(src_file_path, src_file_link_path)

    ve = self._vault_editor()
    ve.encrypt_file(src_file_link_path, self.vault_secret)

    # Both paths resolve to the same encrypted content.
    self._assert_file_is_encrypted(ve, src_file_path, src_file_contents)
    self._assert_file_is_encrypted(ve, src_file_link_path, src_file_contents)

    self._assert_file_is_link(src_file_link_path, src_file_path)
@patch('ansible.parsing.vault.subprocess.call')
def test_edit_file_no_vault_id(self, mock_sp_call):
    """Editing an encrypted file rewrites it with a 1.1 (no vault-id) header."""
    self._test_dir = self._create_test_dir()
    src_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)

    new_src_contents = to_bytes("The info is different now.")

    def faux_editor(editor_args):
        self._faux_editor(editor_args, new_src_contents)

    mock_sp_call.side_effect = faux_editor

    ve = self._vault_editor()
    ve.encrypt_file(src_file_path, self.vault_secret)
    ve.edit_file(src_file_path)

    # context manager closes the handle; the original leaked it
    with open(src_file_path, 'rb') as new_src_file:
        new_src_file_contents = new_src_file.read()

    self.assertTrue(b'$ANSIBLE_VAULT;1.1;AES256' in new_src_file_contents)

    src_file_plaintext = ve.vault.decrypt(new_src_file_contents)
    self.assertEqual(src_file_plaintext, new_src_contents)
@patch('ansible.parsing.vault.subprocess.call')
def test_edit_file_with_vault_id(self, mock_sp_call):
    """Editing a file encrypted with a vault-id keeps the 1.2 header + id."""
    self._test_dir = self._create_test_dir()
    src_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)

    new_src_contents = to_bytes("The info is different now.")

    def faux_editor(editor_args):
        self._faux_editor(editor_args, new_src_contents)

    mock_sp_call.side_effect = faux_editor

    ve = self._vault_editor()
    ve.encrypt_file(src_file_path, self.vault_secret,
                    vault_id='vault_secrets')
    ve.edit_file(src_file_path)

    # context manager closes the handle; the original leaked it
    with open(src_file_path, 'rb') as new_src_file:
        new_src_file_contents = new_src_file.read()

    self.assertTrue(b'$ANSIBLE_VAULT;1.2;AES256;vault_secrets' in new_src_file_contents)

    src_file_plaintext = ve.vault.decrypt(new_src_file_contents)
    self.assertEqual(src_file_plaintext, new_src_contents)
@patch('ansible.parsing.vault.subprocess.call')
def test_edit_file_symlink(self, mock_sp_call):
    """Editing via a symlink keeps the link and updates the real file."""
    self._test_dir = self._create_test_dir()
    src_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)

    new_src_contents = to_bytes("The info is different now.")

    def faux_editor(editor_args):
        self._faux_editor(editor_args, new_src_contents)

    mock_sp_call.side_effect = faux_editor

    ve = self._vault_editor()
    ve.encrypt_file(src_file_path, self.vault_secret)

    src_file_link_path = os.path.join(self._test_dir, 'a_link_to_dest_file')
    os.symlink(src_file_path, src_file_link_path)

    ve.edit_file(src_file_link_path)

    # context manager closes the handle; the original leaked it
    with open(src_file_path, 'rb') as new_src_file:
        new_src_file_contents = new_src_file.read()

    src_file_plaintext = ve.vault.decrypt(new_src_file_contents)

    self._assert_file_is_link(src_file_link_path, src_file_path)

    # (dead commented-out duplicate assertion removed)
    self.assertEqual(src_file_plaintext, new_src_contents)
@patch('ansible.parsing.vault.subprocess.call')
def test_edit_file_not_encrypted(self, mock_sp_call):
    """edit_file on a plaintext (non-vault) file must raise AnsibleError."""
    self._test_dir = self._create_test_dir()
    src_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)

    new_src_contents = to_bytes("The info is different now.")

    # Editor stub is installed but should never be reached.
    def faux_editor(editor_args):
        self._faux_editor(editor_args, new_src_contents)

    mock_sp_call.side_effect = faux_editor

    ve = self._vault_editor()
    self.assertRaisesRegexp(errors.AnsibleError,
                            'input is not vault encrypted data',
                            ve.edit_file,
                            src_file_path)
def test_create_file_exists(self):
    """create_file on an existing path must refuse and point at edit_file."""
    self._test_dir = self._create_test_dir()
    src_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)

    ve = self._vault_editor()
    self.assertRaisesRegexp(errors.AnsibleError,
                            'please use .edit. instead',
                            ve.create_file,
                            src_file_path,
                            self.vault_secret)
def test_decrypt_file_exception(self):
    """decrypt_file on a plaintext file must raise AnsibleError."""
    self._test_dir = self._create_test_dir()
    src_contents = to_bytes("some info in a file\nyup.")
    src_file_path = self._create_file(self._test_dir, 'src_file', content=src_contents)

    ve = self._vault_editor()
    self.assertRaisesRegexp(errors.AnsibleError,
                            'input is not vault encrypted data',
                            ve.decrypt_file,
                            src_file_path)
@patch.object(vault.VaultEditor, '_editor_shell_command')
def test_create_file(self, mock_editor_shell_command):
    """create_file with a 'touch'-only editor must leave a file on disk."""

    # Replace the real editor command with a no-op 'touch'.
    def sc_side_effect(filename):
        return ['touch', filename]

    mock_editor_shell_command.side_effect = sc_side_effect

    tmp_file = tempfile.NamedTemporaryFile()
    # Unlink so create_file sees a non-existent path (only the name is reused).
    os.unlink(tmp_file.name)

    _secrets = self._secrets('ansible')
    ve = self._vault_editor(_secrets)
    ve.create_file(tmp_file.name, vault.match_encrypt_secret(_secrets)[1])

    self.assertTrue(os.path.exists(tmp_file.name))
def test_decrypt_1_0(self):
    """Decrypt a pre-made vault 1.0 payload and check the plaintext."""
    # Skip testing decrypting 1.0 files if we don't have access to AES, KDF or Counter.
    v10_file = tempfile.NamedTemporaryFile(delete=False)
    with v10_file as f:
        f.write(to_bytes(v10_data))

    ve = self._vault_editor(self._secrets("ansible"))

    # make sure the password functions for the cipher
    error_hit = False
    try:
        ve.decrypt_file(v10_file.name)
    except errors.AnsibleError:
        error_hit = True
        # NOTE(review): this re-raise makes the `error_hit` assertion below
        # unreachable on failure -- looks like a debugging leftover; confirm
        # before removing (the 1.1 test below has no such re-raise).
        raise

    # verify decrypted content
    f = open(v10_file.name, "rb")
    fdata = to_text(f.read())
    f.close()

    os.unlink(v10_file.name)

    assert error_hit is False, "error decrypting 1.0 file"
    # NOTE(review): the next two checks are redundant duplicates of each other.
    self.assertEqual(fdata.strip(), "foo")
    assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def test_decrypt_1_1(self):
    """Decrypt a pre-made vault 1.1 payload and check the plaintext."""
    v11_file = tempfile.NamedTemporaryFile(delete=False)
    with v11_file as f:
        f.write(to_bytes(v11_data))

    ve = self._vault_editor(self._secrets("ansible"))

    # make sure the password functions for the cipher
    error_hit = False
    try:
        ve.decrypt_file(v11_file.name)
    except errors.AnsibleError:
        error_hit = True

    # verify decrypted content
    f = open(v11_file.name, "rb")
    fdata = to_text(f.read())
    f.close()

    os.unlink(v11_file.name)

    assert error_hit is False, "error decrypting 1.1 file"
    assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
def test_rekey_migration(self):
    """Rekey a 1.0 vault file: result must be AES256 and decrypt with the
    new secret."""
    v10_file = tempfile.NamedTemporaryFile(delete=False)
    with v10_file as f:
        f.write(to_bytes(v10_data))

    ve = self._vault_editor(self._secrets("ansible"))

    # make sure the password functions for the cipher
    error_hit = False
    new_secrets = self._secrets("ansible2")
    try:
        ve.rekey_file(v10_file.name, vault.match_encrypt_secret(new_secrets)[1])
    except errors.AnsibleError:
        error_hit = True

    # verify decrypted content
    f = open(v10_file.name, "rb")
    fdata = f.read()
    f.close()

    assert error_hit is False, "error rekeying 1.0 file to 1.1"

    # ensure filedata can be decrypted, is 1.1 and is AES256
    vl = VaultLib(new_secrets)
    dec_data = None
    error_hit = False
    try:
        dec_data = vl.decrypt(fdata)
    except errors.AnsibleError:
        error_hit = True

    os.unlink(v10_file.name)

    self.assertIn(b'AES256', fdata, 'AES256 was not found in vault file %s' % to_text(fdata))
    assert error_hit is False, "error decrypting migrated 1.0 file"
    assert dec_data.strip() == b"foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data
def test_real_path_dash(self):
    """The stdin/stdout marker '-' must pass through _real_path untouched."""
    editor = self._vault_editor()
    self.assertEqual(editor._real_path('-'), '-')
def test_real_path_dev_null(self):
    """The special path /dev/null must pass through _real_path untouched."""
    editor = self._vault_editor()
    self.assertEqual(editor._real_path('/dev/null'), '/dev/null')
def test_real_path_symlink(self):
    """_real_path must resolve a symlink to its target path."""
    self._test_dir = self._create_test_dir()
    file_path = self._create_file(self._test_dir, 'test_file', content=b'this is a test file')
    file_link_path = os.path.join(self._test_dir, 'a_link_to_test_file')

    os.symlink(file_path, file_link_path)

    ve = self._vault_editor()

    res = ve._real_path(file_link_path)
    self.assertEqual(res, file_path)
@pytest.mark.skipif(not vault.HAS_PYCRYPTO,
                    reason="Skipping pycrypto tests because pycrypto is not installed")
class TestVaultEditorPyCrypto(unittest.TestCase):
    """Exercise the vault editor with the pycrypto backend by forcing the
    module-level HAS_CRYPTOGRAPHY flag off for the test's duration."""

    def setUp(self):
        # Remember the real flag so tearDown can restore it.
        self.has_cryptography = vault.HAS_CRYPTOGRAPHY
        vault.HAS_CRYPTOGRAPHY = False
        super(TestVaultEditorPyCrypto, self).setUp()

    def tearDown(self):
        vault.HAS_CRYPTOGRAPHY = self.has_cryptography
        super(TestVaultEditorPyCrypto, self).tearDown()
| gpl-3.0 |
alrifqi/django | django/conf/locale/pt_BR/formats.py | 504 | 1434 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# Brazilian Portuguese (pt_BR) locale formats.
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'           # e.g. "25 de Outubro de 2006"
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0  # Sunday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d/%m/%Y', '%d/%m/%y',  # '25/10/2006', '25/10/06'
    # '%d de %b de %Y', '%d de %b, %Y',  # '25 de Out de 2006', '25 Out, 2006'
    # '%d de %B de %Y', '%d de %B, %Y',  # '25 de Outubro de 2006', '25 de Outubro, 2006'
]
DATETIME_INPUT_FORMATS = [
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
    '%d/%m/%y %H:%M:%S',     # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M:%S.%f',  # '25/10/06 14:30:59.000200'
    '%d/%m/%y %H:%M',        # '25/10/06 14:30'
    '%d/%m/%y',              # '25/10/06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
sebrandon1/nova | nova/objects/network.py | 10 | 9382 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_utils import versionutils
import nova.conf
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
CONF = nova.conf.CONF
# TODO(berrange): Remove NovaObjectDictCompat
@obj_base.NovaObjectRegistry.register
class Network(obj_base.NovaPersistentObject, obj_base.NovaObject,
              obj_base.NovaObjectDictCompat):
    """Versioned object wrapping a nova-network ``networks`` DB row."""

    # Version 1.0: Initial version
    # Version 1.1: Added in_use_on_host()
    # Version 1.2: Added mtu, dhcp_server, enable_dhcp, share_address
    VERSION = '1.2'

    fields = {
        'id': fields.IntegerField(),
        'label': fields.StringField(),
        'injected': fields.BooleanField(),
        'cidr': fields.IPV4NetworkField(nullable=True),
        'cidr_v6': fields.IPV6NetworkField(nullable=True),
        'multi_host': fields.BooleanField(),
        'netmask': fields.IPV4AddressField(nullable=True),
        'gateway': fields.IPV4AddressField(nullable=True),
        'broadcast': fields.IPV4AddressField(nullable=True),
        'netmask_v6': fields.IPV6AddressField(nullable=True),
        'gateway_v6': fields.IPV6AddressField(nullable=True),
        'bridge': fields.StringField(nullable=True),
        'bridge_interface': fields.StringField(nullable=True),
        'dns1': fields.IPAddressField(nullable=True),
        'dns2': fields.IPAddressField(nullable=True),
        'vlan': fields.IntegerField(nullable=True),
        'vpn_public_address': fields.IPAddressField(nullable=True),
        'vpn_public_port': fields.IntegerField(nullable=True),
        'vpn_private_address': fields.IPAddressField(nullable=True),
        'dhcp_start': fields.IPV4AddressField(nullable=True),
        'rxtx_base': fields.IntegerField(nullable=True),
        'project_id': fields.UUIDField(nullable=True),
        'priority': fields.IntegerField(nullable=True),
        'host': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'mtu': fields.IntegerField(nullable=True),
        'dhcp_server': fields.IPAddressField(nullable=True),
        'enable_dhcp': fields.BooleanField(),
        'share_address': fields.BooleanField(),
    }

    @staticmethod
    def _convert_legacy_ipv6_netmask(netmask):
        """Handle netmask_v6 possibilities from the database.

        Historically, this was stored as just an integral CIDR prefix,
        but in the future it should be stored as an actual netmask.
        Be tolerant of either here.

        :raises ValueError: if the value is neither an integral prefix nor
            a parseable netmask.
        """
        try:
            prefix = int(netmask)
            return netaddr.IPNetwork('1::/%i' % prefix).netmask
        except ValueError:
            pass
        try:
            return netaddr.IPNetwork(netmask).netmask
        except netaddr.AddrFormatError:
            raise ValueError(_('IPv6 netmask "%s" must be a netmask '
                               'or integral prefix') % netmask)

    def obj_make_compatible(self, primitive, target_version):
        """Strip fields unknown to consumers older than version 1.2."""
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 2):
            # These four fields were all introduced in version 1.2.
            for key in ('mtu', 'enable_dhcp', 'dhcp_server', 'share_address'):
                primitive.pop(key, None)

    @staticmethod
    def _from_db_object(context, network, db_network):
        """Populate a Network object from a DB row, normalizing legacy and
        defaulted values on the way in."""
        for field in network.fields:
            db_value = db_network[field]
            # Bug fix: these used `is` for string comparison, which relies on
            # CPython literal interning and raises SyntaxWarning on modern
            # Python; equality is the correct operator.
            if field == 'netmask_v6' and db_value is not None:
                # DB may hold a bare prefix length; normalize to a netmask.
                db_value = network._convert_legacy_ipv6_netmask(db_value)
            if field == 'dhcp_server' and db_value is None:
                # Default the DHCP server address to the network gateway.
                db_value = db_network['gateway']
            if field == 'share_address' and CONF.share_dhcp_address:
                # Config flag overrides the stored value.
                db_value = CONF.share_dhcp_address

            network[field] = db_value
        network._context = context
        network.obj_reset_changes()
        return network

    @obj_base.remotable_classmethod
    def get_by_id(cls, context, network_id, project_only='allow_none'):
        db_network = db.network_get(context, network_id,
                                    project_only=project_only)
        return cls._from_db_object(context, cls(), db_network)

    @obj_base.remotable_classmethod
    def get_by_uuid(cls, context, network_uuid):
        db_network = db.network_get_by_uuid(context, network_uuid)
        return cls._from_db_object(context, cls(), db_network)

    @obj_base.remotable_classmethod
    def get_by_cidr(cls, context, cidr):
        db_network = db.network_get_by_cidr(context, cidr)
        return cls._from_db_object(context, cls(), db_network)

    @obj_base.remotable_classmethod
    def associate(cls, context, project_id, network_id=None, force=False):
        db.network_associate(context, project_id, network_id=network_id,
                             force=force)

    @obj_base.remotable_classmethod
    def disassociate(cls, context, network_id, host=False, project=False):
        db.network_disassociate(context, network_id, host, project)

    @obj_base.remotable_classmethod
    def in_use_on_host(cls, context, network_id, host):
        return db.network_in_use_on_host(context, network_id, host)

    def _get_primitive_changes(self):
        """Return obj_get_changes() with IP address values stringified so
        they can be stored in the DB."""
        changes = {}
        for key, value in self.obj_get_changes().items():
            if isinstance(value, netaddr.IPAddress):
                changes[key] = str(value)
            else:
                changes[key] = value
        return changes

    @obj_base.remotable
    def create(self):
        """Persist a new network row; refuses if the object already has an id."""
        updates = self._get_primitive_changes()
        if 'id' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        db_network = db.network_create_safe(self._context, updates)
        self._from_db_object(self._context, self, db_network)

    @obj_base.remotable
    def destroy(self):
        db.network_delete_safe(self._context, self.id)
        self.deleted = True
        self.obj_reset_changes(['deleted'])

    @obj_base.remotable
    def save(self):
        """Write pending changes back to the DB.

        'host' goes through network_set_host(); everything else through
        network_update(). The object is refreshed from whichever row the
        DB returned.
        """
        context = self._context
        updates = self._get_primitive_changes()
        if 'netmask_v6' in updates:
            # NOTE(danms): For some reason, historical code stores the
            # IPv6 netmask as just the CIDR mask length, so convert that
            # back here before saving for now.
            updates['netmask_v6'] = netaddr.IPNetwork(
                updates['netmask_v6']).netmask
        set_host = 'host' in updates
        if set_host:
            db.network_set_host(context, self.id, updates.pop('host'))
        if updates:
            db_network = db.network_update(context, self.id, updates)
        elif set_host:
            db_network = db.network_get(context, self.id)
        else:
            db_network = None
        if db_network is not None:
            self._from_db_object(context, self, db_network)
@obj_base.NovaObjectRegistry.register
class NetworkList(obj_base.ObjectListBase, obj_base.NovaObject):
    """List object holding Network objects, with DB-backed query helpers."""

    # Version 1.0: Initial version
    # Version 1.1: Added get_by_project()
    # Version 1.2: Network <= version 1.2
    VERSION = '1.2'

    fields = {
        'objects': fields.ListOfObjectsField('Network'),
    }

    @obj_base.remotable_classmethod
    def get_all(cls, context, project_only='allow_none'):
        # Fetch every network row, honoring the project_only filter.
        db_networks = db.network_get_all(context, project_only)
        return obj_base.obj_make_list(context, cls(context), objects.Network,
                                      db_networks)

    @obj_base.remotable_classmethod
    def get_by_uuids(cls, context, network_uuids, project_only='allow_none'):
        db_networks = db.network_get_all_by_uuids(context, network_uuids,
                                                  project_only)
        return obj_base.obj_make_list(context, cls(context), objects.Network,
                                      db_networks)

    @obj_base.remotable_classmethod
    def get_by_host(cls, context, host):
        db_networks = db.network_get_all_by_host(context, host)
        return obj_base.obj_make_list(context, cls(context), objects.Network,
                                      db_networks)

    @obj_base.remotable_classmethod
    def get_by_project(cls, context, project_id, associate=True):
        # associate=True will bind free networks to the project on demand.
        db_networks = db.project_get_networks(context, project_id,
                                              associate=associate)
        return obj_base.obj_make_list(context, cls(context), objects.Network,
                                      db_networks)
sbidoul/odoo | addons/hw_escpos/controllers/main.py | 89 | 14633 | # -*- coding: utf-8 -*-
import commands
import logging
import simplejson
import os
import os.path
import io
import base64
import openerp
import time
import random
import math
import md5
import openerp.addons.hw_proxy.controllers.main as hw_proxy
import pickle
import re
import subprocess
import traceback
try:
from .. escpos import *
from .. escpos.exceptions import *
from .. escpos.printer import Usb
except ImportError:
escpos = printer = None
from threading import Thread, Lock
from Queue import Queue, Empty
try:
import usb.core
except ImportError:
usb = None
from PIL import Image
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class EscposDriver(Thread):
    """Background thread driving an ESC/POS USB receipt printer.

    Work items (receipts, cashbox pulses, status requests) are pushed onto
    an internal queue via push_task() and consumed by run(); tasks that fail
    are re-queued so they survive transient printer disconnections.
    """

    def __init__(self):
        Thread.__init__(self)
        self.queue = Queue()
        self.lock = Lock()
        self.status = {'status':'connecting', 'messages':[]}

    def supported_devices(self):
        """Return the supported-printer list, preferring the user-extended
        list persisted in escpos_devices.pickle when it exists."""
        if not os.path.isfile('escpos_devices.pickle'):
            return supported_devices.device_list
        else:
            try:
                f = open('escpos_devices.pickle','r')
                return pickle.load(f)
                # NOTE(review): unreachable -- the return above means this
                # handle is never closed; a `with` block would fix the leak.
                f.close()
            except Exception as e:
                self.set_status('error',str(e))
                return supported_devices.device_list

    def add_supported_device(self,device_string):
        """Parse an `lsusb`-style line ("... vvvv:pppp ID Name") and append
        the device to the pickled supported-device list."""
        r = re.compile('[0-9A-Fa-f]{4}:[0-9A-Fa-f]{4}');
        match = r.search(device_string)
        if match:
            match = match.group().split(':')
            vendor = int(match[0],16)
            product = int(match[1],16)
            # Everything after 'ID' is treated as the human-readable name.
            name = device_string.split('ID')
            if len(name) >= 2:
                name = name[1]
            else:
                name = name[0]
            _logger.info('ESC/POS: adding support for device: '+match[0]+':'+match[1]+' '+name)

            device_list = supported_devices.device_list[:]
            if os.path.isfile('escpos_devices.pickle'):
                try:
                    f = open('escpos_devices.pickle','r')
                    device_list = pickle.load(f)
                    f.close()
                except Exception as e:
                    self.set_status('error',str(e))
            device_list.append({
                'vendor': vendor,
                'product': product,
                'name': name,
            })

            try:
                f = open('escpos_devices.pickle','w+')
                f.seek(0)
                pickle.dump(device_list,f)
                f.close()
            except Exception as e:
                self.set_status('error',str(e))

    def connected_usb_devices(self):
        """Return the subset of supported devices currently on the USB bus."""
        connected = []

        for device in self.supported_devices():
            if usb.core.find(idVendor=device['vendor'], idProduct=device['product']) != None:
                connected.append(device)

        return connected

    def lockedstart(self):
        # Start the worker thread exactly once; the lock guards the
        # check-then-start against concurrent callers.
        with self.lock:
            if not self.isAlive():
                self.daemon = True
                self.start()

    def get_escpos_printer(self):
        """Return a Usb printer object for the first connected supported
        device, or None (updating self.status either way)."""
        printers = self.connected_usb_devices()
        if len(printers) > 0:
            self.set_status('connected','Connected to '+printers[0]['name'])
            return Usb(printers[0]['vendor'], printers[0]['product'])
        else:
            self.set_status('disconnected','Printer Not Found')
            return None

    def get_status(self):
        # Queue a status probe (which refreshes self.status as a side
        # effect) and return the current snapshot.
        self.push_task('status')
        return self.status

    def open_cashbox(self,printer):
        # Pulse both standard cash-drawer pins.
        printer.cashdraw(2)
        printer.cashdraw(5)

    def set_status(self, status, message = None):
        """Record a status transition; consecutive duplicate messages for
        the same status are collapsed."""
        _logger.info(status+' : '+ (message or 'no message'))
        if status == self.status['status']:
            if message != None and (len(self.status['messages']) == 0 or message != self.status['messages'][-1]):
                self.status['messages'].append(message)
        else:
            self.status['status'] = status
            if message:
                self.status['messages'] = [message]
            else:
                self.status['messages'] = []

        if status == 'error' and message:
            _logger.error('ESC/POS Error: '+message)
        elif status == 'disconnected' and message:
            _logger.warning('ESC/POS Device Disconnected: '+message)

    def run(self):
        """Worker loop: pop tasks from the queue and execute them against
        the printer; stale tasks are dropped, failed ones re-queued."""
        if not escpos:
            _logger.error('ESC/POS cannot initialize, please verify system dependencies.')
            return
        while True:
            try:
                error = True
                timestamp, task, data = self.queue.get(True)
                # NOTE(review): if queue.get() raises, `printer` (and
                # timestamp/task/data) would be unbound when the except and
                # finally blocks below reference them -- confirm intended.
                printer = self.get_escpos_printer()

                if printer == None:
                    if task != 'status':
                        # Re-queue and retry once a printer shows up.
                        self.queue.put((timestamp,task,data))
                    error = False
                    time.sleep(5)
                    continue
                elif task == 'receipt':
                    # Drop receipts older than one hour.
                    if timestamp >= time.time() - 1 * 60 * 60:
                        self.print_receipt_body(printer,data)
                        printer.cut()
                elif task == 'xml_receipt':
                    if timestamp >= time.time() - 1 * 60 * 60:
                        printer.receipt(data)
                elif task == 'cashbox':
                    # Cashbox pulses expire after 12 seconds.
                    if timestamp >= time.time() - 12:
                        self.open_cashbox(printer)
                elif task == 'printstatus':
                    self.print_status(printer)
                elif task == 'status':
                    pass
                error = False

            except NoDeviceError as e:
                print "No device found %s" %str(e)
            except HandleDeviceError as e:
                print "Impossible to handle the device due to previous error %s" % str(e)
            except TicketNotPrinted as e:
                print "The ticket does not seems to have been fully printed %s" % str(e)
            except NoStatusError as e:
                print "Impossible to get the status of the printer %s" % str(e)
            except Exception as e:
                self.set_status('error', str(e))
                errmsg = str(e) + '\n' + '-'*60+'\n' + traceback.format_exc() + '-'*60 + '\n'
                _logger.error(errmsg);
            finally:
                if error:
                    # Put the failed task back so it is retried.
                    self.queue.put((timestamp, task, data))
                if printer:
                    printer.close()

    def push_task(self,task, data = None):
        # Lazily start the worker thread, then enqueue the timestamped task.
        self.lockedstart()
        self.queue.put((time.time(),task,data))

    def print_status(self,eprint):
        """Print a PosBox status ticket showing the box's LAN IP address(es)
        and homepage URL (or a connection-error notice)."""
        localips = ['0.0.0.0','127.0.0.1','127.0.1.1']
        # Scrape non-loopback IPv4 addresses from ifconfig output
        # (Python 2 `commands` module).
        ips = [ c.split(':')[1].split(' ')[0] for c in commands.getoutput("/sbin/ifconfig").split('\n') if 'inet addr' in c ]
        ips = [ ip for ip in ips if ip not in localips ]
        eprint.text('\n\n')
        eprint.set(align='center',type='b',height=2,width=2)
        eprint.text('PosBox Status\n')
        eprint.text('\n')
        eprint.set(align='center')

        if len(ips) == 0:
            eprint.text('ERROR: Could not connect to LAN\n\nPlease check that the PosBox is correc-\ntly connected with a network cable,\n that the LAN is setup with DHCP, and\nthat network addresses are available')
        elif len(ips) == 1:
            eprint.text('IP Address:\n'+ips[0]+'\n')
        else:
            eprint.text('IP Addresses:\n')
            for ip in ips:
                eprint.text(ip+'\n')

        if len(ips) >= 1:
            eprint.text('\nHomepage:\nhttp://'+ips[0]+':8069\n')

        eprint.text('\n\n')
        eprint.cut()

    def print_receipt_body(self,eprint,receipt):
        """Render a POS receipt dict on the printer: header (logo/company),
        order lines, taxes, total, payment lines and footer."""

        def check(string):
            # True for a non-empty, non-blank string (and not literal True).
            return string != True and bool(string) and string.strip()

        def price(amount):
            return ("{0:."+str(receipt['precision']['price'])+"f}").format(amount)

        def money(amount):
            return ("{0:."+str(receipt['precision']['money'])+"f}").format(amount)

        def quantity(amount):
            # Integral quantities print without a decimal part.
            if math.floor(amount) != amount:
                return ("{0:."+str(receipt['precision']['quantity'])+"f}").format(amount)
            else:
                return str(amount)

        def printline(left, right='', width=40, ratio=0.5, indent=0):
            # Left-pad/truncate `left` and right-align `right` on one line.
            lwidth = int(width * ratio)
            rwidth = width - lwidth
            lwidth = lwidth - indent

            left = left[:lwidth]
            if len(left) != lwidth:
                left = left + ' ' * (lwidth - len(left))

            right = right[-rwidth:]
            if len(right) != rwidth:
                right = ' ' * (rwidth - len(right)) + right

            return ' ' * indent + left + right + '\n'

        def print_taxes():
            taxes = receipt['tax_details']
            for tax in taxes:
                eprint.text(printline(tax['tax']['name'],price(tax['amount']), width=40,ratio=0.6))

        # Receipt Header
        if receipt['company']['logo']:
            eprint.set(align='center')
            eprint.print_base64_image(receipt['company']['logo'])
            eprint.text('\n')
        else:
            eprint.set(align='center',type='b',height=2,width=2)
            eprint.text(receipt['company']['name'] + '\n')

        eprint.set(align='center',type='b')
        if check(receipt['company']['contact_address']):
            eprint.text(receipt['company']['contact_address'] + '\n')
        if check(receipt['company']['phone']):
            eprint.text('Tel:' + receipt['company']['phone'] + '\n')
        if check(receipt['company']['vat']):
            eprint.text('VAT:' + receipt['company']['vat'] + '\n')
        if check(receipt['company']['email']):
            eprint.text(receipt['company']['email'] + '\n')
        if check(receipt['company']['website']):
            eprint.text(receipt['company']['website'] + '\n')
        if check(receipt['header']):
            eprint.text(receipt['header']+'\n')
        if check(receipt['cashier']):
            eprint.text('-'*32+'\n')
            eprint.text('Served by '+receipt['cashier']+'\n')

        # Orderlines
        eprint.text('\n\n')
        eprint.set(align='center')
        for line in receipt['orderlines']:
            pricestr = price(line['price_display'])
            # Single-unit, undiscounted lines print compactly on one line.
            if line['discount'] == 0 and line['unit_name'] == 'Unit(s)' and line['quantity'] == 1:
                eprint.text(printline(line['product_name'],pricestr,ratio=0.6))
            else:
                eprint.text(printline(line['product_name'],ratio=0.6))
                if line['discount'] != 0:
                    eprint.text(printline('Discount: '+str(line['discount'])+'%', ratio=0.6, indent=2))
                if line['unit_name'] == 'Unit(s)':
                    eprint.text( printline( quantity(line['quantity']) + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))
                else:
                    eprint.text( printline( quantity(line['quantity']) + line['unit_name'] + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))

        # Subtotal if the taxes are not included
        taxincluded = True
        if money(receipt['subtotal']) != money(receipt['total_with_tax']):
            eprint.text(printline('','-------'));
            eprint.text(printline(_('Subtotal'),money(receipt['subtotal']),width=40, ratio=0.6))
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))
            taxincluded = False

        # Total
        eprint.text(printline('','-------'));
        eprint.set(align='center',height=2)
        eprint.text(printline(_('        TOTAL'),money(receipt['total_with_tax']),width=40, ratio=0.6))
        eprint.text('\n\n');

        # Paymentlines
        eprint.set(align='center')
        for line in receipt['paymentlines']:
            eprint.text(printline(line['journal'], money(line['amount']), ratio=0.6))

        eprint.text('\n');
        eprint.set(align='center',height=2)
        eprint.text(printline(_('        CHANGE'),money(receipt['change']),width=40, ratio=0.6))
        eprint.set(align='center')
        eprint.text('\n');

        # Extra Payment info
        if receipt['total_discount'] != 0:
            eprint.text(printline(_('Discounts'),money(receipt['total_discount']),width=40, ratio=0.6))
        if taxincluded:
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))

        # Footer
        if check(receipt['footer']):
            eprint.text('\n'+receipt['footer']+'\n\n')
        eprint.text(receipt['name']+'\n')
        eprint.text(      str(receipt['date']['date']).zfill(2)
                    +'/'+ str(receipt['date']['month']+1).zfill(2)
                    +'/'+ str(receipt['date']['year']).zfill(4)
                    +' '+ str(receipt['date']['hour']).zfill(2)
                    +':'+ str(receipt['date']['minute']).zfill(2) )
# Module-level singleton driver: print a status ticket at startup and
# register with the hw_proxy dispatcher under the 'escpos' key.
driver = EscposDriver()

driver.push_task('printstatus')

hw_proxy.drivers['escpos'] = driver
class EscposProxy(hw_proxy.Proxy):
    """HTTP/JSON-RPC endpoints bridging the POS web client to the ESC/POS
    driver: each route queues a task on the module-level `driver`."""

    @http.route('/hw_proxy/open_cashbox', type='json', auth='none', cors='*')
    def open_cashbox(self):
        """Queue a cash-drawer open pulse."""
        _logger.info('ESC/POS: OPEN CASHBOX')
        driver.push_task('cashbox')

    @http.route('/hw_proxy/print_receipt', type='json', auth='none', cors='*')
    def print_receipt(self, receipt):
        """Queue a receipt (dict payload) for printing."""
        _logger.info('ESC/POS: PRINT RECEIPT')
        driver.push_task('receipt',receipt)

    @http.route('/hw_proxy/print_xml_receipt', type='json', auth='none', cors='*')
    def print_xml_receipt(self, receipt):
        """Queue an XML-described receipt for printing."""
        _logger.info('ESC/POS: PRINT XML RECEIPT')
        driver.push_task('xml_receipt',receipt)

    @http.route('/hw_proxy/escpos/add_supported_device', type='http', auth='none', cors='*')
    def add_supported_device(self, device_string):
        """Register an extra printer (lsusb-style string) with the driver."""
        _logger.info('ESC/POS: ADDED NEW DEVICE:'+device_string)
        driver.add_supported_device(device_string)
        return "The device:\n"+device_string+"\n has been added to the list of supported devices.<br/><a href='/hw_proxy/status'>Ok</a>"

    @http.route('/hw_proxy/escpos/reset_supported_devices', type='http', auth='none', cors='*')
    def reset_supported_devices(self):
        """Delete the persisted device list, reverting to factory defaults."""
        # Bug fix: the original `except Exception as e: pass` silently
        # swallowed every error and bound an unused name; only the expected
        # "file does not exist" OSError from os.remove should be ignored.
        try:
            os.remove('escpos_devices.pickle')
        except OSError:
            pass
        return 'The list of supported devices has been reset to factory defaults.<br/><a href="/hw_proxy/status">Ok</a>'
formiano/enigma2 | lib/python/Components/Converter/DRRefString.py | 3 | 1790 | # Embedded file name: /usr/lib/enigma2/python/Components/Converter/DRRefString.py
from Components.Converter.Converter import Converter
from Components.Element import cached
from Screens.InfoBar import InfoBar
class DRRefString(Converter, object):
    """Skin converter returning a service-reference string.

    Modes (selected by the skin argument):
      'CurrentRef'    -> CURRENT: reference of the service currently selected
                         in the channel-selection list.
      'ServicelistRef'-> EVENT: reference of the source's service, with
                         special handling for 1:7:0:/1:7:1: directory refs.
    """
    CURRENT = 0
    EVENT = 1

    def __init__(self, type):
        Converter.__init__(self, type)
        self.CHANSEL = None
        # Map the skin's converter argument onto a mode constant; raises
        # KeyError for unknown arguments.
        self.type = {'CurrentRef': self.CURRENT,
         'ServicelistRef': self.EVENT}[type]
        return

    @cached
    def getText(self):
        if self.type == self.EVENT:
            antw = str(self.source.service.toString())
            if antw[:6] == '1:7:0:':
                # NOTE(review): for 'ORDER BY name:'-style refs, return the
                # first whitespace-separated token after the clause --
                # presumably a bouquet/file path; verify against the skins
                # that use this converter.
                teilantw = antw.split('ORDER BY name:')
                if len(teilantw) > 1:
                    teil2antw = teilantw[1].split()
                    if len(teil2antw) > 0:
                        return teil2antw[0]
            elif antw[:6] == '1:7:1:':
                # Return the segment between the first two dots.
                teilantw = antw.split('.')
                if len(teilantw) > 1:
                    return teilantw[1]
            return antw
        elif self.type == self.CURRENT:
            # NOTE(review): `== None` works but `is None` is the idiomatic test.
            if self.CHANSEL == None:
                self.CHANSEL = InfoBar.instance.servicelist
            # While a movie player / movie selection dialog is on the stack,
            # keep whatever text was already displayed.
            if len(InfoBar.instance.session.dialog_stack) > 1:
                for zz in InfoBar.instance.session.dialog_stack:
                    if str(zz[0]) == "<class 'Screens.MovieSelection.MovieSelection'>" or str(InfoBar.instance.session.dialog_stack[1][0]) == "<class 'Screens.InfoBar.MoviePlayer'>":
                        return self.source.text
            vSrv = self.CHANSEL.servicelist.getCurrent()
            return str(vSrv.toString())
        else:
            return 'na'
        return

    text = property(getText)
Elandril/Sick-Beard | lib/hachoir_parser/misc/ttf.py | 90 | 9433 | """
TrueType Font parser.
Documents:
- "An Introduction to TrueType Fonts: A look inside the TTF format"
written by "NRSI: Computers & Writing Systems"
http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&item_id=IWS-Chapter08
Author: Victor Stinner
Creation date: 2007-02-08
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt16, UInt32, Bit, Bits,
PaddingBits, NullBytes,
String, RawBytes, Bytes, Enum,
TimestampMac32)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
# Parser sanity limits.
MAX_NAME_COUNT = 300
MIN_NB_TABLE = 3
MAX_NB_TABLE = 30

# 'head' table font-direction hint values.
DIRECTION_NAME = {
    0: u"Mixed directional",
    1: u"Left to right",
    2: u"Left to right + neutrals",
    -1: u"Right to left",
    -2: u"Right to left + neutrals",
}

# 'name' table record identifiers.
NAMEID_NAME = {
    0: u"Copyright notice",
    1: u"Font family name",
    2: u"Font subfamily name",
    3: u"Unique font identifier",
    4: u"Full font name",
    5: u"Version string",
    6: u"Postscript name",
    7: u"Trademark",
    8: u"Manufacturer name",
    9: u"Designer",
    10: u"Description",
    11: u"URL Vendor",
    12: u"URL Designer",
    13: u"License Description",
    14: u"License info URL",
    16: u"Preferred Family",
    17: u"Preferred Subfamily",
    18: u"Compatible Full",
    19: u"Sample text",
    20: u"PostScript CID findfont name",
}

# 'name' table platform identifiers.
PLATFORM_NAME = {
    0: "Unicode",
    1: "Macintosh",
    2: "ISO",
    3: "Microsoft",
    4: "Custom",
}

CHARSET_MAP = {
    # (platform, encoding) => charset
    0: {3: "UTF-16-BE"},
    1: {0: "MacRoman"},
    3: {1: "UTF-16-BE"},
}
class TableHeader(FieldSet):
    """One entry of the TTF table directory: tag, checksum, offset and size."""

    def createFields(self):
        yield String(self, "tag", 4)
        yield textHandler(UInt32(self, "checksum"), hexadecimal)
        yield UInt32(self, "offset")
        yield filesizeHandler(UInt32(self, "size"))

    def createDescription(self):
        return "Table entry: %s (%s)" % (self["tag"].display, self["size"].display)
class NameHeader(FieldSet):
    """One record of the 'name' table index: platform/encoding/language,
    name id, and the length/offset of the string value."""

    def createFields(self):
        yield Enum(UInt16(self, "platformID"), PLATFORM_NAME)
        yield UInt16(self, "encodingID")
        yield UInt16(self, "languageID")
        yield Enum(UInt16(self, "nameID"), NAMEID_NAME)
        yield UInt16(self, "length")
        yield UInt16(self, "offset")

    def getCharset(self):
        """Map (platformID, encodingID) to a Python codec name; fall back
        to ISO-8859-1 (with a warning) for unknown combinations."""
        platform = self["platformID"].value
        encoding = self["encodingID"].value
        try:
            return CHARSET_MAP[platform][encoding]
        except KeyError:
            self.warning("TTF: Unknown charset (%s,%s)" % (platform, encoding))
            return "ISO-8859-1"

    def createDescription(self):
        platform = self["platformID"].display
        name = self["nameID"].display
        return "Name record: %s (%s)" % (name, platform)
def parseFontHeader(self):
    """Parse the 'head' table: versions, checksum, magic, flags, units,
    timestamps, bounding box, style bits and layout hints."""
    yield UInt16(self, "maj_ver", "Major version")
    yield UInt16(self, "min_ver", "Minor version")
    yield UInt16(self, "font_maj_ver", "Font major version")
    yield UInt16(self, "font_min_ver", "Font minor version")
    yield textHandler(UInt32(self, "checksum"), hexadecimal)
    yield Bytes(self, "magic", 4, r"Magic string (\x5F\x0F\x3C\xF5)")
    if self["magic"].value != "\x5F\x0F\x3C\xF5":
        raise ParserError("TTF: invalid magic of font header")

    # Flags
    yield Bit(self, "y0", "Baseline at y=0")
    yield Bit(self, "x0", "Left sidebearing point at x=0")
    yield Bit(self, "instr_point", "Instructions may depend on point size")
    yield Bit(self, "ppem", "Force PPEM to integer values for all")
    yield Bit(self, "instr_width", "Instructions may alter advance width")
    yield Bit(self, "vertical", "e laid out vertically?")
    yield PaddingBits(self, "reserved[]", 1)
    yield Bit(self, "linguistic", "Requires layout for correct linguistic rendering?")
    yield Bit(self, "gx", "Metamorphosis effects?")
    yield Bit(self, "strong", "Contains strong right-to-left glyphs?")
    yield Bit(self, "indic", "contains Indic-style rearrangement effects?")
    yield Bit(self, "lossless", "Data is lossless (Agfa MicroType compression)")
    yield Bit(self, "converted", "Font converted (produce compatible metrics)")
    yield Bit(self, "cleartype", "Optimised for ClearType")
    yield Bits(self, "adobe", 2, "(used by Adobe)")

    yield UInt16(self, "unit_per_em", "Units per em")
    if not(16 <= self["unit_per_em"].value <= 16384):
        raise ParserError("TTF: Invalid unit/em value")
    yield UInt32(self, "created_high")
    yield TimestampMac32(self, "created")
    yield UInt32(self, "modified_high")
    yield TimestampMac32(self, "modified")
    yield UInt16(self, "xmin")
    yield UInt16(self, "ymin")
    yield UInt16(self, "xmax")
    yield UInt16(self, "ymax")

    # Mac style
    yield Bit(self, "bold")
    yield Bit(self, "italic")
    yield Bit(self, "underline")
    yield Bit(self, "outline")
    yield Bit(self, "shadow")
    yield Bit(self, "condensed", "(narrow)")
    yield Bit(self, "expanded")
    yield PaddingBits(self, "reserved[]", 9)

    yield UInt16(self, "lowest", "Smallest readable size in pixels")
    yield Enum(UInt16(self, "font_dir", "Font direction hint"), DIRECTION_NAME)
    yield Enum(UInt16(self, "ofst_format"), {0: "short offsets", 1: "long"})
    yield UInt16(self, "glyph_format", "(=0)")
def parseNames(self):
    """Parse a TrueType 'name' table.

    Yields the table header, then the name-record index, then the string
    values the records point at, reading values in ascending offset order.
    Raises ParserError on an unsupported format or an excessive record count.
    """
    # Read header
    yield UInt16(self, "format")
    if self["format"].value != 0:
        raise ParserError("TTF (names): Invalid format (%u)" % self["format"].value)
    yield UInt16(self, "count")
    yield UInt16(self, "offset")
    if MAX_NAME_COUNT < self["count"].value:
        raise ParserError("Invalid number of names (%s)"
                          % self["count"].value)

    # Read name index
    entries = []
    for index in xrange(self["count"].value):
        entry = NameHeader(self, "header[]")
        yield entry
        entries.append(entry)

    # Sort names by their offset so values can be read sequentially
    entries.sort(key=lambda field: field["offset"].value)

    # Read name value
    last = None
    for entry in entries:
        # Skip duplicates values (same offset and length as the previous one)
        new = (entry["offset"].value, entry["length"].value)
        if last and last == new:
            self.warning("Skip duplicate %s %s" % (entry.name, new))
            continue
        last = (entry["offset"].value, entry["length"].value)

        # Skip negative offset
        # NOTE(review): this actually skips any value located before the
        # current read position, not only negative offsets — confirm intent.
        offset = entry["offset"].value + self["offset"].value
        if offset < self.current_size//8:
            self.warning("Skip value %s (negative offset)" % entry.name)
            continue

        # Add padding if any (gap between current position and the value)
        padding = self.seekByte(offset, relative=True, null=True)
        if padding:
            yield padding

        # Read value using the charset declared in the name record
        size = entry["length"].value
        if size:
            yield String(self, "value[]", size, entry.description, charset=entry.getCharset())

    # Pad out to the declared end of the table
    padding = (self.size - self.current_size) // 8
    if padding:
        yield NullBytes(self, "padding_end", padding)
class Table(FieldSet):
    """One TrueType table.

    Dispatches to a dedicated parser when the table tag is known
    (see TAG_INFO); otherwise exposes the table content as raw bytes.
    """
    TAG_INFO = {
        "head": ("header", "Font header", parseFontHeader),
        "name": ("names", "Names", parseNames),
    }

    def __init__(self, parent, name, table, **kw):
        FieldSet.__init__(self, parent, name, **kw)
        self.table = table
        info = self.TAG_INFO.get(table["tag"].value)
        if info is not None:
            self._name, self._description, self.parser = info
        else:
            self.parser = None

    def createFields(self):
        if not self.parser:
            # Unknown tag: keep the payload opaque.
            yield RawBytes(self, "content", self.size//8)
            return
        for field in self.parser(self):
            yield field

    def createDescription(self):
        return "Table %s (%s)" % (self.table["tag"].value, self.table.path)
class TrueTypeFontFile(Parser):
    """Parser for a TrueType font file.

    Layout: offset table (versions, table count), the table directory,
    then the tables themselves, visited in ascending file offset order.
    """
    endian = BIG_ENDIAN
    PARSER_TAGS = {
        "id": "ttf",
        "category": "misc",
        "file_ext": ("ttf",),
        "min_size": 10*8, # FIXME
        "description": "TrueType font",
    }

    def validate(self):
        if self["maj_ver"].value != 1:
            return "Invalid major version (%u)" % self["maj_ver"].value
        if self["min_ver"].value != 0:
            return "Invalid minor version (%u)" % self["min_ver"].value
        nb_table = self["nb_table"].value
        if not (MIN_NB_TABLE <= nb_table <= MAX_NB_TABLE):
            return "Invalid number of table (%u)" % nb_table
        return True

    def createFields(self):
        # Offset table
        yield UInt16(self, "maj_ver", "Major version")
        yield UInt16(self, "min_ver", "Minor version")
        yield UInt16(self, "nb_table")
        yield UInt16(self, "search_range")
        yield UInt16(self, "entry_selector")
        yield UInt16(self, "range_shift")

        # Table directory
        headers = []
        for index in xrange(self["nb_table"].value):
            header = TableHeader(self, "table_hdr[]")
            yield header
            headers.append(header)

        # Visit tables in file order, padding over any gaps.
        headers.sort(key=lambda field: field["offset"].value)
        for header in headers:
            padding = self.seekByte(header["offset"].value, null=True)
            if padding:
                yield padding
            size = header["size"].value
            if size:
                yield Table(self, "table[]", header, size=size*8)

        # Trailing padding up to the end of the file.
        padding = self.seekBit(self.size, null=True)
        if padding:
            yield padding
| gpl-3.0 |
fafaman/django | django/contrib/sessions/middleware.py | 256 | 2658 | import time
from importlib import import_module
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
class SessionMiddleware(object):
    """Attach a session object to each request and persist it on response."""

    def __init__(self):
        # Resolve the configured session backend once, at middleware creation.
        engine = import_module(settings.SESSION_ENGINE)
        self.SessionStore = engine.SessionStore

    def process_request(self, request):
        # Load the session keyed by the session cookie; the cookie may be
        # absent (None), in which case the store starts a fresh session.
        session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
        request.session = self.SessionStore(session_key)

    def process_response(self, request, response):
        """
        If request.session was modified, or if the configuration is to save the
        session every time, save the changes and set a session cookie or delete
        the session cookie if the session has been emptied.
        """
        try:
            accessed = request.session.accessed
            modified = request.session.modified
            empty = request.session.is_empty()
        except AttributeError:
            # request.session was never set (process_request did not run),
            # so there is nothing to persist.
            pass
        else:
            # First check if we need to delete this cookie.
            # The session should be deleted only if the session is entirely empty
            if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
                response.delete_cookie(settings.SESSION_COOKIE_NAME,
                    domain=settings.SESSION_COOKIE_DOMAIN)
            else:
                if accessed:
                    # The response content may depend on the session cookie:
                    # tell caches to vary on it.
                    patch_vary_headers(response, ('Cookie',))
                if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty:
                    if request.session.get_expire_at_browser_close():
                        # Browser-session cookie: no explicit expiry.
                        max_age = None
                        expires = None
                    else:
                        max_age = request.session.get_expiry_age()
                        expires_time = time.time() + max_age
                        expires = cookie_date(expires_time)
                    # Save the session data and refresh the client cookie.
                    # Skip session save for 500 responses, refs #3881.
                    if response.status_code != 500:
                        request.session.save()
                        response.set_cookie(settings.SESSION_COOKIE_NAME,
                                request.session.session_key, max_age=max_age,
                                expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
                                path=settings.SESSION_COOKIE_PATH,
                                secure=settings.SESSION_COOKIE_SECURE or None,
                                httponly=settings.SESSION_COOKIE_HTTPONLY or None)
        return response
| bsd-3-clause |
Mozu/mozu-python-sdk | test/functional_test/security_test/userauthenticator_test.py | 2 | 1389 | import unittest
from mozurestsdk import mozuclient;
from mozurestsdk.security.userauthenticator import UserAuthenticator;
from mozurestsdk import util;
class UserAuthenticator_Test(unittest.TestCase):
    """Functional tests for UserAuthenticator (tenant and developer account).

    Credentials and connection settings are read from CONFIG_FILE; the SDK
    client is pointed at the same file in setUp.
    """

    # NOTE(review): hard-coded Windows path; the file must exist for these
    # functional tests to run.
    CONFIG_FILE = "c:\projects\mozuconfig.txt"

    def setUp(self):
        self.config = util.readConfigFile(self.CONFIG_FILE)
        mozuclient.configure(config=self.CONFIG_FILE)

    def _authenticate(self, **kwargs):
        """Create, authenticate and refresh a UserAuthenticator.

        Asserts that an access token was obtained and returns the
        authenticator so callers can check scope-specific fields.
        """
        userName = self.config.get("userName", None)
        password = self.config.get("password", None)
        userAuth = UserAuthenticator(userName, password, **kwargs)
        userAuth.authenticate()
        userAuth.refreshAuth()
        self.assertIsNotNone(userAuth.auth)
        self.assertIsNotNone(userAuth.auth["accessToken"])
        return userAuth

    def test_tenantAuth(self):
        # Authenticate in the scope of a tenant.
        tenantId = self.config.get("tenantId", None)
        userAuth = self._authenticate(tenantId=tenantId)
        self.assertEqual(str(userAuth.auth["tenant"]["id"]), tenantId)

    def test_devAccountAuth(self):
        # Authenticate in the scope of a developer account.
        # Fix: the old version also read "baseAuthUrl" into an unused local.
        devAccountId = self.config.get("devAccountId", None)
        userAuth = self._authenticate(devAccountId=devAccountId)
        self.assertEqual(str(userAuth.auth["account"]["id"]), devAccountId)

if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
haakenlid/django-extensions | tests/test_models.py | 4 | 1590 | # -*- coding: utf-8 -*-
from django.test import TestCase
from django_extensions.db.models import ActivatorModel
from .testapp.models import Post
class ActivatorModelTestCase(TestCase):
    """Checks the active()/inactive() queryset helpers of ActivatorModel."""

    def test_active_includes_active(self):
        entry = Post.objects.create(status=ActivatorModel.ACTIVE_STATUS)
        self.assertIn(entry, Post.objects.active())
        entry.delete()

    def test_active_excludes_inactive(self):
        entry = Post.objects.create(status=ActivatorModel.INACTIVE_STATUS)
        self.assertNotIn(entry, Post.objects.active())
        entry.delete()

    def test_inactive_includes_inactive(self):
        entry = Post.objects.create(status=ActivatorModel.INACTIVE_STATUS)
        self.assertIn(entry, Post.objects.inactive())
        entry.delete()

    def test_inactive_excludes_active(self):
        entry = Post.objects.create(status=ActivatorModel.ACTIVE_STATUS)
        self.assertNotIn(entry, Post.objects.inactive())
        entry.delete()

    def test_active_is_chainable(self):
        # active() must work on an already-filtered queryset.
        entry = Post.objects.create(title='Foo', status=ActivatorModel.ACTIVE_STATUS)
        self.assertIn(entry, Post.objects.filter(title='Foo').active())
        entry.delete()

    def test_inactive_is_chainable(self):
        # inactive() must work on an already-filtered queryset.
        entry = Post.objects.create(title='Foo', status=ActivatorModel.INACTIVE_STATUS)
        self.assertIn(entry, Post.objects.filter(title='Foo').inactive())
        entry.delete()
| mit |
zulip/django | tests/bash_completion/tests.py | 327 | 3888 | """
A series of tests to establish that the command-line bash completion works.
"""
import os
import sys
import unittest
from django.apps import apps
from django.core.management import ManagementUtility
from django.test.utils import captured_stdout
class BashCompletionTests(unittest.TestCase):
    """
    Testing the Python level bash completion code.
    This requires setting up the environment as if we got passed data
    from bash.
    """

    def setUp(self):
        # Remember any pre-existing value so tearDown can restore it exactly.
        self.old_DJANGO_AUTO_COMPLETE = os.environ.get('DJANGO_AUTO_COMPLETE')
        os.environ['DJANGO_AUTO_COMPLETE'] = '1'

    def tearDown(self):
        if self.old_DJANGO_AUTO_COMPLETE:
            os.environ['DJANGO_AUTO_COMPLETE'] = self.old_DJANGO_AUTO_COMPLETE
        else:
            del os.environ['DJANGO_AUTO_COMPLETE']

    def _user_input(self, input_str):
        """
        Set the environment and the list of command line arguments.

        This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is
        an array consisting of the individual words in the current command
        line, the latter is the index of the current cursor position, so in
        case a word is completed and the cursor is placed after a whitespace,
        $COMP_CWORD must be incremented by 1:

          * 'django-admin start' -> COMP_CWORD=1
          * 'django-admin startproject' -> COMP_CWORD=1
          * 'django-admin startproject ' -> COMP_CWORD=2
        """
        os.environ['COMP_WORDS'] = input_str
        idx = len(input_str.split(' ')) - 1  # Index of the last word
        comp_cword = idx + 1 if input_str.endswith(' ') else idx
        os.environ['COMP_CWORD'] = str(comp_cword)
        sys.argv = input_str.split()

    def _run_autocomplete(self):
        """Run autocompletion and return captured stdout split into lines."""
        util = ManagementUtility(argv=sys.argv)
        with captured_stdout() as stdout:
            try:
                util.autocomplete()
            except SystemExit:
                # autocomplete() exits the process after printing suggestions.
                pass
        return stdout.getvalue().strip().split('\n')

    def test_django_admin_py(self):
        "django_admin.py will autocomplete option flags"
        self._user_input('django-admin sqlmigrate --verb')
        output = self._run_autocomplete()
        self.assertEqual(output, ['--verbosity='])

    def test_manage_py(self):
        "manage.py will autocomplete option flags"
        self._user_input('manage.py sqlmigrate --verb')
        output = self._run_autocomplete()
        self.assertEqual(output, ['--verbosity='])

    def test_custom_command(self):
        "A custom command can autocomplete option flags"
        self._user_input('django-admin test_command --l')
        output = self._run_autocomplete()
        self.assertEqual(output, ['--list'])

    def test_subcommands(self):
        "Subcommands can be autocompleted"
        self._user_input('django-admin sql')
        output = self._run_autocomplete()
        self.assertEqual(output, ['sqlflush sqlmigrate sqlsequencereset'])

    def test_completed_subcommand(self):
        "Show option flags in case a subcommand is completed"
        self._user_input('django-admin startproject ')  # Trailing whitespace
        output = self._run_autocomplete()
        for item in output:
            self.assertTrue(item.startswith('--'))

    def test_help(self):
        "No errors, just an empty list if there are no autocomplete options"
        self._user_input('django-admin help --')
        output = self._run_autocomplete()
        self.assertEqual(output, [''])

    def test_app_completion(self):
        "Application names will be autocompleted for an AppCommand"
        self._user_input('django-admin sqlmigrate a')
        output = self._run_autocomplete()
        # Expect exactly the installed app labels starting with 'a', sorted.
        a_labels = sorted(app_config.label
                          for app_config in apps.get_app_configs()
                          if app_config.label.startswith('a'))
        self.assertEqual(output, a_labels)
kdart/pycopia | process/pycopia/netcat.py | 1 | 2424 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrapper for the netcat program. Allows TCP port forwarding to stdio. If your
netcat (nc) is suid to root, you can forward privileged ports, such as SMTP.
"""
import sys
from pycopia import proctools
# Locate the netcat binary: some systems install it as "nc", others as
# "netcat".
try:
    NETCAT = proctools.which("nc")
except proctools.NotFoundError:
    NETCAT = proctools.which("netcat")

# netcat versions this module has been verified against (see check_version).
TESTED_VERSIONS = ["[v1.10]"]
def get_netcat(host, port, callback=None, logfile=None, extraoptions=""):
    """get_netcat(host, port, [callback], [logfile], [extraoptions])
    Spawn a netcat client connected to host:port and return the pipe
    process object obtained from the process manager.
    The logfile parameter should be a file-like object (has a 'write' method).
    """
    cmd = "%s %s %s %s" %(NETCAT, extraoptions, host, port)
    pm = proctools.get_procmanager()
    proc = pm.spawnpipe(cmd, callback=callback, logfile=logfile, merge=0)
    return proc
def netcat_server(port, callback=None, logfile=None, extraoptions=""):
    """Spawn netcat listening on *port* and return the pipe process object."""
    options = extraoptions + " -l -p %d" % (port)
    cmd = "%s %s" % (NETCAT, options)
    proc = proctools.get_procmanager().spawnpipe(
        cmd, callback=callback, logfile=logfile, merge=0)
    return proc

netcat_listener = netcat_server # alias
def killall():
    """Kill every running netcat process known to the process manager."""
    for proc in proctools.get_procmanager().getbyname(NETCAT):
        proc.kill()
def netcat_version():
    """netcat_version() Return the version string for the netcat command on this system."""
    proc = proctools.spawnpipe("%s -h" % (NETCAT))
    version = proc.readline()
    proc.read()  # drain and discard the rest of the help output
    proc.wait()
    return version.strip()
def check_version():
    """Checks that the installed netcat program is the same as this module was
    tested with (and written for). Returns 1 on a match, 0 otherwise."""
    if netcat_version() in TESTED_VERSIONS:
        return 1
    return 0
| apache-2.0 |
aroegies/trecrts-tools | trecrts-clients/python/dumb-topic-client/topic_client.py | 2 | 1800 | from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import requests
import json
import argparse
# Module-level state shared with TopicListener below.
cred_file = "oauth_tokens.txt"  # default OAuth credentials file (see --oauth)
hostname = ""
port = ""
clientid = ""  # client id assigned by the broker after registration
topics = dict()  # topid -> lower-cased query string
api_base = ""  # broker URL template, e.g. "http://host:port/%s"
class TopicListener(StreamListener):
    """Tweepy stream listener that reports tweets matching a topic query
    to the broker via its REST API."""

    def on_status(self,status):
        for topid,query in topics.items():
            # Ignore retweets: checked on the first pass, then the whole
            # topic scan for this status is abandoned.
            if hasattr(status,'retweeted_status'):
                break;
            text = status.text.lower()
            if text.find(query) >= 0:
                # Substring match: report this tweet for the topic.
                print status.id,text
                resp = requests.post(api_base % ("tweet/%s/%s/%s" %(topid,status.id,clientid)))
                print resp.status_code
        # Returning True keeps the stream alive.
        return True

    def on_error(self,status_code):
        # Print the error code; returning None keeps tweepy's default
        # error handling.
        print status_code
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="TREC Real-Time Summarization Dummy Client")
    parser.add_argument('--host', help="Hostname of broker", type=str, required=True)
    parser.add_argument('--port', help="Port number of broker", type=str, required=True)
    parser.add_argument('--oauth', help="File to read JSON encoded OAuth credentials", type=str,)
    args = parser.parse_args()

    # Build the broker API base URL template, e.g. "http://host:port/%s".
    hostname = args.host
    port = args.port
    api_base = "http://%s:%s/" % (hostname, port)
    api_base += "%s"

    if args.oauth:
        cred_file = args.oauth

    # Register this client with the broker and fetch the assigned topics.
    resp = requests.post(api_base % ("register/system"), data={"groupid": "uwar"})
    clientid = resp.json()["clientid"]
    resp = requests.get(api_base % ("topics/%s" % clientid))
    for row in resp.json():
        topics[row["topid"]] = row["query"].lower()

    # BUG FIX: the credentials path was hard-coded to 'oauth_tokens.txt',
    # silently ignoring --oauth; honor cred_file and close the file.
    with open(cred_file) as token_file:
        oauth = json.load(token_file)

    # Stream a sample of English tweets through the topic listener.
    listener = TopicListener()
    auth = OAuthHandler(oauth["consumer_key"], oauth["consumer_secret"])
    auth.set_access_token(oauth["access_token"], oauth["access_token_secret"])
    stream = Stream(auth, listener)
    stream.sample(languages=['en'])
juanmont/one | .vscode/extensions/tht13.rst-vscode-2.0.0/src/python/docutils/parsers/rst/languages/af.py | 128 | 3677 | # $Id: af.py 7119 2011-09-02 13:00:23Z milde $
# Author: Jannie Hofmeyr <jhsh@sun.ac.za>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Afrikaans-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
# Afrikaans directive names (as written in reST source) mapped to the
# canonical directive names registered in docutils. Entries still marked
# "(translation required)" have no Afrikaans form yet.
directives = {
      'aandag': 'attention',
      'versigtig': 'caution',
      'code (translation required)': 'code',
      'gevaar': 'danger',
      'fout': 'error',
      'wenk': 'hint',
      'belangrik': 'important',
      'nota': 'note',
      'tip': 'tip', # hint and tip both have the same translation: wenk
      'waarskuwing': 'warning',
      'vermaning': 'admonition',
      'kantstreep': 'sidebar',
      'onderwerp': 'topic',
      'lynblok': 'line-block',
      'math (translation required)': 'math',
      'parsed-literal (translation required)': 'parsed-literal',
      'rubriek': 'rubric',
      'epigraaf': 'epigraph',
      'hoogtepunte': 'highlights',
      'pull-quote (translation required)': 'pull-quote',
      u'compound (translation required)': 'compound',
      u'container (translation required)': 'container',
      #'vrae': 'questions',
      #'qa': 'questions',
      #'faq': 'questions',
      'table (translation required)': 'table',
      'csv-table (translation required)': 'csv-table',
      'list-table (translation required)': 'list-table',
      'meta': 'meta',
      #'beeldkaart': 'imagemap',
      'beeld': 'image',
      'figuur': 'figure',
      'insluiting': 'include',
      'rou': 'raw',
      'vervang': 'replace',
      'unicode': 'unicode', # should this be translated? unikode
      'datum': 'date',
      'klas': 'class',
      'role (translation required)': 'role',
      'default-role (translation required)': 'default-role',
      'title (translation required)': 'title',
      'inhoud': 'contents',
      'sectnum': 'sectnum',
      'section-numbering': 'sectnum',
      u'header (translation required)': 'header',
      u'footer (translation required)': 'footer',
      #'voetnote': 'footnotes',
      #'aanhalings': 'citations',
      'teikennotas': 'target-notes',
      'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Afrikaans name to registered (in directives/__init__.py) directive name
mapping."""
# Afrikaans interpreted-text role names mapped to the canonical role names.
roles = {
    'afkorting': 'abbreviation',
    'ab': 'abbreviation',
    'akroniem': 'acronym',
    'ac': 'acronym',
    u'code (translation required)': 'code',
    'indeks': 'index',
    'i': 'index',
    'voetskrif': 'subscript',
    'sub': 'subscript',
    'boskrif': 'superscript',
    'sup': 'superscript',
    'titelverwysing': 'title-reference',
    'titel': 'title-reference',
    't': 'title-reference',
    'pep-verwysing': 'pep-reference',
    'pep': 'pep-reference',
    'rfc-verwysing': 'rfc-reference',
    'rfc': 'rfc-reference',
    'nadruk': 'emphasis',
    'sterk': 'strong',
    'literal (translation required)': 'literal',
    'math (translation required)': 'math',
    'benoemde verwysing': 'named-reference',
    'anonieme verwysing': 'anonymous-reference',
    'voetnootverwysing': 'footnote-reference',
    'aanhalingverwysing': 'citation-reference',
    'vervangingsverwysing': 'substitution-reference',
    'teiken': 'target',
    'uri-verwysing': 'uri-reference',
    'uri': 'uri-reference',
    'url': 'uri-reference',
    'rou': 'raw',}
"""Mapping of Afrikaans role names to canonical role names for interpreted text.
"""
| apache-2.0 |
vponomaryov/manila | manila/share/drivers/maprfs/driver_util.py | 1 | 13677 | # Copyright (c) 2016, MapR Technologies
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility for processing MapR cluster operations
"""
import json
import pipes
import socket
from oslo_concurrency import processutils
from oslo_log import log
import six
from manila.common import constants
from manila import exception
from manila.i18n import _
from manila.i18n import _LE
from manila import utils
LOG = log.getLogger(__name__)


def get_version_handler(configuration):
    """Return the driver utility object for the cluster.

    Extension point: a different DriverUtil implementation could be
    selected here depending on the MapR cluster version.
    """
    return BaseDriverUtil(configuration)
class BaseDriverUtil(object):
    """Utility class for MapR-FS specific operations.

    maprcli / hadoop-fs commands are executed locally when one of the
    configured CLDB nodes resolves to this host, otherwise over SSH.
    On transport failure the next configured node is tried.
    """
    # Marker substrings in maprcli output.
    NOT_FOUND_MSG = 'No such'
    ERROR_MSG = 'ERROR'

    def __init__(self, configuration):
        self.configuration = configuration
        # host -> (SSHPool, ssh connection) cache.
        self.ssh_connections = {}
        self.hosts = self.configuration.maprfs_clinode_ip
        # IP addresses of this machine, used to detect local execution.
        self.local_hosts = socket.gethostbyname_ex(socket.gethostname())[2]
        self.maprcli_bin = '/usr/bin/maprcli'
        self.hadoop_bin = '/usr/bin/hadoop'

    def _execute(self, *cmd, **kwargs):
        """Run *cmd* on the first reachable cluster node.

        A node that answers is moved to the front of the host list for
        subsequent calls. Raises ProcessExecutionError when the command
        fails natively (maprcli error) or when every host is unreachable.
        """
        # BUG FIX: pop once, outside the retry loop. Popping inside the loop
        # made a caller-supplied check_exit_code=False revert to the default
        # True on the second and later attempts.
        check_exit_code = kwargs.pop('check_exit_code', True)
        for x in range(0, len(self.hosts)):
            try:
                host = self.hosts[x]
                if host in self.local_hosts:
                    # BUG FIX: do not rebind ``cmd`` with the sudo wrapper;
                    # rebinding caused the command to be wrapped twice when a
                    # failed local attempt was retried.
                    local_cmd = self._as_user(
                        cmd, self.configuration.maprfs_ssh_name)
                    out, err = utils.execute(
                        *local_cmd, check_exit_code=check_exit_code)
                else:
                    out, err = self._run_ssh(host, cmd, check_exit_code)
                # Move the available CLDB host to the beginning of the list.
                if x > 0:
                    self.hosts[0], self.hosts[x] = self.hosts[x], self.hosts[0]
                return out, err
            except exception.ProcessExecutionError as e:
                if self._check_error(e):
                    # The command itself failed; retrying elsewhere won't help.
                    raise
                elif x < len(self.hosts) - 1:
                    msg = _LE('Error running SSH command. Trying another host')
                    LOG.error(msg)
                else:
                    raise
            except Exception as e:
                if x < len(self.hosts) - 1:
                    msg = _LE('Error running SSH command. Trying another host')
                    LOG.error(msg)
                else:
                    raise exception.ProcessExecutionError(six.text_type(e))

    def _run_ssh(self, host, cmd_list, check_exit_code=False):
        """Execute *cmd_list* on *host* over SSH, reusing pooled connections."""
        command = ' '.join(pipes.quote(cmd_arg) for cmd_arg in cmd_list)
        connection = self.ssh_connections.get(host)
        if connection is None:
            # First contact with this host: build an SSH connection pool.
            ssh_name = self.configuration.maprfs_ssh_name
            password = self.configuration.maprfs_ssh_pw
            private_key = self.configuration.maprfs_ssh_private_key
            remote_ssh_port = self.configuration.maprfs_ssh_port
            ssh_conn_timeout = self.configuration.ssh_conn_timeout
            min_size = self.configuration.ssh_min_pool_conn
            max_size = self.configuration.ssh_max_pool_conn
            ssh_pool = utils.SSHPool(host,
                                     remote_ssh_port,
                                     ssh_conn_timeout,
                                     ssh_name,
                                     password=password,
                                     privatekey=private_key,
                                     min_size=min_size,
                                     max_size=max_size)
            ssh = ssh_pool.create()
            self.ssh_connections[host] = (ssh_pool, ssh)
        else:
            ssh_pool, ssh = connection
            # Recreate the connection when the transport went stale.
            if not ssh.get_transport().is_active():
                ssh_pool.remove(ssh)
                ssh = ssh_pool.create()
                self.ssh_connections[host] = (ssh_pool, ssh)
        return processutils.ssh_execute(
            ssh,
            command,
            check_exit_code=check_exit_code)

    @staticmethod
    def _check_error(error):
        # Check whether the error was native, i.e. reported by maprcli itself
        # rather than being a transport/SSH failure.
        return BaseDriverUtil.ERROR_MSG in error.stdout

    @staticmethod
    def _as_user(cmd, user):
        """Wrap *cmd* so it runs under *user* via sudo/su."""
        return ['sudo', 'su', '-', user, '-c',
                ' '.join(pipes.quote(cmd_arg) for cmd_arg in cmd)]

    @staticmethod
    def _add_params(cmd, **kwargs):
        """Append '-key value' pairs to a maprcli command list."""
        params = []
        for x in kwargs.keys():
            params.append('-' + x)
            params.append(kwargs[x])
        return cmd + params

    def create_volume(self, name, path, size, **kwargs):
        """Create a MapR-FS volume mounted at *path* with a *size* GiB quota."""
        # Drop any 'quota' kwarg: the quota is set explicitly below.
        if kwargs.get('quota'):
            del kwargs['quota']
        sizestr = six.text_type(size) + 'G'
        cmd = [self.maprcli_bin, 'volume', 'create', '-name',
               name, '-path', path, '-quota',
               sizestr, '-readAce', '', '-writeAce', '']
        cmd = self._add_params(cmd, **kwargs)
        self._execute(*cmd)

    def volume_exists(self, volume_name):
        """Return True when a volume named *volume_name* exists."""
        cmd = [self.maprcli_bin, 'volume', 'info', '-name', volume_name]
        out, __ = self._execute(*cmd, check_exit_code=False)
        return self.NOT_FOUND_MSG not in out

    def delete_volume(self, name):
        """Delete the volume; a missing volume is silently ignored."""
        cmd = [self.maprcli_bin, 'volume', 'remove', '-name', name, '-force',
               'true']
        out, __ = self._execute(*cmd, check_exit_code=False)
        # If the volume does not exist, do not raise ProcessExecutionError.
        if self.ERROR_MSG in out and self.NOT_FOUND_MSG not in out:
            raise exception.ProcessExecutionError(out)

    def set_volume_size(self, name, size):
        """Set the volume quota to *size* GiB."""
        sizestr = six.text_type(size) + 'G'
        cmd = [self.maprcli_bin, 'volume', 'modify', '-name', name, '-quota',
               sizestr]
        self._execute(*cmd)

    def create_snapshot(self, name, volume_name):
        """Create snapshot *name* of *volume_name*."""
        cmd = [self.maprcli_bin, 'volume', 'snapshot', 'create',
               '-snapshotname',
               name, '-volume', volume_name]
        self._execute(*cmd)

    def delete_snapshot(self, name, volume_name):
        """Delete a snapshot; a missing snapshot is silently ignored."""
        cmd = [self.maprcli_bin, 'volume', 'snapshot', 'remove',
               '-snapshotname',
               name, '-volume', volume_name]
        out, __ = self._execute(*cmd, check_exit_code=False)
        # If the snapshot does not exist, do not raise ProcessExecutionError.
        if self.ERROR_MSG in out and self.NOT_FOUND_MSG not in out:
            raise exception.ProcessExecutionError(out)

    def get_volume_info(self, volume_name, columns=None):
        """Return volume info as a dict, optionally limited to *columns*."""
        cmd = [self.maprcli_bin, 'volume', 'info', '-name', volume_name,
               '-json']
        if columns:
            cmd += ['-columns', ','.join(columns)]
        out, __ = self._execute(*cmd)
        return json.loads(out)['data'][0]

    def get_volume_info_by_path(self, volume_path, columns=None,
                                check_if_exists=False):
        """Return volume info looked up by mount path.

        With check_if_exists=True, return None instead of raising when the
        volume does not exist.
        """
        cmd = [self.maprcli_bin, 'volume', 'info', '-path', volume_path,
               '-json']
        if columns:
            cmd += ['-columns', ','.join(columns)]
        out, __ = self._execute(*cmd, check_exit_code=not check_if_exists)
        if check_if_exists and self.NOT_FOUND_MSG in out:
            return None
        return json.loads(out)['data'][0]

    def get_snapshot_list(self, volume_name=None, volume_path=None):
        """Return the snapshot names of a volume given by name or by path."""
        params = {}
        if volume_name:
            params['volume'] = volume_name
        if volume_path:
            # BUG FIX: this previously assigned volume_name, so snapshot
            # lookups by path never selected the right volume.
            params['path'] = volume_path
        # BUG FIX: the base command used to contain a dangling '-volume'
        # flag without a value; the selector flags are added via params.
        cmd = [self.maprcli_bin, 'volume', 'snapshot', 'list',
               '-columns',
               'snapshotname', '-json']
        cmd = self._add_params(cmd, **params)
        out, __ = self._execute(*cmd)
        return [x['snapshotname'] for x in json.loads(out)['data']]

    def rename_volume(self, name, new_name):
        """Rename volume *name* to *new_name*."""
        cmd = [self.maprcli_bin, 'volume', 'rename', '-name', name, '-newname',
               new_name]
        self._execute(*cmd)

    def fs_capacity(self):
        """Return (total, free) MapR-FS capacity parsed from hadoop fs -df."""
        cmd = [self.hadoop_bin, 'fs', '-df']
        out, err = self._execute(*cmd)
        lines = out.splitlines()
        try:
            # Second line holds the values; columns 1 and 3 are size and
            # available space.
            fields = lines[1].split()
            total = int(fields[1])
            free = int(fields[3])
        except (IndexError, ValueError):
            msg = _('Failed to get MapR-FS capacity info.')
            LOG.exception(msg)
            raise exception.ProcessExecutionError(msg)
        return total, free

    def maprfs_ls(self, path):
        """Return the raw output of ``hadoop fs -ls path``."""
        cmd = [self.hadoop_bin, 'fs', '-ls', path]
        out, __ = self._execute(*cmd)
        return out

    def maprfs_cp(self, source, dest):
        """Copy in MapR-FS, preserving attributes (-p)."""
        cmd = [self.hadoop_bin, 'fs', '-cp', '-p', source, dest]
        self._execute(*cmd)

    def maprfs_chmod(self, dest, mod):
        """Change the mode of *dest* in MapR-FS."""
        cmd = [self.hadoop_bin, 'fs', '-chmod', mod, dest]
        self._execute(*cmd)

    def maprfs_du(self, path):
        """Return the space used under *path* (first field of fs -du -s)."""
        cmd = [self.hadoop_bin, 'fs', '-du', '-s', path]
        out, __ = self._execute(*cmd)
        return int(out.split(' ')[0])

    def check_state(self):
        """Return True when MapR-FS answers a simple listing of '/'."""
        cmd = [self.hadoop_bin, 'fs', '-ls', '/']
        out, __ = self._execute(*cmd, check_exit_code=False)
        return 'Found' in out

    def dir_not_empty(self, path):
        """Return True when the directory exists and contains entries."""
        cmd = [self.hadoop_bin, 'fs', '-ls', path]
        out, __ = self._execute(*cmd, check_exit_code=False)
        return 'Found' in out

    def set_volume_ace(self, volume_name, access_rules):
        """Replace the volume's read/write ACEs with *access_rules*."""
        read_accesses = []
        write_accesses = []
        for access_rule in access_rules:
            if access_rule['access_level'] == constants.ACCESS_LEVEL_RO:
                read_accesses.append(access_rule['access_to'])
            elif access_rule['access_level'] == constants.ACCESS_LEVEL_RW:
                # rw grants both read and write entries.
                read_accesses.append(access_rule['access_to'])
                write_accesses.append(access_rule['access_to'])

        def rule_type(access_to):
            # ACE entries are prefixed g: (group) or u: (user).
            if self.group_exists(access_to):
                return 'g'
            elif self.user_exists(access_to):
                return 'u'
            else:
                # If neither user nor group exists, fall back to a group rule.
                return 'g'

        read_accesses_string = '|'.join(
            map(lambda x: rule_type(x) + ':' + x, read_accesses))
        write_accesses_string = '|'.join(
            map(lambda x: rule_type(x) + ':' + x, write_accesses))
        cmd = [self.maprcli_bin, 'volume', 'modify', '-name', volume_name,
               '-readAce', read_accesses_string, '-writeAce',
               write_accesses_string]
        self._execute(*cmd)

    def add_volume_ace_rules(self, volume_name, access_rules):
        """Merge *access_rules* into the volume's existing ACEs."""
        if not access_rules:
            return
        access_rules_map = self.get_access_rules(volume_name)
        for access_rule in access_rules:
            access_rules_map[access_rule['access_to']] = access_rule
        self.set_volume_ace(volume_name, access_rules_map.values())

    def remove_volume_ace_rules(self, volume_name, access_rules):
        """Remove *access_rules* from the volume's existing ACEs."""
        if not access_rules:
            return
        access_rules_map = self.get_access_rules(volume_name)
        for access_rule in access_rules:
            if access_rules_map.get(access_rule['access_to']):
                del access_rules_map[access_rule['access_to']]
        self.set_volume_ace(volume_name, access_rules_map.values())

    def get_access_rules(self, volume_name):
        """Return {name: rule dict} parsed from the volume's read/write ACEs."""
        info = self.get_volume_info(volume_name)
        aces = info['volumeAces']
        read_ace = aces['readAce']
        write_ace = aces['writeAce']
        access_rules_map = {}
        # Read entries first so write entries upgrade them to rw.
        self._retrieve_access_rules_from_ace(read_ace, 'r', access_rules_map)
        self._retrieve_access_rules_from_ace(write_ace, 'w', access_rules_map)
        return access_rules_map

    def _retrieve_access_rules_from_ace(self, ace, ace_type, access_rules_map):
        """Parse one ACE string ('u:name|g:name|...') into *access_rules_map*."""
        access = constants.ACCESS_LEVEL_RW if ace_type == 'w' else (
            constants.ACCESS_LEVEL_RO)
        # 'p' and '' ACEs carry no explicit user/group entries.
        if ace not in ['p', '']:
            write_rules = [x.strip() for x in ace.split('|')]
            for user in write_rules:
                rule_type, username = user.split(':')
                if rule_type not in ['u', 'g']:
                    continue
                access_rules_map[username] = {
                    'access_level': access,
                    'access_to': username,
                    'access_type': 'user',
                }

    def user_exists(self, user):
        """Return True when *user* is known to the OS (getent passwd)."""
        cmd = ['getent', 'passwd', user]
        out, __ = self._execute(*cmd, check_exit_code=False)
        return out != ''

    def group_exists(self, group):
        """Return True when *group* is known to the OS (getent group)."""
        cmd = ['getent', 'group', group]
        out, __ = self._execute(*cmd, check_exit_code=False)
        return out != ''

    def get_cluster_name(self):
        """Return the MapR cluster name taken from the dashboard info."""
        cmd = [self.maprcli_bin, 'dashboard', 'info', '-json']
        out, __ = self._execute(*cmd)
        try:
            return json.loads(out)['data'][0]['cluster']['name']
        except (IndexError, ValueError) as e:
            msg = (_("Failed to parse cluster name. Error: %s") % e)
            raise exception.ProcessExecutionError(msg)
| apache-2.0 |
wylwang/vnpy | archive/demo/ltsdemo/lts_data_type.py | 167 | 86932 | # encoding: UTF-8
defineDict = {}
typedefDict = {}
#//////////////////////////////////////////////////////////////////////
#@company shanghai liber information Technology Co.,Ltd
#@file SecurityFtdcUserApiDataType.h
#@brief 定义业务数据类型
#//////////////////////////////////////////////////////////////////////
#//////////////////////////////////////////////////////////////////////
#TFtdcErrorIDType是一个错误代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcErrorIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcErrorMsgType是一个错误信息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcErrorMsgType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeIDType是一个交易所代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcExchangeIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeNameType是一个交易所名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcExchangeNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangePropertyType是一个交易所属性类型
#//////////////////////////////////////////////////////////////////////
#正常
defineDict["SECURITY_FTDC_EXP_Normal"] = '0'
#根据成交生成报单
defineDict["SECURITY_FTDC_EXP_GenOrderByTrade"] = '1'
typedefDict["TSecurityFtdcExchangePropertyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeConnectStatusType是一个交易所连接状态类型
#//////////////////////////////////////////////////////////////////////
#没有任何连接
defineDict["SECURITY_FTDC_ECS_NoConnection"] = '1'
#已经发出合约查询请求
defineDict["SECURITY_FTDC_ECS_QryInstrumentSent"] = '2'
#已经获取信息
defineDict["SECURITY_FTDC_ECS_GotInformation"] = '9'
typedefDict["TSecurityFtdcExchangeConnectStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDateType是一个日期类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcDateType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTimeType是一个时间类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcTimeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInstrumentIDType是一个合约代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcInstrumentIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcProductNameType是一个产品名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcProductNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcProductClassType是一个产品类型类型
#//////////////////////////////////////////////////////////////////////
#期货
defineDict["SECURITY_FTDC_PC_Futures"] = '1'
#期权
defineDict["SECURITY_FTDC_PC_Options"] = '2'
#组合
defineDict["SECURITY_FTDC_PC_Combination"] = '3'
#即期
defineDict["SECURITY_FTDC_PC_Spot"] = '4'
#期转现
defineDict["SECURITY_FTDC_PC_EFP"] = '5'
#证券A股
defineDict["SECURITY_FTDC_PC_StockA"] = '6'
#证券B股
defineDict["SECURITY_FTDC_PC_StockB"] = '7'
#ETF
defineDict["SECURITY_FTDC_PC_ETF"] = '8'
#ETF申赎
defineDict["SECURITY_FTDC_PC_ETFPurRed"] = '9'
typedefDict["TSecurityFtdcProductClassType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVolumeMultipleType是一个合约数量乘数类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVolumeMultipleType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcPriceType是一个价格类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcPriceType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcVolumeType是一个数量类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVolumeType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcPositionTypeType是一个持仓类型类型
#//////////////////////////////////////////////////////////////////////
#净持仓
defineDict["SECURITY_FTDC_PT_Net"] = '1'
#综合持仓
defineDict["SECURITY_FTDC_PT_Gross"] = '2'
typedefDict["TSecurityFtdcPositionTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPositionDateTypeType是一个持仓日期类型类型
#//////////////////////////////////////////////////////////////////////
#使用历史持仓
defineDict["SECURITY_FTDC_PDT_UseHistory"] = '1'
#不使用历史持仓
defineDict["SECURITY_FTDC_PDT_NoUseHistory"] = '2'
typedefDict["TSecurityFtdcPositionDateTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeInstIDType是一个合约在交易所的代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcExchangeInstIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcYearType是一个年份类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcYearType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcMonthType是一个月份类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcMonthType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcInstLifePhaseType是一个合约生命周期状态类型
#//////////////////////////////////////////////////////////////////////
#未上市
defineDict["SECURITY_FTDC_IP_NotStart"] = '0'
#上市
defineDict["SECURITY_FTDC_IP_Started"] = '1'
#停牌
defineDict["SECURITY_FTDC_IP_Pause"] = '2'
#到期
defineDict["SECURITY_FTDC_IP_Expired"] = '3'
typedefDict["TSecurityFtdcInstLifePhaseType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBoolType是一个布尔型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBoolType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcRightModelIDType是一个股票权限模版代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcRightModelIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRightModelNameType是一个股票权限模版名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcRightModelNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPosTradeTypeType是一个持仓交易类型类型
#//////////////////////////////////////////////////////////////////////
#今日新增持仓能卖出
defineDict["SECURITY_FTDC_PTT_CanSelTodayPos"] = '1'
#今日新增持仓不能卖出
defineDict["SECURITY_FTDC_PTT_CannotSellTodayPos"] = '2'
typedefDict["TSecurityFtdcPosTradeTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTraderIDType是一个交易所交易员代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcTraderIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcParticipantIDType是一个会员代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcParticipantIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPasswordType是一个密码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcPasswordType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerIDType是一个经纪公司代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBrokerIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderLocalIDType是一个本地报单编号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcOrderLocalIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerAbbrType是一个经纪公司简称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBrokerAbbrType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerNameType是一个经纪公司名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBrokerNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvestorIDType是一个投资者代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcInvestorIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPartyNameType是一个参与人名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcPartyNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIdCardTypeType是一个证件类型类型
#//////////////////////////////////////////////////////////////////////
#组织机构代码
defineDict["SECURITY_FTDC_ICT_EID"] = '0'
#身份证
defineDict["SECURITY_FTDC_ICT_IDCard"] = '1'
#军官证
defineDict["SECURITY_FTDC_ICT_OfficerIDCard"] = '2'
#警官证
defineDict["SECURITY_FTDC_ICT_PoliceIDCard"] = '3'
#士兵证
defineDict["SECURITY_FTDC_ICT_SoldierIDCard"] = '4'
#户口簿
defineDict["SECURITY_FTDC_ICT_HouseholdRegister"] = '5'
#护照
defineDict["SECURITY_FTDC_ICT_Passport"] = '6'
#台胞证
defineDict["SECURITY_FTDC_ICT_TaiwanCompatriotIDCard"] = '7'
#回乡证
defineDict["SECURITY_FTDC_ICT_HomeComingCard"] = '8'
#营业执照号
defineDict["SECURITY_FTDC_ICT_LicenseNo"] = '9'
#税务登记号
defineDict["SECURITY_FTDC_ICT_TaxNo"] = 'A'
#其他证件
defineDict["SECURITY_FTDC_ICT_OtherCard"] = 'x'
typedefDict["TSecurityFtdcIdCardTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIdentifiedCardNoType是一个证件号码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcIdentifiedCardNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcClientIDType是一个交易编码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcClientIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAccountIDType是一个投资者帐号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcAccountIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcClientTypeType是一个交易编码类型类型
#//////////////////////////////////////////////////////////////////////
#普通
defineDict["SECURITY_FTDC_CLT_Normal"] = '1'
#信用交易
defineDict["SECURITY_FTDC_CLT_Credit"] = '2'
#衍生品账户
defineDict["SECURITY_FTDC_CLT_Derive"] = '3'
#其他类型
defineDict["SECURITY_FTDC_CLT_Other"] = '4'
typedefDict["TSecurityFtdcClientTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvestorGroupNameType是一个投资者分组名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcInvestorGroupNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserIDType是一个用户代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcUserIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserNameType是一个用户名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcUserNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFunctionCodeType是一个功能代码类型
#//////////////////////////////////////////////////////////////////////
#强制用户登出
defineDict["SECURITY_FTDC_FC_ForceUserLogout"] = '2'
#变更管理用户口令
defineDict["SECURITY_FTDC_FC_UserPasswordUpdate"] = '3'
#变更经纪公司口令
defineDict["SECURITY_FTDC_FC_BrokerPasswordUpdate"] = '4'
#变更投资者口令
defineDict["SECURITY_FTDC_FC_InvestorPasswordUpdate"] = '5'
#报单插入
defineDict["SECURITY_FTDC_FC_OrderInsert"] = '6'
#报单操作
defineDict["SECURITY_FTDC_FC_OrderAction"] = '7'
#同步系统数据
defineDict["SECURITY_FTDC_FC_SyncSystemData"] = '8'
#同步经纪公司数据
defineDict["SECURITY_FTDC_FC_SyncBrokerData"] = '9'
#超级查询
defineDict["SECURITY_FTDC_FC_SuperQuery"] = 'B'
#报单插入
defineDict["SECURITY_FTDC_FC_ParkedOrderInsert"] = 'C'
#报单操作
defineDict["SECURITY_FTDC_FC_ParkedOrderAction"] = 'D'
#同步动态令牌
defineDict["SECURITY_FTDC_FC_SyncOTP"] = 'E'
#未知单操作
defineDict["SECURITY_FTDC_FC_UnkownOrderAction"] = 'F'
#转托管
defineDict["SECURITY_FTDC_FC_DepositoryTransfer"] = 'G'
#余券划转
defineDict["SECURITY_FTDC_FC_ExcessStockTransfer"] = 'H'
typedefDict["TSecurityFtdcFunctionCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserTypeType是一个用户类型类型
#//////////////////////////////////////////////////////////////////////
#投资者
defineDict["SECURITY_FTDC_UT_Investor"] = '0'
#操作员
defineDict["SECURITY_FTDC_UT_Operator"] = '1'
#管理员
defineDict["SECURITY_FTDC_UT_SuperUser"] = '2'
typedefDict["TSecurityFtdcUserTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerFunctionCodeType是一个经纪公司功能代码类型
#//////////////////////////////////////////////////////////////////////
#强制用户登出
defineDict["SECURITY_FTDC_BFC_ForceUserLogout"] = '1'
#变更用户口令
defineDict["SECURITY_FTDC_BFC_UserPasswordUpdate"] = '2'
#同步经纪公司数据
defineDict["SECURITY_FTDC_BFC_SyncBrokerData"] = '3'
#报单插入
defineDict["SECURITY_FTDC_BFC_OrderInsert"] = '5'
#报单操作
defineDict["SECURITY_FTDC_BFC_OrderAction"] = '6'
#全部查询
defineDict["SECURITY_FTDC_BFC_AllQuery"] = '7'
#未知单操作
defineDict["SECURITY_FTDC_BFC_UnkownOrderAction"] = '8'
#转托管
defineDict["SECURITY_FTDC_BFC_DepositoryTransfer"] = '9'
#余券划转
defineDict["SECURITY_FTDC_BFC_ExcessStockTransfer"] = 'A'
#资金内转
defineDict["SECURITY_FTDC_BFC_FundInterTransfer"] = 'B'
#系统功能:登入/登出/修改密码等
defineDict["SECURITY_FTDC_BFC_log"] = 'a'
#基本查询:查询基础数据,如合约,交易所等常量
defineDict["SECURITY_FTDC_BFC_BaseQry"] = 'b'
#交易查询:如查成交,委托
defineDict["SECURITY_FTDC_BFC_TradeQry"] = 'c'
#交易功能:报单,撤单
defineDict["SECURITY_FTDC_BFC_Trade"] = 'd'
#转账
defineDict["SECURITY_FTDC_BFC_Virement"] = 'e'
#查询/管理:查询会话,踢人等
defineDict["SECURITY_FTDC_BFC_Session"] = 'g'
#同步动态令牌
defineDict["SECURITY_FTDC_BFC_SyncOTP"] = 'E'
typedefDict["TSecurityFtdcBrokerFunctionCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCurrencyCodeType是一个币种类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCurrencyCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMoneyType是一个资金类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcMoneyType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcRatioType是一个比率类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcRatioType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcAccountTypeType是一个账户类型类型
#//////////////////////////////////////////////////////////////////////
#普通账户
defineDict["SECURITY_FTDC_AcT_Normal"] = '1'
#信用账户
defineDict["SECURITY_FTDC_AcT_Credit"] = '2'
#衍生品账户
defineDict["SECURITY_FTDC_AcT_Derive"] = '3'
#其他类型
defineDict["SECURITY_FTDC_AcT_Other"] = '4'
typedefDict["TSecurityFtdcAccountTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDepartmentRangeType是一个投资者范围类型
#//////////////////////////////////////////////////////////////////////
#所有
defineDict["SECURITY_FTDC_DR_All"] = '1'
#组织架构
defineDict["SECURITY_FTDC_DR_Group"] = '2'
#单一投资者
defineDict["SECURITY_FTDC_DR_Single"] = '3'
typedefDict["TSecurityFtdcDepartmentRangeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserRightTypeType是一个客户权限类型类型
#//////////////////////////////////////////////////////////////////////
#登录
defineDict["SECURITY_FTDC_URT_Logon"] = '1'
#银期转帐
defineDict["SECURITY_FTDC_URT_Transfer"] = '2'
#邮寄结算单
defineDict["SECURITY_FTDC_URT_EMail"] = '3'
#传真结算单
defineDict["SECURITY_FTDC_URT_Fax"] = '4'
#条件单
defineDict["SECURITY_FTDC_URT_ConditionOrder"] = '5'
typedefDict["TSecurityFtdcUserRightTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcProductInfoType是一个产品信息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcProductInfoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAuthCodeType是一个客户端认证码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcAuthCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcLargeVolumeType是一个大额数量类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcLargeVolumeType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcMillisecType是一个时间(毫秒)类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcMillisecType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcHedgeFlagType是一个投机套保标志类型
#//////////////////////////////////////////////////////////////////////
#投机
defineDict["SECURITY_FTDC_HF_Speculation"] = '1'
#套保
defineDict["SECURITY_FTDC_HF_Hedge"] = '3'
typedefDict["TSecurityFtdcHedgeFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDirectionType是一个买卖方向类型
#//////////////////////////////////////////////////////////////////////
#买
defineDict["SECURITY_FTDC_D_Buy"] = '0'
#卖
defineDict["SECURITY_FTDC_D_Sell"] = '1'
#ETF申购
defineDict["SECURITY_FTDC_D_ETFPur"] = '2'
#ETF赎回
defineDict["SECURITY_FTDC_D_ETFRed"] = '3'
#现金替代,只用作回报
defineDict["SECURITY_FTDC_D_CashIn"] = '4'
#债券入库
defineDict["SECURITY_FTDC_D_PledgeBondIn"] = '5'
#债券出库
defineDict["SECURITY_FTDC_D_PledgeBondOut"] = '6'
#配股
defineDict["SECURITY_FTDC_D_Rationed"] = '7'
#转托管
defineDict["SECURITY_FTDC_D_DepositoryTransfer"] = '8'
#信用账户配股
defineDict["SECURITY_FTDC_D_CreditRationed"] = '9'
#担保品买入
defineDict["SECURITY_FTDC_D_BuyCollateral"] = 'A'
#担保品卖出
defineDict["SECURITY_FTDC_D_SellCollateral"] = 'B'
#担保品转入
defineDict["SECURITY_FTDC_D_CollateralTransferIn"] = 'C'
#担保品转出
defineDict["SECURITY_FTDC_D_CollateralTransferOut"] = 'D'
#融资买入
defineDict["SECURITY_FTDC_D_MarginTrade"] = 'E'
#融券卖出
defineDict["SECURITY_FTDC_D_ShortSell"] = 'F'
#卖券还款
defineDict["SECURITY_FTDC_D_RepayMargin"] = 'G'
#买券还券
defineDict["SECURITY_FTDC_D_RepayStock"] = 'H'
#直接还款
defineDict["SECURITY_FTDC_D_DirectRepayMargin"] = 'I'
#直接还券
defineDict["SECURITY_FTDC_D_DirectRepayStock"] = 'J'
#余券划转
defineDict["SECURITY_FTDC_D_ExcessStockTransfer"] = 'K'
#OF申购
defineDict["SECURITY_FTDC_D_OFPur"] = 'L'
#OF赎回
defineDict["SECURITY_FTDC_D_OFRed"] = 'M'
#SF拆分
defineDict["SECURITY_FTDC_D_SFSplit"] = 'N'
#SF合并
defineDict["SECURITY_FTDC_D_SFMerge"] = 'O'
#备兑
defineDict["SECURITY_FTDC_D_Covered"] = 'P'
#证券冻结(开)/解冻(平)
defineDict["SECURITY_FTDC_D_Freeze"] = 'Q'
#行权
defineDict["SECURITY_FTDC_D_Execute"] = 'R'
typedefDict["TSecurityFtdcDirectionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeIDType是一个成交编号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcTradeIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeTypeType是一个成交类型类型
#//////////////////////////////////////////////////////////////////////
#普通成交
defineDict["SECURITY_FTDC_TRDT_Common"] = '0'
#期权执行
defineDict["SECURITY_FTDC_TRDT_OptionsExecution"] = '1'
#OTC成交
defineDict["SECURITY_FTDC_TRDT_OTC"] = '2'
#期转现衍生成交
defineDict["SECURITY_FTDC_TRDT_EFPDerived"] = '3'
#组合衍生成交
defineDict["SECURITY_FTDC_TRDT_CombinationDerived"] = '4'
#ETF申购
defineDict["SECURITY_FTDC_TRDT_EFTPurchase"] = '5'
#ETF赎回
defineDict["SECURITY_FTDC_TRDT_EFTRedem"] = '6'
typedefDict["TSecurityFtdcTradeTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCreationredemptionStatusType是一个基金当天申购赎回状态类型
#//////////////////////////////////////////////////////////////////////
#不允许申购赎回
defineDict["SECURITY_FTDC_CDS_Forbidden"] = '0'
#表示允许申购和赎回
defineDict["SECURITY_FTDC_CDS_Allow"] = '1'
#允许申购、不允许赎回
defineDict["SECURITY_FTDC_CDS_OnlyPurchase"] = '2'
#不允许申购、允许赎回
defineDict["SECURITY_FTDC_CDS_OnlyRedeem"] = '3'
typedefDict["TSecurityFtdcCreationredemptionStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcETFCurrenceReplaceStatusType是一个ETF现金替代标志类型
#//////////////////////////////////////////////////////////////////////
#禁止现金替代
defineDict["SECURITY_FTDC_ETFCRS_Forbidden"] = '0'
#可以现金替代
defineDict["SECURITY_FTDC_ETFCRS_Allow"] = '1'
#必须现金替代
defineDict["SECURITY_FTDC_ETFCRS_Force"] = '2'
typedefDict["TSecurityFtdcETFCurrenceReplaceStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInterestType是一个利息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcInterestType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcRepurchaseMaxTimesType是一个正回购放大倍数类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcRepurchaseMaxTimesType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcCapitalStockTypeType是一个股本类型类型
#//////////////////////////////////////////////////////////////////////
#总通股本
defineDict["SECURITY_FTDC_CPTSTOCK_TOTALSTOCK"] = '1'
#流通股本
defineDict["SECURITY_FTDC_CPTSTOCK_CIRCULATION"] = '2'
typedefDict["TSecurityFtdcCapitalStockTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMarginPriceTypeType是一个保证金价格类型类型
#//////////////////////////////////////////////////////////////////////
#昨结算价
defineDict["SECURITY_FTDC_MPT_PreSettlementPrice"] = '1'
#最新价
defineDict["SECURITY_FTDC_MPT_SettlementPrice"] = '2'
#成交均价
defineDict["SECURITY_FTDC_MPT_AveragePrice"] = '3'
#开仓价
defineDict["SECURITY_FTDC_MPT_OpenPrice"] = '4'
typedefDict["TSecurityFtdcMarginPriceTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAlgorithmType是一个盈亏算法类型
#//////////////////////////////////////////////////////////////////////
#浮盈浮亏都计算
defineDict["SECURITY_FTDC_AG_All"] = '1'
#浮盈不计,浮亏计
defineDict["SECURITY_FTDC_AG_OnlyLost"] = '2'
#浮盈计,浮亏不计
defineDict["SECURITY_FTDC_AG_OnlyGain"] = '3'
#浮盈浮亏都不计算
defineDict["SECURITY_FTDC_AG_None"] = '4'
typedefDict["TSecurityFtdcAlgorithmType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIncludeCloseProfitType是一个是否包含平仓盈利类型
#//////////////////////////////////////////////////////////////////////
#包含平仓盈利
defineDict["SECURITY_FTDC_ICP_Include"] = '0'
#不包含平仓盈利
defineDict["SECURITY_FTDC_ICP_NotInclude"] = '2'
typedefDict["TSecurityFtdcIncludeCloseProfitType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAllWithoutTradeType是一个是否受可提比例限制类型
#//////////////////////////////////////////////////////////////////////
#不受可提比例限制
defineDict["SECURITY_FTDC_AWT_Enable"] = '0'
#受可提比例限制
defineDict["SECURITY_FTDC_AWT_Disable"] = '2'
#无仓不受可提比例限制
defineDict["SECURITY_FTDC_AWT_NoHoldEnable"] = '3'
typedefDict["TSecurityFtdcAllWithoutTradeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcHandlePositionAlgoIDType是一个持仓处理算法编号类型
#//////////////////////////////////////////////////////////////////////
#基本
defineDict["SECURITY_FTDC_HPA_Base"] = '1'
#非交易
defineDict["SECURITY_FTDC_HPA_NoneTrade"] = '4'
#证券
defineDict["SECURITY_FTDC_HPA_Stock"] = '5'
typedefDict["TSecurityFtdcHandlePositionAlgoIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeParamIDType是一个交易系统参数代码类型
#//////////////////////////////////////////////////////////////////////
#系统加密算法
defineDict["SECURITY_FTDC_TPID_EncryptionStandard"] = 'E'
#用户最大会话数
defineDict["SECURITY_FTDC_TPID_SingleUserSessionMaxNum"] = 'S'
#最大连续登录失败数
defineDict["SECURITY_FTDC_TPID_LoginFailMaxNum"] = 'L'
#是否强制认证
defineDict["SECURITY_FTDC_TPID_IsAuthForce"] = 'A'
#是否生成用户事件
defineDict["SECURITY_FTDC_TPID_GenUserEvent"] = 'G'
#起始报单本地编号
defineDict["SECURITY_FTDC_TPID_StartOrderLocalID"] = 'O'
#融资融券买券还券算法
defineDict["SECURITY_FTDC_TPID_RepayStockAlgo"] = 'R'
#衍生品账户资金提取线
defineDict["SECURITY_FTDC_TPID_DeriveWithdrawRatio"] = 'D'
typedefDict["TSecurityFtdcTradeParamIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSettlementParamValueType是一个参数代码值类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcSettlementParamValueType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMemoType是一个备注类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcMemoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPriorityType是一个优先级类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcPriorityType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderRefType是一个报单引用类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcOrderRefType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMarketIDType是一个市场代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcMarketIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMacAddressType是一个Mac地址类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcMacAddressType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInstrumentNameType是一个合约名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcInstrumentNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderSysIDType是一个报单编号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcOrderSysIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIPAddressType是一个IP地址类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcIPAddressType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIPPortType是一个IP端口类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcIPPortType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcProtocolInfoType是一个协议信息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcProtocolInfoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDepositSeqNoType是一个出入金流水号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcDepositSeqNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSystemNameType是一个系统名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcSystemNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvestorRangeType是一个投资者范围类型
#//////////////////////////////////////////////////////////////////////
#所有
defineDict["SECURITY_FTDC_IR_All"] = '1'
#投资者组
defineDict["SECURITY_FTDC_IR_Group"] = '2'
#单一投资者
defineDict["SECURITY_FTDC_IR_Single"] = '3'
typedefDict["TSecurityFtdcInvestorRangeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDataSyncStatusType是一个数据同步状态类型
#//////////////////////////////////////////////////////////////////////
#未同步
defineDict["SECURITY_FTDC_DS_Asynchronous"] = '1'
#同步中
defineDict["SECURITY_FTDC_DS_Synchronizing"] = '2'
#已同步
defineDict["SECURITY_FTDC_DS_Synchronized"] = '3'
typedefDict["TSecurityFtdcDataSyncStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTraderConnectStatusType是一个交易所交易员连接状态类型
#//////////////////////////////////////////////////////////////////////
#没有任何连接
defineDict["SECURITY_FTDC_TCS_NotConnected"] = '1'
#已经连接
defineDict["SECURITY_FTDC_TCS_Connected"] = '2'
#已经发出合约查询请求
defineDict["SECURITY_FTDC_TCS_QryInstrumentSent"] = '3'
#订阅私有流
defineDict["SECURITY_FTDC_TCS_SubPrivateFlow"] = '4'
typedefDict["TSecurityFtdcTraderConnectStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderActionStatusType是一个报单操作状态类型
#//////////////////////////////////////////////////////////////////////
#已经提交
defineDict["SECURITY_FTDC_OAS_Submitted"] = 'a'
#已经接受
defineDict["SECURITY_FTDC_OAS_Accepted"] = 'b'
#已经被拒绝
defineDict["SECURITY_FTDC_OAS_Rejected"] = 'c'
typedefDict["TSecurityFtdcOrderActionStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderStatusType是一个报单状态类型
#//////////////////////////////////////////////////////////////////////
#全部成交
defineDict["SECURITY_FTDC_OST_AllTraded"] = '0'
#部分成交还在队列中
defineDict["SECURITY_FTDC_OST_PartTradedQueueing"] = '1'
#部分成交不在队列中
defineDict["SECURITY_FTDC_OST_PartTradedNotQueueing"] = '2'
#未成交还在队列中
defineDict["SECURITY_FTDC_OST_NoTradeQueueing"] = '3'
#未成交不在队列中
defineDict["SECURITY_FTDC_OST_NoTradeNotQueueing"] = '4'
#撤单
defineDict["SECURITY_FTDC_OST_Canceled"] = '5'
#未知
defineDict["SECURITY_FTDC_OST_Unknown"] = 'a'
#尚未触发
defineDict["SECURITY_FTDC_OST_NotTouched"] = 'b'
#已触发
defineDict["SECURITY_FTDC_OST_Touched"] = 'c'
typedefDict["TSecurityFtdcOrderStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderSubmitStatusType是一个报单提交状态类型
#//////////////////////////////////////////////////////////////////////
#已经提交
defineDict["SECURITY_FTDC_OSS_InsertSubmitted"] = '0'
#撤单已经提交
defineDict["SECURITY_FTDC_OSS_CancelSubmitted"] = '1'
#修改已经提交
defineDict["SECURITY_FTDC_OSS_ModifySubmitted"] = '2'
#已经接受
defineDict["SECURITY_FTDC_OSS_Accepted"] = '3'
#报单已经被拒绝
defineDict["SECURITY_FTDC_OSS_InsertRejected"] = '4'
#撤单已经被拒绝
defineDict["SECURITY_FTDC_OSS_CancelRejected"] = '5'
#改单已经被拒绝
defineDict["SECURITY_FTDC_OSS_ModifyRejected"] = '6'
typedefDict["TSecurityFtdcOrderSubmitStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPositionDateType是一个持仓日期类型
#//////////////////////////////////////////////////////////////////////
#今日持仓
defineDict["SECURITY_FTDC_PSD_Today"] = '1'
#历史持仓
defineDict["SECURITY_FTDC_PSD_History"] = '2'
typedefDict["TSecurityFtdcPositionDateType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradingRoleType是一个交易角色类型
#//////////////////////////////////////////////////////////////////////
#代理
defineDict["SECURITY_FTDC_ER_Broker"] = '1'
#自营
defineDict["SECURITY_FTDC_ER_Host"] = '2'
#做市商
defineDict["SECURITY_FTDC_ER_Maker"] = '3'
typedefDict["TSecurityFtdcTradingRoleType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPosiDirectionType是一个持仓多空方向类型
#//////////////////////////////////////////////////////////////////////
#净
defineDict["SECURITY_FTDC_PD_Net"] = '1'
#多头
defineDict["SECURITY_FTDC_PD_Long"] = '2'
#空头
defineDict["SECURITY_FTDC_PD_Short"] = '3'
#备兑
defineDict["SECURITY_FTDC_PD_Covered"] = '4'
typedefDict["TSecurityFtdcPosiDirectionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderPriceTypeType is an order price condition type
#//////////////////////////////////////////////////////////////////////
#Market order: immediate execution, remainder cancelled (IOC)
defineDict["SECURITY_FTDC_OPT_AnyPrice"] = '1'
#Limit price
defineDict["SECURITY_FTDC_OPT_LimitPrice"] = '2'
#Market order: best five price levels, immediate execution, remainder cancelled
defineDict["SECURITY_FTDC_OPT_BestPrice"] = '3'
#Market order: best five price levels, immediate execution, remainder converted to limit order
defineDict["SECURITY_FTDC_OPT_BestLimitPrice"] = '4'
#Market order: fill or kill (execute fully or cancel)
defineDict["SECURITY_FTDC_OPT_AllPrice"] = '5'
#Market order at own side's best price
defineDict["SECURITY_FTDC_OPT_ForwardBestPrice"] = '6'
#Market order at counterparty's best price
defineDict["SECURITY_FTDC_OPT_ReverseBestPrice"] = '7'
#Market order: immediate execution, remainder converted to limit order
defineDict["SECURITY_FTDC_OPT_Any2LimitPrice"] = '8'
#Limit order: fill or kill (execute fully or cancel)
defineDict["SECURITY_FTDC_OPT_AllLimitPrice"] = '9'
#Activate A-share network password service code
defineDict["SECURITY_FTDC_OPT_ActiveANetPassSvrCode"] = 'G'
#Deactivate A-share network password service code
defineDict["SECURITY_FTDC_OPT_InactiveANetPassSvrCode"] = 'H'
#Activate B-share network password service code
defineDict["SECURITY_FTDC_OPT_ActiveBNetPassSvrCode"] = 'I'
#Deactivate B-share network password service code
defineDict["SECURITY_FTDC_OPT_InactiveBNetPassSvrCode"] = 'J'
#Repurchase cancellation
defineDict["SECURITY_FTDC_OPT_Repurchase"] = 'K'
#Designation cancellation
defineDict["SECURITY_FTDC_OPT_DesignatedCancel"] = 'L'
#Designation registration
defineDict["SECURITY_FTDC_OPT_Designated"] = 'M'
#Securities subscription (new share purchase)
defineDict["SECURITY_FTDC_OPT_SubscribingShares"] = 'N'
#Securities rights issue (allotment) participation
defineDict["SECURITY_FTDC_OPT_Split"] = 'O'
#Tender offer registration
defineDict["SECURITY_FTDC_OPT_TenderOffer"] = 'P'
#Tender offer cancellation
defineDict["SECURITY_FTDC_OPT_TenderOfferCancel"] = 'Q'
#Securities voting (ballot)
defineDict["SECURITY_FTDC_OPT_Ballot"] = 'R'
#Convertible bond conversion registration
defineDict["SECURITY_FTDC_OPT_ConvertibleBondsConvet"] = 'S'
#Convertible bond put-back (resale to issuer) registration
defineDict["SECURITY_FTDC_OPT_ConvertibleBondsRepurchase"] = 'T'
#Warrant exercise
defineDict["SECURITY_FTDC_OPT_Exercise"] = 'U'
#Open-end fund purchase
defineDict["SECURITY_FTDC_OPT_PurchasingFunds"] = 'V'
#Open-end fund redemption
defineDict["SECURITY_FTDC_OPT_RedemingFunds"] = 'W'
#Open-end fund subscription
defineDict["SECURITY_FTDC_OPT_SubscribingFunds"] = 'X'
#Open-end fund custodian transfer out
defineDict["SECURITY_FTDC_OPT_LOFIssue"] = 'Y'
#Open-end fund dividend method setting
defineDict["SECURITY_FTDC_OPT_LOFSetBonusType"] = 'Z'
#Open-end fund conversion into another fund
defineDict["SECURITY_FTDC_OPT_LOFConvert"] = 'a'
#Bond deposit (into repository)
defineDict["SECURITY_FTDC_OPT_DebentureStockIn"] = 'b'
#Bond withdrawal (out of repository)
defineDict["SECURITY_FTDC_OPT_DebentureStockOut"] = 'c'
#ETF purchase (creation)
defineDict["SECURITY_FTDC_OPT_PurchasesETF"] = 'd'
#ETF redemption
defineDict["SECURITY_FTDC_OPT_RedeemETF"] = 'e'
typedefDict["TSecurityFtdcOrderPriceTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOffsetFlagType is an open/close (offset) flag type
#//////////////////////////////////////////////////////////////////////
#Open position
defineDict["SECURITY_FTDC_OF_Open"] = '0'
#Close position
defineDict["SECURITY_FTDC_OF_Close"] = '1'
#Forced close
defineDict["SECURITY_FTDC_OF_ForceClose"] = '2'
#Close today's position
defineDict["SECURITY_FTDC_OF_CloseToday"] = '3'
#Close yesterday's position
defineDict["SECURITY_FTDC_OF_CloseYesterday"] = '4'
#Forced reduction
defineDict["SECURITY_FTDC_OF_ForceOff"] = '5'
#Local forced close
defineDict["SECURITY_FTDC_OF_LocalForceClose"] = '6'
typedefDict["TSecurityFtdcOffsetFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcForceCloseReasonType is a forced-close reason type
#//////////////////////////////////////////////////////////////////////
#Not a forced close
defineDict["SECURITY_FTDC_FCC_NotForceClose"] = '0'
#Insufficient funds
defineDict["SECURITY_FTDC_FCC_LackDeposit"] = '1'
#Client exceeded position limit
defineDict["SECURITY_FTDC_FCC_ClientOverPositionLimit"] = '2'
#Member exceeded position limit
defineDict["SECURITY_FTDC_FCC_MemberOverPositionLimit"] = '3'
#Position not an integer multiple
defineDict["SECURITY_FTDC_FCC_NotMultiple"] = '4'
#Rule violation
defineDict["SECURITY_FTDC_FCC_Violation"] = '5'
#Other
defineDict["SECURITY_FTDC_FCC_Other"] = '6'
#Individual investor approaching delivery
defineDict["SECURITY_FTDC_FCC_PersonDeliv"] = '7'
typedefDict["TSecurityFtdcForceCloseReasonType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderTypeType is an order category type
#//////////////////////////////////////////////////////////////////////
#Normal
defineDict["SECURITY_FTDC_ORDT_Normal"] = '0'
#Derived from quote
defineDict["SECURITY_FTDC_ORDT_DeriveFromQuote"] = '1'
#Derived from combination
defineDict["SECURITY_FTDC_ORDT_DeriveFromCombination"] = '2'
#Combination order
defineDict["SECURITY_FTDC_ORDT_Combination"] = '3'
#Conditional order
defineDict["SECURITY_FTDC_ORDT_ConditionalOrder"] = '4'
#Swap order
defineDict["SECURITY_FTDC_ORDT_Swap"] = '5'
typedefDict["TSecurityFtdcOrderTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTimeConditionType is a time-in-force (validity period) type
#//////////////////////////////////////////////////////////////////////
#Immediate or cancel (complete immediately, otherwise cancel)
defineDict["SECURITY_FTDC_TC_IOC"] = '1'
#Good for this trading section
defineDict["SECURITY_FTDC_TC_GFS"] = '2'
#Good for day
defineDict["SECURITY_FTDC_TC_GFD"] = '3'
#Good till specified date
defineDict["SECURITY_FTDC_TC_GTD"] = '4'
#Good till cancelled
defineDict["SECURITY_FTDC_TC_GTC"] = '5'
#Good for auction (call auction only)
defineDict["SECURITY_FTDC_TC_GFA"] = '6'
typedefDict["TSecurityFtdcTimeConditionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVolumeConditionType is a volume condition type
#//////////////////////////////////////////////////////////////////////
#Any volume
defineDict["SECURITY_FTDC_VC_AV"] = '1'
#Minimum volume
defineDict["SECURITY_FTDC_VC_MV"] = '2'
#Complete (all-or-none) volume
defineDict["SECURITY_FTDC_VC_CV"] = '3'
typedefDict["TSecurityFtdcVolumeConditionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcContingentConditionType is a trigger (contingent) condition type
#//////////////////////////////////////////////////////////////////////
#Immediately
defineDict["SECURITY_FTDC_CC_Immediately"] = '1'
#Stop loss (touch)
defineDict["SECURITY_FTDC_CC_Touch"] = '2'
#Take profit (touch profit)
defineDict["SECURITY_FTDC_CC_TouchProfit"] = '3'
#Parked (pre-placed) order
defineDict["SECURITY_FTDC_CC_ParkedOrder"] = '4'
#Last price greater than stop (condition) price
defineDict["SECURITY_FTDC_CC_LastPriceGreaterThanStopPrice"] = '5'
#Last price greater than or equal to stop price
defineDict["SECURITY_FTDC_CC_LastPriceGreaterEqualStopPrice"] = '6'
#Last price less than stop price
defineDict["SECURITY_FTDC_CC_LastPriceLesserThanStopPrice"] = '7'
#Last price less than or equal to stop price
defineDict["SECURITY_FTDC_CC_LastPriceLesserEqualStopPrice"] = '8'
#Ask price greater than stop price
defineDict["SECURITY_FTDC_CC_AskPriceGreaterThanStopPrice"] = '9'
#Ask price greater than or equal to stop price
defineDict["SECURITY_FTDC_CC_AskPriceGreaterEqualStopPrice"] = 'A'
#Ask price less than stop price
defineDict["SECURITY_FTDC_CC_AskPriceLesserThanStopPrice"] = 'B'
#Ask price less than or equal to stop price
defineDict["SECURITY_FTDC_CC_AskPriceLesserEqualStopPrice"] = 'C'
#Bid price greater than stop price
defineDict["SECURITY_FTDC_CC_BidPriceGreaterThanStopPrice"] = 'D'
#Bid price greater than or equal to stop price
defineDict["SECURITY_FTDC_CC_BidPriceGreaterEqualStopPrice"] = 'E'
#Bid price less than stop price
defineDict["SECURITY_FTDC_CC_BidPriceLesserThanStopPrice"] = 'F'
#Bid price less than or equal to stop price (NOTE: vendor-assigned code skips 'G')
defineDict["SECURITY_FTDC_CC_BidPriceLesserEqualStopPrice"] = 'H'
typedefDict["TSecurityFtdcContingentConditionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcActionFlagType is an action flag type
#//////////////////////////////////////////////////////////////////////
#Delete
defineDict["SECURITY_FTDC_AF_Delete"] = '0'
#Modify
defineDict["SECURITY_FTDC_AF_Modify"] = '3'
typedefDict["TSecurityFtdcActionFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradingRightType is a trading right type
#//////////////////////////////////////////////////////////////////////
#Trading allowed
defineDict["SECURITY_FTDC_TR_Allow"] = '0'
#Trading forbidden
defineDict["SECURITY_FTDC_TR_Forbidden"] = '2'
typedefDict["TSecurityFtdcTradingRightType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderSourceType is an order source type
#//////////////////////////////////////////////////////////////////////
#From participant
defineDict["SECURITY_FTDC_OSRC_Participant"] = '0'
#From administrator
defineDict["SECURITY_FTDC_OSRC_Administrator"] = '1'
typedefDict["TSecurityFtdcOrderSourceType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPriceSourceType is a trade price source type
#//////////////////////////////////////////////////////////////////////
#Previous trade price
defineDict["SECURITY_FTDC_PSRC_LastPrice"] = '0'
#Buy order price
defineDict["SECURITY_FTDC_PSRC_Buy"] = '1'
#Sell order price
defineDict["SECURITY_FTDC_PSRC_Sell"] = '2'
typedefDict["TSecurityFtdcPriceSourceType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderActionRefType is an order action reference type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcOrderActionRefType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcFrontIDType is a front (gateway) ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcFrontIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcSessionIDType is a session ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcSessionIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcInstallIDType is an installation ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcInstallIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcSequenceNoType is a sequence number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcSequenceNoType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcRequestIDType is a request ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcRequestIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcCombOffsetFlagType is a combined open/close (offset) flag type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCombOffsetFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCombHedgeFlagType is a combined speculation/hedge flag type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCombHedgeFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSequenceSeriesType is a sequence series number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcSequenceSeriesType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcCommPhaseNoType is a communication phase number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCommPhaseNoType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserEventTypeType is a user event type
#//////////////////////////////////////////////////////////////////////
#Login
defineDict["SECURITY_FTDC_UET_Login"] = '1'
#Logout
defineDict["SECURITY_FTDC_UET_Logout"] = '2'
#Trading success
defineDict["SECURITY_FTDC_UET_Trading"] = '3'
#Trading failure
defineDict["SECURITY_FTDC_UET_TradingError"] = '4'
#Password update
defineDict["SECURITY_FTDC_UET_UpdatePassword"] = '5'
#Client authentication
defineDict["SECURITY_FTDC_UET_Authenticate"] = '6'
#Other
defineDict["SECURITY_FTDC_UET_Other"] = '9'
typedefDict["TSecurityFtdcUserEventTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserEventInfoType is a user event info type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcUserEventInfoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOTPTypeType is a one-time-password (dynamic token) type
#//////////////////////////////////////////////////////////////////////
#No dynamic token
defineDict["SECURITY_FTDC_OTP_NONE"] = '0'
#Time-based token (TOTP)
defineDict["SECURITY_FTDC_OTP_TOTP"] = '1'
typedefDict["TSecurityFtdcOTPTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeSourceType is a trade source type
#//////////////////////////////////////////////////////////////////////
#From normal exchange trade return
defineDict["SECURITY_FTDC_TSRC_NORMAL"] = '0'
#From query
defineDict["SECURITY_FTDC_TSRC_QUERY"] = '1'
typedefDict["TSecurityFtdcTradeSourceType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBranchIDType is a business branch ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBranchIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcStockPriceType is a securities trading price type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcStockPriceType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRightModelIDType is a stock rights template code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcRightModelIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSerialNumberType is a serial number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcSerialNumberType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInstrumentRangeType is a stock rights classification type
#//////////////////////////////////////////////////////////////////////
#All
defineDict["SECURITY_FTDC_INR_All"] = '1'
#Product
defineDict["SECURITY_FTDC_INR_Product"] = '2'
#Stock rights template
defineDict["SECURITY_FTDC_INR_Model"] = '3'
#Stock
defineDict["SECURITY_FTDC_INR_Stock"] = '4'
#Market
defineDict["SECURITY_FTDC_INR_Market"] = '5'
typedefDict["TSecurityFtdcInstrumentRangeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBusinessUnitType is a business unit type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBusinessUnitType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOTPVendorsIDType is an OTP (dynamic token) vendor ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcOTPVendorsIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcLastDriftType is a last OTP drift value type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcLastDriftType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcLastSuccessType is a last OTP success value type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcLastSuccessType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcAuthKeyType is a token secret key type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcAuthKeyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserSessionHashType is a user session hash value type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcUserSessionHashType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcStockTradeTypeType is a securities trade type
#//////////////////////////////////////////////////////////////////////
#Tradable securities
defineDict["SECURITY_FTDC_STT_Stock"] = '0'
#Buy network password service
defineDict["SECURITY_FTDC_STT_BuyNetService"] = '1'
#Repurchase cancellation
defineDict["SECURITY_FTDC_STT_CancelRepurchase"] = '2'
#Designation cancellation
defineDict["SECURITY_FTDC_STT_CancelRegister"] = '3'
#Designation registration
defineDict["SECURITY_FTDC_STT_Register"] = '4'
#Buy new issue subscription
defineDict["SECURITY_FTDC_STT_PurchaseIssue"] = '5'
#Sell rights issue (allotment)
defineDict["SECURITY_FTDC_STT_Allotment"] = '6'
#Sell tender offer
defineDict["SECURITY_FTDC_STT_SellTender"] = '7'
#Buy tender offer
defineDict["SECURITY_FTDC_STT_BuyTender"] = '8'
#Online voting
defineDict["SECURITY_FTDC_STT_NetVote"] = '9'
#Sell convertible bond put-back
defineDict["SECURITY_FTDC_STT_SellConvertibleBonds"] = 'a'
#Warrant exercise code
defineDict["SECURITY_FTDC_STT_OptionExecute"] = 'b'
#Open-end fund purchase
defineDict["SECURITY_FTDC_STT_PurchaseOF"] = 'c'
#Open-end fund redemption
defineDict["SECURITY_FTDC_STT_RedeemOF"] = 'd'
#Open-end fund subscription
defineDict["SECURITY_FTDC_STT_SubscribeOF"] = 'e'
#Open-end fund custodian transfer out
defineDict["SECURITY_FTDC_STT_OFCustodianTranfer"] = 'f'
#Open-end fund dividend configuration
defineDict["SECURITY_FTDC_STT_OFDividendConfig"] = 'g'
#Open-end fund conversion into another fund
defineDict["SECURITY_FTDC_STT_OFTransfer"] = 'h'
#Bond deposit (into repository)
defineDict["SECURITY_FTDC_STT_BondsIn"] = 'i'
#Bond withdrawal (out of repository)
defineDict["SECURITY_FTDC_STT_BondsOut"] = 'j'
#ETF purchase (original comment reads "EFT", presumably a typo for ETF)
defineDict["SECURITY_FTDC_STT_PurchaseETF"] = 'k'
#ETF redemption (original comment reads "EFT", presumably a typo for ETF)
defineDict["SECURITY_FTDC_STT_RedeemETF"] = 'l'
#Convertible bond put-back registration
defineDict["SECURITY_FTDC_STT_ConvertibleRegister"] = 'm'
typedefDict["TSecurityFtdcStockTradeTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcHandleTradingAccountAlgoIDType is a trading-account fund handling algorithm ID type
#//////////////////////////////////////////////////////////////////////
#Basic
defineDict["SECURITY_FTDC_HTAA_Base"] = '1'
typedefDict["TSecurityFtdcHandleTradingAccountAlgoIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcStockWthType is a stock usage serial number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcStockWthType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcStockSeqType is a stock usage serial number type (string form)
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcStockSeqType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcWTFSType is an entrust (order placement) method type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcWTFSType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcWTLBType is an entrust category type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcWTLBType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcWTRQType is an entrust date type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcWTRQType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcINTEGERType is a general integer type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcINTEGERType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcINT3Type is a 3-digit integer type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcINT3Type"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcINT6Type is a 6-digit integer type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcINT6Type"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcINT12Type is a 12-digit integer type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcINT12Type"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR1Type is a 1-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR1Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR2Type is a 2-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR2Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR3Type is a 3-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR3Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR4Type is a 4-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR4Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR5Type is a 5-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR5Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR6Type is a 6-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR6Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR8Type is an 8-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR8Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR10Type is a 10-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR10Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR11Type is an 11-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR11Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR12Type is a 12-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR12Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR13Type is a 13-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR13Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR14Type is a 14-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR14Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR16Type is a 16-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR16Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR19Type is a 19-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR19Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR20Type is a 20-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR20Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR21Type is a 21-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR21Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR23Type is a 23-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR23Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR30Type is a 30-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR30Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR32Type is a 32-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR32Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR50Type is a 50-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR50Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR64Type is a 64-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR64Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCHAR65Type is a 65-byte CHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCHAR65Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR4Type is a 4-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR4Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR6Type is a 6-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR6Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR8Type is an 8-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR8Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR10Type is a 10-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR10Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR12Type is a 12-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR12Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR16Type is a 16-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR16Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR20Type is a 20-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR20Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR30Type is a 30-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR30Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR50Type is a 50-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR50Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR60Type is a 60-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR60Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR65Type is a 65-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR65Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR80Type is an 80-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR80Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR84Type is an 84-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR84Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR255Type is a 255-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR255Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVCHAR1024Type is a 1024-byte VCHAR type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcVCHAR1024Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcREAL8P3Type is a real type with precision 8, scale 3
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcREAL8P3Type"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcREAL9P3Type is a real type with precision 9, scale 3
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcREAL9P3Type"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcREAL9P6Type is a real type with precision 9, scale 6
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcREAL9P6Type"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcREAL10P4Type is a real type with precision 10, scale 4
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcREAL10P4Type"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcREAL16P2Type is a real type with precision 16, scale 2
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcREAL16P2Type"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcREAL16P8Type is a real type with precision 16, scale 8
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcREAL16P8Type"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcREAL22P2Type is a real type with precision 22, scale 2
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcREAL22P2Type"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcCommandNoType is a DB command sequence number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCommandNoType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcCommandTypeType is a DB command type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCommandTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSettlementGroupIDType is a settlement group ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcSettlementGroupIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFieldNameType is a field name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcFieldNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFieldContentType is a field content type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcFieldContentType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankIDType is a bank ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBankIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankNameType is a bank name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBankNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankBrchIDType is a bank sub-center (branch) ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBankBrchIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcLiberSerialType is a Liber system serial number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcLiberSerialType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcRoleIDType is a role ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcRoleIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRoleNameType is a role name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcRoleNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDescriptionType is a description type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcDescriptionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFunctionIDType is a function code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcFunctionIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBillNoType is a bill number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBillNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundIOTypeType is a fund in/out type
#//////////////////////////////////////////////////////////////////////
#Fund deposit/withdrawal
defineDict["SECURITY_FTDC_FIOT_FundIO"] = '1'
#Bank-broker transfer
defineDict["SECURITY_FTDC_FIOT_Transfer"] = '2'
typedefDict["TSecurityFtdcFundIOTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundTypeType is a fund type
#//////////////////////////////////////////////////////////////////////
#Bank deposit
defineDict["SECURITY_FTDC_FT_Deposite"] = '1'
#Itemized fund
defineDict["SECURITY_FTDC_FT_ItemFund"] = '2'
#Company adjustment
defineDict["SECURITY_FTDC_FT_Company"] = '3'
typedefDict["TSecurityFtdcFundTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundDirectionType is a fund in/out direction type
#//////////////////////////////////////////////////////////////////////
#Deposit (fund in)
defineDict["SECURITY_FTDC_FD_In"] = '1'
#Withdrawal (fund out)
defineDict["SECURITY_FTDC_FD_Out"] = '2'
typedefDict["TSecurityFtdcFundDirectionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankFlagType is a unified bank identifier type
#//////////////////////////////////////////////////////////////////////
#Industrial and Commercial Bank of China (ICBC)
defineDict["SECURITY_FTDC_BF_ICBC"] = '1'
#Agricultural Bank of China (ABC)
defineDict["SECURITY_FTDC_BF_ABC"] = '2'
#Bank of China
defineDict["SECURITY_FTDC_BF_BC"] = '3'
#China Construction Bank
defineDict["SECURITY_FTDC_BF_CBC"] = '4'
#Bank of Communications
defineDict["SECURITY_FTDC_BF_BOC"] = '5'
#Other banks
defineDict["SECURITY_FTDC_BF_Other"] = 'Z'
typedefDict["TSecurityFtdcBankFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOperationMemoType is an operation memo type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcOperationMemoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundStatusType is a fund status type
#//////////////////////////////////////////////////////////////////////
#Recorded (entered)
defineDict["SECURITY_FTDC_FS_Record"] = '1'
#Checked (reviewed)
defineDict["SECURITY_FTDC_FS_Check"] = '2'
#Reversed (written off)
defineDict["SECURITY_FTDC_FS_Charge"] = '3'
typedefDict["TSecurityFtdcFundStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundProjectIDType is a fund project ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcFundProjectIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOperatorIDType is an operator ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcOperatorIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCounterIDType is a counter ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCounterIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFunctionNameType is a function name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcFunctionNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeCodeType is a trade code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcTradeCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerBranchIDType is a broker branch office ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBrokerBranchIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeDateType is a trade date type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcTradeDateType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeTimeType is a trade time type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcTradeTimeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankSerialType is a bank serial number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBankSerialType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSerialType is a serial number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcSerialType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcLastFragmentType is a last-fragment flag type
#//////////////////////////////////////////////////////////////////////
#Is the last fragment
defineDict["SECURITY_FTDC_LF_Yes"] = '0'
#Is not the last fragment
defineDict["SECURITY_FTDC_LF_No"] = '1'
typedefDict["TSecurityFtdcLastFragmentType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIndividualNameType是一个个人姓名类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcIndividualNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCustTypeType是一个客户类型类型
#//////////////////////////////////////////////////////////////////////
#自然人
defineDict["SECURITY_FTDC_CUSTT_Person"] = '0'
#机构户
defineDict["SECURITY_FTDC_CUSTT_Institution"] = '1'
typedefDict["TSecurityFtdcCustTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankAccountType是一个银行账户类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBankAccountType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcYesNoIndicatorType是一个是或否标识类型
#//////////////////////////////////////////////////////////////////////
#是
defineDict["SECURITY_FTDC_YNI_Yes"] = '0'
#否
defineDict["SECURITY_FTDC_YNI_No"] = '1'
typedefDict["TSecurityFtdcYesNoIndicatorType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeAmountType是一个交易金额(元)类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcTradeAmountType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcCustFeeType是一个应收客户费用(元)类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcCustFeeType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerFeeType是一个应收经纪公司费用(元)类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBrokerFeeType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcFeePayFlagType是一个费用支付标志类型
#//////////////////////////////////////////////////////////////////////
#由受益方支付费用
defineDict["SECURITY_FTDC_FPF_BEN"] = '0'
#由发送方支付费用
defineDict["SECURITY_FTDC_FPF_OUR"] = '1'
#由发送方支付发起的费用,受益方支付接受的费用
defineDict["SECURITY_FTDC_FPF_SHA"] = '2'
typedefDict["TSecurityFtdcFeePayFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAddInfoType是一个附加信息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcAddInfoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDigestType是一个摘要类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcDigestType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankAccTypeType是一个银行帐号类型类型
#//////////////////////////////////////////////////////////////////////
#银行存折
defineDict["SECURITY_FTDC_BAT_BankBook"] = '1'
#储蓄卡
defineDict["SECURITY_FTDC_BAT_SavingCard"] = '2'
#信用卡
defineDict["SECURITY_FTDC_BAT_CreditCard"] = '3'
typedefDict["TSecurityFtdcBankAccTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDeviceIDType是一个渠道标志类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcDeviceIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPwdFlagType是一个密码核对标志类型
#//////////////////////////////////////////////////////////////////////
#不核对
defineDict["SECURITY_FTDC_BPWDF_NoCheck"] = '0'
#明文核对
defineDict["SECURITY_FTDC_BPWDF_BlankCheck"] = '1'
#密文核对
defineDict["SECURITY_FTDC_BPWDF_EncryptCheck"] = '2'
typedefDict["TSecurityFtdcPwdFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankCodingForBrokerType是一个银行对经纪公司的编码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcBankCodingForBrokerType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOperNoType是一个交易柜员类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcOperNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTIDType是一个交易ID类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcTIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcTransferStatusType是一个转账交易状态类型
#//////////////////////////////////////////////////////////////////////
#正常
defineDict["SECURITY_FTDC_TRFS_Normal"] = '0'
#被冲正
defineDict["SECURITY_FTDC_TRFS_Repealed"] = '1'
typedefDict["TSecurityFtdcTransferStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPlateSerialType是一个平台流水号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcPlateSerialType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcAvailabilityFlagType是一个有效标志类型
#//////////////////////////////////////////////////////////////////////
#未确认
defineDict["SECURITY_FTDC_AVAF_Invalid"] = '0'
#有效
defineDict["SECURITY_FTDC_AVAF_Valid"] = '1'
#冲正
defineDict["SECURITY_FTDC_AVAF_Repeal"] = '2'
typedefDict["TSecurityFtdcAvailabilityFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOperatorCodeType是一个操作员类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcOperatorCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRepayStockAlgoType是一个买券还券算法类型
#//////////////////////////////////////////////////////////////////////
#默认算法
defineDict["SECURITY_FTDC_RSA_Original"] = '0'
#按还券比例计算
defineDict["SECURITY_FTDC_RSA_Ratio"] = '1'
#Min[1,2]
defineDict["SECURITY_FTDC_RSA_Min"] = '2'
typedefDict["TSecurityFtdcRepayStockAlgoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeSpanType是一个交易时间段类型类型
#//////////////////////////////////////////////////////////////////////
#普通业务
defineDict["SECURITY_FTDC_TS_Common"] = '1'
#个股期权
defineDict["SECURITY_FTDC_TS_Options"] = '2'
typedefDict["TSecurityFtdcTradeSpanType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSettleSystemTypeType是一个所属结算系统类型类型
#//////////////////////////////////////////////////////////////////////
#顶点系统
defineDict["SECURITY_FTDC_SST_Aboss"] = '1'
#恒生系统
defineDict["SECURITY_FTDC_SST_HS"] = '2'
typedefDict["TSecurityFtdcSettleSystemTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcLogLevelType是一个日志级别类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcLogLevelType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcProcessNameType是一个存储过程名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcProcessNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTemplateIDType是一个模板代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcTemplateIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeIndexType是一个成交序号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TSecurityFtdcTradeIndexType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcSplitMergeStatusType是一个基金当天拆分合并状态类型
#//////////////////////////////////////////////////////////////////////
#表示允许拆分和合并
defineDict["SECURITY_FTDC_SMS_Allow"] = '0'
#允许拆分、不允许合并
defineDict["SECURITY_FTDC_SMS_OnlySplit"] = '1'
#不允许拆分、允许合并
defineDict["SECURITY_FTDC_SMS_OnlyMerge"] = '2'
#不允许拆分和合并
defineDict["SECURITY_FTDC_SMS_Forbidden"] = '3'
typedefDict["TSecurityFtdcSplitMergeStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundInterTransferTypeType是一个资金内转类型类型
#//////////////////////////////////////////////////////////////////////
#转入
defineDict["SECURITY_FTDC_FITT_TransferIn"] = '0'
#转出
defineDict["SECURITY_FTDC_FITT_TransferOut"] = '1'
typedefDict["TSecurityFtdcFundInterTransferTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInstrumentTypeType是一个合约类型类型
#//////////////////////////////////////////////////////////////////////
#普通
defineDict["SECURITY_FTDC_IT_Normal"] = '0'
#看涨期权
defineDict["SECURITY_FTDC_IT_CallOptions"] = '1'
#看跌期权
defineDict["SECURITY_FTDC_IT_PutOptions"] = '2'
typedefDict["TSecurityFtdcInstrumentTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvestorLevelType是一个投资者期权交易等级类型
#//////////////////////////////////////////////////////////////////////
#一级
defineDict["SECURITY_FTDC_IL_Level_1"] = '0'
#二级
defineDict["SECURITY_FTDC_IL_Level_2"] = '1'
#三级
defineDict["SECURITY_FTDC_IL_Level_3"] = '2'
typedefDict["TSecurityFtdcInvestorLevelType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCloseDirectionType是一个平仓方向类型
#//////////////////////////////////////////////////////////////////////
#买平仓
defineDict["SECURITY_FTDC_CD_CloseBuy"] = '!'
#卖平仓
defineDict["SECURITY_FTDC_CD_CloseSell"] = '@'
#备兑平仓
defineDict["SECURITY_FTDC_CD_CloseCover"] = '#'
typedefDict["TSecurityFtdcCloseDirectionType"] = "string"
| mit |
leasual/TeamTalk | win-client/3rdParty/src/json/scons-tools/targz.py | 264 | 3055 | """tarball
Tool-specific initialization for tarball.
"""
## Commands to tackle a command based implementation:
##to unpack on the fly...
##gunzip < FILE.tar.gz | tar xvf -
##to pack on the fly...
##tar cvf - FILE-LIST | gzip -c > FILE.tar.gz
import os.path
import SCons.Builder
import SCons.Node.FS
import SCons.Util
try:
    import gzip
    import tarfile
    internal_targz = 1
except ImportError:
    # Without gzip/tarfile the tool degrades to a no-op (see the fallback
    # generate() at the bottom of this file).
    internal_targz = 0

# Default gzip compression level (0 = none ... 9 = best, as in gzip itself);
# overridable through the TARGZ_COMPRESSION_LEVEL construction variable.
TARGZ_DEFAULT_COMPRESSION_LEVEL = 9

if internal_targz:
    def targz(target, source, env):
        """SCons action: pack the source nodes into target[0] as a .tar.gz.

        Directories are archived recursively. Archive member names are made
        relative to the TARGZ_BASEDIR construction variable (default: the
        current directory).
        """
        def archive_name(path):
            # Strip the base_dir prefix so member names are relative.
            path = os.path.normpath(os.path.abspath(path))
            common_path = os.path.commonprefix((base_dir, path))
            return path[len(common_path):]

        compression = env.get('TARGZ_COMPRESSION_LEVEL', TARGZ_DEFAULT_COMPRESSION_LEVEL)
        base_dir = os.path.normpath(env.get('TARGZ_BASEDIR', env.Dir('.')).abspath)
        target_path = str(target[0])
        fileobj = gzip.GzipFile(target_path, 'wb', compression)
        try:
            tar = tarfile.TarFile(os.path.splitext(target_path)[0], 'w', fileobj)
            try:
                # Renamed loop variable: the original `for source in source`
                # shadowed the parameter.
                for node in source:
                    source_path = str(node)
                    if node.isdir():
                        # os.walk replaces os.path.walk, which was removed in
                        # Python 3 (os.walk exists in Python 2 as well).
                        for dirname, _dirnames, filenames in os.walk(source_path):
                            for filename in filenames:
                                path = os.path.join(dirname, filename)
                                if os.path.isfile(path):
                                    tar.add(path, archive_name(path))
                    else:
                        tar.add(source_path, archive_name(source_path))  # filename, arcname
            finally:
                tar.close()
        finally:
            # Bug fix: the gzip stream was never closed, so the gzip trailer
            # was only written (if at all) when the object was garbage
            # collected, leaving a potentially truncated/corrupt archive.
            fileobj.close()
    # Action wrapping targz(); varlist makes targets rebuild whenever either
    # construction variable changes.
    targzAction = SCons.Action.Action(targz, varlist=['TARGZ_COMPRESSION_LEVEL','TARGZ_BASEDIR'])

    def makeBuilder( emitter = None ):
        # Builder factory for the TarGz builder.
        # NOTE(review): `emitter` is accepted but never used -- confirm
        # whether it was meant to be forwarded to Builder().
        return SCons.Builder.Builder(action = SCons.Action.Action('$TARGZ_COM', '$TARGZ_COMSTR'),
                                     source_factory = SCons.Node.FS.Entry,
                                     source_scanner = SCons.Defaults.DirScanner,
                                     suffix = '$TARGZ_SUFFIX',
                                     multi = 1)

    TarGzBuilder = makeBuilder()
    def generate(env):
        """Add Builders and construction variables for zip to an Environment.

        The following environment variables may be set:
        TARGZ_COMPRESSION_LEVEL: integer, [0-9]. 0: no compression, 9: best compression (same as gzip compression level).
        TARGZ_BASEDIR: base-directory used to determine archive name (this allow archive name to be relative
        to something other than top-dir).
        """
        env['BUILDERS']['TarGz'] = TarGzBuilder
        env['TARGZ_COM'] = targzAction
        env['TARGZ_COMPRESSION_LEVEL'] = TARGZ_DEFAULT_COMPRESSION_LEVEL # range 0-9
        env['TARGZ_SUFFIX'] = '.tar.gz'
        env['TARGZ_BASEDIR'] = env.Dir('.') # Sources archive name are made relative to that directory.
else:
    def generate(env):
        # gzip/tarfile unavailable: register nothing so builds still load.
        pass

def exists(env):
    # SCons tool protocol: report whether this tool is usable here.
    return internal_targz
| apache-2.0 |
computersalat/ansible | test/support/integration/plugins/modules/postgresql_set.py | 47 | 13648 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_set
short_description: Change a PostgreSQL server configuration parameter
description:
- Allows to change a PostgreSQL server configuration parameter.
- The module uses ALTER SYSTEM command and applies changes by reload server configuration.
- ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
- It can be more convenient and safe than the traditional method of manually editing the postgresql.conf file.
- ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
which is read in addition to postgresql.conf.
- The module allows to reset parameter to boot_val (cluster initial value) by I(reset=yes) or remove parameter
string from postgresql.auto.conf and reload I(value=default) (for settings with postmaster context restart is required).
- After change you can see in the ansible output the previous and
the new parameter value and other information using returned values and M(debug) module.
version_added: '2.8'
options:
name:
description:
- Name of PostgreSQL server parameter.
type: str
required: true
value:
description:
- Parameter value to set.
- To remove parameter string from postgresql.auto.conf and
reload the server configuration you must pass I(value=default).
With I(value=default) the playbook always returns changed is true.
type: str
reset:
description:
- Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
type: bool
default: false
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect.
type: str
aliases:
- login_db
notes:
- Supported version of PostgreSQL is 9.4 and later.
- Pay attention, change setting with 'postmaster' context can return changed is true
when actually nothing changes because the same value may be presented in
several different form, for example, 1024MB, 1GB, etc. However in pg_settings
system view it can be defined like 131072 number of 8kB pages.
The final check of the parameter value cannot compare it because the server was
not restarted and the value in pg_settings is not updated yet.
- For some parameters restart of PostgreSQL server is required.
See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
seealso:
- module: postgresql_info
- name: PostgreSQL server configuration
description: General information about PostgreSQL server configuration.
link: https://www.postgresql.org/docs/current/runtime-config.html
- name: PostgreSQL view pg_settings reference
description: Complete reference of the pg_settings view documentation.
link: https://www.postgresql.org/docs/current/view-pg-settings.html
- name: PostgreSQL ALTER SYSTEM command reference
description: Complete reference of the ALTER SYSTEM command documentation.
link: https://www.postgresql.org/docs/current/sql-altersystem.html
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Restore wal_keep_segments parameter to initial state
postgresql_set:
name: wal_keep_segments
reset: yes
# Set work_mem parameter to 32MB and show what's been changed and restart is required or not
# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
- name: Set work mem parameter
postgresql_set:
name: work_mem
value: 32mb
register: set
- debug:
msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
when: set.changed
# Ensure that the restart of PostgreSQL server must be required for some parameters.
# In this situation you see the same parameter in prev_val_pretty and value_pretty, but 'changed=True'
# (If you passed the value that was different from the current server setting).
- name: Set log_min_duration_statement parameter to 1 second
postgresql_set:
name: log_min_duration_statement
value: 1s
- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
postgresql_set:
name: wal_log_hints
value: default
'''
RETURN = r'''
name:
description: Name of PostgreSQL server parameter.
returned: always
type: str
sample: 'shared_buffers'
restart_required:
description: Information about parameter current state.
returned: always
type: bool
sample: true
prev_val_pretty:
description: Information about previous state of the parameter.
returned: always
type: str
sample: '4MB'
value_pretty:
description: Information about current state of the parameter.
returned: always
type: str
sample: '64MB'
value:
description:
- Dictionary that contains the current parameter value (at the time of playbook finish).
- Pay attention that for real change some parameters restart of PostgreSQL server is required.
- Returns the current value in the check mode.
returned: always
type: dict
sample: { "value": 67108864, "unit": "b" }
context:
description:
- PostgreSQL setting context.
returned: always
type: str
sample: user
'''
try:
    from psycopg2.extras import DictCursor
except Exception:
    # psycopg2 is checked by connect_to_db()
    # from ansible.module_utils.postgres
    pass

from copy import deepcopy

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.postgres import (
    connect_to_db,
    get_conn_params,
    postgres_common_argument_spec,
)
from ansible.module_utils._text import to_native

# Minimum supported server version: 9.4 introduced ALTER SYSTEM.
PG_REQ_VER = 90400

# To allow to set value like 1mb instead of 1MB, etc:
POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
# ===========================================
# PostgreSQL module specific support methods.
#
def param_get(cursor, module, name):
    """Read the current state of server parameter *name*.

    Returns a tuple (show_value, raw_setting, unit, boot_val, context).
    Sizes whose unit is kB or MB are normalized to bytes (unit 'b') so the
    caller can compare values numerically. Fails the module on query errors.
    """
    query = ("SELECT name, setting, unit, context, boot_val "
             "FROM pg_settings WHERE name = %(name)s")
    try:
        cursor.execute(query, {'name': name})
        info = cursor.fetchall()
        # SHOW returns the human-readable ("pretty") form of the value.
        cursor.execute("SHOW %s" % name)
        val = cursor.fetchone()

    except Exception as e:
        module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))

    raw_val = info[0][1]
    unit = info[0][2]
    context = info[0][3]
    boot_val = info[0][4]

    # Normalize boolean representation to PostgreSQL's on/off spelling.
    if val[0] == 'True':
        val[0] = 'on'
    elif val[0] == 'False':
        val[0] = 'off'

    # Convert kB/MB-denominated settings to plain bytes (unit 'b').
    if unit == 'kB':
        if int(raw_val) > 0:
            raw_val = int(raw_val) * 1024
        if int(boot_val) > 0:
            boot_val = int(boot_val) * 1024

        unit = 'b'

    elif unit == 'MB':
        if int(raw_val) > 0:
            raw_val = int(raw_val) * 1024 * 1024
        if int(boot_val) > 0:
            boot_val = int(boot_val) * 1024 * 1024

        unit = 'b'

    return (val[0], raw_val, unit, boot_val, context)
def pretty_to_bytes(pretty_val):
    """Convert a human-readable size like '4MB' or '512kB' to bytes.

    Returns an int number of bytes when *pretty_val* carries a recognized
    unit suffix (B, kB, MB, GB, TB -- any letter case). Otherwise the
    argument is returned unchanged (e.g. 'on', '1s', plain numbers).

    Bug fixes vs. the original:
    - 'kB' was matched case-sensitively while MB/GB/TB were not, so '1KB'
      fell through to the plain-'B' branch and was read as 1 byte.
    - Strings with a 'B' but no digits (e.g. 'standby') raised ValueError;
      they are now passed through unchanged.
    """
    num_part = ''.join(d for d in pretty_val if d.isdigit())
    if not num_part:
        # No numeric component: nothing to convert.
        return pretty_val

    upper = pretty_val.upper()
    # Order matters: multi-letter suffixes must be tested before plain 'B'.
    multipliers = (
        ('KB', 1024),
        ('MB', 1024 ** 2),
        ('GB', 1024 ** 3),
        ('TB', 1024 ** 4),
        ('B', 1),
    )
    for suffix, factor in multipliers:
        if suffix in upper:
            return int(num_part) * factor

    return pretty_val
def param_set(cursor, module, name, value, context):
    """Apply ALTER SYSTEM SET for *name*, reloading the config when possible.

    'default' (any case) removes the parameter from postgresql.auto.conf.
    For non-postmaster contexts the configuration is reloaded immediately;
    postmaster-context settings take effect only after a server restart.
    Always returns True; failures end the module via fail_json.
    """
    try:
        if str(value).lower() == 'default':
            query = "ALTER SYSTEM SET %s = DEFAULT" % name
        else:
            # ALTER SYSTEM is a utility statement and cannot take bound
            # parameters, so name/value are interpolated; both originate
            # from the module's own arguments.
            query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
        cursor.execute(query)

        if context != 'postmaster':
            cursor.execute("SELECT pg_reload_conf()")

    except Exception as e:
        # Bug fix: the original message said "get" -- this path is a failed set.
        module.fail_json(msg="Unable to set %s value due to : %s" % (name, to_native(e)))

    return True
# ===========================================
# Module execution.
#
def main():
    """Module entry point: validate arguments, then set or reset the parameter."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        name=dict(type='str', required=True),
        db=dict(type='str', aliases=['login_db']),
        value=dict(type='str'),
        reset=dict(type='bool'),
        session_role=dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    name = module.params["name"]
    value = module.params["value"]
    reset = module.params["reset"]

    # Allow to pass values like 1mb instead of 1MB, etc:
    if value:
        for unit in POSSIBLE_SIZE_UNITS:
            if value[:-2].isdigit() and unit in value[-2:]:
                value = value.upper()

    # value and reset are mutually exclusive; exactly one must be supplied.
    if value and reset:
        module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)

    if not value and not reset:
        module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)

    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    kw = {}
    # Check server version (needs 9.4 or later):
    ver = db_connection.server_version
    if ver < PG_REQ_VER:
        # Server predates ALTER SYSTEM: warn and exit without changing anything.
        module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
        kw = dict(
            changed=False,
            restart_required=False,
            value_pretty="",
            prev_val_pretty="",
            value={"value": "", "unit": ""},
        )
        kw['name'] = name
        db_connection.close()
        module.exit_json(**kw)

    # Set default returned values:
    restart_required = False
    changed = False
    kw['name'] = name
    kw['restart_required'] = False

    # Get info about param state:
    res = param_get(cursor, module, name)
    current_value = res[0]
    raw_val = res[1]
    unit = res[2]
    boot_val = res[3]
    context = res[4]

    # Normalize booleans to PostgreSQL's on/off spelling.
    if value == 'True':
        value = 'on'
    elif value == 'False':
        value = 'off'

    kw['prev_val_pretty'] = current_value
    kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
    kw['context'] = context

    # Do job
    if context == "internal":
        module.fail_json(msg="%s: cannot be changed (internal context). See "
                             "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)

    if context == "postmaster":
        # Changes are written but only take effect after a server restart.
        restart_required = True

    # If check_mode, just compare and exit:
    if module.check_mode:
        if pretty_to_bytes(value) == pretty_to_bytes(current_value):
            kw['changed'] = False
        else:
            kw['value_pretty'] = value
            kw['changed'] = True

        # Anyway returns current raw value in the check_mode:
        kw['value'] = dict(
            value=raw_val,
            unit=unit,
        )
        kw['restart_required'] = restart_required
        module.exit_json(**kw)

    # Set param:
    if value and value != current_value:
        changed = param_set(cursor, module, name, value, context)
        kw['value_pretty'] = value

    # Reset param:
    elif reset:
        if raw_val == boot_val:
            # nothing to change, exit:
            kw['value'] = dict(
                value=raw_val,
                unit=unit,
            )
            module.exit_json(**kw)

        changed = param_set(cursor, module, name, boot_val, context)

    if restart_required:
        module.warn("Restart of PostgreSQL is required for setting %s" % name)

    cursor.close()
    db_connection.close()

    # Reconnect and recheck current value:
    if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
        db_connection = connect_to_db(module, conn_params, autocommit=True)
        cursor = db_connection.cursor(cursor_factory=DictCursor)

        res = param_get(cursor, module, name)
        # f_ means 'final'
        f_value = res[0]
        f_raw_val = res[1]

        # Report changed only when the stored raw value actually moved.
        if raw_val == f_raw_val:
            changed = False
        else:
            changed = True

        kw['value_pretty'] = f_value
        kw['value'] = dict(
            value=f_raw_val,
            unit=unit,
        )

        cursor.close()
        db_connection.close()

    kw['changed'] = changed
    kw['restart_required'] = restart_required
    module.exit_json(**kw)
| gpl-3.0 |
Carlstark/SAMA5D4-XULT | linux-at91-linux-3.10/tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import syscall_name

usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

for_comm = None
for_pid = None

# Optional single argument: a numeric pid, otherwise treated as a comm name.
if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# syscalls[comm][pid][syscall_id] -> count (autodict comes from Core).
syscalls = autodict()
def trace_begin():
    # Called by perf before event processing starts (Python 2 script).
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf when the trace ends (or on Ctrl+C): emit the summary.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    """Per-event hook: count one syscall entry for (comm, pid, id)."""
    # Skip events that don't match the optional comm/pid filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # First hit for this (comm, pid, id): the autodict leaf is not an
        # int yet, so initialize the counter.
        syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    """Print per-comm/per-pid syscall counts, most frequent first (Python 2)."""
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",

    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort by count descending; tuple-unpacking lambdas are
            # Python 2-only syntax.
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print "  %-38s  %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
gdw2/zim | zim/plugins/scoreeditor.py | 1 | 4552 | # -*- coding: utf-8 -*-
#
# scoreeditor.py
#
# This is a plugin for Zim, which allows to insert music score in zim using
# GNU Lilypond.
#
#
# Author: Shoban Preeth <shoban.preeth@gmail.com>
# Date: 2012-07-05
# Copyright (c) 2012, released under the GNU GPL v2 or higher
#
#
import glob
from zim.plugins.base.imagegenerator import ImageGeneratorPlugin, ImageGeneratorClass
from zim.fs import File, TmpFile
from zim.config import data_file
from zim.templates import GenericTemplate
from zim.applications import Application, ApplicationError
# TODO put these commands in preferences
# Command lines for the external LilyPond tools this plugin drives.
lilypond_cmd = ('lilypond', '-ddelete-intermediate-files',
    # '-dsafe', # Can't include files in safe mode
    '-dbackend=eps', '--png', '--header=texidoc')
convertly_cmd = ('convert-ly', '--current-version', '--edit')
lilypondver_cmd = ('lilypond', '--version')
def _get_lilypond_version():
    '''Return the version reported by `lilypond --version`, falling back
    to '2.14.2' when the binary cannot be executed.
    '''
    try:
        first_line = Application(lilypondver_cmd).pipe()[0]
    except ApplicationError:
        return '2.14.2'
    return first_line.split()[2]
class InsertScorePlugin(ImageGeneratorPlugin):
    # Metadata shown in zim's plugin manager.
    plugin_info = {
        'name': _('Insert Score'), # T: plugin name
        'description': _('''\
This plugin provides an score editor for zim based on GNU Lilypond.

This is a core plugin shipping with zim.
'''), # T: plugin description
        'help': 'Plugins:Score Editor',
        'author': 'Shoban Preeth',
    }

    # (key, type, label, default) tuples for the preferences dialog.
    plugin_preferences = [
        # key, type, label, default
        ('include_header', 'string', _('Common include header'), '\include "predefined-guitar-fretboards.ly"'), # T: plugin preference
        ('include_footer', 'string', _('Common include footer'), ''), # T: plugin preference
    ]

    # Identifiers/labels consumed by the ImageGeneratorPlugin framework.
    object_type = 'score'
    short_label = _('S_core') # T: menu item
    insert_label = _('Insert Score') # T: menu item
    edit_label = _('_Edit Score') # T: menu item
    syntax = None

    @classmethod
    def check_dependencies(klass):
        # Report whether the GNU LilyPond binary is executable on this system.
        has_lilypond = Application(lilypond_cmd).tryexec()
        return has_lilypond, [('GNU Lilypond', has_lilypond, True)]
class ScoreGenerator(ImageGeneratorClass):
object_type = 'score'
scriptname = 'score.ly'
imagename = 'score.png'
cur_lilypond_version = None
def __init__(self, plugin):
ImageGeneratorClass.__init__(self, plugin)
file = data_file('templates/plugins/scoreeditor.ly')
assert file, 'BUG: could not find templates/plugins/scoreeditor.ly'
self.template = GenericTemplate(file.readlines(), name=file)
self.scorefile = TmpFile(self.scriptname)
self.cur_lilypond_version = _get_lilypond_version()
self.include_header = plugin.preferences['include_header']
self.include_footer = plugin.preferences['include_footer']
def process_input(self, text):
'''Prepend version string to user input. It is also stored in
the script file.
'''
version_present = False
for l in text.splitlines(True):
if l.strip().startswith('\\version'):
version_present = True
if not version_present:
text = '\\version "{0}"\n\n'.format(self.cur_lilypond_version) + text
return text
def extract_version(self, text):
outtext = []
version = None
for l in text:
if l.strip().startswith('\\version'):
version = l.strip()
else:
outtext.append(l)
return (version, outtext)
def generate_image(self, text):
if isinstance(text, basestring):
text = text.splitlines(True)
(version, text) = self.extract_version(text)
text = ''.join(text)
#~ print '>>>%s<<<' % text
# Write to tmp file using the template for the header / footer
scorefile = self.scorefile
scorefile.writelines(
self.template.process({'score': text,
'version': version,
'include_header': self.include_header,
'include_footer': self.include_footer}) )
#~ print '>>>%s<<<' % scorefile.read()
# Call convert-ly to convert document of current version of
# Lilypond.
clogfile = File(scorefile.path[:-3] + '-convertly.log') # len('.ly) == 3
try:
convertly = Application(convertly_cmd)
convertly.run((scorefile.basename,), cwd=scorefile.dir)
except ApplicationError:
clogfile.write('convert-ly failed.\n')
return None, clogfile
# Call lilypond to generate image.
logfile = File(scorefile.path[:-3] + '.log') # len('.ly') == 3
try:
lilypond = Application(lilypond_cmd)
lilypond.run(('-dlog-file=' + logfile.basename[:-4], scorefile.basename,), cwd=scorefile.dir)
except ApplicationError:
# log should have details of failure
return None, logfile
pngfile = File(scorefile.path[:-3] + '.png') # len('.ly') == 3
return pngfile, logfile
def cleanup(self):
    '''Remove the temporary score file and every file generated from it
    (the log, png, ... all share the same basename).'''
    basepath = self.scorefile.path
    for generated in glob.glob(basepath[:-3] + '*'):
        File(generated).remove()
| gpl-2.0 |
Rona111/sale-workflow | sale_quotation_sourcing/tests/__init__.py | 34 | 1037 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_sourced_by
from . import test_consistent_route
from . import test_procurement_group
| agpl-3.0 |
tommybobbins/Raspi_433 | Python/quick_433.py | 1 | 2225 | #!/usr/bin/python
import time
# Written tng@chegwin.org 3-Jul-2014
# Python runs slower than C, so 18.9 factor below becomes 16.9 (microseconds)
# Duration of one 433 MHz protocol "sample" period, in seconds.
# NOTE(review): the header comment above mentions 16.9 microseconds, but
# the value actually used here is 15.6 — presumably re-tuned; confirm.
sample_length=15.6/1000000.0
# Setup Pin 18 as output (Pin 12 in Pi numbering)
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
#Calculate our on/offs up front (pulse durations in seconds)
thirty=30*sample_length #27
thirtyonepointfive=31.5*sample_length
six=6*sample_length
four=4*sample_length
def one():
    '''Transmit a logical 1 on pin 18: 31.5 sample periods high,
    then 4 low.'''
    for level, pause in ((True, thirtyonepointfive), (False, four)):
        GPIO.output(18, level)
        time.sleep(pause)
def zero():
    '''Transmit a logical 0 on pin 18: 6 sample periods high,
    then 30 low.'''
    for level, pause in ((True, six), (False, thirty)):
        GPIO.output(18, level)
        time.sleep(pause)
def sequence(incoming_string):
    '''Transmit *incoming_string* (a string of '1'/'0' characters) on
    pin 18, repeating the whole pattern 8 times with a 150-sample gap
    between repetitions. Characters other than '0'/'1' are reported
    and skipped.
    '''
    # Make sure we are all off to start with
    GPIO.output(18, False)
    time.sleep(450 * sample_length)
    for _ in range(8):
        # Walk the bit string and transmit each symbol.
        for bit in incoming_string:
            # Compare characters directly: the old int(b) conversion
            # raised ValueError on any non-digit character before the
            # error branch could ever run.
            if bit == '0':
                zero()
            elif bit == '1':
                one()
            else:
                print ("Something gone wrong")
        # Sleep 150 samples between repetitions
        time.sleep(150 * sample_length)
    GPIO.output(18, False)
    # Pause between whole sequences (1 second, despite the old comment
    # that claimed 2).
    time.sleep(1)
def main():
    '''Send the hard-coded switch code, then release the GPIO pins.

    Other known codes, kept here for reference:
      on    : 1011111100010000000011110 / ...0001110 / ...0000110
      off   : 1011111100010000000011100 / ...0001100 / ...0010100 / ...0000100
      master: 1011111100010000000011010 (on) / ...0011000 (off)
    '''
    # Socket 3 "on" code.
    sequence('1011111100010000000010110')
    GPIO.cleanup()
# Script entry point: run the transmitter; Ctrl-C (or SystemExit) still
# releases the GPIO pins before quitting.
if __name__ == "__main__":
    try:
        main()
    except (KeyboardInterrupt, SystemExit):
        print ("Thanks. Goodbye")
        GPIO.cleanup()
        exit();
| gpl-2.0 |
handspring/bite-project | deps/gdata-python-client/src/gdata/Crypto/PublicKey/qNEW.py | 228 | 5545 | #
# qNEW.py : The q-NEW signature algorithm.
#
# Part of the Python Cryptography Toolkit
#
# Distribute and use freely; there are no restrictions on further
# dissemination and usage except those imposed by the laws of your
# country of residence. This software is provided "as is" without
# warranty of fitness for use or suitability for any purpose, express
# or implied. Use at your own risk or not at all.
#
__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $"
from Crypto.PublicKey import pubkey
from Crypto.Util.number import *
from Crypto.Hash import SHA
class error (Exception):
    """Generic exception raised for qNEW key and signature errors."""
    pass
HASHBITS = 160 # Size of SHA digests
def generate(bits, randfunc, progress_func=None):
    """generate(bits:int, randfunc:callable, progress_func:callable)

    Generate a qNEW key of length 'bits', using 'randfunc' to get
    random data and 'progress_func', if present, to display
    the progress of the key generation.

    NOTE(review): Python 2 only (long literals such as 2L, and classic
    integer division).
    """
    obj=qNEWobj()

    # Generate prime numbers p and q.  q is a 160-bit prime
    # number.  p is another prime number (the modulus) whose bit
    # size is chosen by the caller, and is generated so that p-1
    # is a multiple of q.
    #
    # Note that only a single seed is used to
    # generate p and q; if someone generates a key for you, you can
    # use the seed to duplicate the key generation.  This can
    # protect you from someone generating values of p,q that have
    # some special form that's easy to break.
    if progress_func:
        progress_func('p,q\n')
    while (1):
        obj.q = getPrime(160, randfunc)
        #           assert pow(2, 159L)<obj.q<pow(2, 160L)
        obj.seed = S = long_to_bytes(obj.q)
        C, N, V = 0, 2, {}              # counter, hash input index, candidate bits

        # Compute b and n such that bits-1 = b + n*HASHBITS
        n= (bits-1) / HASHBITS
        b= (bits-1) % HASHBITS ; powb=2L << b
        powL1=pow(long(2), bits-1)
        while C<4096:
            # The V array will contain (bits-1) bits of random
            # data, that are assembled to produce a candidate
            # value for p.
            for k in range(0, n+1):
                V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest())
            p = V[n] % powb
            for k in range(n-1, -1, -1):
                p= (p << long(HASHBITS) )+V[k]
            p = p+powL1         # Ensure the high bit is set

            # Ensure that p-1 is a multiple of q
            p = p - (p % (2*obj.q)-1)

            # If p is still the right size, and it's prime, we're done!
            if powL1<=p and isPrime(p):
                break

            # Otherwise, increment the counter and try again
            C, N = C+1, N+n+1
        if C<4096:
            break   # Ended early, so exit the while loop
        if progress_func:
            progress_func('4096 values of p tried\n')

    obj.p = p
    power=(p-1)/obj.q

    # Next parameter: g = h**((p-1)/q) mod p, such that h is any
    # number <p-1, and g>1.  g is kept; h can be discarded.
    if progress_func:
        progress_func('h,g\n')
    while (1):
        h=bytes_to_long(randfunc(bits)) % (p-1)
        g=pow(h, power, p)
        if 1<h<p-1 and g>1:
            break
    obj.g=g

    # x is the private key information, and is
    # just a random number between 0 and q.
    # y=g**x mod p, and is part of the public information.
    if progress_func:
        progress_func('x,y\n')
    while (1):
        x=bytes_to_long(randfunc(20))
        if 0 < x < obj.q:
            break
    obj.x, obj.y=x, pow(g, x, p)

    return obj
# Construct a qNEW object
def construct(tuple):
    """construct(tuple:(long,long,long,long)|(long,long,long,long,long)

    Construct a qNEW object from a 4-tuple (p, q, g, y) — public key —
    or a 5-tuple (p, q, g, y, x) — private key.

    NOTE(review): Python 2 only (old-style raise statement); the
    parameter name shadows the builtin ``tuple``.
    """
    obj=qNEWobj()
    if len(tuple) not in [4,5]:
        raise error, 'argument for construct() wrong length'
    # Assign fields positionally in qNEWobj.keydata order.
    for i in range(len(tuple)):
        field = obj.keydata[i]
        setattr(obj, field, tuple[i])
    return obj
class qNEWobj(pubkey.pubkey):
    """A qNEW key object (signing only).

    NOTE(review): Python 2 only (old-style raise, long literals).
    """
    # Attribute order matters: construct() assigns incoming tuples in this
    # order, and the first four entries form the public key.
    keydata=['p', 'q', 'g', 'y', 'x']

    def _sign(self, M, K=''):
        # Sign integer message hash M (< 2**161) with nonce K (0 < K < q).
        if (self.q<=K):
            raise error, 'K is greater than q'
        if M<0:
            raise error, 'Illegal value of M (<0)'
        if M>=pow(2,161L):
            raise error, 'Illegal value of M (too large)'
        r=pow(self.g, K, self.p) % self.q
        s=(K- (r*M*self.x % self.q)) % self.q
        return (r,s)
    def _verify(self, M, sig):
        # Return 1 when (r, s) is a valid signature of M, else 0.
        r, s = sig
        if r<=0 or r>=self.q or s<=0 or s>=self.q:
            return 0
        if M<0:
            raise error, 'Illegal value of M (<0)'
        if M<=0 or M>=pow(2,161L):
            return 0
        v1 = pow(self.g, s, self.p)
        v2 = pow(self.y, M*r, self.p)
        v = ((v1*v2) % self.p)
        v = v % self.q
        if v==r:
            return 1
        return 0

    def size(self):
        "Return the maximum number of bits that can be handled by this key."
        return 160

    def has_private(self):
        """Return a Boolean denoting whether the object contains
        private components."""
        return hasattr(self, 'x')

    def can_sign(self):
        """Return a Boolean value recording whether this algorithm can generate signatures."""
        return 1

    def can_encrypt(self):
        """Return a Boolean value recording whether this algorithm can encrypt data."""
        return 0

    def publickey(self):
        """Return a new key object containing only the public information."""
        return construct((self.p, self.q, self.g, self.y))

# Module-level alias; presumably used by the toolkit's generic
# key-loading machinery — confirm against Crypto.PublicKey conventions.
object = qNEWobj
| apache-2.0 |
rcbops/keystone-buildpackage | keystone/logic/types/tenant.py | 3 | 6014 | # Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from lxml import etree
from keystone.logic.types import fault
class Tenant(object):
    """Describes a tenant in the auth system."""

    id = None
    name = None
    description = None
    enabled = None

    def __init__(self, id=None, name=None, description=None, enabled=None):
        self.id = id
        self.name = name
        self.description = description
        # Normalise to a real bool, but keep None meaning "not specified".
        if enabled is not None:
            self.enabled = bool(enabled)
        else:
            self.enabled = None

    @staticmethod
    def from_xml(xml_str):
        """Deserialize a Tenant from an XML document string.

        Raises fault.BadRequestFault on malformed or incomplete input.
        """
        try:
            dom = etree.Element("root")
            dom.append(etree.fromstring(xml_str))
            root = dom.find(
                "{http://docs.openstack.org/identity/api/v2.0}tenant")
            # lxml deprecates comparing elements with ==/!= None; the
            # documented test is identity ("is None").
            if root is None:
                raise fault.BadRequestFault("Expecting Tenant")
            id = root.get("id")
            name = root.get("name")
            enabled = root.get("enabled")
            # A missing attribute counts as enabled.
            if enabled is None or enabled == "true" or enabled == "yes":
                set_enabled = True
            elif enabled == "false" or enabled == "no":
                set_enabled = False
            else:
                raise fault.BadRequestFault("Bad enabled attribute!")
            desc = root.find("{http://docs.openstack.org/identity/api/v2.0}"
                "description")
            if desc is None:
                raise fault.BadRequestFault("Expecting Tenant Description")
            return Tenant(id=id, name=name, description=desc.text,
                enabled=set_enabled)
        except etree.LxmlError as e:
            raise fault.BadRequestFault("Cannot parse Tenant", str(e))

    @staticmethod
    def from_json(json_str):
        """Deserialize a Tenant from a JSON document string.

        Raises fault.BadRequestFault on malformed or incomplete input.
        """
        try:
            obj = json.loads(json_str)
            if "tenant" not in obj:
                raise fault.BadRequestFault("Expecting tenant")
            tenant = obj["tenant"]
            id = tenant.get("id", None)
            name = tenant.get("name", None)
            set_enabled = True
            if "enabled" in tenant:
                set_enabled = tenant["enabled"]
                if not isinstance(set_enabled, bool):
                    raise fault.BadRequestFault("Bad enabled attribute!")
            if "description" not in tenant:
                raise fault.BadRequestFault("Expecting Tenant Description")
            description = tenant["description"]
            return Tenant(id=id, name=name, description=description,
                enabled=set_enabled)
        except (ValueError, TypeError) as e:
            raise fault.BadRequestFault("Cannot parse Tenant", str(e))

    def to_dom(self):
        """Return an etree Element representing this tenant."""
        dom = etree.Element("tenant",
            xmlns="http://docs.openstack.org/identity/api/v2.0",
            enabled=str(self.enabled).lower())
        if self.id:
            dom.set("id", unicode(self.id))
        if self.name:
            dom.set("name", unicode(self.name))
        desc = etree.Element("description")
        desc.text = unicode(self.description)
        dom.append(desc)
        return dom

    def to_xml(self):
        """Serialize to an XML string."""
        return etree.tostring(self.to_dom())

    def to_dict(self):
        """Return a JSON-serializable dict; id/name only when truthy."""
        tenant = {
            "description": unicode(self.description),
            "enabled": self.enabled}
        if self.id:
            tenant["id"] = unicode(self.id)
        if self.name:
            tenant["name"] = unicode(self.name)
        return {"tenant": tenant}

    def to_json(self):
        """Serialize to a JSON string."""
        return json.dumps(self.to_dict())
class Tenants(object):
    """A collection of tenants.

    ``values`` holds Tenant instances, ``links`` holds pagination link
    objects; both expose to_dom()/to_dict().
    """

    def __init__(self, values, links):
        self.values = values
        self.links = links

    def to_xml(self):
        # Serialize every tenant and pagination link under one <tenants> root.
        dom = etree.Element("tenants")
        dom.set(u"xmlns", "http://docs.openstack.org/identity/api/v2.0")

        for t in self.values:
            dom.append(t.to_dom())

        for t in self.links:
            dom.append(t.to_dom())

        return etree.tostring(dom)

    def to_json(self):
        # JSON shape: {"tenants": {"values": [...], "links": [...]}}
        values = [t.to_dict()["tenant"] for t in self.values]
        links = [t.to_dict()["links"] for t in self.links]
        return json.dumps({"tenants": {"values": values, "links": links}})
class User(object):
    """Describes a user in the auth system

    TODO: This is basically a duplicate of keystone.logic.types.user.User and
    should be considered deprecated.
    """

    def __init__(self, user_id, email, enabled, tenant_id=None):
        self.user_id = user_id
        self.tenant_id = tenant_id
        self.email = email
        self.enabled = bool(enabled)

    def to_dom(self):
        # XML attributes are only emitted when truthy, so enabled=False is
        # omitted from the XML form (it is always present in to_dict()).
        dom = etree.Element("user",
            xmlns="http://docs.openstack.org/identity/api/v2.0")
        if self.user_id:
            dom.set("id", self.user_id)
        if self.tenant_id:
            dom.set("tenantId", self.tenant_id)
        if self.email:
            dom.set("email", self.email)
        if self.enabled:
            dom.set("enabled", str(self.enabled).lower())
        return dom

    def to_xml(self):
        # Serialize to an XML string.
        return etree.tostring(self.to_dom())

    def to_dict(self):
        # Serialize to a dict; enabled is rendered as "true"/"false".
        user = {}
        user["id"] = self.user_id
        user["email"] = self.email
        user["enabled"] = str(self.enabled).lower()
        if self.tenant_id:
            user["tenantId"] = self.tenant_id
        return {'user': user}

    def to_json(self):
        # Serialize to a JSON string.
        return json.dumps(self.to_dict())
| apache-2.0 |
riteshshrv/django | django/core/files/utils.py | 395 | 1338 | from django.utils import six
class FileProxyMixin(object):
    """
    A mixin class used to forward file methods to an underlaying file
    object. The internal file object has to be called "file"::

        class FileProxy(FileProxyMixin):
            def __init__(self, file):
                self.file = file
    """

    # Each attribute is a property so the lookup on self.file happens at
    # access time — the underlying file can be replaced after construction.
    encoding = property(lambda self: self.file.encoding)
    fileno = property(lambda self: self.file.fileno)
    flush = property(lambda self: self.file.flush)
    isatty = property(lambda self: self.file.isatty)
    newlines = property(lambda self: self.file.newlines)
    read = property(lambda self: self.file.read)
    readinto = property(lambda self: self.file.readinto)
    readline = property(lambda self: self.file.readline)
    readlines = property(lambda self: self.file.readlines)
    seek = property(lambda self: self.file.seek)
    softspace = property(lambda self: self.file.softspace)
    tell = property(lambda self: self.file.tell)
    truncate = property(lambda self: self.file.truncate)
    write = property(lambda self: self.file.write)
    writelines = property(lambda self: self.file.writelines)
    xreadlines = property(lambda self: self.file.xreadlines)
    # seekable() only exists on Python 3 file objects.
    if six.PY3:
        seekable = property(lambda self: self.file.seekable)

    def __iter__(self):
        # Delegate iteration (e.g. line-by-line reads) to the wrapped file.
        return iter(self.file)
| bsd-3-clause |
taotie12010/bigfour | common/djangoapps/embargo/migrations/0003_add_countries.py | 102 | 6889 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django_countries import countries
class Migration(DataMigration):
    """South data migration: seed the embargo Country table with every
    2-character ISO country code known to django-countries."""

    def forwards(self, orm):
        """Populate the available countries with all 2-character ISO country codes. """
        # get_or_create keeps the migration idempotent if re-run.
        for country_code, __ in list(countries):
            orm.Country.objects.get_or_create(country=country_code)

    def backwards(self, orm):
        """Clear all available countries. """
        # NOTE(review): deletes every Country row, not only rows created
        # by forwards() — acceptable for this table, but worth knowing.
        orm.Country.objects.all().delete()

    # Frozen ORM model definitions South uses to build the `orm` object
    # passed to forwards()/backwards(). Auto-generated; do not hand-edit.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'embargo.country': {
            'Meta': {'object_name': 'Country'},
            'country': ('django_countries.fields.CountryField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'embargo.countryaccessrule': {
            'Meta': {'unique_together': "(('restricted_course', 'rule_type'),)", 'object_name': 'CountryAccessRule'},
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.Country']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'restricted_course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.RestrictedCourse']"}),
            'rule_type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'embargo.embargoedcourse': {
            'Meta': {'object_name': 'EmbargoedCourse'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'embargoed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'embargo.embargoedstate': {
            'Meta': {'object_name': 'EmbargoedState'},
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'embargoed_countries': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'embargo.ipfilter': {
            'Meta': {'object_name': 'IPFilter'},
            'blacklist': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'whitelist': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'embargo.restrictedcourse': {
            'Meta': {'object_name': 'RestrictedCourse'},
            'access_msg_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'enroll_msg_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        }
    }

    complete_apps = ['embargo']
    symmetrical = True
| agpl-3.0 |
jawed123/django-stripe-payments | payments/models.py | 4 | 33811 | import datetime
import decimal
import json
import traceback
import six
from django.conf import settings
from django.core.mail import EmailMessage
from django.db import models
from django.utils import timezone
from django.utils.encoding import smart_str
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
import stripe
from jsonfield.fields import JSONField
from .managers import CustomerManager, ChargeManager, TransferManager
from .settings import (
DEFAULT_PLAN,
INVOICE_FROM_EMAIL,
PAYMENTS_PLANS,
plan_from_stripe_id,
SEND_EMAIL_RECEIPTS,
TRIAL_PERIOD_FOR_USER_CALLBACK,
PLAN_QUANTITY_CALLBACK
)
from .signals import (
cancelled,
card_changed,
subscription_made,
webhook_processing_error,
WEBHOOK_SIGNALS,
)
from .utils import (
convert_tstamp,
convert_amount_for_db,
convert_amount_for_api,
)
# Configure the module-global stripe client from Django settings. The API
# version is pinned (default "2012-11-07") so webhook payload shapes match
# what the models below parse.
stripe.api_key = settings.STRIPE_SECRET_KEY
stripe.api_version = getattr(settings, "STRIPE_API_VERSION", "2012-11-07")
class StripeObject(models.Model):
    """Abstract base for local mirrors of Stripe API objects: stores the
    remote object's id and when the local row was created."""

    stripe_id = models.CharField(max_length=255, unique=True)
    created_at = models.DateTimeField(default=timezone.now)

    class Meta:  # pylint: disable=E0012,C1001
        abstract = True
class EventProcessingException(models.Model):
    """Audit record for an exception raised while processing an Event."""

    event = models.ForeignKey("Event", null=True)
    data = models.TextField()
    message = models.CharField(max_length=500)
    traceback = models.TextField()
    created_at = models.DateTimeField(default=timezone.now)

    @classmethod
    def log(cls, data, exception, event):
        """Persist *exception* (with the currently-active traceback) for
        later inspection; ``data`` is typically the raw HTTP body."""
        cls.objects.create(
            event=event,
            data=data or "",
            message=str(exception),
            traceback=traceback.format_exc()
        )

    def __unicode__(self):
        return six.u("<{}, pk={}, Event={}>").format(self.message, self.pk, self.event)
class Event(StripeObject):
    """A Stripe webhook event and its local validation/processing state."""

    kind = models.CharField(max_length=250)  # e.g. "invoice.payment_succeeded"
    livemode = models.BooleanField(default=False)
    customer = models.ForeignKey("Customer", null=True)
    webhook_message = JSONField()            # payload as posted by the webhook
    validated_message = JSONField(null=True)  # payload re-fetched from Stripe
    valid = models.NullBooleanField(null=True)  # None until validate() has run
    processed = models.BooleanField(default=False)

    @property
    def message(self):
        # The trusted payload: only ever the copy re-fetched from Stripe.
        return self.validated_message

    def __unicode__(self):
        return "%s - %s" % (self.kind, self.stripe_id)

    def link_customer(self):
        """Attach the local Customer this event refers to, if one exists.

        customer.* CRUD events carry the customer id as the object's own
        id; all other kinds expose it under the "customer" key. Unknown
        customers are silently ignored.
        """
        cus_id = None
        customer_crud_events = [
            "customer.created",
            "customer.updated",
            "customer.deleted"
        ]
        if self.kind in customer_crud_events:
            cus_id = self.message["data"]["object"]["id"]
        else:
            cus_id = self.message["data"]["object"].get("customer", None)

        if cus_id is not None:
            try:
                self.customer = Customer.objects.get(stripe_id=cus_id)
                self.save()
            except Customer.DoesNotExist:
                pass

    def validate(self):
        """Re-fetch this event from Stripe and record whether the webhook
        payload matches it — guards against forged webhook posts."""
        evt = stripe.Event.retrieve(self.stripe_id)
        # Round-trip through JSON so both sides compare as plain dicts.
        self.validated_message = json.loads(
            json.dumps(
                evt.to_dict(),
                sort_keys=True,
                cls=stripe.StripeObjectEncoder
            )
        )
        if self.webhook_message["data"] == self.validated_message["data"]:
            self.valid = True
        else:
            self.valid = False
        self.save()

    def process(self):  # @@@ too complex, fix later # noqa
        """Dispatch a validated, unprocessed event to its handler, emit
        the matching signal, and mark the event processed. Stripe errors
        are logged via EventProcessingException rather than raised.

        Known event kinds include:
        "account.updated",
        "account.application.deauthorized",
        "charge.succeeded",
        "charge.failed",
        "charge.refunded",
        "charge.dispute.created",
        "charge.dispute.updated",
        "charge.dispute.closed",
        "customer.created",
        "customer.updated",
        "customer.deleted",
        "customer.subscription.created",
        "customer.subscription.updated",
        "customer.subscription.deleted",
        "customer.subscription.trial_will_end",
        "customer.discount.created",
        "customer.discount.updated",
        "customer.discount.deleted",
        "invoice.created",
        "invoice.updated",
        "invoice.payment_succeeded",
        "invoice.payment_failed",
        "invoiceitem.created",
        "invoiceitem.updated",
        "invoiceitem.deleted",
        "plan.created",
        "plan.updated",
        "plan.deleted",
        "coupon.created",
        "coupon.updated",
        "coupon.deleted",
        "transfer.created",
        "transfer.updated",
        "transfer.failed",
        "ping"
        """
        if not self.valid or self.processed:
            return
        try:
            # plan.* and transfer.* events carry no customer to link.
            if not self.kind.startswith("plan.") and not self.kind.startswith("transfer."):
                self.link_customer()
            if self.kind.startswith("invoice."):
                Invoice.handle_event(self)
            elif self.kind.startswith("charge."):
                self.customer.record_charge(
                    self.message["data"]["object"]["id"]
                )
            elif self.kind.startswith("transfer."):
                Transfer.process_transfer(
                    self,
                    self.message["data"]["object"]
                )
            elif self.kind.startswith("customer.subscription."):
                if self.customer:
                    self.customer.sync_current_subscription()
            elif self.kind == "customer.deleted":
                self.customer.purge()
            self.send_signal()
            self.processed = True
            self.save()
        except stripe.StripeError as e:
            # Record the failure for inspection and notify listeners;
            # `processed` stays False so the event can be retried.
            EventProcessingException.log(
                data=e.http_body,
                exception=e,
                event=self
            )
            webhook_processing_error.send(
                sender=Event,
                data=e.http_body,
                exception=e
            )

    def send_signal(self):
        # Fire the per-kind Django signal, when one is registered.
        signal = WEBHOOK_SIGNALS.get(self.kind)
        if signal:
            return signal.send(sender=Event, event=self)
class Transfer(StripeObject):
    """Local record of a Stripe payout/transfer, including the summary
    statistics Stripe attaches to the transfer payload."""
    # pylint: disable=C0301
    event = models.ForeignKey(Event, related_name="transfers")
    amount = models.DecimalField(decimal_places=2, max_digits=9)
    currency = models.CharField(max_length=25, default="usd")
    status = models.CharField(max_length=25)
    date = models.DateTimeField()
    description = models.TextField(null=True, blank=True)
    # Fields below mirror the optional "summary" section of the payload.
    adjustment_count = models.IntegerField(null=True)
    adjustment_fees = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    adjustment_gross = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    charge_count = models.IntegerField(null=True)
    charge_fees = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    charge_gross = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    collected_fee_count = models.IntegerField(null=True)
    collected_fee_gross = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    net = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    refund_count = models.IntegerField(null=True)
    refund_fees = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    refund_gross = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    validation_count = models.IntegerField(null=True)
    validation_fees = models.DecimalField(decimal_places=2, max_digits=9, null=True)

    objects = TransferManager()

    def update_status(self):
        """Refresh this transfer's status from the Stripe API."""
        self.status = stripe.Transfer.retrieve(self.stripe_id).status
        self.save()

    @classmethod
    def process_transfer(cls, event, transfer):
        """Create or update the local Transfer row for a transfer.* event.

        ``transfer`` is the raw object dict from the event payload.
        """
        defaults = {
            "amount": convert_amount_for_db(transfer["amount"], transfer["currency"]),
            "currency": transfer["currency"],
            "status": transfer["status"],
            "date": convert_tstamp(transfer, "date"),
            "description": transfer.get("description", "")
        }
        summary = transfer.get("summary")
        if summary:
            defaults.update({
                "adjustment_count": summary.get("adjustment_count"),
                "adjustment_fees": summary.get("adjustment_fees"),
                "adjustment_gross": summary.get("adjustment_gross"),
                "charge_count": summary.get("charge_count"),
                "charge_fees": summary.get("charge_fees"),
                "charge_gross": summary.get("charge_gross"),
                "collected_fee_count": summary.get("collected_fee_count"),
                "collected_fee_gross": summary.get("collected_fee_gross"),
                "refund_count": summary.get("refund_count"),
                "refund_fees": summary.get("refund_fees"),
                "refund_gross": summary.get("refund_gross"),
                "validation_count": summary.get("validation_count"),
                "validation_fees": summary.get("validation_fees"),
                "net": convert_amount_for_db(summary.get("net"), transfer["currency"]),
            })
        # Summary amounts arrive in cents; convert every *fees/*gross
        # field (currency assumed to be USD here — see comment below).
        for field in defaults:
            if field.endswith("fees") or field.endswith("gross"):
                defaults[field] = convert_amount_for_db(defaults[field])  # assume in usd only
        if event.kind == "transfer.paid":
            # transfer.paid supersedes earlier events for the same id, so
            # look up by stripe_id alone and re-point the event FK.
            defaults.update({"event": event})
            obj, created = Transfer.objects.get_or_create(
                stripe_id=transfer["id"],
                defaults=defaults
            )
        else:
            obj, created = Transfer.objects.get_or_create(
                stripe_id=transfer["id"],
                event=event,
                defaults=defaults
            )
        if created and summary:
            # Itemized fee details are only captured on first creation.
            for fee in summary.get("charge_fee_details", []):
                obj.charge_fee_details.create(
                    amount=convert_amount_for_db(fee["amount"], fee["currency"]),
                    currency=fee["currency"],
                    application=fee.get("application", ""),
                    description=fee.get("description", ""),
                    kind=fee["type"]
                )
        else:
            obj.status = transfer["status"]
            obj.save()
        if event.kind == "transfer.updated":
            obj.update_status()
class TransferChargeFee(models.Model):
    """One itemized fee from a transfer summary's "charge_fee_details"."""

    transfer = models.ForeignKey(Transfer, related_name="charge_fee_details")
    amount = models.DecimalField(decimal_places=2, max_digits=9)
    currency = models.CharField(max_length=10, default="usd")
    application = models.TextField(null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    kind = models.CharField(max_length=150)  # Stripe's "type" field
    created_at = models.DateTimeField(default=timezone.now)
class Customer(StripeObject):
user = models.OneToOneField(
getattr(settings, "AUTH_USER_MODEL", "auth.User"),
null=True
)
card_fingerprint = models.CharField(max_length=200, blank=True)
card_last_4 = models.CharField(max_length=4, blank=True)
card_kind = models.CharField(max_length=50, blank=True)
date_purged = models.DateTimeField(null=True, editable=False)
objects = CustomerManager()
def __unicode__(self):
    # Display the customer as its associated Django user.
    return smart_str(self.user)
@property
def stripe_customer(self):
    # Live remote object — every access performs a Stripe API call.
    return stripe.Customer.retrieve(self.stripe_id)
def purge(self):
    """Delete the customer on Stripe and scrub local PII/card data.

    The local row is kept (with ``date_purged`` set) so history such as
    charges and invoices remains queryable.
    """
    try:
        self.stripe_customer.delete()
    except stripe.InvalidRequestError as e:
        if smart_str(e).startswith("No such customer:"):
            # The exception was thrown because the customer was already
            # deleted on the stripe side, ignore the exception
            pass
        else:
            # The exception was raised for another reason, re-raise it
            raise
    self.user = None
    self.card_fingerprint = ""
    self.card_last_4 = ""
    self.card_kind = ""
    self.date_purged = timezone.now()
    self.save()
def delete(self, using=None):
    """Soft delete: purge remote/PII data but keep the database row.

    Overrides Model.delete(); the ``using`` argument is accepted for
    signature compatibility but ignored.
    """
    # Only way to delete a customer is to use SQL
    self.purge()
def can_charge(self):
    # Chargeable only when full card details are on file and the
    # customer has not been purged. Returns a truthy/falsy chain value,
    # not necessarily a bool — callers rely on truthiness.
    return self.card_fingerprint and \
        self.card_last_4 and \
        self.card_kind and \
        self.date_purged is None
def has_active_subscription(self):
    # False when no CurrentSubscription row exists at all.
    try:
        return self.current_subscription.is_valid()
    except CurrentSubscription.DoesNotExist:
        return False
def cancel(self, at_period_end=True):
    """Cancel the subscription on Stripe and mirror the result locally.

    No-op when there is no current subscription. When
    ``at_period_end`` is True the subscription stays active until the
    end of the already-paid period. Emits the ``cancelled`` signal.
    """
    try:
        current = self.current_subscription
    except CurrentSubscription.DoesNotExist:
        return
    sub = self.stripe_customer.cancel_subscription(
        at_period_end=at_period_end
    )
    current.status = sub.status
    current.cancel_at_period_end = sub.cancel_at_period_end
    current.current_period_end = convert_tstamp(sub, "current_period_end")
    current.save()
    cancelled.send(sender=self, stripe_response=sub)
@classmethod
def create(cls, user, card=None, plan=None, charge_immediately=True):
    """Create a Stripe customer for *user* and the local mirror row.

    ``plan`` is a local plan key from PAYMENTS_PLANS; when no card/plan
    is given, DEFAULT_PLAN (if configured) is used. A trial period is
    applied when TRIAL_PERIOD_FOR_USER_CALLBACK is configured.
    """
    # Resolve the local plan key to Stripe's plan id.
    if card and plan:
        plan = PAYMENTS_PLANS[plan]["stripe_plan_id"]
    elif DEFAULT_PLAN:
        plan = PAYMENTS_PLANS[DEFAULT_PLAN]["stripe_plan_id"]
    else:
        plan = None

    trial_end = None
    if TRIAL_PERIOD_FOR_USER_CALLBACK and plan:
        trial_days = TRIAL_PERIOD_FOR_USER_CALLBACK(user)
        trial_end = datetime.datetime.utcnow() + datetime.timedelta(
            days=trial_days
        )

    stripe_customer = stripe.Customer.create(
        email=user.email,
        card=card,
        plan=plan or DEFAULT_PLAN,
        trial_end=trial_end
    )

    # Only persist card details when Stripe reports an active card.
    if stripe_customer.active_card:
        cus = cls.objects.create(
            user=user,
            stripe_id=stripe_customer.id,
            card_fingerprint=stripe_customer.active_card.fingerprint,
            card_last_4=stripe_customer.active_card.last4,
            card_kind=stripe_customer.active_card.type
        )
    else:
        cus = cls.objects.create(
            user=user,
            stripe_id=stripe_customer.id,
        )

    if plan:
        if stripe_customer.subscription:
            cus.sync_current_subscription(cu=stripe_customer)
        if charge_immediately:
            cus.send_invoice()

    return cus
def update_card(self, token):
    """Replace the customer's card on Stripe with the one represented by
    *token* (a Stripe card token), then mirror the details locally."""
    cu = self.stripe_customer
    cu.card = token
    cu.save()
    self.save_card(cu)
def save_card(self, cu=None):
    """Copy the active card details from the (optionally pre-fetched)
    Stripe customer onto this row and emit ``card_changed``."""
    cu = cu or self.stripe_customer
    active_card = cu.active_card
    self.card_fingerprint = active_card.fingerprint
    self.card_last_4 = active_card.last4
    self.card_kind = active_card.type
    self.save()
    card_changed.send(sender=self, stripe_response=cu)
def retry_unpaid_invoices(self):
    # Refresh local invoice records, then retry every open unpaid one.
    self.sync_invoices()
    for inv in self.invoices.filter(paid=False, closed=False):
        try:
            inv.retry()  # Always retry unpaid invoices
        except stripe.InvalidRequestError as error:
            # An invoice paid in the meantime is fine; anything else is real.
            if smart_str(error) != "Invoice is already paid":
                raise error
def send_invoice(self):
try:
invoice = stripe.Invoice.create(customer=self.stripe_id)
if invoice.amount_due > 0:
invoice.pay()
return True
except stripe.InvalidRequestError:
return False # There was nothing to invoice
    def sync(self, cu=None):
        """
        Pull card details from the Stripe customer onto this record and
        send ``card_changed`` when anything was updated.
        """
        cu = cu or self.stripe_customer
        updated = False
        if hasattr(cu, "active_card") and cu.active_card:
            # Test to make sure the card has changed, otherwise do not update it
            # (i.e. refrain from sending any signals)
            if (self.card_last_4 != cu.active_card.last4 or
                    self.card_fingerprint != cu.active_card.fingerprint or
                    self.card_kind != cu.active_card.type):
                updated = True
                self.card_last_4 = cu.active_card.last4
                self.card_fingerprint = cu.active_card.fingerprint
                self.card_kind = cu.active_card.type
        else:
            # NOTE(review): this branch marks the record updated (and so
            # fires card_changed below) on every sync while no card is on
            # file, even if the fields are already blank -- confirm intended.
            updated = True
            self.card_fingerprint = ""
            self.card_last_4 = ""
            self.card_kind = ""
        if updated:
            self.save()
            card_changed.send(sender=self, stripe_response=cu)
def sync_invoices(self, cu=None):
cu = cu or self.stripe_customer
for invoice in cu.invoices().data:
Invoice.sync_from_stripe_data(invoice, send_receipt=False)
def sync_charges(self, cu=None):
cu = cu or self.stripe_customer
for charge in cu.charges().data:
self.record_charge(charge.id)
    def sync_current_subscription(self, cu=None):
        """
        Mirror the customer's Stripe subscription into the local
        CurrentSubscription row, creating/updating/deleting as needed.
        Returns the CurrentSubscription, or None (implicitly) when the
        customer has no subscription on Stripe.
        """
        cu = cu or self.stripe_customer
        sub = getattr(cu, "subscription", None)
        if sub is None:
            # No subscription on Stripe: drop any stale local record.
            try:
                self.current_subscription.delete()
            except CurrentSubscription.DoesNotExist:
                pass
        else:
            try:
                # Update the existing local record in place.
                sub_obj = self.current_subscription
                sub_obj.plan = plan_from_stripe_id(sub.plan.id)
                sub_obj.current_period_start = convert_tstamp(
                    sub.current_period_start
                )
                sub_obj.current_period_end = convert_tstamp(
                    sub.current_period_end
                )
                sub_obj.amount = convert_amount_for_db(sub.plan.amount, sub.plan.currency)
                sub_obj.currency = sub.plan.currency
                sub_obj.status = sub.status
                sub_obj.cancel_at_period_end = sub.cancel_at_period_end
                sub_obj.start = convert_tstamp(sub.start)
                sub_obj.quantity = sub.quantity
                sub_obj.save()
            except CurrentSubscription.DoesNotExist:
                # First sync for this customer: create the record.
                sub_obj = CurrentSubscription.objects.create(
                    customer=self,
                    plan=plan_from_stripe_id(sub.plan.id),
                    current_period_start=convert_tstamp(
                        sub.current_period_start
                    ),
                    current_period_end=convert_tstamp(
                        sub.current_period_end
                    ),
                    amount=convert_amount_for_db(sub.plan.amount, sub.plan.currency),
                    currency=sub.plan.currency,
                    status=sub.status,
                    cancel_at_period_end=sub.cancel_at_period_end,
                    start=convert_tstamp(sub.start),
                    quantity=sub.quantity
                )
            # Trial dates are copied only when both ends are present.
            if sub.trial_start and sub.trial_end:
                sub_obj.trial_start = convert_tstamp(sub.trial_start)
                sub_obj.trial_end = convert_tstamp(sub.trial_end)
                sub_obj.save()
            return sub_obj
def update_plan_quantity(self, quantity, charge_immediately=False):
self.subscribe(
plan=plan_from_stripe_id(
self.stripe_customer.subscription.plan.id
),
quantity=quantity,
charge_immediately=charge_immediately
)
    def subscribe(self, plan, quantity=None, trial_days=None,
                  charge_immediately=True, token=None, coupon=None):
        """
        Switch the customer's Stripe subscription to ``plan`` (a local
        PAYMENTS_PLANS key) and re-sync local card/subscription state.
        Quantity defaults to PLAN_QUANTITY_CALLBACK(self) when that
        callback is configured, otherwise 1.  Returns the raw Stripe
        response.
        """
        if quantity is None:
            if PLAN_QUANTITY_CALLBACK is not None:
                quantity = PLAN_QUANTITY_CALLBACK(self)
            else:
                quantity = 1
        cu = self.stripe_customer
        subscription_params = {}
        if trial_days:
            subscription_params["trial_end"] = \
                datetime.datetime.utcnow() + datetime.timedelta(days=trial_days)
        if token:
            subscription_params["card"] = token
        subscription_params["plan"] = PAYMENTS_PLANS[plan]["stripe_plan_id"]
        subscription_params["quantity"] = quantity
        subscription_params["coupon"] = coupon
        resp = cu.update_subscription(**subscription_params)
        if token:
            # Refetch the stripe customer so we have the updated card info
            cu = self.stripe_customer
            self.save_card(cu)
        self.sync_current_subscription(cu)
        if charge_immediately:
            self.send_invoice()
        subscription_made.send(sender=self, plan=plan, stripe_response=resp)
        return resp
def charge(self, amount, currency="usd", description=None,
send_receipt=True, capture=True):
"""
This method expects `amount` to be a Decimal type representing a
dollar amount. It will be converted to cents so any decimals beyond
two will be ignored.
"""
if not isinstance(amount, decimal.Decimal):
raise ValueError(
"You must supply a decimal value representing dollars."
)
resp = stripe.Charge.create(
amount=convert_amount_for_api(amount, currency), # find the final amount
currency=currency,
customer=self.stripe_id,
description=description,
capture=capture,
)
obj = self.record_charge(resp["id"])
if send_receipt:
obj.send_receipt()
return obj
def record_charge(self, charge_id):
data = stripe.Charge.retrieve(charge_id)
return Charge.sync_from_stripe_data(data)
class CurrentSubscription(models.Model):
    """Local mirror of the customer's single Stripe subscription."""
    customer = models.OneToOneField(
        Customer,
        related_name="current_subscription",
        null=True
    )
    plan = models.CharField(max_length=100)
    quantity = models.IntegerField()
    start = models.DateTimeField()
    # trialing, active, past_due, canceled, or unpaid
    status = models.CharField(max_length=25)
    cancel_at_period_end = models.BooleanField(default=False)
    canceled_at = models.DateTimeField(blank=True, null=True)
    current_period_end = models.DateTimeField(blank=True, null=True)
    current_period_start = models.DateTimeField(blank=True, null=True)
    ended_at = models.DateTimeField(blank=True, null=True)
    trial_end = models.DateTimeField(blank=True, null=True)
    trial_start = models.DateTimeField(blank=True, null=True)
    # Per-unit amount; see total_amount for the quantity-adjusted figure.
    amount = models.DecimalField(decimal_places=2, max_digits=9)
    currency = models.CharField(max_length=10, default="usd")
    created_at = models.DateTimeField(default=timezone.now)
    @property
    def total_amount(self):
        # Subscription cost = per-unit amount times quantity.
        return self.amount * self.quantity
    def plan_display(self):
        """Human-readable plan name from PAYMENTS_PLANS."""
        return PAYMENTS_PLANS[self.plan]["name"]
    def status_display(self):
        """Stripe status rendered for display, e.g. "Past Due"."""
        return self.status.replace("_", " ").title()
    def is_period_current(self):
        return self.current_period_end > timezone.now()
    def is_status_current(self):
        return self.status in ["trialing", "active"]
    def is_valid(self):
        """True while the subscription still entitles the customer."""
        if not self.is_status_current():
            return False
        if self.cancel_at_period_end and not self.is_period_current():
            return False
        return True
    def delete(self, using=None):  # pylint: disable=E1002
        """
        Set values to None while deleting the object so that any lingering
        references will not show previous values (such as when an Event
        signal is triggered after a subscription has been deleted)
        """
        super(CurrentSubscription, self).delete(using=using)
        self.plan = None
        self.status = None
        self.quantity = 0
        self.amount = 0
class Invoice(models.Model):
    """Local mirror of a Stripe invoice and (via items) its line items."""
    stripe_id = models.CharField(max_length=255)
    customer = models.ForeignKey(Customer, related_name="invoices")
    attempted = models.NullBooleanField()
    attempts = models.PositiveIntegerField(null=True)
    closed = models.BooleanField(default=False)
    paid = models.BooleanField(default=False)
    period_end = models.DateTimeField()
    period_start = models.DateTimeField()
    subtotal = models.DecimalField(decimal_places=2, max_digits=9)
    total = models.DecimalField(decimal_places=2, max_digits=9)
    currency = models.CharField(max_length=10, default="usd")
    date = models.DateTimeField()
    # Stripe id of the associated charge, empty when uncharged.
    charge = models.CharField(max_length=50, blank=True)
    created_at = models.DateTimeField(default=timezone.now)
    class Meta:  # pylint: disable=E0012,C1001
        ordering = ["-date"]
    def retry(self):
        """Ask Stripe to retry payment; True when a retry was attempted."""
        if not self.paid and not self.closed:
            inv = stripe.Invoice.retrieve(self.stripe_id)
            inv.pay()
            return True
        return False
    def status(self):
        """Display status derived from the paid flag."""
        if self.paid:
            return "Paid"
        return "Open"
    @classmethod
    def sync_from_stripe_data(cls, stripe_invoice, send_receipt=True):
        """
        Create or update the local Invoice (and its InvoiceItems) from a
        Stripe invoice payload; also records the charge when present.
        """
        c = Customer.objects.get(stripe_id=stripe_invoice["customer"])
        period_end = convert_tstamp(stripe_invoice, "period_end")
        period_start = convert_tstamp(stripe_invoice, "period_start")
        date = convert_tstamp(stripe_invoice, "date")
        invoice, created = cls.objects.get_or_create(
            stripe_id=stripe_invoice["id"],
            defaults=dict(
                customer=c,
                attempted=stripe_invoice["attempted"],
                attempts=stripe_invoice["attempt_count"],
                closed=stripe_invoice["closed"],
                paid=stripe_invoice["paid"],
                period_end=period_end,
                period_start=period_start,
                subtotal=convert_amount_for_db(stripe_invoice["subtotal"], stripe_invoice["currency"]),
                total=convert_amount_for_db(stripe_invoice["total"], stripe_invoice["currency"]),
                currency=stripe_invoice["currency"],
                date=date,
                charge=stripe_invoice.get("charge") or ""
            )
        )
        if not created:
            # Row already existed: refresh it with the latest values.
            # pylint: disable=C0301
            invoice.attempted = stripe_invoice["attempted"]
            invoice.attempts = stripe_invoice["attempt_count"]
            invoice.closed = stripe_invoice["closed"]
            invoice.paid = stripe_invoice["paid"]
            invoice.period_end = period_end
            invoice.period_start = period_start
            invoice.subtotal = convert_amount_for_db(stripe_invoice["subtotal"], stripe_invoice["currency"])
            invoice.total = convert_amount_for_db(stripe_invoice["total"], stripe_invoice["currency"])
            invoice.currency = stripe_invoice["currency"]
            invoice.date = date
            invoice.charge = stripe_invoice.get("charge") or ""
            invoice.save()
        # Sync every line item, again with create-or-update semantics.
        for item in stripe_invoice["lines"].get("data", []):
            period_end = convert_tstamp(item["period"], "end")
            period_start = convert_tstamp(item["period"], "start")
            if item.get("plan"):
                plan = plan_from_stripe_id(item["plan"]["id"])
            else:
                plan = ""
            inv_item, inv_item_created = invoice.items.get_or_create(
                stripe_id=item["id"],
                defaults=dict(
                    amount=convert_amount_for_db(item["amount"], item["currency"]),
                    currency=item["currency"],
                    proration=item["proration"],
                    description=item.get("description") or "",
                    line_type=item["type"],
                    plan=plan,
                    period_start=period_start,
                    period_end=period_end,
                    quantity=item.get("quantity")
                )
            )
            if not inv_item_created:
                inv_item.amount = convert_amount_for_db(item["amount"], item["currency"])
                inv_item.currency = item["currency"]
                inv_item.proration = item["proration"]
                inv_item.description = item.get("description") or ""
                inv_item.line_type = item["type"]
                inv_item.plan = plan
                inv_item.period_start = period_start
                inv_item.period_end = period_end
                inv_item.quantity = item.get("quantity")
                inv_item.save()
        if stripe_invoice.get("charge"):
            # Link the charge to this invoice and optionally email a receipt.
            obj = c.record_charge(stripe_invoice["charge"])
            obj.invoice = invoice
            obj.save()
            if send_receipt:
                obj.send_receipt()
        return invoice
    @classmethod
    def handle_event(cls, event, send_receipt=SEND_EMAIL_RECEIPTS):
        """Sync the invoice referenced by a payment webhook event."""
        valid_events = ["invoice.payment_failed", "invoice.payment_succeeded"]
        if event.kind in valid_events:
            invoice_data = event.message["data"]["object"]
            stripe_invoice = stripe.Invoice.retrieve(invoice_data["id"])
            cls.sync_from_stripe_data(stripe_invoice, send_receipt=send_receipt)
class InvoiceItem(models.Model):
    """A single line item belonging to an Invoice."""
    stripe_id = models.CharField(max_length=255)
    created_at = models.DateTimeField(default=timezone.now)
    invoice = models.ForeignKey(Invoice, related_name="items")
    amount = models.DecimalField(decimal_places=2, max_digits=9)
    currency = models.CharField(max_length=10, default="usd")
    period_start = models.DateTimeField()
    period_end = models.DateTimeField()
    proration = models.BooleanField(default=False)
    line_type = models.CharField(max_length=50)
    description = models.CharField(max_length=200, blank=True)
    # Local plan key; blank for non-subscription items.
    plan = models.CharField(max_length=100, blank=True)
    quantity = models.IntegerField(null=True)
    def plan_display(self):
        """Human-readable plan name from PAYMENTS_PLANS."""
        return PAYMENTS_PLANS[self.plan]["name"]
class Charge(StripeObject):
    """Local mirror of a Stripe charge against a customer."""
    customer = models.ForeignKey(Customer, related_name="charges")
    invoice = models.ForeignKey(Invoice, null=True, related_name="charges")
    card_last_4 = models.CharField(max_length=4, blank=True)
    card_kind = models.CharField(max_length=50, blank=True)
    currency = models.CharField(max_length=10, default="usd")
    amount = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    amount_refunded = models.DecimalField(
        decimal_places=2,
        max_digits=9,
        null=True
    )
    description = models.TextField(blank=True)
    paid = models.NullBooleanField(null=True)
    disputed = models.NullBooleanField(null=True)
    refunded = models.NullBooleanField(null=True)
    captured = models.NullBooleanField(null=True)
    fee = models.DecimalField(decimal_places=2, max_digits=9, null=True)
    receipt_sent = models.BooleanField(default=False)
    charge_created = models.DateTimeField(null=True, blank=True)
    objects = ChargeManager()
    def calculate_refund_amount(self, amount=None):
        """Amount still refundable, clamped to ``amount`` when given."""
        eligible_to_refund = self.amount - (self.amount_refunded or 0)
        if amount:
            return min(eligible_to_refund, amount)
        return eligible_to_refund
    def refund(self, amount=None):
        """Refund up to ``amount`` on Stripe and re-sync this record."""
        # pylint: disable=E1121
        charge_obj = stripe.Charge.retrieve(
            self.stripe_id
        ).refund(
            amount=convert_amount_for_api(self.calculate_refund_amount(amount=amount), self.currency)
        )
        Charge.sync_from_stripe_data(charge_obj)
    def capture(self, amount=None):
        """Capture a previously authorized charge and re-sync.

        NOTE(review): the capture amount is clamped through
        calculate_refund_amount(), i.e. refund semantics -- confirm that
        is the intended bound for captures.
        """
        self.captured = True
        charge_obj = stripe.Charge.retrieve(
            self.stripe_id
        ).capture(
            amount=convert_amount_for_api(self.calculate_refund_amount(amount=amount), self.currency)
        )
        Charge.sync_from_stripe_data(charge_obj)
    @classmethod
    def sync_from_stripe_data(cls, data):
        """Create or update the local Charge row from a Stripe payload."""
        customer = Customer.objects.get(stripe_id=data["customer"])
        obj, _ = customer.charges.get_or_create(
            stripe_id=data["id"]
        )
        # Link to the local invoice when it exists.
        invoice_id = data.get("invoice", None)
        if obj.customer.invoices.filter(stripe_id=invoice_id).exists():
            obj.invoice = obj.customer.invoices.get(stripe_id=invoice_id)
        obj.card_last_4 = data["card"]["last4"]
        obj.card_kind = data["card"]["type"]
        obj.currency = data["currency"]
        obj.amount = convert_amount_for_db(data["amount"], obj.currency)
        obj.paid = data["paid"]
        obj.refunded = data["refunded"]
        obj.captured = data["captured"]
        obj.fee = convert_amount_for_db(data["fee"])  # assume in usd only
        obj.disputed = data["dispute"] is not None
        obj.charge_created = convert_tstamp(data, "created")
        if data.get("description"):
            obj.description = data["description"]
        if data.get("amount_refunded"):
            # pylint: disable=C0301
            obj.amount_refunded = convert_amount_for_db(data["amount_refunded"], obj.currency)
        if data["refunded"]:
            # Fully refunded: mirror the full amount as refunded.
            obj.amount_refunded = obj.amount
        obj.save()
        return obj
    def send_receipt(self):
        """Email a receipt for this charge, at most once per charge."""
        if not self.receipt_sent:
            site = Site.objects.get_current()
            protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
            ctx = {
                "charge": self,
                "site": site,
                "protocol": protocol,
            }
            subject = render_to_string("payments/email/subject.txt", ctx)
            subject = subject.strip()
            message = render_to_string("payments/email/body.txt", ctx)
            num_sent = EmailMessage(
                subject,
                message,
                to=[self.customer.user.email],
                from_email=INVOICE_FROM_EMAIL
            ).send()
            # Only mark sent when at least one message actually went out.
            self.receipt_sent = num_sent > 0
            self.save()
| mit |
NoXPhasma/systemd | tools/make-directive-index.py | 86 | 10995 | # -*- Mode: python; coding: utf-8; indent-tabs-mode: nil -*- */
#
# This file is part of systemd.
#
# Copyright 2012-2013 Zbigniew Jędrzejewski-Szmek
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
import sys
import collections
import re
from xml_helper import *
from copy import deepcopy
TEMPLATE = '''\
<refentry id="systemd.directives" conditional="HAVE_PYTHON">
<refentryinfo>
<title>systemd.directives</title>
<productname>systemd</productname>
<authorgroup>
<author>
<contrib>Developer</contrib>
<firstname>Zbigniew</firstname>
<surname>Jędrzejewski-Szmek</surname>
<email>zbyszek@in.waw.pl</email>
</author>
</authorgroup>
</refentryinfo>
<refmeta>
<refentrytitle>systemd.directives</refentrytitle>
<manvolnum>7</manvolnum>
</refmeta>
<refnamediv>
<refname>systemd.directives</refname>
<refpurpose>Index of configuration directives</refpurpose>
</refnamediv>
<refsect1>
<title>Unit directives</title>
<para>Directives for configuring units, used in unit
files.</para>
<variablelist id='unit-directives' />
</refsect1>
<refsect1>
<title>Options on the kernel command line</title>
<para>Kernel boot options for configuring the behaviour of the
systemd process.</para>
<variablelist id='kernel-commandline-options' />
</refsect1>
<refsect1>
<title>Environment variables</title>
<para>Environment variables understood by the systemd
manager and other programs.</para>
<variablelist id='environment-variables' />
</refsect1>
<refsect1>
<title>UDEV directives</title>
<para>Directives for configuring systemd units through the
udev database.</para>
<variablelist id='udev-directives' />
</refsect1>
<refsect1>
<title>Network directives</title>
<para>Directives for configuring network links through the
net-setup-link udev builtin and networks through
systemd-networkd.</para>
<variablelist id='network-directives' />
</refsect1>
<refsect1>
<title>Journal fields</title>
<para>Fields in the journal events with a well known meaning.</para>
<variablelist id='journal-directives' />
</refsect1>
<refsect1>
<title>PAM configuration directives</title>
<para>Directives for configuring PAM behaviour.</para>
<variablelist id='pam-directives' />
</refsect1>
<refsect1>
<title><filename>/etc/crypttab</filename> and
<filename>/etc/fstab</filename> options</title>
<para>Options which influence mounted filesystems and
encrypted volumes.</para>
<variablelist id='fstab-options' />
</refsect1>
<refsect1>
<title>System manager directives</title>
<para>Directives for configuring the behaviour of the
systemd process.</para>
<variablelist id='systemd-directives' />
</refsect1>
<refsect1>
<title>bootchart.conf directives</title>
<para>Directives for configuring the behaviour of the
systemd-bootchart process.</para>
<variablelist id='bootchart-directives' />
</refsect1>
<refsect1>
<title>command line options</title>
<para>Command-line options accepted by programs in the
systemd suite.</para>
<variablelist id='options' />
</refsect1>
<refsect1>
<title>Constants</title>
<para>Various constant used and/or defined by systemd.</para>
<variablelist id='constants' />
</refsect1>
<refsect1>
<title>Miscellaneous options and directives</title>
<para>Other configuration elements which don't fit in
any of the above groups.</para>
<variablelist id='miscellaneous' />
</refsect1>
<refsect1>
<title>Files and directories</title>
<para>Paths and file names referred to in the
documentation.</para>
<variablelist id='filenames' />
</refsect1>
<refsect1>
<title>Colophon</title>
<para id='colophon' />
</refsect1>
</refentry>
'''
COLOPHON = '''\
This index contains {count} entries in {sections} sections,
referring to {pages} individual manual pages.
'''
def _extract_directives(directive_groups, formatting, page):
    """Collect directive, filename and constant names from one manpage.
    Appends (pagename, manvolume) pairs into the appropriate group in
    directive_groups and stores a display element for each name in
    formatting.
    """
    t = xml_parse(page)
    section = t.find('./refmeta/manvolnum').text
    pagename = t.find('./refmeta/refentrytitle').text
    storopt = directive_groups['options']
    for variablelist in t.iterfind('.//variablelist'):
        klass = variablelist.attrib.get('class')
        storvar = directive_groups[klass or 'miscellaneous']
        # <option>s go in OPTIONS, unless class is specified
        for xpath, stor in (('./varlistentry/term/varname', storvar),
                            ('./varlistentry/term/option',
                             storvar if klass else storopt)):
            for name in variablelist.iterfind(xpath):
                # Keep only the name up to (and including) '=' or a space.
                text = re.sub(r'([= ]).*', r'\1', name.text).rstrip()
                stor[text].append((pagename, section))
                if text not in formatting:
                    # use element as formatted display
                    if name.text[-1] in '= ':
                        name.clear()
                    else:
                        name.tail = ''
                    name.text = text
                    formatting[text] = name
    storfile = directive_groups['filenames']
    for xpath, absolute_only in (('.//refsynopsisdiv//filename', False),
                                 ('.//refsynopsisdiv//command', False),
                                 ('.//filename', True)):
        for name in t.iterfind(xpath):
            # Outside the synopsis only absolute paths are indexed.
            if absolute_only and not (name.text and name.text.startswith('/')):
                continue
            if name.attrib.get('noindex'):
                continue
            name.tail = ''
            if name.text:
                if name.text.endswith('*'):
                    name.text = name.text[:-1]
                if not name.text.startswith('.'):
                    text = name.text.partition(' ')[0]
                    if text != name.text:
                        name.clear()
                        name.text = text
                    if text.endswith('/'):
                        text = text[:-1]
                    storfile[text].append((pagename, section))
                    if text not in formatting:
                        # use element as formatted display
                        formatting[text] = name
            else:
                # No direct text: use the concatenated child text instead.
                text = ' '.join(name.itertext())
                storfile[text].append((pagename, section))
                formatting[text] = name
    storfile = directive_groups['constants']
    for name in t.iterfind('.//constant'):
        if name.attrib.get('noindex'):
            continue
        name.tail = ''
        if name.text.startswith('('): # a cast, strip it
            name.text = name.text.partition(' ')[2]
        storfile[name.text].append((pagename, section))
        formatting[name.text] = name
def _make_section(template, name, directives, formatting):
    """Fill the template's variablelist with id ``name`` from ``directives``."""
    varlist = template.find(".//*[@id='{}']".format(name))
    for varname, manpages in sorted(directives.items()):
        entry = tree.SubElement(varlist, 'varlistentry')
        term = tree.SubElement(entry, 'term')
        # Copy the stored display element so the original is not mutated.
        display = deepcopy(formatting[varname])
        term.append(display)
        para = tree.SubElement(tree.SubElement(entry, 'listitem'), 'para')
        # De-duplicate and sort the referencing manpages, comma-separated.
        b = None
        for manpage, manvolume in sorted(set(manpages)):
            if b is not None:
                b.tail = ', '
            b = tree.SubElement(para, 'citerefentry')
            c = tree.SubElement(b, 'refentrytitle')
            c.text = manpage
            d = tree.SubElement(b, 'manvolnum')
            d.text = manvolume
        entry.tail = '\n\n'
def _make_colophon(template, groups):
    """Fill the colophon paragraph with entry/section/page statistics."""
    groups = list(groups)
    total_entries = sum(len(group) for group in groups)
    pages = set()
    for group in groups:
        for pagelist in group.values():
            pages.update(pagelist)
    para = template.find(".//para[@id='colophon']")
    para.text = COLOPHON.format(count=total_entries,
                                sections=len(groups),
                                pages=len(pages))
def _make_page(template, directive_groups, formatting):
    """Fill ``template`` from directive_groups and return it.
    directive_groups = {
       'class': {'variable': [('manpage', 'manvolume'), ...],
                 'variable2': ...},
       ...
    }
    """
    for group_name, group_directives in directive_groups.items():
        _make_section(template, group_name, group_directives, formatting)
    _make_colophon(template, directive_groups.values())
    return template
def make_page(*xml_files):
    """Extract directives from xml_files and return the XML index tree."""
    template = tree.fromstring(TEMPLATE)
    group_ids = [varlist.get('id')
                 for varlist in template.iterfind('.//variablelist')]
    directive_groups = dict((group_id, collections.defaultdict(list))
                            for group_id in group_ids)
    formatting = {}
    for page in xml_files:
        try:
            _extract_directives(directive_groups, formatting, page)
        except Exception:
            raise ValueError("failed to process " + page)
    return _make_page(template, directive_groups, formatting)
if __name__ == '__main__':
    # Usage: make-directive-index.py OUTPUT-XML INPUT-XML...
    with open(sys.argv[1], 'wb') as f:
        f.write(xml_print(make_page(*sys.argv[2:])))
| gpl-2.0 |
nysan/yocto-autobuilder | lib/python2.6/site-packages/SQLAlchemy-0.7.1-py2.6-linux-x86_64.egg/sqlalchemy/sql/compiler.py | 8 | 65515 | # sql/compiler.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`~sqlalchemy.sql.compiler.SQLCompiler` - renders SQL
strings
:class:`~sqlalchemy.sql.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`~sqlalchemy.sql.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:module:`~sqlalchemy.ext.compiler`.
"""
import re
from sqlalchemy import schema, engine, util, exc
from sqlalchemy.sql import operators, functions, util as sql_util, \
visitors
from sqlalchemy.sql import expression as sql
import decimal
# Words that may not be used as bare identifiers and must be quoted.
RESERVED_WORDS = set([
    'all', 'analyse', 'analyze', 'and', 'any', 'array',
    'as', 'asc', 'asymmetric', 'authorization', 'between',
    'binary', 'both', 'case', 'cast', 'check', 'collate',
    'column', 'constraint', 'create', 'cross', 'current_date',
    'current_role', 'current_time', 'current_timestamp',
    'current_user', 'default', 'deferrable', 'desc',
    'distinct', 'do', 'else', 'end', 'except', 'false',
    'for', 'foreign', 'freeze', 'from', 'full', 'grant',
    'group', 'having', 'ilike', 'in', 'initially', 'inner',
    'intersect', 'into', 'is', 'isnull', 'join', 'leading',
    'left', 'like', 'limit', 'localtime', 'localtimestamp',
    'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
    'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
    'placing', 'primary', 'references', 'right', 'select',
    'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
    'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
    'using', 'verbose', 'when', 'where'])
# Identifiers matching this pattern need no quoting.
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in xrange(0, 10)]).union(['$'])
# ":name" bind markers in text() SQL, and their backslash-escaped form.
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE)
# Rendered bind placeholder, keyed by DBAPI paramstyle.
BIND_TEMPLATES = {
    'pyformat':"%%(%(name)s)s",
    'qmark':"?",
    'format':"%%s",
    'numeric':":%(position)s",
    'named':":%(name)s"
}
# Operator object -> SQL text fragment.
OPERATORS = {
    # binary
    operators.and_ : ' AND ',
    operators.or_ : ' OR ',
    operators.add : ' + ',
    operators.mul : ' * ',
    operators.sub : ' - ',
    # Py2K
    operators.div : ' / ',
    # end Py2K
    operators.mod : ' % ',
    operators.truediv : ' / ',
    operators.neg : '-',
    operators.lt : ' < ',
    operators.le : ' <= ',
    operators.ne : ' != ',
    operators.gt : ' > ',
    operators.ge : ' >= ',
    operators.eq : ' = ',
    operators.concat_op : ' || ',
    operators.between_op : ' BETWEEN ',
    operators.match_op : ' MATCH ',
    operators.in_op : ' IN ',
    operators.notin_op : ' NOT IN ',
    operators.comma_op : ', ',
    operators.from_ : ' FROM ',
    operators.as_ : ' AS ',
    operators.is_ : ' IS ',
    operators.isnot : ' IS NOT ',
    operators.collate : ' COLLATE ',
    # unary
    operators.exists : 'EXISTS ',
    operators.distinct_op : 'DISTINCT ',
    operators.inv : 'NOT ',
    # modifiers
    operators.desc_op : ' DESC',
    operators.asc_op : ' ASC',
    operators.nullsfirst_op : ' NULLS FIRST',
    operators.nullslast_op : ' NULLS LAST',
}
# Generic SQL function object -> rendered text ('%(expr)s' = argument list).
FUNCTIONS = {
    functions.coalesce : 'coalesce%(expr)s',
    functions.current_date: 'CURRENT_DATE',
    functions.current_time: 'CURRENT_TIME',
    functions.current_timestamp: 'CURRENT_TIMESTAMP',
    functions.current_user: 'CURRENT_USER',
    functions.localtime: 'LOCALTIME',
    functions.localtimestamp: 'LOCALTIMESTAMP',
    functions.random: 'random%(expr)s',
    functions.sysdate: 'sysdate',
    functions.session_user :'SESSION_USER',
    functions.user: 'USER'
}
# Fields accepted by EXTRACT(), mapped to their rendered names.
EXTRACT_MAP = {
    'month': 'month',
    'day': 'day',
    'year': 'year',
    'second': 'second',
    'hour': 'hour',
    'doy': 'doy',
    'minute': 'minute',
    'quarter': 'quarter',
    'dow': 'dow',
    'week': 'week',
    'epoch': 'epoch',
    'milliseconds': 'milliseconds',
    'microseconds': 'microseconds',
    'timezone_hour': 'timezone_hour',
    'timezone_minute': 'timezone_minute'
}
# CompoundSelect keyword constants -> SQL set-operation keywords.
COMPOUND_KEYWORDS = {
    sql.CompoundSelect.UNION : 'UNION',
    sql.CompoundSelect.UNION_ALL : 'UNION ALL',
    sql.CompoundSelect.EXCEPT : 'EXCEPT',
    sql.CompoundSelect.EXCEPT_ALL : 'EXCEPT ALL',
    sql.CompoundSelect.INTERSECT : 'INTERSECT',
    sql.CompoundSelect.INTERSECT_ALL : 'INTERSECT ALL'
}
class _CompileLabel(visitors.Visitable):
    """lightweight label object which acts as an expression._Label."""
    # Dispatched by the compiler visitor machinery as a 'label' node.
    __visit_name__ = 'label'
    __slots__ = 'element', 'name'
    def __init__(self, col, name):
        self.element = col
        self.name = name
    @property
    def type(self):
        # Delegate the type to the wrapped column expression.
        return self.element.type
    @property
    def quote(self):
        # Delegate the quoting preference to the wrapped column expression.
        return self.element.quote
class SQLCompiler(engine.Compiled):
    """Default implementation of Compiled.
    Compiles ClauseElements into SQL strings. Uses a similar visit
    paradigm as visitors.ClauseVisitor but implements its own traversal.
    """
    extract_map = EXTRACT_MAP
    compound_keywords = COMPOUND_KEYWORDS
    # class-level defaults which can be set at the instance
    # level to define if this Compiled instance represents
    # INSERT/UPDATE/DELETE
    isdelete = isinsert = isupdate = False
    # holds the "returning" collection of columns if
    # the statement is CRUD and defines returning columns
    # either implicitly or explicitly
    returning = None
    # set to True classwide to generate RETURNING
    # clauses before the VALUES or WHERE clause (i.e. MSSQL)
    returning_precedes_values = False
    # SQL 92 doesn't allow bind parameters to be used
    # in the columns clause of a SELECT, nor does it allow
    # ambiguous expressions like "? = ?". A compiler
    # subclass can set this flag to False if the target
    # driver/DB enforces this
    ansi_bind_rules = False
    def __init__(self, dialect, statement, column_keys=None,
                    inline=False, **kwargs):
        """Construct a new ``DefaultCompiler`` object.
        dialect
          Dialect to be used
        statement
          ClauseElement to be compiled
        column_keys
          a list of column names to be compiled into an INSERT or UPDATE
          statement.
        """
        self.column_keys = column_keys
        # compile INSERT/UPDATE defaults/sequences inlined (no pre-
        # execute)
        self.inline = inline or getattr(statement, 'inline', False)
        # a dictionary of bind parameter keys to _BindParamClause
        # instances.
        self.binds = {}
        # a dictionary of _BindParamClause instances to "compiled" names
        # that are actually present in the generated SQL
        self.bind_names = util.column_dict()
        # stack which keeps track of nested SELECT statements
        self.stack = []
        # relates label names in the final SQL to a tuple of local
        # column/label name, ColumnElement object (if any) and
        # TypeEngine. ResultProxy uses this for type processing and
        # column targeting
        self.result_map = {}
        # true if the paramstyle is positional
        self.positional = dialect.positional
        if self.positional:
            self.positiontup = []
        self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
        # an IdentifierPreparer that formats the quoting of identifiers
        self.preparer = dialect.identifier_preparer
        # label truncation limit; falls back to the dialect's
        # max identifier length when no explicit label_length is set
        self.label_length = dialect.label_length \
            or dialect.max_identifier_length
        # a map which tracks "anonymous" identifiers that are created on
        # the fly here
        self.anon_map = util.PopulateDict(self._process_anon)
        # a map which tracks "truncated" names based on
        # dialect.label_length or dialect.max_identifier_length
        self.truncated_names = {}
        # NOTE(review): the base class __init__ appears to drive the
        # actual compilation pass -- confirm in engine.Compiled.
        engine.Compiled.__init__(self, dialect, statement, **kwargs)
    @util.memoized_property
    def _bind_processors(self):
        # Final bind name -> the type's bind processor, for every
        # parameter whose type defines one; computed once (memoized).
        return dict(
            (key, value) for key, value in
            ( (self.bind_names[bindparam],
               bindparam.type._cached_bind_processor(self.dialect))
              for bindparam in self.bind_names )
            if value is not None
        )
    def is_subquery(self):
        # More than one entry on the stack means we are inside a
        # nested SELECT.
        return len(self.stack) > 1
    @property
    def sql_compiler(self):
        # A SQLCompiler is its own string-SQL compiler.
        return self
    def construct_params(self, params=None, _group_number=None):
        """return a dictionary of bind parameter keys and values"""
        if params:
            pd = {}
            for bindparam, name in self.bind_names.iteritems():
                # Prefer a caller-supplied value, keyed either by the
                # original bindparam key or by the compiled name.
                if bindparam.key in params:
                    pd[name] = params[bindparam.key]
                elif name in params:
                    pd[name] = params[name]
                elif bindparam.required:
                    if _group_number:
                        raise exc.InvalidRequestError(
                            "A value is required for bind parameter %r, "
                            "in parameter group %d" %
                            (bindparam.key, _group_number))
                    else:
                        raise exc.InvalidRequestError(
                            "A value is required for bind parameter %r"
                            % bindparam.key)
                elif bindparam.callable:
                    pd[name] = bindparam.callable()
                else:
                    pd[name] = bindparam.value
            return pd
        else:
            # No overriding params: use each parameter's callable/value.
            pd = {}
            for bindparam in self.bind_names:
                if bindparam.callable:
                    pd[self.bind_names[bindparam]] = bindparam.callable()
                else:
                    pd[self.bind_names[bindparam]] = bindparam.value
            return pd
    # read-only attribute view over construct_params()
    params = property(construct_params, doc="""
        Return the bind params for this compiled object.
    """)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
    def visit_label(self, label, result_map=None,
                    within_label_clause=False,
                    within_columns_clause=False, **kw):
        # only render labels within the columns clause
        # or ORDER BY clause of a select. dialect-specific compilers
        # can modify this behavior.
        if within_columns_clause and not within_label_clause:
            # Auto-generated label names may exceed the dialect's
            # identifier limit and are truncated deterministically.
            if isinstance(label.name, sql._generated_label):
                labelname = self._truncated_identifier("colident", label.name)
            else:
                labelname = label.name
            if result_map is not None:
                result_map[labelname.lower()] = \
                        (label.name, (label, label.element, labelname),\
                        label.type)
            # Render "<element> AS <label>".
            return label.element._compiler_dispatch(self,
                                    within_columns_clause=True,
                                    within_label_clause=True,
                                    **kw) + \
                        OPERATORS[operators.as_] + \
                        self.preparer.format_label(label, labelname)
        else:
            # Elsewhere the label collapses to its bare element.
            return label.element._compiler_dispatch(self,
                                    within_columns_clause=False,
                                    **kw)
    def visit_column(self, column, result_map=None, **kwargs):
        """Render a column reference, table-qualified (and
        schema-qualified) when the owning table renders with named
        columns."""
        name = column.name
        if name is None:
            raise exc.CompileError("Cannot compile Column object until "
                                   "it's 'name' is assigned.")
        is_literal = column.is_literal
        if not is_literal and isinstance(name, sql._generated_label):
            # generated names are truncated to the dialect's limits
            name = self._truncated_identifier("colident", name)
        if result_map is not None:
            # register for result-row targeting
            result_map[name.lower()] = (name, (column, ), column.type)
        if is_literal:
            # literal_column() text is emitted as-is, modulo escaping
            name = self.escape_literal_column(name)
        else:
            name = self.preparer.quote(name, column.quote)
        table = column.table
        if table is None or not table.named_with_column:
            return name
        else:
            if table.schema:
                schema_prefix = self.preparer.quote_schema(
                                    table.schema,
                                    table.quote_schema) + '.'
            else:
                schema_prefix = ''
            tablename = table.name
            if isinstance(tablename, sql._generated_label):
                # anonymous alias names are truncated as well
                tablename = self._truncated_identifier("alias", tablename)
            return schema_prefix + \
                    self.preparer.quote(tablename, table.quote) + \
                    "." + name
def escape_literal_column(self, text):
"""provide escaping for the literal_column() construct."""
# TODO: some dialects might need different behavior here
return text.replace('%', '%%')
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kwargs):
return self.dialect.type_compiler.process(typeclause.type)
    def post_process_text(self, text):
        # Hook for dialects to rewrite raw SQL text fragments; the base
        # compiler passes the text through unchanged.
        return text
    def visit_textclause(self, textclause, **kwargs):
        """Render a text() construct, substituting ``:name`` tokens with
        bind parameters and populating the result map from its typemap."""
        if textclause.typemap is not None:
            for colname, type_ in textclause.typemap.iteritems():
                self.result_map[colname.lower()] = (colname, None, type_)
        def do_bindparam(m):
            # replace each :name token with a rendered bind parameter;
            # explicit bindparams() take precedence over generated ones
            name = m.group(1)
            if name in textclause.bindparams:
                return self.process(textclause.bindparams[name])
            else:
                return self.bindparam_string(name)
        # un-escape any \:params
        return BIND_PARAMS_ESC.sub(lambda m: m.group(1),
                BIND_PARAMS.sub(do_bindparam,
                    self.post_process_text(textclause.text))
        )
def visit_null(self, expr, **kw):
return 'NULL'
def visit_true(self, expr, **kw):
return 'true'
def visit_false(self, expr, **kw):
return 'false'
def visit_clauselist(self, clauselist, **kwargs):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return sep.join(
s for s in
(c._compiler_dispatch(self, **kwargs)
for c in clauselist.clauses)
if s is not None)
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += "WHEN " + cond._compiler_dispatch(
self, **kwargs
) + " THEN " + result._compiler_dispatch(
self, **kwargs) + " "
if clause.else_ is not None:
x += "ELSE " + clause.else_._compiler_dispatch(
self, **kwargs
) + " "
x += "END"
return x
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs))
def visit_over(self, over, **kwargs):
x ="%s OVER (" % over.func._compiler_dispatch(self, **kwargs)
if over.partition_by is not None:
x += "PARTITION BY %s" % \
over.partition_by._compiler_dispatch(self, **kwargs)
if over.order_by is not None:
x += " "
if over.order_by is not None:
x += "ORDER BY %s" % \
over.order_by._compiler_dispatch(self, **kwargs)
x += ")"
return x
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (field,
extract.expr._compiler_dispatch(self, **kwargs))
    def visit_function(self, func, result_map=None, **kwargs):
        """Render a SQL function call, preferring a dialect-specific
        ``visit_<name>_func`` hook when one is defined."""
        if result_map is not None:
            # register the function name for result-row targeting
            result_map[func.name.lower()] = (func.name, None, func.type)
        disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
        if disp:
            return disp(func, **kwargs)
        else:
            # fall back to a generic "<name>(<args>)" template, with
            # optional package-name qualification
            name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s")
            return ".".join(list(func.packagenames) + [name]) % \
                            {'expr':self.function_argspec(func, **kwargs)}
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments." % self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
    def visit_compound_select(self, cs, asfrom=False,
                            parens=True, compound_index=1, **kwargs):
        """Render a compound select (UNION/INTERSECT/EXCEPT), joining the
        member selects with the compound keyword and appending shared
        GROUP BY / ORDER BY / LIMIT clauses."""
        entry = self.stack and self.stack[-1] or {}
        # propagate the enclosing FROM context to the member selects
        self.stack.append({'from':entry.get('from', None), 'iswrapper':True})
        keyword = self.compound_keywords.get(cs.keyword)
        text = (" " + keyword + " ").join(
                    (c._compiler_dispatch(self,
                                    asfrom=asfrom, parens=False,
                                    compound_index=i, **kwargs)
                    for i, c in enumerate(cs.selects))
                )
        group_by = cs._group_by_clause._compiler_dispatch(
                                self, asfrom=asfrom, **kwargs)
        if group_by:
            text += " GROUP BY " + group_by
        text += self.order_by_clause(cs, **kwargs)
        # LIMIT/OFFSET applies to the compound statement as a whole
        text += (cs._limit is not None or cs._offset is not None) and \
                        self.limit_clause(cs) or ""
        self.stack.pop(-1)
        if asfrom and parens:
            return "(" + text + ")"
        else:
            return text
def visit_unary(self, unary, **kw):
s = unary.element._compiler_dispatch(self, **kw)
if unary.operator:
s = OPERATORS[unary.operator] + s
if unary.modifier:
s = s + OPERATORS[unary.modifier]
return s
    def visit_binary(self, binary, **kw):
        """Render a binary expression, dispatching on its operator."""
        # don't allow "? = ?" to render
        if self.ansi_bind_rules and \
            isinstance(binary.left, sql._BindParamClause) and \
            isinstance(binary.right, sql._BindParamClause):
            # both sides are bind parameters under ANSI bind rules:
            # render the values inline instead of as placeholders
            kw['literal_binds'] = True
        return self._operator_dispatch(binary.operator,
                    binary,
                    lambda opstr: binary.left._compiler_dispatch(self, **kw) +
                                        opstr +
                                    binary.right._compiler_dispatch(
                                            self, **kw),
                    **kw
        )
def visit_like_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return '%s LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
def visit_notlike_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
def visit_ilike_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
def visit_notilike_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) NOT LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
    def _operator_dispatch(self, operator, element, fn, **kw):
        """Dispatch a callable operator to a ``visit_<op>`` method when
        one exists; otherwise render via *fn* with the operator's string
        form."""
        if util.callable(operator):
            disp = getattr(self, "visit_%s" % operator.__name__, None)
            if disp:
                return disp(element, **kw)
            else:
                return fn(OPERATORS[operator])
        else:
            # a plain-string operator is emitted verbatim, space-padded
            return fn(" " + operator + " ")
    def visit_bindparam(self, bindparam, within_columns_clause=False,
                                            literal_binds=False, **kwargs):
        """Render a bind parameter as a placeholder token, or inline as a
        literal when literal rendering is requested or required by ANSI
        bind rules.  Raises CompileError on bind-name conflicts."""
        if literal_binds or \
            (within_columns_clause and \
                self.ansi_bind_rules):
            if bindparam.value is None:
                raise exc.CompileError("Bind parameter without a "
                                        "renderable value not allowed here.")
            return self.render_literal_bindparam(bindparam,
                            within_columns_clause=True, **kwargs)
        name = self._truncate_bindparam(bindparam)
        if name in self.binds:
            existing = self.binds[name]
            if existing is not bindparam:
                # two distinct bindparam objects sharing one name: only
                # legal if neither is unique nor generated by crud
                if existing.unique or bindparam.unique:
                    raise exc.CompileError(
                            "Bind parameter '%s' conflicts with "
                            "unique bind parameter of the same name" %
                            bindparam.key
                        )
                elif getattr(existing, '_is_crud', False) or \
                        getattr(bindparam, '_is_crud', False):
                    raise exc.CompileError(
                            "bindparam() name '%s' is reserved "
                            "for automatic usage in the VALUES or SET "
                            "clause of this "
                            "insert/update statement. Please use a "
                            "name other than column name when using bindparam() "
                            "with insert() or update() (for example, 'b_%s')."
                            % (bindparam.key, bindparam.key)
                        )
        # register under both the original key and the truncated name
        self.binds[bindparam.key] = self.binds[name] = bindparam
        return self.bindparam_string(name)
    def render_literal_bindparam(self, bindparam, **kw):
        """Render a bind parameter's value as an inline SQL literal."""
        value = bindparam.value
        # apply the type's bind processor before quoting, if any
        processor = bindparam.type._cached_bind_processor(self.dialect)
        if processor:
            value = processor(value)
        return self.render_literal_value(value, bindparam.type)
    def render_literal_value(self, value, type_):
        """Render the value of a bind parameter as a quoted literal.

        This is used for statement sections that do not accept bind
        paramters on the target driver/database.

        This should be implemented by subclasses using the quoting
        services of the DBAPI.
        """
        # NOTE: Python 2 code path — basestring/long checks
        if isinstance(value, basestring):
            # escape embedded single quotes by doubling
            value = value.replace("'", "''")
            return "'%s'" % value
        elif value is None:
            return "NULL"
        elif isinstance(value, (float, int, long)):
            return repr(value)
        elif isinstance(value, decimal.Decimal):
            return str(value)
        else:
            raise NotImplementedError(
                        "Don't know how to literal-quote value %r" % value)
    def _truncate_bindparam(self, bindparam):
        # Map a bind parameter object to its (possibly truncated) render
        # name, caching the result in self.bind_names.
        if bindparam in self.bind_names:
            return self.bind_names[bindparam]
        bind_name = bindparam.key
        if isinstance(bind_name, sql._generated_label):
            # generated names are truncated to the dialect's limits
            bind_name = self._truncated_identifier("bindparam", bind_name)
        # add to bind_names for translation
        self.bind_names[bindparam] = bind_name
        return bind_name
    def _truncated_identifier(self, ident_class, name):
        """Return *name* truncated to the dialect's label length, with a
        hex counter suffix for uniqueness; results are cached per
        (ident_class, name)."""
        if (ident_class, name) in self.truncated_names:
            return self.truncated_names[(ident_class, name)]
        anonname = name % self.anon_map
        if len(anonname) > self.label_length:
            # NOTE: the per-class counter shares the truncated_names
            # dict, keyed by the bare ident_class string (tuples are
            # used for cached names, so there is no collision)
            counter = self.truncated_names.get(ident_class, 1)
            truncname = anonname[0:max(self.label_length - 6, 0)] + \
                                "_" + hex(counter)[2:]
            self.truncated_names[ident_class] = counter + 1
        else:
            truncname = anonname
        self.truncated_names[(ident_class, name)] = truncname
        return truncname
def _anonymize(self, name):
return name % self.anon_map
def _process_anon(self, key):
(ident, derived) = key.split(' ', 1)
anonymous_counter = self.anon_map.get(derived, 1)
self.anon_map[derived] = anonymous_counter + 1
return derived + "_" + str(anonymous_counter)
def bindparam_string(self, name):
if self.positional:
self.positiontup.append(name)
return self.bindtemplate % {
'name':name, 'position':len(self.positiontup)}
else:
return self.bindtemplate % {'name':name}
    def visit_alias(self, alias, asfrom=False, ashint=False,
                                fromhints=None, **kwargs):
        """Render an alias: ``<original> AS <name>`` in FROM context, the
        bare alias name as a hint target, otherwise just the aliased
        element."""
        if asfrom or ashint:
            if isinstance(alias.name, sql._generated_label):
                # anonymous alias names are truncated to dialect limits
                alias_name = self._truncated_identifier("alias", alias.name)
            else:
                alias_name = alias.name
            if ashint:
                return self.preparer.format_alias(alias, alias_name)
            elif asfrom:
                ret = alias.original._compiler_dispatch(self,
                                asfrom=True, **kwargs) + \
                                " AS " + \
                    self.preparer.format_alias(alias, alias_name)
                if fromhints and alias in fromhints:
                    # append any dialect-specific FROM-element hint
                    hinttext = self.get_from_hint_text(alias, fromhints[alias])
                    if hinttext:
                        ret += " " + hinttext
                return ret
        else:
            return alias.original._compiler_dispatch(self, **kwargs)
    def label_select_column(self, select, column, asfrom):
        """label columns present in a select()."""
        if isinstance(column, sql._Label):
            # already labeled
            return column
        elif select is not None and select.use_labels and column._label:
            # apply the select's generated label
            return _CompileLabel(column, column._label)
        elif \
            asfrom and \
            isinstance(column, sql.ColumnClause) and \
            not column.is_literal and \
            column.table is not None and \
            not isinstance(column.table, sql.Select):
            # a plain table column rendered as a FROM element: label it
            # by its own name
            return _CompileLabel(column, sql._generated_label(column.name))
        elif not isinstance(column,
                            (sql._UnaryExpression, sql._TextClause)) \
                and (not hasattr(column, 'name') or \
                        isinstance(column, sql.Function)):
            # expressions without a usable name get an anonymous label
            return _CompileLabel(column, column.anon_label)
        else:
            return column
    def get_select_hint_text(self, byfroms):
        # Hook for dialects supporting statement-level hints; the base
        # compiler emits none.
        return None
    def get_from_hint_text(self, table, text):
        # Hook for dialects supporting per-FROM-element hints; the base
        # compiler emits none.
        return None
    def visit_select(self, select, asfrom=False, parens=True,
                            iswrapper=False, fromhints=None,
                            compound_index=1, **kwargs):
        """Render a full SELECT statement: hints, prefixes, column list,
        FROM, WHERE, GROUP BY, HAVING, ORDER BY, LIMIT/OFFSET and
        FOR UPDATE."""
        entry = self.stack and self.stack[-1] or {}
        existingfroms = entry.get('from', None)
        froms = select._get_display_froms(existingfroms)
        correlate_froms = set(sql._from_objects(*froms))
        # TODO: might want to propagate existing froms for
        # select(select(select)) where innermost select should correlate
        # to outermost if existingfroms: correlate_froms =
        # correlate_froms.union(existingfroms)
        self.stack.append({'from': correlate_froms, 'iswrapper'
                            : iswrapper})
        # only the outermost (or wrapper) select populates the result map
        if compound_index==1 and not entry or entry.get('iswrapper', False):
            column_clause_args = {'result_map':self.result_map}
        else:
            column_clause_args = {}
        # the actual list of columns to print in the SELECT column list.
        inner_columns = [
            c for c in [
                self.label_select_column(select, co, asfrom=asfrom).\
                    _compiler_dispatch(self,
                        within_columns_clause=True,
                        **column_clause_args)
                for co in util.unique_list(select.inner_columns)
            ]
            if c is not None
        ]
        text = "SELECT "  # we're off to a good start !
        if select._hints:
            # collect per-FROM hint text for froms matching this dialect
            byfrom = dict([
                        (from_, hinttext % {
                            'name':from_._compiler_dispatch(
                                self, ashint=True)
                        })
                        for (from_, dialect), hinttext in
                        select._hints.iteritems()
                        if dialect in ('*', self.dialect.name)
                    ])
            hint_text = self.get_select_hint_text(byfrom)
            if hint_text:
                text += hint_text + " "
        if select._prefixes:
            text += " ".join(
                            x._compiler_dispatch(self, **kwargs)
                            for x in select._prefixes) + " "
        text += self.get_select_precolumns(select)
        text += ', '.join(inner_columns)
        if froms:
            text += " \nFROM "
            if select._hints:
                text += ', '.join([f._compiler_dispatch(self,
                                    asfrom=True, fromhints=byfrom,
                                    **kwargs)
                                for f in froms])
            else:
                text += ', '.join([f._compiler_dispatch(self,
                                    asfrom=True, **kwargs)
                                for f in froms])
        else:
            # no FROM elements: dialects may supply e.g. "FROM DUAL"
            text += self.default_from()
        if select._whereclause is not None:
            t = select._whereclause._compiler_dispatch(self, **kwargs)
            if t:
                text += " \nWHERE " + t
        if select._group_by_clause.clauses:
            group_by = select._group_by_clause._compiler_dispatch(
                                        self, **kwargs)
            if group_by:
                text += " GROUP BY " + group_by
        if select._having is not None:
            t = select._having._compiler_dispatch(self, **kwargs)
            if t:
                text += " \nHAVING " + t
        if select._order_by_clause.clauses:
            text += self.order_by_clause(select, **kwargs)
        if select._limit is not None or select._offset is not None:
            text += self.limit_clause(select)
        if select.for_update:
            text += self.for_update_clause(select)
        self.stack.pop(-1)
        if asfrom and parens:
            return "(" + text + ")"
        else:
            return text
def get_select_precolumns(self, select):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
return select._distinct and "DISTINCT " or ""
def order_by_clause(self, select, **kw):
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select):
if select.for_update:
return " FOR UPDATE"
else:
return ""
    def limit_clause(self, select):
        """Render LIMIT/OFFSET clauses.

        Emits "LIMIT -1" when only an OFFSET is present, as required by
        dialects that do not allow a bare OFFSET.
        """
        text = ""
        if select._limit is not None:
            text += "\n LIMIT " + self.process(sql.literal(select._limit))
        if select._offset is not None:
            if select._limit is None:
                text += "\n LIMIT -1"
            text += " OFFSET " + self.process(sql.literal(select._offset))
        return text
    def visit_table(self, table, asfrom=False, ashint=False,
                        fromhints=None, **kwargs):
        """Render a (schema-qualified) table name in FROM or hint
        context; tables render as '' in other positions."""
        if asfrom or ashint:
            if getattr(table, "schema", None):
                ret = self.preparer.quote_schema(table.schema,
                                table.quote_schema) + \
                                "." + self.preparer.quote(table.name,
                                                table.quote)
            else:
                ret = self.preparer.quote(table.name, table.quote)
            if fromhints and table in fromhints:
                # append any dialect-specific FROM-element hint
                hinttext = self.get_from_hint_text(table, fromhints[table])
                if hinttext:
                    ret += " " + hinttext
            return ret
        else:
            return ""
def visit_join(self, join, asfrom=False, **kwargs):
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs) +
(join.isouter and " LEFT OUTER JOIN " or " JOIN ") +
join.right._compiler_dispatch(self, asfrom=True, **kwargs) +
" ON " +
join.onclause._compiler_dispatch(self, **kwargs)
)
    def visit_insert(self, insert_stmt):
        """Render an INSERT statement, including DEFAULT VALUES handling
        and an optional RETURNING clause."""
        self.isinsert = True
        colparams = self._get_colparams(insert_stmt)
        if not colparams and \
                not self.dialect.supports_default_values and \
                not self.dialect.supports_empty_insert:
            raise exc.CompileError("The version of %s you are using does "
                                    "not support empty inserts." %
                                    self.dialect.name)
        preparer = self.preparer
        supports_default_values = self.dialect.supports_default_values
        text = "INSERT"
        prefixes = [self.process(x) for x in insert_stmt._prefixes]
        if prefixes:
            text += " " + " ".join(prefixes)
        text += " INTO " + preparer.format_table(insert_stmt.table)
        if colparams or not supports_default_values:
            text += " (%s)" % ', '.join([preparer.format_column(c[0])
                       for c in colparams])
        # returning_clause is only assigned inside this branch; the
        # final "if self.returning" below can only be true in that case
        if self.returning or insert_stmt._returning:
            self.returning = self.returning or insert_stmt._returning
            returning_clause = self.returning_clause(
                                    insert_stmt, self.returning)
            if self.returning_precedes_values:
                text += " " + returning_clause
        if not colparams and supports_default_values:
            text += " DEFAULT VALUES"
        else:
            text += " VALUES (%s)" % \
                    ', '.join([c[1] for c in colparams])
        if self.returning and not self.returning_precedes_values:
            text += " " + returning_clause
        return text
    def visit_update(self, update_stmt):
        """Render an UPDATE statement with SET, optional WHERE and
        RETURNING clauses."""
        self.stack.append({'from': set([update_stmt.table])})
        self.isupdate = True
        colparams = self._get_colparams(update_stmt)
        text = "UPDATE " + self.preparer.format_table(update_stmt.table)
        text += ' SET ' + \
                ', '.join(
                        self.preparer.quote(c[0].name, c[0].quote) +
                        '=' + c[1]
                      for c in colparams
                )
        if update_stmt._returning:
            self.returning = update_stmt._returning
            if self.returning_precedes_values:
                text += " " + self.returning_clause(
                                    update_stmt, update_stmt._returning)
        if update_stmt._whereclause is not None:
            text += " WHERE " + self.process(update_stmt._whereclause)
        # RETURNING after WHERE, for dialects that place it last
        if self.returning and not self.returning_precedes_values:
            text += " " + self.returning_clause(
                                    update_stmt, update_stmt._returning)
        self.stack.pop(-1)
        return text
    def _create_crud_bind_param(self, col, value, required=False):
        # Create an implicit bind parameter for an INSERT/UPDATE column;
        # marked _is_crud so visit_bindparam can detect name collisions
        # with user-defined bindparam() constructs.
        bindparam = sql.bindparam(col.key, value,
                            type_=col.type, required=required)
        bindparam._is_crud = True
        return bindparam._compiler_dispatch(self)
    def _get_colparams(self, stmt):
        """create a set of tuples representing column/string pairs for use
        in an INSERT or UPDATE statement.

        Also generates the Compiled object's postfetch, prefetch, and
        returning column collections, used for default handling and
        ultimately populating the ResultProxy's prefetch_cols() and
        postfetch_cols() collections.
        """
        self.postfetch = []
        self.prefetch = []
        self.returning = []
        # no parameters in the statement, no parameters in the
        # compiled params - return binds for all columns
        if self.column_keys is None and stmt.parameters is None:
            return [
                        (c, self._create_crud_bind_param(c,
                                    None, required=True))
                        for c in stmt.table.columns
                    ]
        # sentinel marking keys that must receive a value at execute time
        required = object()
        # if we have statement parameters - set defaults in the
        # compiled params
        if self.column_keys is None:
            parameters = {}
        else:
            parameters = dict((sql._column_as_key(key), required)
                              for key in self.column_keys
                              if not stmt.parameters or
                              key not in stmt.parameters)
        if stmt.parameters is not None:
            for k, v in stmt.parameters.iteritems():
                parameters.setdefault(sql._column_as_key(k), v)
        # create a list of column assignment clauses as tuples
        values = []
        # primary-key values are needed for INSERTs that aren't inline
        # and don't already declare RETURNING
        need_pks = self.isinsert and \
                        not self.inline and \
                        not stmt._returning
        implicit_returning = need_pks and \
                                self.dialect.implicit_returning and \
                                stmt.table.implicit_returning
        postfetch_lastrowid = need_pks and self.dialect.postfetch_lastrowid
        # iterating through columns at the top to maintain ordering.
        # otherwise we might iterate through individual sets of
        # "defaults", "primary key cols", etc.
        for c in stmt.table.columns:
            if c.key in parameters:
                # an explicit value (or the 'required' sentinel) was given
                value = parameters[c.key]
                if sql._is_literal(value):
                    value = self._create_crud_bind_param(
                                    c, value, required=value is required)
                elif c.primary_key and implicit_returning:
                    self.returning.append(c)
                    value = self.process(value.self_group())
                else:
                    self.postfetch.append(c)
                    value = self.process(value.self_group())
                values.append((c, value))
            elif self.isinsert:
                if c.primary_key and \
                    need_pks and \
                    (
                        implicit_returning or
                        not postfetch_lastrowid or
                        c is not stmt.table._autoincrement_column
                    ):
                    if implicit_returning:
                        # generate the PK value server-side and fetch it
                        # back via RETURNING
                        if c.default is not None:
                            if c.default.is_sequence:
                                if self.dialect.supports_sequences and \
                                    (not c.default.optional or \
                                    not self.dialect.sequences_optional):
                                    proc = self.process(c.default)
                                    values.append((c, proc))
                                self.returning.append(c)
                            elif c.default.is_clause_element:
                                values.append(
                                    (c,
                                    self.process(c.default.arg.self_group()))
                                )
                                self.returning.append(c)
                            else:
                                # python-side default: bind and prefetch
                                values.append(
                                    (c, self._create_crud_bind_param(c, None))
                                )
                                self.prefetch.append(c)
                        else:
                            self.returning.append(c)
                    else:
                        # no implicit returning: pre-execute the default
                        # or sequence where supported
                        if c.default is not None or \
                            c is stmt.table._autoincrement_column and (
                                self.dialect.supports_sequences or
                                self.dialect.preexecute_autoincrement_sequences
                            ):
                            values.append(
                                (c, self._create_crud_bind_param(c, None))
                            )
                            self.prefetch.append(c)
                elif c.default is not None:
                    # non-PK column with a default
                    if c.default.is_sequence:
                        if self.dialect.supports_sequences and \
                            (not c.default.optional or \
                            not self.dialect.sequences_optional):
                            proc = self.process(c.default)
                            values.append((c, proc))
                            if not c.primary_key:
                                self.postfetch.append(c)
                    elif c.default.is_clause_element:
                        values.append(
                            (c, self.process(c.default.arg.self_group()))
                        )
                        if not c.primary_key:
                            # dont add primary key column to postfetch
                            self.postfetch.append(c)
                    else:
                        values.append(
                            (c, self._create_crud_bind_param(c, None))
                        )
                        self.prefetch.append(c)
                elif c.server_default is not None:
                    # value generated by the server; fetch after execute
                    if not c.primary_key:
                        self.postfetch.append(c)
            elif self.isupdate:
                # UPDATE: apply onupdate defaults for unset columns
                if c.onupdate is not None and not c.onupdate.is_sequence:
                    if c.onupdate.is_clause_element:
                        values.append(
                            (c, self.process(c.onupdate.arg.self_group()))
                        )
                        self.postfetch.append(c)
                    else:
                        values.append(
                            (c, self._create_crud_bind_param(c, None))
                        )
                        self.prefetch.append(c)
                elif c.server_onupdate is not None:
                    self.postfetch.append(c)
        return values
    def visit_delete(self, delete_stmt):
        """Render a DELETE statement with optional WHERE and RETURNING
        clauses."""
        self.stack.append({'from': set([delete_stmt.table])})
        self.isdelete = True
        text = "DELETE FROM " + self.preparer.format_table(delete_stmt.table)
        if delete_stmt._returning:
            self.returning = delete_stmt._returning
            if self.returning_precedes_values:
                text += " " + self.returning_clause(
                                delete_stmt, delete_stmt._returning)
        if delete_stmt._whereclause is not None:
            text += " WHERE " + self.process(delete_stmt._whereclause)
        # RETURNING after WHERE, for dialects that place it last
        if self.returning and not self.returning_precedes_values:
            text += " " + self.returning_clause(
                                delete_stmt, delete_stmt._returning)
        self.stack.pop(-1)
        return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
class DDLCompiler(engine.Compiled):
    """Default compiler for DDL constructs: CREATE/DROP TABLE, INDEX,
    SEQUENCE, and constraint clauses."""
    @util.memoized_property
    def sql_compiler(self):
        # statement compiler used to render embedded SQL expressions
        # (server defaults, CHECK constraint text); memoized per instance
        return self.dialect.statement_compiler(self.dialect, None)
    @property
    def preparer(self):
        return self.dialect.identifier_preparer
    def construct_params(self, params=None):
        # DDL statements take no bind parameters
        return None
    def visit_ddl(self, ddl, **kwargs):
        """Render a literal DDL() statement, interpolating table/schema
        names for table-targeted events."""
        # table events can substitute table and schema name
        context = ddl.context
        if isinstance(ddl.target, schema.Table):
            context = context.copy()
            preparer = self.dialect.identifier_preparer
            path = preparer.format_table_seq(ddl.target)
            if len(path) == 1:
                table, sch = path[0], ''
            else:
                table, sch = path[-1], path[0]
            context.setdefault('table', table)
            context.setdefault('schema', sch)
            context.setdefault('fullname', preparer.format_table(ddl.target))
        return self.sql_compiler.post_process_text(ddl.statement % context)
    def visit_create_table(self, create):
        """Render CREATE TABLE with column specifications, inline column
        constraints and table-level constraints."""
        table = create.element
        preparer = self.dialect.identifier_preparer
        text = "\n" + " ".join(['CREATE'] + \
                                     table._prefixes + \
                                     ['TABLE',
                                      preparer.format_table(table),
                                      "("])
        separator = "\n"
        # if only one primary key, specify it along with the column
        first_pk = False
        for column in table.columns:
            text += separator
            separator = ", \n"
            text += "\t" + self.get_column_specification(
                                            column,
                                            first_pk=column.primary_key and \
                                            not first_pk
                                        )
            if column.primary_key:
                first_pk = True
            # inline constraints attached directly to the column
            const = " ".join(self.process(constraint) \
                            for constraint in column.constraints)
            if const:
                text += " " + const
        const = self.create_table_constraints(table)
        if const:
            text += ", \n\t" + const
        text += "\n)%s\n\n" % self.post_create_table(table)
        return text
    def create_table_constraints(self, table):
        """Render the table-level constraint clauses, PK first, skipping
        constraints handled via ALTER or suppressed by _create_rule."""
        # On some DB order is significant: visit PK first, then the
        # other constraints (engine.ReflectionTest.testbasic failed on FB2)
        constraints = []
        if table.primary_key:
            constraints.append(table.primary_key)
        constraints.extend([c for c in table._sorted_constraints
                                if c is not table.primary_key])
        return ", \n\t".join(p for p in
                        (self.process(constraint)
                        for constraint in constraints
                        if (
                            constraint._create_rule is None or
                            constraint._create_rule(self))
                        and (
                            not self.dialect.supports_alter or
                            not getattr(constraint, 'use_alter', False)
                        )) if p is not None
                )
    def visit_drop_table(self, drop):
        return "\nDROP TABLE " + self.preparer.format_table(drop.element)
    def _index_identifier(self, ident):
        # Truncate generated index names to the dialect's maximum,
        # appending an md5 fragment for uniqueness; validate explicit
        # names instead.
        if isinstance(ident, sql._generated_label):
            max = self.dialect.max_index_name_length or \
                        self.dialect.max_identifier_length
            if len(ident) > max:
                return ident[0:max - 8] + \
                                "_" + util.md5_hex(ident)[-4:]
            else:
                return ident
        else:
            self.dialect.validate_identifier(ident)
            return ident
    def visit_create_index(self, create):
        """Render CREATE [UNIQUE] INDEX ... ON ... (cols)."""
        index = create.element
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "
        text += "INDEX %s ON %s (%s)" \
                    % (preparer.quote(self._index_identifier(index.name),
                                      index.quote),
                       preparer.format_table(index.table),
                       ', '.join(preparer.quote(c.name, c.quote)
                                 for c in index.columns))
        return text
    def visit_drop_index(self, drop):
        index = drop.element
        return "\nDROP INDEX " + \
                    self.preparer.quote(
                        self._index_identifier(index.name), index.quote)
    def visit_add_constraint(self, create):
        preparer = self.preparer
        return "ALTER TABLE %s ADD %s" % (
            self.preparer.format_table(create.element.table),
            self.process(create.element)
        )
    def visit_create_sequence(self, create):
        """Render CREATE SEQUENCE with optional INCREMENT BY / START
        WITH options."""
        text = "CREATE SEQUENCE %s" % \
                self.preparer.format_sequence(create.element)
        if create.element.increment is not None:
            text += " INCREMENT BY %d" % create.element.increment
        if create.element.start is not None:
            text += " START WITH %d" % create.element.start
        return text
    def visit_drop_sequence(self, drop):
        return "DROP SEQUENCE %s" % \
                self.preparer.format_sequence(drop.element)
    def visit_drop_constraint(self, drop):
        preparer = self.preparer
        return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
            self.preparer.format_table(drop.element.table),
            self.preparer.format_constraint(drop.element),
            drop.cascade and " CASCADE" or ""
        )
    def get_column_specification(self, column, **kwargs):
        """Render a single column clause: name, type, DEFAULT, NOT
        NULL."""
        colspec = self.preparer.format_column(column) + " " + \
                        self.dialect.type_compiler.process(column.type)
        default = self.get_column_default_string(column)
        if default is not None:
            colspec += " DEFAULT " + default
        if not column.nullable:
            colspec += " NOT NULL"
        return colspec
    def post_create_table(self, table):
        # Hook for dialect-specific text after the closing paren of
        # CREATE TABLE (e.g. storage engine options).
        return ''
    def get_column_default_string(self, column):
        """Return the DEFAULT clause text for a column, or None."""
        if isinstance(column.server_default, schema.DefaultClause):
            if isinstance(column.server_default.arg, basestring):
                # plain string default is quoted as a literal
                return "'%s'" % column.server_default.arg
            else:
                return self.sql_compiler.process(column.server_default.arg)
        else:
            return None
    def visit_check_constraint(self, constraint):
        """Render a table-level CHECK constraint."""
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                        self.preparer.format_constraint(constraint)
        # render the constraint expression as literal DDL text
        sqltext = sql_util.expression_as_ddl(constraint.sqltext)
        text += "CHECK (%s)" % self.sql_compiler.process(sqltext)
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_column_check_constraint(self, constraint):
        """Render an inline (column-level) CHECK constraint."""
        text = "CHECK (%s)" % constraint.sqltext
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_primary_key_constraint(self, constraint):
        """Render a table-level PRIMARY KEY constraint; '' when it has
        no columns."""
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                    self.preparer.format_constraint(constraint)
        text += "PRIMARY KEY "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name, c.quote)
                                       for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
    def visit_foreign_key_constraint(self, constraint):
        """Render a FOREIGN KEY constraint with its REFERENCES clause
        and cascade/deferrability options."""
        preparer = self.dialect.identifier_preparer
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                        preparer.format_constraint(constraint)
        remote_table = list(constraint._elements.values())[0].column.table
        text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
            ', '.join(preparer.quote(f.parent.name, f.parent.quote)
                      for f in constraint._elements.values()),
            self.define_constraint_remote_table(
                            constraint, remote_table, preparer),
            ', '.join(preparer.quote(f.column.name, f.column.quote)
                      for f in constraint._elements.values())
        )
        text += self.define_constraint_cascades(constraint)
        text += self.define_constraint_deferrability(constraint)
        return text
    def define_constraint_remote_table(self, constraint, table, preparer):
        """Format the remote table clause of a CREATE CONSTRAINT clause."""
        return preparer.format_table(table)
    def visit_unique_constraint(self, constraint):
        """Render a table-level UNIQUE constraint."""
        text = ""
        if constraint.name is not None:
            text += "CONSTRAINT %s " % \
                    self.preparer.format_constraint(constraint)
        text += "UNIQUE (%s)" % (
                    ', '.join(self.preparer.quote(c.name, c.quote)
                            for c in constraint))
        text += self.define_constraint_deferrability(constraint)
        return text
    def define_constraint_cascades(self, constraint):
        """Render ON DELETE / ON UPDATE cascade clauses."""
        text = ""
        if constraint.ondelete is not None:
            text += " ON DELETE %s" % constraint.ondelete
        if constraint.onupdate is not None:
            text += " ON UPDATE %s" % constraint.onupdate
        return text
    def define_constraint_deferrability(self, constraint):
        """Render DEFERRABLE / INITIALLY clauses."""
        text = ""
        if constraint.deferrable is not None:
            if constraint.deferrable:
                text += " DEFERRABLE"
            else:
                text += " NOT DEFERRABLE"
        if constraint.initially is not None:
            text += " INITIALLY %s" % constraint.initially
        return text
class GenericTypeCompiler(engine.TypeCompiler):
    """Render type objects to their DDL strings.

    Uppercase methods emit the SQL-standard type names verbatim;
    lowercase methods map SQLAlchemy's generic types onto those
    renderings and are the usual dialect override points.
    """
    def visit_CHAR(self, type_):
        return "CHAR" + (type_.length and "(%d)" % type_.length or "")
    def visit_NCHAR(self, type_):
        return "NCHAR" + (type_.length and "(%d)" % type_.length or "")
    def visit_FLOAT(self, type_):
        return "FLOAT"
    def visit_REAL(self, type_):
        return "REAL"
    def visit_NUMERIC(self, type_):
        # precision/scale are rendered only when present
        if type_.precision is None:
            return "NUMERIC"
        elif type_.scale is None:
            return "NUMERIC(%(precision)s)" % \
                        {'precision': type_.precision}
        else:
            return "NUMERIC(%(precision)s, %(scale)s)" % \
                        {'precision': type_.precision,
                        'scale' : type_.scale}
    def visit_DECIMAL(self, type_):
        return "DECIMAL"
    def visit_INTEGER(self, type_):
        return "INTEGER"
    def visit_SMALLINT(self, type_):
        return "SMALLINT"
    def visit_BIGINT(self, type_):
        return "BIGINT"
    def visit_TIMESTAMP(self, type_):
        return 'TIMESTAMP'
    def visit_DATETIME(self, type_):
        return "DATETIME"
    def visit_DATE(self, type_):
        return "DATE"
    def visit_TIME(self, type_):
        return "TIME"
    def visit_CLOB(self, type_):
        return "CLOB"
    def visit_NCLOB(self, type_):
        return "NCLOB"
    def visit_VARCHAR(self, type_):
        return "VARCHAR" + (type_.length and "(%d)" % type_.length or "")
    def visit_NVARCHAR(self, type_):
        return "NVARCHAR" + (type_.length and "(%d)" % type_.length or "")
    def visit_BLOB(self, type_):
        return "BLOB"
    def visit_BINARY(self, type_):
        return "BINARY" + (type_.length and "(%d)" % type_.length or "")
    def visit_VARBINARY(self, type_):
        return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
    def visit_BOOLEAN(self, type_):
        return "BOOLEAN"
    def visit_TEXT(self, type_):
        return "TEXT"
    # generic types: delegate to the uppercase SQL-standard renderings
    def visit_large_binary(self, type_):
        return self.visit_BLOB(type_)
    def visit_boolean(self, type_):
        return self.visit_BOOLEAN(type_)
    def visit_time(self, type_):
        return self.visit_TIME(type_)
    def visit_datetime(self, type_):
        return self.visit_DATETIME(type_)
    def visit_date(self, type_):
        return self.visit_DATE(type_)
    def visit_big_integer(self, type_):
        return self.visit_BIGINT(type_)
    def visit_small_integer(self, type_):
        return self.visit_SMALLINT(type_)
    def visit_integer(self, type_):
        return self.visit_INTEGER(type_)
    def visit_real(self, type_):
        return self.visit_REAL(type_)
    def visit_float(self, type_):
        return self.visit_FLOAT(type_)
    def visit_numeric(self, type_):
        return self.visit_NUMERIC(type_)
    def visit_string(self, type_):
        return self.visit_VARCHAR(type_)
    def visit_unicode(self, type_):
        return self.visit_VARCHAR(type_)
    def visit_text(self, type_):
        return self.visit_TEXT(type_)
    def visit_unicode_text(self, type_):
        return self.visit_TEXT(type_)
    def visit_enum(self, type_):
        return self.visit_VARCHAR(type_)
    def visit_null(self, type_):
        raise NotImplementedError("Can't generate DDL for the null type")
    def visit_type_decorator(self, type_):
        # unwrap TypeDecorator to its underlying implementation type
        return self.process(type_.type_engine(self.dialect))
    def visit_user_defined(self, type_):
        return type_.get_col_spec()
class IdentifierPreparer(object):
    """Handle quoting and case-folding of identifiers based on options."""

    # Identifiers equal (case-insensitively) to one of these are always quoted.
    reserved_words = RESERVED_WORDS

    # Compiled regex of characters an identifier may contain without quoting.
    legal_characters = LEGAL_CHARACTERS

    # Characters an identifier may not *start* with unless quoted.
    illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS

    def __init__(self, dialect, initial_quote='"',
                 final_quote=None, escape_quote='"', omit_schema=False):
        """Construct a new ``IdentifierPreparer`` object.

        initial_quote
          Character that begins a delimited identifier.

        final_quote
          Character that ends a delimited identifier. Defaults to
          `initial_quote`.

        omit_schema
          Prevent prepending schema name. Useful for databases that do
          not support schemae.
        """
        self.dialect = dialect
        self.initial_quote = initial_quote
        self.final_quote = final_quote or self.initial_quote
        self.escape_quote = escape_quote
        # An embedded quote character is escaped by doubling it.
        self.escape_to_quote = self.escape_quote * 2
        self.omit_schema = omit_schema
        # Cache of identifier -> rendered form, filled lazily by quote().
        self._strings = {}

    def _escape_identifier(self, value):
        """Escape an identifier.

        Subclasses should override this to provide database-dependent
        escaping behavior.
        """
        return value.replace(self.escape_quote, self.escape_to_quote)

    def _unescape_identifier(self, value):
        """Canonicalize an escaped identifier.

        Subclasses should override this to provide database-dependent
        unescaping behavior that reverses _escape_identifier.
        """
        return value.replace(self.escape_to_quote, self.escape_quote)

    def quote_identifier(self, value):
        """Quote an identifier.

        Subclasses should override this to provide database-dependent
        quoting behavior.
        """
        return self.initial_quote + \
            self._escape_identifier(value) + \
            self.final_quote

    def _requires_quotes(self, value):
        """Return True if the given identifier requires quoting."""
        lc_value = value.lower()
        # Quote when the name is reserved, starts with an illegal character,
        # contains characters outside the legal set, or is mixed/upper case.
        # NOTE: the ``unicode`` builtin ties this code to Python 2.
        return (lc_value in self.reserved_words
                or value[0] in self.illegal_initial_characters
                or not self.legal_characters.match(unicode(value))
                or (lc_value != value))

    def quote_schema(self, schema, force):
        """Quote a schema.

        Subclasses should override this to provide database-dependent
        quoting behavior.
        """
        return self.quote(schema, force)

    def quote(self, ident, force):
        """Conditionally quote ``ident``.

        ``force`` semantics: None = decide via _requires_quotes and cache
        the result; True = always quote; False = never quote.
        """
        if force is None:
            if ident in self._strings:
                return self._strings[ident]
            else:
                if self._requires_quotes(ident):
                    self._strings[ident] = self.quote_identifier(ident)
                else:
                    self._strings[ident] = ident
                return self._strings[ident]
        elif force:
            return self.quote_identifier(ident)
        else:
            return ident

    def format_sequence(self, sequence, use_schema=True):
        # Render a (possibly schema-qualified) sequence name.
        name = self.quote(sequence.name, sequence.quote)
        if not self.omit_schema and use_schema and \
                sequence.schema is not None:
            name = self.quote_schema(sequence.schema, sequence.quote) + \
                "." + name
        return name

    def format_label(self, label, name=None):
        # ``name`` overrides the label's own name when given.
        return self.quote(name or label.name, label.quote)

    def format_alias(self, alias, name=None):
        return self.quote(name or alias.name, alias.quote)

    def format_savepoint(self, savepoint, name=None):
        return self.quote(name or savepoint.ident, savepoint.quote)

    def format_constraint(self, constraint):
        return self.quote(constraint.name, constraint.quote)

    def format_table(self, table, use_schema=True, name=None):
        """Prepare a quoted table and schema name."""
        if name is None:
            name = table.name
        result = self.quote(name, table.quote)
        if not self.omit_schema and use_schema \
                and getattr(table, "schema", None):
            result = self.quote_schema(table.schema, table.quote_schema) + \
                "." + result
        return result

    def format_column(self, column, use_table=False,
                      name=None, table_name=None):
        """Prepare a quoted column name."""
        if name is None:
            name = column.name
        if not getattr(column, 'is_literal', False):
            if use_table:
                return self.format_table(
                    column.table, use_schema=False,
                    name=table_name) + "." + \
                    self.quote(name, column.quote)
            else:
                return self.quote(name, column.quote)
        else:
            # literal textual elements get stuck into ColumnClause alot,
            # which shouldnt get quoted
            if use_table:
                return self.format_table(column.table,
                                         use_schema=False, name=table_name) + '.' + name
            else:
                return name

    def format_table_seq(self, table, use_schema=True):
        """Format table name and schema as a tuple."""
        # Dialects with more levels in their fully qualified references
        # ('database', 'owner', etc.) could override this and return
        # a longer sequence.
        if not self.omit_schema and use_schema and \
                getattr(table, 'schema', None):
            return (self.quote_schema(table.schema, table.quote_schema),
                    self.format_table(table, use_schema=False))
        else:
            return (self.format_table(table, use_schema=False), )

    @util.memoized_property
    def _r_identifiers(self):
        # Regex that splits "schema.table.column"-style dotted paths into
        # components, honoring quoted segments and escaped quotes.
        initial, final, escaped_final = \
            [re.escape(s) for s in
             (self.initial_quote, self.final_quote,
              self._escape_identifier(self.final_quote))]
        r = re.compile(
            r'(?:'
            r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
            r'|([^\.]+))(?=\.|$))+' %
            { 'initial': initial,
              'final': final,
              'escaped': escaped_final })
        return r

    def unformat_identifiers(self, identifiers):
        """Unpack 'schema.table.column'-like strings into components."""
        r = self._r_identifiers
        # Each findall() tuple holds (quoted, unquoted); exactly one is set.
        return [self._unescape_identifier(i)
                for i in [a or b for a, b in r.findall(identifiers)]]
| gpl-2.0 |
Murdius/GW2Snapshot | app/views.py | 1 | 1917 | import urllib
from flask import render_template, request, session
from flask_restful import Resource
from app import app, api
from bank import get_bank
from inventory import *
from material import *
from sharedInventory import get_shared_inventory
from wallet import *
@app.route('/')
@app.route('/index')
def index():
    """Serve the application's landing page."""
    template_name = 'index.html'
    return render_template(template_name)
class Snapshot(Resource):
    """REST resource that captures a GW2 account snapshot for an API key."""

    def post(self):
        """Fetch account data from the GW2 API and upsert it in the DB.

        The API key is read from the 'key' cookie; the collected wallet
        JSON is stashed in the Flask session and the key is returned.
        """
        api_key = request.cookies.get('key')
        key = {'access_token': api_key}
        encoded_key = urllib.urlencode(key)
        wallet_json = get_wallet(encoded_key)
        # get_wallet signals an invalid/unauthorized key with this string.
        if wallet_json == "Access denied!":
            return 'Access denied!'
        inventory_json = get_all_inventory(encoded_key)
        print "Retrieved inventory data"
        shared_json = get_shared_inventory(encoded_key)
        print "Retrieved shared data"
        bank_json = get_bank(encoded_key)
        print "Retrieved bank data"
        materials_json = get_materials(encoded_key)
        print "Retrieved materials data"
        # NOTE(review): ``models`` is not imported by name in this file --
        # presumably it arrives via one of the wildcard imports; verify.
        exists = models.db.session.query(models.Snapshot.api_key).filter_by(
            api_key=api_key).scalar() is not None
        if exists:
            # Update the existing row for this key in place.
            snapshot = models.Snapshot.query.filter_by(api_key=api_key).first_or_404()
            snapshot.inventory = inventory_json
            snapshot.materials = materials_json
            snapshot.bank = bank_json
            snapshot.shared = shared_json
            snapshot.wallet = wallet_json
            models.db.session.commit()
        else:
            # First snapshot for this key: insert a new row.
            snapshot = models.Snapshot(api_key, inventory_json, materials_json, bank_json, shared_json, wallet_json)
            models.db.session.add(snapshot)
            models.db.session.commit()
        models.db.session.close()
        print "Snapshot added to database"
        session['wallet'] = wallet_json
        return api_key
# Register the Snapshot resource under the /snapshot endpoint.
api.add_resource(Snapshot, '/snapshot')
| gpl-3.0 |
ykim306/ud851-Exercises | flatten.py | 35 | 4312 | #! /usr/local/bin/python
import argparse
import os
import shutil
import sys
import tempfile
import git
# Names never copied into a snapshot directory.
IGNORE_PATTERNS = ('.git', ".DS_Store")
# Non-alphanumeric characters allowed in generated branch names.
SAFE_CHARS = ["-", "_", "."]
# Maximum length of a branch name derived from a commit message.
MAX_LENGTH = 100
# Default name of the branch that receives the flattened snapshots.
STUDENT = "student"
# Prefix identifying development branches.
DEVELOP = "develop-"
# Sentinel meaning "use every local branch whose name contains DEVELOP".
DEVELOP_DEFAULT = "all develop branches"
def flatten(repo_dir, target_dir, student, develop_branches, remove_branches, links):
repo = git.Repo(repo_dir)
if develop_branches == DEVELOP_DEFAULT:
develop_branches = [branch for branch in repo.branches if DEVELOP in branch.name]
remove_local_branches(repo, student, develop_branches)
try:
temp_dir = tempfile.mkdtemp()
for develop in develop_branches:
to_temp_dir(repo, repo_dir, develop, temp_dir)
copy_snapshots(repo, student, temp_dir, target_dir)
finally:
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
print "Done! Review and commit the", student, "branch at your leisure."
print "Then run $ git push --all --prune"
def remove_local_branches(repo, student, develop_branches):
    """Force-delete every local branch except the student branch and the
    develop branches being flattened."""
    for branch in repo.branches:
        if branch.name != student and branch not in develop_branches:
            print "Removing local branch:", branch.name
            repo.git.branch(branch.name, "-D")
def to_temp_dir(repo, repo_dir, develop, temp_dir):
    """Walk the history of ``develop`` and copy a working-tree snapshot of
    every Exercise/Solution commit into ``temp_dir``."""
    for rev in repo.git.rev_list(develop).split("\n"):
        commit = repo.commit(rev)
        # The sanitized first line of the commit message becomes the
        # branch/folder name for the snapshot.
        branch_name = clean_commit_message(commit.message)
        if "Exercise" in branch_name or "Solution" in branch_name:
            # Recreate the marker branch at this commit.
            if branch_name in repo.branches:
                repo.git.branch(branch_name, "-D")
            new_branch = repo.create_head(branch_name)
            new_branch.set_commit(rev)
            repo.git.checkout(commit)
            print "Saving snapshot of:", branch_name
            # Drop untracked/ignored files so the copy is pristine.
            repo.git.clean("-fdx")
            # Folder name is the develop branch name minus its prefix
            # (e.g. "develop-foo" -> "foo").
            folder_name = develop.name.split("-",1)[1]
            target_dir = os.path.join(temp_dir, folder_name, branch_name)
            shutil.copytree(repo_dir, target_dir,
                            ignore=shutil.ignore_patterns(*IGNORE_PATTERNS))
def clean_commit_message(message):
    """Turn the first line of a commit message into a safe branch name."""
    summary = message.split("\n")[0]
    kept = [ch for ch in summary if ch.isalnum() or ch in SAFE_CHARS]
    cleaned = "".join(kept).strip()
    # Slicing is a no-op when the string is already short enough.
    return cleaned[:MAX_LENGTH]
def copy_snapshots(repo, student, temp_dir, target_dir):
    """Move the collected snapshots from ``temp_dir`` into ``target_dir``,
    replacing any folders that already exist there."""
    # When writing into the repository itself, switch to the student
    # branch first so the copies land there.
    if target_dir == os.getcwd():
        repo.git.checkout(student)
    for item in os.listdir(temp_dir):
        source_dir = os.path.join(temp_dir, item)
        dest_dir = os.path.join(target_dir, item)
        if os.path.exists(dest_dir):
            shutil.rmtree(dest_dir)
        print "Copying: ", item
        shutil.copytree(source_dir, dest_dir)
# Help text shown by argparse.  NOTE(review): both strings appear
# truncated ("This script ", " To make changes to ") -- confirm against
# the original upstream source.
DESCRIPTION = "This script "
EPILOG = " To make changes to "
def main():
    """Parse command-line options and run the flatten operation."""
    parser = argparse.ArgumentParser(
        description=DESCRIPTION,
        epilog=EPILOG,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag names, keyword arguments) pairs, registered in order so the
    # generated --help output is unchanged.
    option_specs = [
        (('-b', '--remove'),
         dict(action='store_true',
              help='delete all local branches except the student and develop branches')),
        (('-d', '--directory'),
         dict(default=os.getcwd(),
              help="the directory of the source repository")),
        (('-t', '--target'),
         dict(default=os.getcwd(),
              help="target directory")),
        (('-s', '--student'),
         dict(default=STUDENT,
              help="branch where snapshots will be copied")),
        (('-l', '--links'),
         dict(action='store_true',
              help="Add links to branches and diff to README files")),
        (('develop_branches',),
         dict(nargs="*",
              default=DEVELOP_DEFAULT,
              help="the branches where snapshots will be copied from")),
    ]
    for names, kwargs in option_specs:
        parser.add_argument(*names, **kwargs)
    args = parser.parse_args()
    flatten(args.directory, args.target, args.student,
            args.develop_branches, args.remove, args.links)
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| apache-2.0 |
zasdfgbnm/tensorflow | tensorflow/contrib/learn/python/learn/utils/inspect_checkpoint.py | 123 | 2686 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple script for inspect checkpoint files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.platform import app
# Populated in the __main__ block from parsed command-line arguments.
FLAGS = None
def print_tensors_in_checkpoint_file(file_name, tensor_name):
  """Prints tensors in a checkpoint file.

  Without a `tensor_name`, the names and shapes of every tensor in the
  checkpoint are listed; with one, that tensor's content is printed.

  Args:
    file_name: Name of the checkpoint file.
    tensor_name: Name of the tensor in the checkpoint file to print.
  """
  try:
    if tensor_name:
      print("tensor_name: ", tensor_name)
      print(checkpoint_utils.load_variable(file_name, tensor_name))
    else:
      for name, shape in checkpoint_utils.list_variables(file_name):
        print("%s\t%s" % (name, str(shape)))
  except Exception as e:  # pylint: disable=broad-except
    message = str(e)
    print(message)
    if "corrupted compressed block contents" in message:
      print("It's likely that your checkpoint file has been compressed "
            "with SNAPPY.")
def main(unused_argv):
  """Entry point: validate FLAGS and dispatch to the printer."""
  # Guard clause: a missing --file_name is a usage error.
  if not FLAGS.file_name:
    print("Usage: inspect_checkpoint --file_name=<checkpoint_file_name "
          "or directory> [--tensor_name=tensor_to_print]")
    sys.exit(1)
  print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.tensor_name)
if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  # Allow "true"/"false" strings to be parsed as booleans.
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--file_name",
      type=str,
      default="",
      help="Checkpoint filename"
  )
  parser.add_argument(
      "--tensor_name",
      type=str,
      default="",
      help="Name of the tensor to inspect"
  )
  FLAGS, unparsed = parser.parse_known_args()
  # Pass any unrecognized arguments through to app.run unchanged.
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
lefnire/tensorforce | tensorforce/models/dpg_target_model.py | 1 | 14627 | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce import util, TensorForceError
from tensorforce.models import DistributionModel
from tensorforce.core.networks import Network, LayerBasedNetwork, Dense, Linear, TFLayer, Nonlinearity
from tensorforce.core.optimizers import Optimizer, Synchronization
class DDPGCriticNetwork(LayerBasedNetwork):
    """Two-layer critic network for DDPG: Q(state, action) -> scalar.

    The action is concatenated into the network after the first
    linear/batch-norm/relu stage, following the original DDPG paper's
    architecture.
    """

    def __init__(self, scope='ddpg-critic-network', summary_labels=(), size_t0=400, size_t1=300):
        """Build the layer stack.

        Args:
            scope: TF variable scope for the network.
            summary_labels: Labels of summaries to record.
            size_t0: Width of the first (state-only) linear layer.
            size_t1: Width of the second (state+action) linear layer.
        """
        super(DDPGCriticNetwork, self).__init__(scope=scope, summary_labels=summary_labels)
        self.t0l = Linear(size=size_t0, scope='linear0')
        self.t0b = TFLayer(layer='batch_normalization', scope='batchnorm0', center=True, scale=True)
        self.t0n = Nonlinearity(name='relu', scope='relu0')
        self.t1l = Linear(size=size_t1, scope='linear1')
        self.t1b = TFLayer(layer='batch_normalization', scope='batchnorm1', center=True, scale=True)
        self.t1n = Nonlinearity(name='relu', scope='relu1')
        # Final dense layer with small uniform init, as in the DDPG paper.
        self.t2d = Dense(size=1, activation='tanh', scope='dense0',
                         weights=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
        self.add_layer(self.t0l)
        self.add_layer(self.t0b)
        self.add_layer(self.t0n)
        self.add_layer(self.t1l)
        self.add_layer(self.t1b)
        self.add_layer(self.t1n)
        self.add_layer(self.t2d)

    def tf_apply(self, x, internals, update, return_internals=False):
        """Compute Q values for a batch of (state, action) pairs.

        ``x`` is a dict with 'states' and 'actions' entries; each may be a
        single tensor or a single-entry dict.
        """
        # NOTE(review): this asserts only x['states'] -- x['actions'] is the
        # assertion *message*; probably both were meant to be asserted.
        assert x['states'], x['actions']
        if isinstance(x['states'], dict):
            if len(x['states']) != 1:
                raise TensorForceError('DDPG critic network must have only one state input, but {} given.'.format(
                    len(x['states'])))
            x_states = next(iter(x['states'].values()))
        else:
            x_states = x['states']
        if isinstance(x['actions'], dict):
            if len(x['actions']) != 1:
                raise TensorForceError('DDPG critic network must have only one action input, but {} given.'.format(
                    len(x['actions'])))
            x_actions = next(iter(x['actions'].values()))
        else:
            x_actions = x['actions']
        # Actions become a float column vector so they can be concatenated.
        x_actions = tf.reshape(tf.cast(x_actions, dtype=tf.float32), (-1, 1))
        out = self.t0l.apply(x=x_states, update=update)
        out = self.t0b.apply(x=out, update=update)
        out = self.t0n.apply(x=out, update=update)
        # Actions are injected after the first stage.
        out = self.t1l.apply(x=tf.concat([out, x_actions], axis=-1), update=update)
        out = self.t1b.apply(x=out, update=update)
        out = self.t1n.apply(x=out, update=update)
        out = self.t2d.apply(x=out, update=update)
        # Remove last dimension because we only return Q values for one state and action
        out = tf.squeeze(out)
        if return_internals:
            # Todo: Internals management
            return out, None
        else:
            return out
class DPGTargetModel(DistributionModel):
    """
    Policy gradient model log likelihood model with target network (e.g. DDPG)

    Maintains four networks: the actor (``self.network``), the critic, and a
    slowly-synchronized target copy of each.
    """

    # Component keys used by get_components().
    COMPONENT_CRITIC = "critic"
    COMPONENT_TARGET_NETWORK = "target_network"
    COMPONENT_TARGET_DISTRIBUTION = "target_distribution"

    def __init__(
        self,
        states,
        actions,
        scope,
        device,
        saver,
        summarizer,
        execution,
        batching_capacity,
        variable_noise,
        states_preprocessing,
        actions_exploration,
        reward_preprocessing,
        update_mode,
        memory,
        optimizer,
        discount,
        network,
        distributions,
        entropy_regularization,
        critic_network,
        critic_optimizer,
        target_sync_frequency,
        target_update_weight
    ):
        # Specs are stored here; the actual networks are built in initialize().
        self.critic_network_spec = critic_network
        self.critic_optimizer_spec = critic_optimizer
        self.target_sync_frequency = target_sync_frequency
        self.target_update_weight = target_update_weight
        # self.network is the actor, self.critic is the critic
        self.target_network = None
        self.target_network_optimizer = None
        self.critic = None
        self.critic_optimizer = None
        self.target_critic = None
        self.target_critic_optimizer = None
        super(DPGTargetModel, self).__init__(
            states=states,
            actions=actions,
            scope=scope,
            device=device,
            saver=saver,
            summarizer=summarizer,
            execution=execution,
            batching_capacity=batching_capacity,
            variable_noise=variable_noise,
            states_preprocessing=states_preprocessing,
            actions_exploration=actions_exploration,
            reward_preprocessing=reward_preprocessing,
            update_mode=update_mode,
            memory=memory,
            optimizer=optimizer,
            discount=discount,
            network=network,
            distributions=distributions,
            entropy_regularization=entropy_regularization,
            requires_deterministic=True
        )
        # DPG needs next-state transitions and a deterministic policy.
        assert self.memory_spec["include_next_states"]
        assert self.requires_deterministic == True

    def initialize(self, custom_getter):
        """Build target actor/critic networks, their sync optimizers, and
        the templated TF functions used during optimization."""
        super(DPGTargetModel, self).initialize(custom_getter)
        # Target network
        self.target_network = Network.from_spec(
            spec=self.network_spec,
            kwargs=dict(scope='target-network', summary_labels=self.summary_labels)
        )
        # Target network optimizer
        self.target_network_optimizer = Synchronization(
            sync_frequency=self.target_sync_frequency,
            update_weight=self.target_update_weight
        )
        # Target network distributions
        self.target_distributions = self.create_distributions()
        # Critic
        size_t0 = self.critic_network_spec['size_t0']
        size_t1 = self.critic_network_spec['size_t1']
        self.critic = DDPGCriticNetwork(scope='critic', size_t0=size_t0, size_t1=size_t1)
        self.critic_optimizer = Optimizer.from_spec(
            spec=self.critic_optimizer_spec,
            kwargs=dict(summary_labels=self.summary_labels)
        )
        self.target_critic = DDPGCriticNetwork(scope='target-critic', size_t0=size_t0, size_t1=size_t1)
        # Target critic optimizer
        self.target_critic_optimizer = Synchronization(
            sync_frequency=self.target_sync_frequency,
            update_weight=self.target_update_weight
        )
        self.fn_target_actions_and_internals = tf.make_template(
            name_='target-actions-and-internals',
            func_=self.tf_target_actions_and_internals,
            custom_getter_=custom_getter
        )
        self.fn_predict_target_q = tf.make_template(
            name_='predict-target-q',
            func_=self.tf_predict_target_q,
            custom_getter_=custom_getter
        )

    def tf_target_actions_and_internals(self, states, internals, deterministic=True):
        """Sample actions from the *target* actor network."""
        embedding, internals = self.target_network.apply(
            x=states,
            internals=internals,
            update=tf.constant(value=False),
            return_internals=True
        )
        actions = dict()
        for name, distribution in self.target_distributions.items():
            distr_params = distribution.parameterize(x=embedding)
            actions[name] = distribution.sample(
                distr_params=distr_params,
                deterministic=tf.logical_or(x=deterministic, y=self.requires_deterministic)
            )
        return actions, internals

    def tf_loss_per_instance(self, states, internals, actions, terminal, reward, next_states, next_internals, update, reference=None):
        """Actor loss: negative critic Q value (maximize Q via gradient ascent)."""
        q = self.critic.apply(dict(states=states, actions=actions), internals=internals, update=update)
        return -q

    def tf_predict_target_q(self, states, internals, terminal, actions, reward, update):
        """Bellman target: r + gamma * Q_target(s', a'), zeroed on terminals."""
        q_value = self.target_critic.apply(dict(states=states, actions=actions), internals=internals, update=update)
        return reward + (1. - tf.cast(terminal, dtype=tf.float32)) * self.discount * q_value

    def tf_optimization(self, states, internals, actions, terminal, reward, next_states=None, next_internals=None):
        """One DDPG update: critic regression to the Bellman target, actor
        policy-gradient step, then soft sync of both target networks."""
        update = tf.constant(value=True)
        # Predict actions from target actor
        next_target_actions, next_target_internals = self.fn_target_actions_and_internals(
            states=next_states, internals=next_internals, deterministic=True
        )
        # Predicted Q value of next states
        predicted_q = self.fn_predict_target_q(
            states=next_states, internals=next_internals, actions=next_target_actions, terminal=terminal,
            reward=reward, update=update
        )
        # The Bellman target is a constant with respect to critic weights.
        predicted_q = tf.stop_gradient(input=predicted_q)
        real_q = self.critic.apply(dict(states=states, actions=actions), internals=internals, update=update)
        # Update critic
        def fn_critic_loss(predicted_q, real_q):
            # Mean squared Bellman error.
            return tf.reduce_mean(tf.square(real_q - predicted_q))
        critic_optimization = self.critic_optimizer.minimize(
            time=self.timestep,
            variables=self.critic.get_variables(),
            arguments=dict(
                predicted_q=predicted_q,
                real_q=real_q
            ),
            fn_loss=fn_critic_loss)
        # Update actor
        predicted_actions, predicted_internals = self.fn_actions_and_internals(
            states=states, internals=internals, deterministic=True
        )
        optimization = super(DPGTargetModel, self).tf_optimization(
            states=states,
            internals=internals,
            actions=predicted_actions,
            terminal=terminal,
            reward=reward,
            next_states=next_states,
            next_internals=next_internals
        )
        # Update target actor (network) and critic
        network_distributions_variables = [
            variable for name in sorted(self.distributions)
            for variable in self.distributions[name].get_variables(include_nontrainable=False)
        ]
        target_distributions_variables = [
            variable for name in sorted(self.target_distributions)
            for variable in self.target_distributions[name].get_variables(include_nontrainable=False)
        ]
        target_optimization = self.target_network_optimizer.minimize(
            time=self.timestep,
            variables=self.target_network.get_variables() + target_distributions_variables,
            source_variables=self.network.get_variables() + network_distributions_variables
        )
        target_critic_optimization = self.target_critic_optimizer.minimize(
            time=self.timestep,
            variables=self.target_critic.get_variables(),
            source_variables=self.critic.get_variables()
        )
        # All four updates run as one grouped op.
        return tf.group(critic_optimization, optimization, target_optimization, target_critic_optimization)

    def get_variables(self, include_submodules=False, include_nontrainable=False):
        """Collect model variables, adding critic (always) and the target
        networks/optimizers when submodules are requested."""
        model_variables = super(DPGTargetModel, self).get_variables(
            include_submodules=include_submodules,
            include_nontrainable=include_nontrainable
        )
        critic_variables = self.critic.get_variables(include_nontrainable=include_nontrainable)
        model_variables += critic_variables
        if include_nontrainable:
            # Avoid duplicating optimizer variables already collected above.
            critic_optimizer_variables = self.critic_optimizer.get_variables()
            for variable in critic_optimizer_variables:
                if variable in model_variables:
                    model_variables.remove(variable)
            model_variables += critic_optimizer_variables
        if include_submodules:
            target_variables = self.target_network.get_variables(include_nontrainable=include_nontrainable)
            model_variables += target_variables
            target_distributions_variables = [
                variable for name in sorted(self.target_distributions)
                for variable in self.target_distributions[name].get_variables(include_nontrainable=include_nontrainable)
            ]
            model_variables += target_distributions_variables
            target_critic_variables = self.target_critic.get_variables(include_nontrainable=include_nontrainable)
            model_variables += target_critic_variables
            if include_nontrainable:
                target_optimizer_variables = self.target_network_optimizer.get_variables()
                model_variables += target_optimizer_variables
                target_critic_optimizer_variables = self.target_critic_optimizer.get_variables()
                model_variables += target_critic_optimizer_variables
        return model_variables

    def get_summaries(self):
        """Return summaries including those of the target network and
        target distributions."""
        target_network_summaries = self.target_network.get_summaries()
        target_distributions_summaries = [
            summary for name in sorted(self.target_distributions)
            for summary in self.target_distributions[name].get_summaries()
        ]
        # Todo: Critic summaries
        return super(DPGTargetModel, self).get_summaries() + target_network_summaries \
            + target_distributions_summaries

    def get_components(self):
        """Expose critic, target network and target distributions as named
        components (for saving/restoring)."""
        result = dict(super(DPGTargetModel, self).get_components())
        result[DPGTargetModel.COMPONENT_CRITIC] = self.critic
        result[DPGTargetModel.COMPONENT_TARGET_NETWORK] = self.target_network
        for action, distribution in self.target_distributions.items():
            result["%s_%s" % (DPGTargetModel.COMPONENT_TARGET_DISTRIBUTION, action)] = distribution
        # With a single distribution, also expose it under the plain key.
        if len(self.target_distributions) == 1:
            result[DPGTargetModel.COMPONENT_TARGET_DISTRIBUTION] = next(iter(self.target_distributions.values()))
        return result
| apache-2.0 |
tectronics/gsiege | guadaboard/human_game_handler.py | 3 | 7167 | # -*- coding: utf-8 -*-
###############################################################################
# This file is part of Resistencia Cadiz 1812. #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# Copyright(C) 2009,2010 Pablo Recio Quijano <pablo.recioquijano@alum.uca.es> #
###############################################################################
"""
This module intialize the Clips environment for the 1 player mode
"""
import os
import random
import clips
import guadaboard.real_interaction as r_intact
from libguadalete import funciones, f1, mover, texto
from libguadalete import traducirF, traducirM, fA, fB, mirroring, interaccion
from libguadalete.parsear_fichero_formacion import parsear_fichero_formacion
from resistencia import configure, filenames, xdg
from resistencia.nls import gettext as _
def _generate_file_name(team_a, team_b):
    """Build the full path of the game log for the two given teams.

    The result looks like
    'game_YYYY-MM-DD_hh:mm:ss_team_a-vs-team_b.txt' inside the
    configured games directory.
    """
    log_name = filenames.generate_filename_keys('game', (team_a, team_b))
    games_dir = configure.load_configuration()['games_path']
    return '%s/%s' % (games_dir, log_name)
def _rename_output_file(des):
"""
Simple function that rename the output file named 'resultado.txt'
to the proper filename with the date, names and so on.
"""
src = "resultado.txt"
_file = open(src, "a")
_file.write("fin\n")
os.rename(src, des)
class Error(Exception):
    """Base class for exceptions in this module."""
    pass


class FileError(Error):
    """Exception raised for errors parsing files.

    Attributes:
        msg -- explanation of the error
    """

    def __init__(self, msg):
        # Forward the message to the Exception base class so that
        # str(exc) and exc.args carry the message on every interpreter
        # (previously super().__init__ was never called).
        super(FileError, self).__init__(msg)
        self.msg = msg
def init_human_game(player_formation, computer_team, player_as,
                    number_turns, dont_save=False):
    """
    Initialize the CLIPS environment and run one human-vs-computer game.

    player_formation -- path to the human player's formation file
    computer_team    -- (rules_file, formation_file) pair for the AI team
    player_as        -- 'A' if the human plays side A, otherwise side B
    number_turns     -- number of game turns to simulate
    dont_save        -- when True, the game log is not kept
    """
    player_num = 0
    team_a = None
    team_b = None
    name_team_a = ''
    name_team_b = ''
    # Board piece images for each side.
    team_a_piece = xdg.get_data_path('images/piece-orange.png')
    team_b_piece = xdg.get_data_path('images/piece-violete.png')
    default_piece = xdg.get_data_path('images/piece-default.png')
    # Parse both formation files into temporary files consumed by CLIPS.
    formacion_temporal_player = None
    formacion_temporal_pc = None
    formacion_temporal_player = parsear_fichero_formacion(player_formation)
    player_formation = formacion_temporal_player
    formacion_temporal_pc = parsear_fichero_formacion(computer_team[1])
    computer_team = (computer_team[0], formacion_temporal_pc)
    # Assign sides: player_num is 1 when the human is team A, -1 for team B.
    if player_as == 'A':
        player_num = 1
        team_a = player_formation
        team_b = computer_team
        name_team_a = filenames.extract_simple_name_es((None, team_a))
        name_team_b = filenames.extract_name_expert_system(team_b)
    else:
        player_num = -1
        team_b = player_formation
        team_a = computer_team
        name_team_b = filenames.extract_simple_name_es((None, team_b))
        name_team_a = filenames.extract_name_expert_system(team_a)
    aux_team_a = (name_team_a, team_a_piece)
    aux_team_b = (name_team_b, team_b_piece)
    # Fresh CLIPS environment with a random rule-firing strategy.
    clips.Eval('(reset)')
    clips.Eval('(clear)')
    clips.EngineConfig.Strategy = clips.RANDOM_STRATEGY
    random.seed()
    clips.Eval("(seed %d)" % random.randint(0, 9999))
    funciones.LoadFunctions(clips)
    f1.init_world(clips, number_turns)
    try:
        f1.LoadFunctions(clips)
    except Exception:
        print clips.ErrorStream.Read()
        exit(-1)
    mover.LoadFunctions(clips)
    texto.LoadFunctions(clips)
    traducirF.LoadFunctions(clips)
    traducirM.LoadFunctions(clips)
    if player_num == 1:
        # Human plays A: load the interactive formation for A and mirror
        # the AI formation/rules so they apply to side B.
        int_team = mirroring.interactive_formation(team_a)
        temp_team = mirroring.mirroring_team(team_b[1])
        try:
            clips.Load(int_team)
        except clips.ClipsError:
            os.remove(int_team)
            raise FileError(_('Error parsing the file ') + team_a)
        try:
            clips.Load(temp_team)
        except clips.ClipsError:
            os.remove(temp_team)
            raise FileError(_('Error parsing the file ') + team_b[1])
        os.remove(int_team)
        os.remove(temp_team)
        fB.LoadFunctions(clips)
        temp_rules = mirroring.mirroring_rules(team_b[0])
        try:
            clips.Load(temp_rules)
        except clips.ClipsError:
            os.remove(temp_rules)
            raise FileError(_('Error parsing the file ') + team_b[0])
        os.remove(temp_rules)
    else:
        # Human plays B: AI formation/rules load directly for side A and
        # the interactive formation is mirrored onto side B.
        try:
            clips.Load(team_a[1])
        except clips.ClipsError:
            raise FileError(_('Error parsing the file ') + team_a[1])
        int_team = mirroring.interactive_formation(team_b)
        temp_team = mirroring.mirroring_team(int_team)
        try:
            clips.Load(temp_team)
        except clips.ClipsError:
            os.remove(temp_team)
            raise FileError(_('Error parsing the file ') + team_a[1])
        os.remove(temp_team)
        fA.LoadFunctions(clips)
        try:
            clips.Load(team_a[0])
        except clips.ClipsError:
            raise FileError(_('Error parsing the file ') + team_a[0])
    # Hook the interactive UI into the CLIPS interaction layer.
    interaccion.LoadFunctions(clips, player_as)
    interaccion.interaction_object = r_intact.HumanInteraction(
        aux_team_a, aux_team_b, default_piece, player_num, number_turns)
    clips.Reset() # restart the environment
    clips.Run() # start the simulation
    interaccion.interaction_object.finish()
    _stream = clips.StdoutStream.Read() # print the output
    # print _stream
    # print interaccion.interaction_object.define_winner()
    # Keep or discard the game log, then clean up temporary files.
    if not dont_save:
        _rename_output_file(_generate_file_name(name_team_a, name_team_b))
    if os.path.isfile("resultado.txt"):
        os.remove('resultado.txt')
    os.remove(formacion_temporal_pc)
    os.remove(formacion_temporal_player)
    clips.Eval('(reset)')
    clips.Eval('(clear)')
| gpl-3.0 |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/Django-1.2.3/django/core/management/commands/flush.py | 52 | 3433 | from optparse import make_option
from django.conf import settings
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_flush, emit_post_sync_signal
from django.utils.importlib import import_module
class Command(NoArgsCommand):
    """Implementation of ``manage.py flush``.

    Truncates every Django-managed table (returning the database to its
    post-``syncdb`` state), emits the post-sync signal so apps can
    re-initialize, and reloads the ``initial_data`` fixture.
    """
    option_list = NoArgsCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to flush. '
                'Defaults to the "default" database.'),
    )
    help = "Executes ``sqlflush`` on the current database."
    def handle_noargs(self, **options):
        """Flush the database selected by ``--database``, prompting first
        unless ``--noinput`` was given."""
        db = options.get('database', DEFAULT_DB_ALIAS)
        connection = connections[db]
        verbosity = int(options.get('verbosity', 1))
        interactive = options.get('interactive')
        self.style = no_style()
        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError:
                # Apps without a management submodule are fine; skip them.
                pass
        # only_django=True: generate SQL only for tables Django manages,
        # leaving any other tables in the database untouched.
        sql_list = sql_flush(self.style, connection, only_django=True)
        if interactive:
            confirm = raw_input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
        else:
            confirm = 'yes'
        if confirm == 'yes':
            try:
                cursor = connection.cursor()
                for sql in sql_list:
                    cursor.execute(sql)
            except Exception, e:
                # Leave the database exactly as we found it.
                transaction.rollback_unless_managed(using=db)
                raise CommandError("""Database %s couldn't be flushed. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the expected database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
The full error: %s""" % (connection.settings_dict['NAME'], e))
            transaction.commit_unless_managed(using=db)
            # Emit the post sync signal. This allows individual
            # applications to respond as if the database had been
            # sync'd from scratch.
            all_models = []
            for app in models.get_apps():
                all_models.extend([
                    m for m in models.get_models(app, include_auto_created=True)
                    if router.allow_syncdb(db, m)
                ])
            emit_post_sync_signal(all_models, verbosity, interactive, db)
            # Reinstall the initial_data fixture.
            kwargs = options.copy()
            kwargs['database'] = db
            call_command('loaddata', 'initial_data', **kwargs)
        else:
            print "Flush cancelled."
| apache-2.0 |
qingwen220/eagle | eagle-external/hadoop_jmx_collector/lib/kafka-python/kafka/consumer/simple.py | 18 | 12765 | from __future__ import absolute_import
try:
from itertools import zip_longest as izip_longest, repeat # pylint: disable-msg=E0611
except ImportError: # python 2
from itertools import izip_longest as izip_longest, repeat
import logging
import time
import six
try:
from Queue import Empty, Queue
except ImportError: # python 2
from queue import Empty, Queue
from kafka.common import (
FetchRequest, OffsetRequest,
ConsumerFetchSizeTooSmall, ConsumerNoMoreData
)
from .base import (
Consumer,
FETCH_DEFAULT_BLOCK_TIMEOUT,
AUTO_COMMIT_MSG_COUNT,
AUTO_COMMIT_INTERVAL,
FETCH_MIN_BYTES,
FETCH_BUFFER_SIZE_BYTES,
MAX_FETCH_BUFFER_SIZE_BYTES,
FETCH_MAX_WAIT_TIME,
ITER_TIMEOUT_SECONDS,
NO_MESSAGES_WAIT_TIME_SECONDS
)
log = logging.getLogger("kafka")
class FetchContext(object):
    """Context manager that temporarily adjusts a consumer's fetch settings.

    While the context is active, the consumer's ``fetch_max_wait_time`` and
    ``fetch_min_bytes`` are overridden according to the requested blocking
    behaviour; on exit the previous values are restored.
    """
    def __init__(self, consumer, block, timeout):
        self.consumer = consumer
        self.block = block
        if block:
            # A falsy timeout means "use the default block timeout".
            # Callers pass seconds; the consumer expects milliseconds.
            self.timeout = (timeout or FETCH_DEFAULT_BLOCK_TIMEOUT) * 1000
    def __enter__(self):
        """Remember the current fetch settings, then apply the overrides."""
        consumer = self.consumer
        self.orig_fetch_max_wait_time = consumer.fetch_max_wait_time
        self.orig_fetch_min_bytes = consumer.fetch_min_bytes
        if not self.block:
            # Non-blocking: return immediately even when no data is waiting.
            consumer.fetch_min_bytes = 0
        else:
            # Blocking: wait up to self.timeout ms for at least one byte.
            consumer.fetch_min_bytes = 1
            consumer.fetch_max_wait_time = self.timeout
    def __exit__(self, type, value, traceback):
        """Restore the fetch settings saved in __enter__."""
        consumer = self.consumer
        consumer.fetch_max_wait_time = self.orig_fetch_max_wait_time
        consumer.fetch_min_bytes = self.orig_fetch_min_bytes
class SimpleConsumer(Consumer):
    """
    A simple consumer implementation that consumes all/specified partitions
    for a topic

    Arguments:
        client: a connected KafkaClient
        group: a name for this consumer, used for offset storage and must be unique
        topic: the topic to consume

    Keyword Arguments:
        partitions: An optional list of partitions to consume the data from
        auto_commit: default True. Whether or not to auto commit the offsets
        auto_commit_every_n: default 100. How many messages to consume
            before a commit
        auto_commit_every_t: default 5000. How much time (in milliseconds) to
            wait before commit
        fetch_size_bytes: number of bytes to request in a FetchRequest
        buffer_size: default 4K. Initial number of bytes to tell kafka we
            have available. This will double as needed.
        max_buffer_size: default 16K. Max number of bytes to tell kafka we have
            available. None means no limit.
        iter_timeout: default None. How much time (in seconds) to wait for a
            message in the iterator before exiting. None means no
            timeout, so it will wait forever.

    Auto commit details:
        If both auto_commit_every_n and auto_commit_every_t are set, they will
        reset one another when one is triggered. These triggers simply call the
        commit method on this class. A manual call to commit will also reset
        these triggers
    """
    def __init__(self, client, group, topic, auto_commit=True, partitions=None,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL,
                 fetch_size_bytes=FETCH_MIN_BYTES,
                 buffer_size=FETCH_BUFFER_SIZE_BYTES,
                 max_buffer_size=MAX_FETCH_BUFFER_SIZE_BYTES,
                 iter_timeout=None):
        super(SimpleConsumer, self).__init__(
            client, group, topic,
            partitions=partitions,
            auto_commit=auto_commit,
            auto_commit_every_n=auto_commit_every_n,
            auto_commit_every_t=auto_commit_every_t)
        if max_buffer_size is not None and buffer_size > max_buffer_size:
            raise ValueError("buffer_size (%d) is greater than "
                             "max_buffer_size (%d)" %
                             (buffer_size, max_buffer_size))
        self.buffer_size = buffer_size
        self.max_buffer_size = max_buffer_size
        self.partition_info = False  # Do not return partition info in msgs
        self.fetch_max_wait_time = FETCH_MAX_WAIT_TIME
        self.fetch_min_bytes = fetch_size_bytes
        # fetch_offsets tracks where the next fetch starts; it can run ahead
        # of self.offsets, which only advances as messages are consumed.
        self.fetch_offsets = self.offsets.copy()
        self.iter_timeout = iter_timeout
        self.queue = Queue()
    def __repr__(self):
        """Debug representation naming the group, topic and partitions."""
        return '<SimpleConsumer group=%s, topic=%s, partitions=%s>' % \
            (self.group, self.topic, str(self.offsets.keys()))
    def provide_partition_info(self):
        """
        Indicates that partition info must be returned by the consumer
        """
        self.partition_info = True
    def seek(self, offset, whence):
        """
        Alter the current offset in the consumer, similar to fseek

        Arguments:
            offset: how much to modify the offset
            whence: where to modify it from

                * 0 is relative to the earliest available offset (head)
                * 1 is relative to the current offset
                * 2 is relative to the latest known offset (tail)
        """
        if whence == 1:  # relative to current position
            for partition, _offset in self.offsets.items():
                self.offsets[partition] = _offset + offset
        elif whence in (0, 2):  # relative to beginning or end
            # divide the request offset by number of partitions,
            # distribute the remainder evenly
            (delta, rem) = divmod(offset, len(self.offsets))
            deltas = {}
            for partition, r in izip_longest(self.offsets.keys(),
                                             repeat(1, rem), fillvalue=0):
                deltas[partition] = delta + r
            reqs = []
            for partition in self.offsets.keys():
                if whence == 0:
                    # -2 asks the broker for the earliest available offset.
                    reqs.append(OffsetRequest(self.topic, partition, -2, 1))
                elif whence == 2:
                    # -1 asks the broker for the latest known offset.
                    reqs.append(OffsetRequest(self.topic, partition, -1, 1))
                else:
                    pass
            resps = self.client.send_offset_request(reqs)
            for resp in resps:
                self.offsets[resp.partition] = \
                    resp.offsets[0] + deltas[resp.partition]
        else:
            raise ValueError("Unexpected value for `whence`, %d" % whence)
        # Reset queue and fetch offsets since they are invalid
        self.fetch_offsets = self.offsets.copy()
        if self.auto_commit:
            self.count_since_commit += 1
            self.commit()
        self.queue = Queue()
    def get_messages(self, count=1, block=True, timeout=0.1):
        """
        Fetch the specified number of messages

        Keyword Arguments:
            count: Indicates the maximum number of messages to be fetched
            block: If True, the API will block till some messages are fetched.
            timeout: If block is True, the function will block for the specified
                     time (in seconds) until count messages is fetched. If None,
                     it will block forever.
        """
        messages = []
        if timeout is not None:
            max_time = time.time() + timeout
        new_offsets = {}
        while count > 0 and (timeout is None or timeout > 0):
            # update_offset=False: offsets are applied in one batch below so a
            # partial fetch does not advance self.offsets prematurely.
            result = self._get_message(block, timeout, get_partition_info=True,
                                       update_offset=False)
            if result:
                partition, message = result
                if self.partition_info:
                    messages.append(result)
                else:
                    messages.append(message)
                new_offsets[partition] = message.offset + 1
                count -= 1
            else:
                # Ran out of messages for the last request.
                if not block:
                    # If we're not blocking, break.
                    break
                # If we have a timeout, reduce it to the
                # appropriate value
                if timeout is not None:
                    timeout = max_time - time.time()
        # Update and commit offsets if necessary
        self.offsets.update(new_offsets)
        self.count_since_commit += len(messages)
        self._auto_commit()
        return messages
    def get_message(self, block=True, timeout=0.1, get_partition_info=None):
        """Fetch a single message; see _get_message for the semantics."""
        return self._get_message(block, timeout, get_partition_info)
    def _get_message(self, block=True, timeout=0.1, get_partition_info=None,
                     update_offset=True):
        """
        If no messages can be fetched, returns None.
        If get_partition_info is None, it defaults to self.partition_info
        If get_partition_info is True, returns (partition, message)
        If get_partition_info is False, returns message
        """
        if self.queue.empty():
            # We're out of messages, go grab some more.
            with FetchContext(self, block, timeout):
                self._fetch()
        try:
            partition, message = self.queue.get_nowait()
            if update_offset:
                # Update partition offset
                self.offsets[partition] = message.offset + 1
                # Count, check and commit messages if necessary
                self.count_since_commit += 1
                self._auto_commit()
            if get_partition_info is None:
                get_partition_info = self.partition_info
            if get_partition_info:
                return partition, message
            else:
                return message
        except Empty:
            return None
    def __iter__(self):
        """Yield messages forever (or until iter_timeout expires, if set)."""
        if self.iter_timeout is None:
            timeout = ITER_TIMEOUT_SECONDS
        else:
            timeout = self.iter_timeout
        while True:
            message = self.get_message(True, timeout)
            if message:
                yield message
            elif self.iter_timeout is None:
                # We did not receive any message yet but we don't have a
                # timeout, so give up the CPU for a while before trying again
                time.sleep(NO_MESSAGES_WAIT_TIME_SECONDS)
            else:
                # Timed out waiting for a message
                break
    def _fetch(self):
        """Issue FetchRequests for all tracked partitions and enqueue the
        returned messages, doubling a partition's buffer size and retrying
        when the broker reports the fetch size was too small."""
        # Create fetch request payloads for all the partitions
        partitions = dict((p, self.buffer_size)
                          for p in self.fetch_offsets.keys())
        while partitions:
            requests = []
            for partition, buffer_size in six.iteritems(partitions):
                requests.append(FetchRequest(self.topic, partition,
                                             self.fetch_offsets[partition],
                                             buffer_size))
            # Send request
            responses = self.client.send_fetch_request(
                requests,
                max_wait_time=int(self.fetch_max_wait_time),
                min_bytes=self.fetch_min_bytes)
            retry_partitions = {}
            for resp in responses:
                partition = resp.partition
                buffer_size = partitions[partition]
                try:
                    for message in resp.messages:
                        # Put the message in our queue
                        self.queue.put((partition, message))
                        self.fetch_offsets[partition] = message.offset + 1
                except ConsumerFetchSizeTooSmall:
                    if (self.max_buffer_size is not None and
                            buffer_size == self.max_buffer_size):
                        log.error("Max fetch size %d too small",
                                  self.max_buffer_size)
                        raise
                    if self.max_buffer_size is None:
                        buffer_size *= 2
                    else:
                        buffer_size = min(buffer_size * 2,
                                          self.max_buffer_size)
                    log.warn("Fetch size too small, increase to %d (2x) "
                             "and retry", buffer_size)
                    retry_partitions[partition] = buffer_size
                except ConsumerNoMoreData as e:
                    log.debug("Iteration was ended by %r", e)
                except StopIteration:
                    # Stop iterating through this partition
                    log.debug("Done iterating over partition %s" % partition)
            partitions = retry_partitions
| apache-2.0 |
hetica/webeni | main/views.py | 1 | 2128 | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.utils.encoding import smart_unicode
import sys, os, locale
from datetime import datetime, timedelta
from main.forms import StagiaireForms
# Create your views here.
def home(request):
    """Render the home page with Cisco academy statistics and a search form.

    Reads the pytacad data files to display the last-update timestamp, the
    number of classes and the number of students, and handles the search
    form (POST) for either a student (choice "1") or a class (choice "2").
    Every local variable is exposed to the template via ``locals()``, so
    seemingly unused names (``entete``, ``mesg``, ``envoi``, ``to_old``...)
    are in fact template context.
    """
    # CISCO
    # Make the bundled helper library importable.
    _path = os.path.dirname(os.path.dirname(__file__)) + "/static/lib"
    sys.path.append(_path)
    import unicodedata
    import cisco_clt
    # Location of the pytacad data files.
    ciscofic = "/var/local/pytacad"
    ciscoliststags = ciscofic + "/liste_stagiaires"
    ciscorepclasses = ciscofic + "/classes"
    # Last modification date of the "liste_stagiaires" file, in French.
    locale.setlocale(locale.LC_ALL, "fr_FR.UTF-8")
    sm_fic = os.stat(ciscoliststags).st_mtime
    # Bug fix: use %M (minutes); the previous %m is the month number.
    last_update = datetime.fromtimestamp(sm_fic).strftime('%A %d %B %Y à %Hh%M')
    # Number of classes.
    nb_classes = sum(1 for _ in os.listdir(ciscorepclasses))
    # Number of students.
    nb_stags = sum(1 for _ in open(ciscoliststags))
    # Is the last update more than 24 hours old?  (Used by the template.)
    one_days_ago = datetime.now() - timedelta(days=1)
    filetime = datetime.fromtimestamp(os.path.getctime(ciscoliststags))
    to_old = one_days_ago > filetime
    # Student search form.
    if request.method == 'POST':
        form = StagiaireForms(request.POST)
        if form.is_valid():
            choisir = form.cleaned_data['choisir']
            chercher = form.cleaned_data['chercher']
            if choisir == "1":
                # Choice "1": search for a student.
                search = cisco_clt.find_user(chercher)
                entete = str(search[0]) + " occurences trouvées"
                if search[0] >= 1:
                    # Display the details of every student found (the former
                    # "== 1" and "> 1" branches were byte-identical).
                    mesg = cisco_clt.afficher_stags(search[1])
            if choisir == "2":
                # Choice "2": search for a class.
                mesg = cisco_clt.find_classe(chercher)
            envoi = "stag_post"
    else:
        form = StagiaireForms()
    return render(request, 'main/home.html', locals())
def cisco(request):
    """Render the static Cisco information page."""
    return render(request, 'main/cisco.html')
| lgpl-3.0 |
deanhiller/databus | webapp/play1.3.x/python/Lib/Queue.py | 8 | 8818 | """A multi-producer, multi-consumer queue."""
from time import time as _time
from collections import deque
import heapq
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
    "Exception raised by Queue.get(block=0)/get_nowait()."
    # Raised when a non-blocking get finds no item available.
    pass
class Full(Exception):
    "Exception raised by Queue.put(block=0)/put_nowait()."
    # Raised when a non-blocking put finds no free slot available.
    pass
class Queue:
    """Create a queue object with a given maximum size.

    If maxsize is <= 0, the queue size is infinite.
    """
    def __init__(self, maxsize=0):
        try:
            import threading
        except ImportError:
            # No thread support on this platform; the stub module provides
            # no-op locks so the locking calls below still work.
            import dummy_threading as threading
        self.maxsize = maxsize
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = threading.Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = threading.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = threading.Condition(self.mutex)
        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = threading.Condition(self.mutex)
        self.unfinished_tasks = 0
    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by Queue consumer threads.  For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items
        have been processed (meaning that a task_done() call was received
        for every item that had been put() into the queue).

        Raises a ValueError if called more times than there were items
        placed in the queue.
        """
        self.all_tasks_done.acquire()
        try:
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                if unfinished < 0:
                    raise ValueError('task_done() called too many times')
                # Count reached zero: wake up every thread blocked in join().
                self.all_tasks_done.notify_all()
            self.unfinished_tasks = unfinished
        finally:
            self.all_tasks_done.release()
    def join(self):
        """Blocks until all items in the Queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer thread calls task_done()
        to indicate the item was retrieved and all work on it is complete.

        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        self.all_tasks_done.acquire()
        try:
            while self.unfinished_tasks:
                self.all_tasks_done.wait()
        finally:
            self.all_tasks_done.release()
    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""
        self.mutex.acquire()
        n = self._qsize()
        self.mutex.release()
        return n
    def empty(self):
        """Return True if the queue is empty, False otherwise (not reliable!)."""
        self.mutex.acquire()
        n = not self._qsize()
        self.mutex.release()
        return n
    def full(self):
        """Return True if the queue is full, False otherwise (not reliable!)."""
        self.mutex.acquire()
        # Chained comparison: True only when maxsize > 0 AND the queue has
        # reached it (an infinite queue, maxsize <= 0, is never full).
        n = 0 < self.maxsize == self._qsize()
        self.mutex.release()
        return n
    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        """
        self.not_full.acquire()
        try:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() == self.maxsize:
                        raise Full
                elif timeout is None:
                    while self._qsize() == self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                else:
                    # Recompute the remaining wait after each wakeup, since
                    # Condition.wait can return spuriously.
                    endtime = _time() + timeout
                    while self._qsize() == self.maxsize:
                        remaining = endtime - _time()
                        if remaining <= 0.0:
                            raise Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
        finally:
            self.not_full.release()
    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the Full exception.
        """
        return self.put(item, False)
    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        """
        self.not_empty.acquire()
        try:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a positive number")
            else:
                # Same deadline-based wait loop as in put().
                endtime = _time() + timeout
                while not self._qsize():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()
    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available. Otherwise
        raise the Empty exception.
        """
        return self.get(False)
    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held
    # Initialize the queue representation
    def _init(self, maxsize):
        self.queue = deque()
    def _qsize(self, len=len):
        return len(self.queue)
    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)
    # Get an item from the queue
    def _get(self):
        return self.queue.popleft()
class PriorityQueue(Queue):
    '''Variant of Queue that retrieves open entries in priority order (lowest first).

    Entries are typically tuples of the form:  (priority number, data).
    '''
    # The queue representation is a binary heap maintained with heapq.
    def _init(self, maxsize):
        self.queue = []
    def _qsize(self, len=len):
        # 'len=len' binds the builtin as a default argument -- a CPython
        # micro-optimization that avoids a global lookup on each call.
        return len(self.queue)
    def _put(self, item, heappush=heapq.heappush):
        heappush(self.queue, item)
    def _get(self, heappop=heapq.heappop):
        return heappop(self.queue)
class LifoQueue(Queue):
    '''Variant of Queue that retrieves most recently added entries first.'''
    # The queue representation is a plain list used as a stack.
    def _init(self, maxsize):
        self.queue = []
    def _qsize(self, len=len):
        # Builtin bound as a default argument (CPython micro-optimization).
        return len(self.queue)
    def _put(self, item):
        self.queue.append(item)
    def _get(self):
        return self.queue.pop()
| mpl-2.0 |
andela-mfalade/python-pandas-csv-records-analysis | scripts/processor.py | 1 | 5798 | """Initiate file analysis.
This module is used to find the discrepancies between two given files
"""
import argparse
import csv
import logging
import pandas as pd
logger = logging.getLogger(__file__)
logging.basicConfig(level=logging.DEBUG)
# Output CSV files, one per record category produced by the comparison.
# NOTE(review): "mathching" is a typo for "matching"; the names are left
# unchanged because they are referenced throughout this module.
mathching_records_path = 'matching_records.csv'
non_mathching_records_path = 'non_matching_records.csv'
records_diff = "customers_in_chartio_but_not_in_responsys.csv"
no_project_key_path = 'records_with_no_project_key.csv'
no_customer_key_path = 'records_with_no_customer_key.csv'
no_project_status_path = 'records_with_no_project_status.csv'
# Valid grade values expected in the Chartio export.
CHARTIO_GRADES = ['exceeded', 'failed', 'passed', 'ungradeable']
def get_file_df(file_path):
    """Read a CSV file and index its rows by account/project key.

    Args:
        file_path(str): Path to the CSV file to be read.

    Returns:
        dict: Maps "<account_key>-<project_id>" to the corresponding row
            tuple yielded by pandas ``itertuples`` (index included).
    """
    logger.info("Reading CSV file.")
    indexed_rows = {}
    for row in pd.read_csv(file_path).itertuples():
        # Each row is (index, account_key, project_id); the index is unused
        # in the key but kept inside the stored tuple.
        _, account_key, project_id = row
        indexed_rows["{x}-{y}".format(x=account_key, y=project_id)] = row
    return indexed_rows
def write_to_csv(file_path, content):
    """Append a single row to a CSV file.

    Creates the file if it does not exist yet; successive calls keep
    appending, one row per call.

    Args:
        file_path(str): Path to the target CSV file.
        content(list): Field values making up the row to append.

    TODO: Opening and closing the file on every call slows the whole run;
    keeping the output files open for the duration of the process would be
    faster.
    """
    with open(file_path, 'a') as out:
        csv.writer(out).writerow(content)
def get_unique_key(project_status, project_id, customer_id):
    """Return unique key from given record.

    All three fields arrive as strings; pandas renders missing values as
    'nan'.  Records with a missing field are appended to the corresponding
    "records_with_no_*" CSV and rejected.

    Returns:
        unique_key(str): "<customer_key>-<project_key>" for a fully valid
            record, False when the record was routed to an error file.
            NOTE(review): a status outside CHARTIO_GRADES that is not 'nan'
            (with both keys present) falls through every branch and returns
            None implicitly, silently dropping the record -- confirm this is
            intended.
    """
    # NOTE(review): str.replace removes every '.0' substring, not only a
    # trailing float artifact (e.g. '10.01' -> '101'); confirm keys can
    # never contain an interior '.0'.
    project_key = project_id.replace('.0', '')
    customer_key = customer_id.replace('.0', '')
    record = [project_status, project_key, customer_key]
    invalid_result = project_status not in CHARTIO_GRADES
    invalid_project_key = project_key == 'nan'
    invalid_customer_key = customer_key == 'nan'
    if invalid_result or invalid_project_key or invalid_customer_key:
        if invalid_result and project_status == 'nan':
            record[0] = None
            write_to_csv(no_project_status_path, record)
            return False
        if project_key == 'nan':
            record[1] = None
            write_to_csv(no_project_key_path, record)
            return False
        elif customer_key == 'nan':
            record[2] = None
            write_to_csv(no_customer_key_path, record)
            return False
    else:
        unique_key = "{x}-{y}".format(x=customer_key, y=project_key)
        return unique_key
def translate_result(student_grade):
    """Map a Chartio grade onto its equivalent statuses in the other file.

    Args:
        student_grade(str): One of the grades listed in ``CHARTIO_GRADES``.

    Returns:
        list: The status strings that the other file may use for this grade.

    Raises:
        KeyError: If ``student_grade`` is not a known grade.
    """
    equivalents = {
        'ungradeable': ['INCOMPLETE', 'UNGRADED', 'SUBMITTED'],
        'failed': ['INCOMPLETE'],
        'passed': ['PASSED'],
        'exceeded': ['DISTINCTION'],
    }
    return equivalents[student_grade]
def check_status(unique_key, project_status, keys_dict):
    """Compare one record's status against the master dict and log the result.

    The record is appended to the matching or non-matching CSV depending on
    whether the statuses agree, or to the "in Chartio but not in Responsys"
    CSV when the key is absent from ``keys_dict``.
    """
    accepted_statuses = translate_result(project_status)
    try:
        master_record = keys_dict[unique_key]
        master_status = master_record[3]
        # Columns 1..3 of the master record plus the incoming status.
        row = list(master_record)[1:4]
        row.append(project_status)
        if master_status in accepted_statuses:
            write_to_csv(mathching_records_path, row)
        else:
            write_to_csv(non_mathching_records_path, row)
    except (KeyError, ValueError, TypeError):
        # Key not present in the master data: record the discrepancy.
        key_parts = unique_key.split('-')
        write_to_csv(records_diff, [key_parts[0], key_parts[1], project_status])
def compare_keys_with_files(file_path, keys_dict):
    """Walk a CSV file and check every record against the master dict.

    Each row is expected to be (status, project_key, customer_key); rows
    that yield a valid unique key are compared via ``check_status``.
    """
    for row in pd.read_csv(file_path).itertuples():
        _, project_status, project_key, customer_key = row
        status_text = str(project_status)
        key = get_unique_key(status_text, str(project_key), str(customer_key))
        if key:
            check_status(key, status_text, keys_dict)
def main():
    """Entry point: parse the two CSV paths and run the comparison.

    This is the master script that initiates all the other scripts.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('path1', help="Path to first CSV file.")
    parser.add_argument('path2', help="Path to second CSV file.")
    parsed = parser.parse_args()
    master_keys = get_file_df(parsed.path1)
    compare_keys_with_files(parsed.path2, master_keys)


if __name__ == '__main__':
    main()
| mit |
Ichag/odoo | addons/account/report/account_print_overdue.py | 380 | 3907 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from openerp.osv import osv
class Overdue(report_sxw.rml_parse):
    """RML parser for the partner "overdue payments" report.

    For every selected partner it computes the total amount due, the total
    already paid and the matured balance (lines past their maturity date),
    and exposes them -- together with a few helpers -- to the template via
    ``localcontext``.
    """
    def __init__(self, cr, uid, name, context):
        super(Overdue, self).__init__(cr, uid, name, context=context)
        ids = context.get('active_ids')
        partner_obj = self.pool['res.partner']
        docs = partner_obj.browse(cr, uid, ids, context)
        due = {}
        paid = {}
        mat = {}
        for partner in docs:
            # Total due: receivable debits count positively, payable credits
            # negatively (the 'and/or' chains emulate a conditional).
            due[partner.id] = reduce(lambda x, y: x + ((y['account_id']['type'] == 'receivable' and y['debit'] or 0) or (y['account_id']['type'] == 'payable' and y['credit'] * -1 or 0)), self._lines_get(partner), 0)
            # Total paid: receivable credits count positively, payable debits
            # negatively.
            paid[partner.id] = reduce(lambda x, y: x + ((y['account_id']['type'] == 'receivable' and y['credit'] or 0) or (y['account_id']['type'] == 'payable' and y['debit'] * -1 or 0)), self._lines_get(partner), 0)
            # Matured balance: debit - credit summed over lines whose
            # maturity date is already in the past.
            mat[partner.id] = reduce(lambda x, y: x + (y['debit'] - y['credit']), filter(lambda x: x['date_maturity'] < time.strftime('%Y-%m-%d'), self._lines_get(partner)), 0)
        addresses = self.pool['res.partner']._address_display(cr, uid, ids, None, None)
        self.localcontext.update({
            'docs': docs,
            'time': time,
            'getLines': self._lines_get,
            'tel_get': self._tel_get,
            'message': self._message,
            'due': due,
            'paid': paid,
            'mat': mat,
            'addresses': addresses
        })
        self.context = context
    def _tel_get(self,partner):
        """Return the phone number of the partner's invoice address, falling
        back to the partner's own phone number (False when unavailable)."""
        if not partner:
            return False
        res_partner = self.pool['res.partner']
        addresses = res_partner.address_get(self.cr, self.uid, [partner.id], ['invoice'])
        adr_id = addresses and addresses['invoice'] or False
        if adr_id:
            adr=res_partner.read(self.cr, self.uid, [adr_id])[0]
            return adr['phone']
        else:
            return partner.phone or False
        return False  # NOTE(review): unreachable -- both branches above return.
    def _lines_get(self, partner):
        """Return the partner's open (posted, unreconciled) receivable and
        payable move lines."""
        moveline_obj = self.pool['account.move.line']
        movelines = moveline_obj.search(self.cr, self.uid,
            [('partner_id', '=', partner.id),
                ('account_id.type', 'in', ['receivable', 'payable']),
                ('state', '<>', 'draft'), ('reconcile_id', '=', False)])
        movelines = moveline_obj.browse(self.cr, self.uid, movelines)
        return movelines
    def _message(self, obj, company):
        """Return the company's overdue message, translated into the
        partner's language and split into lines for the template."""
        company_pool = self.pool['res.company']
        message = company_pool.browse(self.cr, self.uid, company.id, {'lang':obj.lang}).overdue_msg
        return message.split('\n')
class report_overdue(osv.AbstractModel):
    """Registers the QWeb report 'account.report_overdue', rendered through
    the legacy ``Overdue`` parser above."""
    _name = 'report.account.report_overdue'
    _inherit = 'report.abstract_report'
    _template = 'account.report_overdue'
    _wrapped_report_class = Overdue
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
toshywoshy/ansible | lib/ansible/modules/clustering/k8s/k8s_auth.py | 26 | 11386 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, KubeVirt Team <@kubevirt>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata, consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
module: k8s_auth
short_description: Authenticate to Kubernetes clusters which require an explicit login step
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
description:
- "This module handles authenticating to Kubernetes clusters requiring I(explicit) authentication procedures,
meaning ones where a client logs in (obtains an authentication token), performs API operations using said
token and then logs out (revokes the token). An example of a Kubernetes distribution requiring this module
is OpenShift."
- "On the other hand a popular configuration for username+password authentication is one utilizing HTTP Basic
Auth, which does not involve any additional login/logout steps (instead login credentials can be attached
to each and every API call performed) and as such is handled directly by the C(k8s) module (and other
resource–specific modules) by utilizing the C(host), C(username) and C(password) parameters. Please
consult your preferred module's documentation for more details."
options:
state:
description:
- If set to I(present) connect to the API server using the URL specified in C(host) and attempt to log in.
- If set to I(absent) attempt to log out by revoking the authentication token specified in C(api_key).
default: present
choices:
- present
- absent
host:
description:
- Provide a URL for accessing the API server.
required: true
username:
description:
- Provide a username for authenticating with the API server.
password:
description:
- Provide a password for authenticating with the API server.
ca_cert:
description:
- "Path to a CA certificate file used to verify connection to the API server. The full certificate chain
must be provided to avoid certificate validation errors."
aliases: [ ssl_ca_cert ]
validate_certs:
description:
- "Whether or not to verify the API server's SSL certificates."
type: bool
default: true
aliases: [ verify_ssl ]
api_key:
description:
- When C(state) is set to I(absent), this specifies the token to revoke.
requirements:
- python >= 2.7
- urllib3
- requests
- requests-oauthlib
'''
EXAMPLES = '''
- hosts: localhost
module_defaults:
group/k8s:
host: https://k8s.example.com/
ca_cert: ca.pem
tasks:
- block:
# It's good practice to store login credentials in a secure vault and not
# directly in playbooks.
- include_vars: k8s_passwords.yml
- name: Log in (obtain access token)
k8s_auth:
username: admin
password: "{{ k8s_admin_password }}"
register: k8s_auth_results
# Previous task provides the token/api_key, while all other parameters
# are taken from module_defaults
- name: Get a list of all pods from any namespace
k8s_info:
api_key: "{{ k8s_auth_results.k8s_auth.api_key }}"
kind: Pod
register: pod_list
always:
- name: If login succeeded, try to log out (revoke access token)
when: k8s_auth_results.k8s_auth.api_key is defined
k8s_auth:
state: absent
api_key: "{{ k8s_auth_results.k8s_auth.api_key }}"
'''
# Returned value names need to match k8s modules parameter names, to make it
# easy to pass returned values of k8s_auth to other k8s modules.
# Discussion: https://github.com/ansible/ansible/pull/50807#discussion_r248827899
RETURN = '''
k8s_auth:
description: Kubernetes authentication facts.
returned: success
type: complex
contains:
api_key:
description: Authentication token.
returned: success
type: str
host:
description: URL for accessing the API server.
returned: success
type: str
ca_cert:
description: Path to a CA certificate file used to verify connection to the API server.
returned: success
type: str
validate_certs:
description: "Whether or not to verify the API server's SSL certificates."
returned: success
type: bool
username:
description: Username for authenticating with the API server.
returned: success
type: str
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib_parse import urlparse, parse_qs, urlencode
# 3rd party imports
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
from requests_oauthlib import OAuth2Session
HAS_REQUESTS_OAUTH = True
except ImportError:
HAS_REQUESTS_OAUTH = False
try:
from urllib3.util import make_headers
HAS_URLLIB3 = True
except ImportError:
HAS_URLLIB3 = False
# Argument spec handed to AnsibleModule below; keep in sync with the
# DOCUMENTATION options above (aliases included).
K8S_AUTH_ARG_SPEC = {
    'state': {
        # 'present' performs a login, 'absent' revokes an existing token.
        'default': 'present',
        'choices': ['present', 'absent'],
    },
    'host': {'required': True},
    'username': {},
    # no_log keeps secrets out of Ansible's output/logs.
    'password': {'no_log': True},
    'ca_cert': {'type': 'path', 'aliases': ['ssl_ca_cert']},
    'validate_certs': {
        'type': 'bool',
        'default': True,
        'aliases': ['verify_ssl']
    },
    'api_key': {'no_log': True},
}
class KubernetesAuthModule(AnsibleModule):
    """Ansible module that obtains or revokes an OpenShift OAuth access token.

    state=present: log in with username/password and return the token;
    state=absent: revoke the token given in api_key.
    """
    def __init__(self):
        AnsibleModule.__init__(
            self,
            argument_spec=K8S_AUTH_ARG_SPEC,
            required_if=[
                ('state', 'present', ['username', 'password']),
                ('state', 'absent', ['api_key']),
            ]
        )
        # Fail early with an actionable message if optional deps are missing.
        if not HAS_REQUESTS:
            self.fail("This module requires the python 'requests' package. Try `pip install requests`.")
        if not HAS_REQUESTS_OAUTH:
            self.fail("This module requires the python 'requests-oauthlib' package. Try `pip install requests-oauthlib`.")
        if not HAS_URLLIB3:
            self.fail("This module requires the python 'urllib3' package. Try `pip install urllib3`.")
    def execute_module(self):
        """Run the requested action and exit via exit_json/fail_json."""
        state = self.params.get('state')
        verify_ssl = self.params.get('validate_certs')
        ssl_ca_cert = self.params.get('ca_cert')
        self.auth_username = self.params.get('username')
        self.auth_password = self.params.get('password')
        self.auth_api_key = self.params.get('api_key')
        self.con_host = self.params.get('host')
        # python-requests takes either a bool or a path to a ca file as the 'verify' param
        if verify_ssl and ssl_ca_cert:
            self.con_verify_ca = ssl_ca_cert  # path
        else:
            self.con_verify_ca = verify_ssl  # bool
        # Get needed info to access authorization APIs
        self.openshift_discover()
        if state == 'present':
            new_api_key = self.openshift_login()
            # Result keys deliberately mirror k8s module parameter names (see
            # the comment above RETURN) so they can be fed straight back in.
            result = dict(
                host=self.con_host,
                validate_certs=verify_ssl,
                ca_cert=ssl_ca_cert,
                api_key=new_api_key,
                username=self.auth_username,
            )
        else:
            self.openshift_logout()
            result = dict()
        self.exit_json(changed=False, k8s_auth=result)
    def openshift_discover(self):
        """Fetch the OAuth metadata document and record the auth/token endpoints."""
        url = '{0}/.well-known/oauth-authorization-server'.format(self.con_host)
        ret = requests.get(url, verify=self.con_verify_ca)
        if ret.status_code != 200:
            self.fail_request("Couldn't find OpenShift's OAuth API", method='GET', url=url,
                              reason=ret.reason, status_code=ret.status_code)
        try:
            oauth_info = ret.json()
            self.openshift_auth_endpoint = oauth_info['authorization_endpoint']
            self.openshift_token_endpoint = oauth_info['token_endpoint']
        except Exception as e:
            self.fail_json(msg="Something went wrong discovering OpenShift OAuth details.",
                           exception=traceback.format_exc())
    def openshift_login(self):
        """Run the 'openshift-challenging-client' OAuth flow; return the access token."""
        os_oauth = OAuth2Session(client_id='openshift-challenging-client')
        authorization_url, state = os_oauth.authorization_url(self.openshift_auth_endpoint,
                                                              state="1", code_challenge_method='S256')
        auth_headers = make_headers(basic_auth='{0}:{1}'.format(self.auth_username, self.auth_password))
        # Request authorization code using basic auth credentials
        ret = os_oauth.get(
            authorization_url,
            headers={'X-Csrf-Token': state, 'authorization': auth_headers.get('authorization')},
            verify=self.con_verify_ca,
            allow_redirects=False
        )
        # A 302 redirect carries the authorization code; anything else is a failure.
        if ret.status_code != 302:
            self.fail_request("Authorization failed.", method='GET', url=authorization_url,
                              reason=ret.reason, status_code=ret.status_code)
        # In here we have `code` and `state`, I think `code` is the important one
        qwargs = {}
        for k, v in parse_qs(urlparse(ret.headers['Location']).query).items():
            qwargs[k] = v[0]
        qwargs['grant_type'] = 'authorization_code'
        # Using authorization code given to us in the Location header of the previous request, request a token
        ret = os_oauth.post(
            self.openshift_token_endpoint,
            headers={
                'Accept': 'application/json',
                'Content-Type': 'application/x-www-form-urlencoded',
                # This is just base64 encoded 'openshift-challenging-client:'
                'Authorization': 'Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo='
            },
            data=urlencode(qwargs),
            verify=self.con_verify_ca
        )
        if ret.status_code != 200:
            self.fail_request("Failed to obtain an authorization token.", method='POST',
                              url=self.openshift_token_endpoint,
                              reason=ret.reason, status_code=ret.status_code)
        return ret.json()['access_token']
    def openshift_logout(self):
        """Revoke auth_api_key via the oauthaccesstokens API (best effort)."""
        url = '{0}/apis/oauth.openshift.io/v1/oauthaccesstokens/{1}'.format(self.con_host, self.auth_api_key)
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer {0}'.format(self.auth_api_key)
        }
        # NOTE: local name shadows the stdlib 'json' module name (not imported
        # in this file, so harmless here).
        json = {
            "apiVersion": "oauth.openshift.io/v1",
            "kind": "DeleteOptions"
        }
        ret = requests.delete(url, headers=headers, json=json, verify=self.con_verify_ca)
        # Ignore errors, the token will time out eventually anyway
    def fail(self, msg=None):
        """Abort the module run with a plain message."""
        self.fail_json(msg=msg)
    def fail_request(self, msg, **kwargs):
        """Abort, attaching HTTP request details as req_-prefixed result keys."""
        req_info = {}
        for k, v in kwargs.items():
            req_info['req_' + k] = v
        self.fail_json(msg=msg, **req_info)
def main():
    """Entry point: build the module, run it, and surface any crash via fail_json."""
    auth_module = KubernetesAuthModule()
    try:
        auth_module.execute_module()
    except Exception as exc:
        auth_module.fail_json(msg=str(exc), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
| gpl-3.0 |
AMObox/teammaniac | plugin.program.maniac/plugintools.py | 37 | 16974 | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Plugin Tools v1.0.8
#---------------------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Based on code from youtube, parsedom and pelisalacarta addons
# Author:
# Jesús
# tvalacarta@gmail.com
# http://www.mimediacenter.info/plugintools
#---------------------------------------------------------------------------
# Changelog:
# 1.0.0
# - First release
# 1.0.1
# - If find_single_match can't find anything, it returns an empty string
# - Remove addon id from this module, so it remains clean
# 1.0.2
# - Added parameter on "add_item" to say that item is playable
# 1.0.3
# - Added direct play
# - Fixed bug when video isPlayable=True
# 1.0.4
# - Added get_temp_path, get_runtime_path, get_data_path
# - Added get_setting, set_setting, open_settings_dialog and get_localized_string
# - Added keyboard_input
# - Added message
# 1.0.5
# - Added read_body_and_headers for advanced http handling
# - Added show_picture for picture addons support
# - Added optional parameters "title" and "hidden" to keyboard_input
# 1.0.6
# - Added fanart, show, episode and infolabels to add_item
# 1.0.7
# - Added set_view function
# 1.0.8
# - Added selector
#---------------------------------------------------------------------------
import xbmc,xbmcplugin,xbmcaddon,xbmcgui,urllib,urllib2,re,sys,os,time,socket,gzip
from StringIO import StringIO
# Module-wide debug flags plus the view-mode constants accepted by set_view().
module_log_enabled=False; http_debug_log_enabled=False; LIST="list"; THUMBNAIL="thumbnail"; MOVIES="movies"; TV_SHOWS="tvshows"; SEASONS="seasons"; EPISODES="episodes"; OTHER="other";
# Suggested view codes for each type from different skins (initial list thanks to xbmcswift2 library)
# Maps view-mode name -> {skin id -> Container.SetViewMode code}.
ALL_VIEW_CODES={
    'list': {
        'skin.confluence': 50, # List
        'skin.aeon.nox': 50, # List
        'skin.droid': 50, # List
        'skin.quartz': 50, # List
        'skin.re-touched': 50, # List
    },
    'thumbnail': {
        'skin.confluence': 500, # Thumbnail
        'skin.aeon.nox': 500, # Wall
        'skin.droid': 51, # Big icons
        'skin.quartz': 51, # Big icons
        'skin.re-touched': 500, #Thumbnail
    },
    'movies': {
        'skin.confluence': 500, # Thumbnail 515, # Media Info 3
        'skin.aeon.nox': 500, # Wall
        'skin.droid': 51, # Big icons
        'skin.quartz': 52, # Media info
        'skin.re-touched': 500, #Thumbnail
    },
    'tvshows': {
        'skin.confluence': 500, # Thumbnail 515, # Media Info 3
        'skin.aeon.nox': 500, # Wall
        'skin.droid': 51, # Big icons
        'skin.quartz': 52, # Media info
        'skin.re-touched': 500, #Thumbnail
    },
    'seasons': {
        'skin.confluence': 50, # List
        'skin.aeon.nox': 50, # List
        'skin.droid': 50, # List
        'skin.quartz': 52, # Media info
        'skin.re-touched': 50, # List
    },
    'episodes': {
        'skin.confluence': 504, # Media Info
        'skin.aeon.nox': 518, # Infopanel
        'skin.droid': 50, # List
        'skin.quartz': 52, # Media info
        'skin.re-touched': 550, # Wide
    },
}
def log(message): xbmc.log(message) # Write something on XBMC log
def _log(message):
    """Write an internal plugintools trace line to the XBMC log, when enabled."""
    if not module_log_enabled:
        return
    xbmc.log("plugintools." + message)
def get_params(): # Parse XBMC params - based on script.module.parsedom addon
    """Parse the plugin invocation querystring (sys.argv[2]) into a dict.

    Keys without an '=' map to ""; values are URL-unquoted with unquote_plus.
    """
    _log("get_params"); param_string=sys.argv[2]; _log("get_params "+str(param_string)); commands={}
    if param_string:
        # Everything after the first '?' is treated as the querystring.
        split_commands=param_string[param_string.find('?') + 1:].split('&')
        for command in split_commands:
            _log("get_params command="+str(command))
            if len(command) > 0:
                if "=" in command: split_command=command.split('='); key=split_command[0]; value=urllib.unquote_plus(split_command[1]); commands[key]=value
                else: commands[command]=""
    _log("get_params "+repr(commands))
    return commands
# Fetch text content from an URL
def read(url):
    """Download *url* and return the response body as a string."""
    _log("read "+url)
    handle = urllib2.urlopen(url)
    body = handle.read()
    handle.close()
    return body
def read_body_and_headers(url,post=None,headers=[],follow_redirects=False,timeout=None):
    """Advanced HTTP fetch with cookie-jar support.

    Returns (body, headers) where headers is a list of [name, value] pairs.
    POST is used when *post* is not None, GET otherwise.
    NOTE(review): headers=[] is a mutable default shared across calls — the
    default User-Agent appended below persists into later calls; confirm intended.
    """
    _log("read_body_and_headers "+url)
    if post is not None: _log("read_body_and_headers post="+post)
    if len(headers)==0: headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:18.0) Gecko/20100101 Firefox/18.0"])
    # Start cookie lib
    ficherocookies=os.path.join(get_data_path(),'cookies.dat'); _log("read_body_and_headers cookies_file="+ficherocookies); cj=None; ClientCookie=None; cookielib=None
    try: _log("read_body_and_headers importing cookielib"); import cookielib # Let's see if cookielib is available
    except ImportError:
        _log("read_body_and_headers cookielib no disponible") # If importing cookielib fails, let's try ClientCookie
        try: _log("read_body_and_headers importing ClientCookie"); import ClientCookie
        except ImportError: _log("read_body_and_headers ClientCookie not available"); urlopen=urllib2.urlopen; Request=urllib2.Request # ClientCookie isn't available either
        else: _log("read_body_and_headers ClientCookie available"); urlopen=ClientCookie.urlopen; Request=ClientCookie.Request; cj=ClientCookie.MozillaCookieJar() # imported ClientCookie
    else:
        _log("read_body_and_headers cookielib available"); urlopen=urllib2.urlopen; Request=urllib2.Request; cj=cookielib.MozillaCookieJar() # importing cookielib worked
    # MozillaCookieJar is a subclass of FileCookieJar with load/save methods.
    if cj is not None: # we successfully imported one of the two cookie handling modules
        _log("read_body_and_headers Cookies enabled")
        if os.path.isfile(ficherocookies):
            _log("read_body_and_headers Reading cookie file")
            try: cj.load(ficherocookies) # if we have a cookie file already saved, load the cookies into the Cookie Jar
            except: _log("read_body_and_headers Wrong cookie file, deleting..."); os.remove(ficherocookies)
        # Install the Cookie Jar in the opener used for fetching URLs.
        if cookielib is not None:
            _log("read_body_and_headers opener using urllib2 (cookielib)")
            # With cookielib, build an HTTPCookieProcessor-based opener and install it in urllib2.
            if not follow_redirects: opener=urllib2.build_opener(urllib2.HTTPHandler(debuglevel=http_debug_log_enabled),urllib2.HTTPCookieProcessor(cj),NoRedirectHandler())
            else: opener=urllib2.build_opener(urllib2.HTTPHandler(debuglevel=http_debug_log_enabled),urllib2.HTTPCookieProcessor(cj))
            urllib2.install_opener(opener)
        else:
            _log("read_body_and_headers opener using ClientCookie")
            # With ClientCookie, build and install the equivalent opener there.
            opener=ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj)); ClientCookie.install_opener(opener)
    # -------------------------------------------------
    # Cookies installed, launch the request
    # -------------------------------------------------
    inicio=time.clock() # timer start
    txheaders={} # dict for the request headers
    if post is None: _log("read_body_and_headers GET request") # build the request
    else: _log("read_body_and_headers POST request")
    _log("read_body_and_headers ---------------------------") # add the headers
    for header in headers: _log("read_body_and_headers header %s=%s" % (str(header[0]),str(header[1]))); txheaders[header[0]]=header[1]
    _log("read_body_and_headers ---------------------------"); req=Request(url,post,txheaders)
    if timeout is None: handle=urlopen(req)
    else:
        # Available from Python 2.6 on: handle = urlopen(req, timeout=timeout). For all versions:
        # NOTE(review): if this try fails, 'handle' is never bound and the
        # code below raises NameError — confirm intended best-effort behaviour.
        try: import socket; deftimeout=socket.getdefaulttimeout(); socket.setdefaulttimeout(timeout); handle=urlopen(req); socket.setdefaulttimeout(deftimeout)
        except:
            import sys
            for line in sys.exc_info(): _log( "%s" % line )
    cj.save(ficherocookies) # update the cookie store
    # Read the data and close, transparently decompressing gzip responses.
    if handle.info().get('Content-Encoding')=='gzip': buf=StringIO(handle.read()); f=gzip.GzipFile(fileobj=buf); data=f.read()
    else: data=handle.read()
    info=handle.info(); _log("read_body_and_headers Response"); returnheaders=[]; _log("read_body_and_headers ---------------------------")
    for header in info: _log("read_body_and_headers "+header+"="+info[header]); returnheaders.append([header,info[header]])
    handle.close(); _log("read_body_and_headers ---------------------------")
    '''
    # Lanza la petición
    try: response = urllib2.urlopen(req)
    # Si falla la repite sustituyendo caracteres especiales
    except:
        req = urllib2.Request(url.replace(" ","%20"))
        # Añade las cabeceras
        for header in headers: req.add_header(header[0],header[1])
        response = urllib2.urlopen(req)
    '''
    # Elapsed time
    fin=time.clock(); _log("read_body_and_headers Downloaded in %d seconds " % (fin-inicio+1)); _log("read_body_and_headers body="+data); return data,returnheaders
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that returns the 3xx response instead of following it."""
    # Surface the redirect response itself, with status/code attached.
    def http_error_302(self,req,fp,code,msg,headers): infourl=urllib.addinfourl(fp,headers,req.get_full_url()); infourl.status=code; infourl.code=code; return infourl
    # All other redirect codes reuse the same no-follow behaviour.
    http_error_300=http_error_302; http_error_301=http_error_302; http_error_303=http_error_302; http_error_307=http_error_302
def find_multiple_matches(text,pattern): _log("find_multiple_matches pattern="+pattern); matches=re.findall(pattern,text,re.DOTALL); return matches # Parse string and extracts multiple matches using regular expressions
def find_single_match(text,pattern):
    """Return the first match of *pattern* in *text*, or "" when nothing matches."""
    _log("find_single_match pattern="+pattern)
    try:
        return re.findall(pattern,text,flags=re.DOTALL)[0]
    except:
        # Any failure (no match, bad pattern) yields the empty string.
        return ""
def add_item(action="",title="",plot="",url="",thumbnail="",fanart="",show="",episode="",extra="",page="",info_labels=None,isPlayable=False,folder=True):
    """Add one entry to the current XBMC directory listing.

    plugin:// URLs are added as-is; playable items get IsPlayable=true so
    XBMC resolves them via play_resolved_url; everything else is re-routed
    through this plugin with the parameters encoded in the item URL.
    """
    _log("add_item action=["+action+"] title=["+title+"] url=["+url+"] thumbnail=["+thumbnail+"] fanart=["+fanart+"] show=["+show+"] episode=["+episode+"] extra=["+extra+"] page=["+page+"] isPlayable=["+str(isPlayable)+"] folder=["+str(folder)+"]")
    listitem=xbmcgui.ListItem(title,iconImage="DefaultVideo.png",thumbnailImage=thumbnail)
    # Default info labels when the caller supplies none.
    if info_labels is None: info_labels={"Title":title,"FileName":title,"Plot":plot}
    listitem.setInfo( "video", info_labels )
    if fanart!="": listitem.setProperty('fanart_image',fanart); xbmcplugin.setPluginFanart(int(sys.argv[1]),fanart)
    if url.startswith("plugin://"): itemurl=url; listitem.setProperty('IsPlayable','true'); xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=itemurl,listitem=listitem,isFolder=folder)
    elif isPlayable: listitem.setProperty("Video","true"); listitem.setProperty('IsPlayable','true'); itemurl='%s?action=%s&title=%s&url=%s&thumbnail=%s&plot=%s&extra=%s&page=%s' % (sys.argv[0],action,urllib.quote_plus(title),urllib.quote_plus(url),urllib.quote_plus(thumbnail),urllib.quote_plus(plot),urllib.quote_plus(extra),urllib.quote_plus(page)); xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=itemurl,listitem=listitem,isFolder=folder)
    else: itemurl='%s?action=%s&title=%s&url=%s&thumbnail=%s&plot=%s&extra=%s&page=%s' % (sys.argv[0],action,urllib.quote_plus(title),urllib.quote_plus(url),urllib.quote_plus(thumbnail),urllib.quote_plus(plot),urllib.quote_plus(extra),urllib.quote_plus(page)); xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=itemurl,listitem=listitem,isFolder=folder)
def close_item_list(): _log("close_item_list"); xbmcplugin.endOfDirectory(handle=int(sys.argv[1]),succeeded=True)
def play_resolved_url(url):
    """Hand a resolved media URL back to XBMC for playback."""
    _log("play_resolved_url ["+url+"]")
    item = xbmcgui.ListItem(path=url)
    item.setProperty('IsPlayable','true')
    return xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
def direct_play(url,title=""):
    """Play *url* immediately: build a one-item video playlist and start the player."""
    _log("direct_play ["+url+"]")
    # Fallback for ListItem constructors that reject the path= keyword
    # (presumably older XBMC builds — TODO confirm).
    try: xlistitem=xbmcgui.ListItem(title,iconImage="DefaultVideo.png",path=url)
    except: xlistitem=xbmcgui.ListItem(title,iconImage="DefaultVideo.png",)
    xlistitem.setInfo("video",{"Title":title}); playlist=xbmc.PlayList(xbmc.PLAYLIST_VIDEO); playlist.clear(); playlist.add(url,xlistitem); player_type=xbmc.PLAYER_CORE_AUTO; xbmcPlayer=xbmc.Player(player_type); xbmcPlayer.play(playlist)
def show_picture(url):
    """Download *url* into the addon data folder and display it via SlideShow."""
    images_dir = os.path.join(get_data_path(), "images")
    if not os.path.exists(images_dir):
        try:
            os.mkdir(images_dir)
        except:
            pass
    target = os.path.join(images_dir, "temp.jpg")
    # Download picture
    urllib.urlretrieve(url, target)
    # Show picture
    xbmc.executebuiltin("SlideShow(" + images_dir + ")")
def get_temp_path(): _log("get_temp_path"); dev = xbmc.translatePath("special://temp/"); _log("get_temp_path ->'"+str(dev)+"'"); return dev
def get_runtime_path(): _log("get_runtime_path"); dev=xbmc.translatePath(__settings__.getAddonInfo('Path')); _log("get_runtime_path ->'"+str(dev)+"'"); return dev
def get_data_path():
    """Return the addon profile (data) directory, creating it when missing.

    The explicit makedirs call is a workaround for XBMC4XBOX, which does not
    create the profile folder itself.
    """
    _log("get_data_path")
    path = xbmc.translatePath(__settings__.getAddonInfo('Profile'))
    if not os.path.exists(path):
        os.makedirs(path)
    _log("get_data_path ->'"+str(path)+"'")
    return path
def get_setting(name): _log("get_setting name='"+name+"'"); dev=__settings__.getSetting(name); _log("get_setting ->'"+str(dev)+"'"); return dev
def set_setting(name,value): _log("set_setting name='"+name+"','"+value+"'"); __settings__.setSetting( name,value )
def open_settings_dialog(): _log("open_settings_dialog"); __settings__.openSettings()
def get_localized_string(code):
    """Return the localized string for *code*, UTF-8 encoded when possible."""
    _log("get_localized_string code="+str(code))
    text = __language__(code)
    try:
        text = text.encode("utf-8")
    except:
        pass
    _log("get_localized_string ->'"+text+"'")
    return text
def keyboard_input(default_text="",title="",hidden=False):
    """Prompt with the on-screen keyboard; return the typed text ("" on cancel)."""
    _log("keyboard_input default_text='"+default_text+"'")
    keyboard = xbmc.Keyboard(default_text, title, hidden)
    keyboard.doModal()
    typed = keyboard.getText() if keyboard.isConfirmed() else ""
    _log("keyboard_input ->'"+typed+"'")
    return typed
def message(text1,text2="",text3=""):
    """Show a modal OK dialog with up to three lines of text."""
    _log("message text1='"+text1+"', text2='"+text2+"', text3='"+text3+"'")
    dialog = xbmcgui.Dialog()
    if text3 == "":
        dialog.ok(text1, text2)
    elif text2 == "":
        dialog.ok("", text1)
    else:
        dialog.ok(text1, text2, text3)
def message_yes_no(text1,text2="",text3=""):
    """Show a yes/no dialog and return the user's answer."""
    _log("message_yes_no text1='"+text1+"', text2='"+text2+"', text3='"+text3+"'")
    dialog = xbmcgui.Dialog()
    if text3 == "":
        answer = dialog.yesno(text1, text2)
    elif text2 == "":
        answer = dialog.yesno("", text1)
    else:
        answer = dialog.yesno(text1, text2, text3)
    return answer
def selector(option_list,title="Select one"):
    """Show a selection dialog over *option_list*; return the chosen index."""
    _log("selector title='"+title+"', options="+repr(option_list))
    return xbmcgui.Dialog().select(title, option_list)
def set_view(view_mode, view_code=0):
    """Set the listing's content type and skin view mode.

    When view_code is 0 the code is looked up per-skin in ALL_VIEW_CODES;
    otherwise the given code is forced.
    """
    _log("set_view view_mode='"+view_mode+"', view_code="+str(view_code))
    # Set the content for extended library views if needed
    if view_mode==MOVIES: _log("set_view content is movies"); xbmcplugin.setContent( int(sys.argv[1]) ,"movies" )
    elif view_mode==TV_SHOWS: _log("set_view content is tvshows"); xbmcplugin.setContent( int(sys.argv[1]) ,"tvshows" )
    elif view_mode==SEASONS: _log("set_view content is seasons"); xbmcplugin.setContent( int(sys.argv[1]) ,"seasons" )
    elif view_mode==EPISODES: _log("set_view content is episodes"); xbmcplugin.setContent( int(sys.argv[1]) ,"episodes" )
    skin_name=xbmc.getSkinDir() # Reads skin name
    _log("set_view skin_name='"+skin_name+"'")
    try:
        if view_code==0:
            # No explicit code: look it up for this view mode and skin.
            _log("set_view view mode is "+view_mode)
            view_codes=ALL_VIEW_CODES.get(view_mode)
            view_code=view_codes.get(skin_name)
            _log("set_view view code for "+view_mode+" in "+skin_name+" is "+str(view_code))
            xbmc.executebuiltin("Container.SetViewMode("+str(view_code)+")")
        else:
            _log("set_view view code forced to "+str(view_code))
            xbmc.executebuiltin("Container.SetViewMode("+str(view_code)+")")
    except:
        # Unknown skin or view mode: leave the current view untouched.
        _log("Unable to find view code for view mode "+str(view_mode)+" and skin "+skin_name)
# Module initialisation: discover this addon's id from its addon.xml so the
# __settings__/__language__ proxies used above bind to the right addon.
f=open(os.path.join(os.path.dirname(__file__),"addon.xml")); data=f.read(); f.close()
addon_id=find_single_match(data,'id="([^"]+)"')
# Fall back to the single-quoted attribute form.
if addon_id=="": addon_id=find_single_match(data,"id='([^']+)'")
__settings__=xbmcaddon.Addon(id=addon_id); __language__=__settings__.getLocalizedString
| gpl-2.0 |
kadircet/CENG | 783/HW3/cs231n/classifiers/rnn.py | 1 | 14153 | import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
  """
  A CaptioningRNN produces captions from image features using a recurrent
  neural network.
  The RNN receives input vectors of size D, has a vocab size of V, works on
  sequences of length T, has an RNN hidden dimension of H, uses word vectors
  of dimension W, and operates on minibatches of size N.
  Note that we don't use any regularization for the CaptioningRNN.
  """
  def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
               hidden_dim=128, cell_type='rnn', dtype=np.float32):
    """
    Construct a new CaptioningRNN instance.
    Inputs:
    - word_to_idx: A dictionary giving the vocabulary. It contains V entries,
      and maps each string to a unique integer in the range [0, V).
    - input_dim: Dimension D of input image feature vectors.
    - wordvec_dim: Dimension W of word vectors.
    - hidden_dim: Dimension H for the hidden state of the RNN.
    - cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
    - dtype: numpy datatype to use; use float32 for training and float64 for
      numeric gradient checking.
    """
    if cell_type not in {'rnn', 'lstm'}:
      raise ValueError('Invalid cell_type "%s"' % cell_type)
    self.cell_type = cell_type
    self.dtype = dtype
    self.word_to_idx = word_to_idx
    self.idx_to_word = {i: w for w, i in word_to_idx.iteritems()}
    self.params = {}
    vocab_size = len(word_to_idx)
    # Special-token indices; <START>/<END> may be absent from the vocabulary.
    self._null = word_to_idx['<NULL>']
    self._start = word_to_idx.get('<START>', None)
    self._end = word_to_idx.get('<END>', None)
    # Initialize word vectors
    self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
    self.params['W_embed'] /= 100
    # Initialize CNN -> hidden state projection parameters
    self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
    self.params['W_proj'] /= np.sqrt(input_dim)
    self.params['b_proj'] = np.zeros(hidden_dim)
    # Initialize parameters for the RNN
    # (LSTM packs the four gates side by side, hence the 4x width.)
    dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
    self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
    self.params['Wx'] /= np.sqrt(wordvec_dim)
    self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
    self.params['Wh'] /= np.sqrt(hidden_dim)
    self.params['b'] = np.zeros(dim_mul * hidden_dim)
    # Initialize output to vocab weights
    self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
    self.params['W_vocab'] /= np.sqrt(hidden_dim)
    self.params['b_vocab'] = np.zeros(vocab_size)
    # Cast parameters to correct dtype
    for k, v in self.params.iteritems():
      self.params[k] = v.astype(self.dtype)
  def loss(self, features, captions):
    """
    Compute training-time loss for the RNN. We input image features and
    ground-truth captions for those images, and use an RNN (or LSTM) to compute
    loss and gradients on all parameters.
    Inputs:
    - features: Input image features, of shape (N, D)
    - captions: Ground-truth captions; an integer array of shape (N, T) where
      each element is in the range 0 <= y[i, t] < V
    Returns a tuple of:
    - loss: Scalar loss
    - grads: Dictionary of gradients parallel to self.params
    """
    # Cut captions into two pieces: captions_in has everything but the last word
    # and will be input to the RNN; captions_out has everything but the first
    # word and this is what we will expect the RNN to generate. These are offset
    # by one relative to each other because the RNN should produce word (t+1)
    # after receiving word t. The first element of captions_in will be the START
    # token, and the first element of captions_out will be the first word.
    captions_in = captions[:, :-1]
    captions_out = captions[:, 1:]
    # Mask out <NULL> padding positions in the loss.
    mask = (captions_out != self._null)
    # Weight and bias for the affine transform from image features to initial
    # hidden state
    W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
    # Word embedding matrix
    W_embed = self.params['W_embed']
    # Input-to-hidden, hidden-to-hidden, and biases for the RNN
    Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
    # Weight and bias for the hidden-to-vocab transformation.
    W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
    loss, grads = 0.0, {}
    # Forward: feature projection -> word embedding -> RNN/LSTM ->
    # vocab scores -> masked temporal softmax. Backward mirrors it in reverse.
    #h0=features.dot(W_proj)+b_proj
    # The temporal affine layer wants a time axis, so insert one and drop it
    # again afterwards: (N, 1, H) -> (N, H).
    h0,cache_h0 = temporal_affine_forward(features[:,None,:], W_proj, b_proj)
    h0=h0.reshape(h0.shape[0],h0.shape[2])
    inp, cache_we = word_embedding_forward(captions_in, W_embed)  # (N, T, W)
    func = rnn_forward
    if self.cell_type=='lstm':
      func = lstm_forward
    h, cache_rnn = func(inp, h0, Wx, Wh, b)  # (N, T, H)
    scores, cache_aff = temporal_affine_forward(h, W_vocab, b_vocab)  # (N, T, V)
    loss, dscores = temporal_softmax_loss(scores, captions_out, mask)
    dscores, grads['W_vocab'], grads['b_vocab'] = temporal_affine_backward(dscores, cache_aff)
    func = rnn_backward
    if self.cell_type=='lstm':
      func = lstm_backward
    dinp, dh0, grads['Wx'], grads['Wh'], grads['b'] = func(dscores, cache_rnn)
    grads['W_embed'] = word_embedding_backward(dinp, cache_we)
    # Re-insert the time axis for the projection layer's backward pass.
    _, grads['W_proj'], grads['b_proj'] = temporal_affine_backward(dh0[:,None,:], cache_h0)
    return loss, grads
  def sample(self, features, max_length=30):
    """
    Run a test-time forward pass for the model, sampling captions for input
    feature vectors.
    At each timestep, we embed the current word, pass it and the previous hidden
    state to the RNN to get the next hidden state, use the hidden state to get
    scores for all vocab words, and choose the word with the highest score as
    the next word. The initial hidden state is computed by applying an affine
    transform to the input image features, and the initial word is the <START>
    token.
    For LSTMs you will also have to keep track of the cell state; in that case
    the initial cell state should be zero.
    Inputs:
    - features: Array of input image features of shape (N, D).
    - max_length: Maximum length T of generated captions.
    Returns:
    - captions: Array of shape (N, max_length) giving sampled captions,
      where each element is an integer in the range [0, V). The first element
      of captions should be the first sampled word, not the <START> token.
    """
    N = features.shape[0]
    # Pre-fill with <NULL>; predictions overwrite slots as they are produced.
    captions = self._null * np.ones((N, max_length), dtype=np.int32)
    # Unpack parameters
    W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
    W_embed = self.params['W_embed']
    Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
    W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
    # Greedy decoding: one RNN/LSTM step per output word.
    # Project features to the initial hidden state, (N, 1, H) -> (N, H).
    h, _ = temporal_affine_forward(features[:,None,:], W_proj, b_proj)
    h=h.reshape(h.shape[0],h.shape[2])
    c=np.zeros_like(h)  # initial LSTM cell state (unused for vanilla RNN)
    captions[:,0] = self._start
    for i in xrange(max_length):
      # Embed the word currently in slot i (START at i=0, else the previous
      # prediction copied forward below).
      we, _ = word_embedding_forward(captions[:,i], W_embed)
      if self.cell_type=='rnn':
        h, _ = rnn_step_forward(we, h, Wx, Wh, b)
      else:
        h, c, _ = lstm_step_forward(we, h, c, Wx, Wh, b)
      scores, _ = temporal_affine_forward(h[:,None,:], W_vocab, b_vocab)
      # Overwrite slot i with the greedy prediction (this is why <START>
      # never appears in the returned captions), then seed slot i+1 with it
      # so the next iteration embeds this prediction.
      captions[:,i] = scores[:, 0, :].argmax(axis=1)
      if i+1<max_length:
        captions[:,i+1] = captions[:,i]
    return captions
class NextCharRNN(object):
  """Character-level RNN over the 256 byte values (no embedding layer:
  the raw character code is fed to the RNN as a width-1 input vector)."""
  def __init__(self, input_dim=512, charvec_dim=1,
               hidden_dim=128, cell_type='rnn', dtype=np.float32):
    if cell_type not in {'rnn', 'lstm'}:
      raise ValueError('Invalid cell_type "%s"' % cell_type)
    self.cell_type = cell_type
    self.dtype = dtype
    # Identity vocabulary: every byte value maps to itself.
    self.char_to_idx = {chr(c): c for c in xrange(256)}
    self.idx_to_char = {i: w for w, i in self.char_to_idx.iteritems()}
    self.params = {}
    vocab_size = len(self.char_to_idx)
    # Initialize parameters for the RNN
    dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
    self.params['Wx'] = np.random.randn(charvec_dim, dim_mul * hidden_dim)
    self.params['Wx'] /= np.sqrt(charvec_dim)
    self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
    self.params['Wh'] /= np.sqrt(hidden_dim)
    self.params['b'] = np.zeros(dim_mul * hidden_dim)
    # Initialize output to vocab weights
    self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
    self.params['W_vocab'] /= np.sqrt(hidden_dim)
    self.params['b_vocab'] = np.zeros(vocab_size)
    # Cast parameters to correct dtype
    for k, v in self.params.iteritems():
      self.params[k] = v.astype(self.dtype)
  def loss(self, X):
    """Compute loss and gradients for a batch of character sequences X.

    NOTE(review): X_in and X_out are identical full slices of X — for
    next-character prediction the target is usually shifted by one
    position; confirm this is intended.
    NOTE(review): the forward pass always uses rnn_forward, but the
    backward pass switches to lstm_backward when cell_type == 'lstm' —
    the two would not match for an LSTM; confirm.
    """
    X_in = X[:, :, :].astype(int)
    X_out = X[:, :, :].astype(int)
    # Input-to-hidden, hidden-to-hidden, and biases for the RNN
    Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
    # Weight and bias for the hidden-to-vocab transformation.
    W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
    loss, grads = 0.0, {}
    h0 = np.zeros((X.shape[0], Wh.shape[0]))  # zero initial hidden state
    h, cache_rnn = rnn_forward(X_in, h0, Wx, Wh, b)
    scores, cache_aff = temporal_affine_forward(h, W_vocab, b_vocab)
    loss, dscores = temporal_softmax_loss(scores, X_out)
    dscores, grads['W_vocab'], grads['b_vocab'] = temporal_affine_backward(dscores, cache_aff)
    func = rnn_backward
    if self.cell_type=='lstm':
      func = lstm_backward
    dX_in, dh0, grads['Wx'], grads['Wh'], grads['b'] = func(dscores, cache_rnn)
    return loss, grads
  def sample(self, X, max_length=30):
    """Greedily predict the next character at each position of X.

    NOTE(review): slot i of `out` is overwritten by the prediction right
    after being used as input, and slot i+1 is then re-seeded from the
    ground-truth X (teacher forcing), so the returned array holds the
    per-step predictions, never the model's own continuations — confirm
    this is the intended behaviour. Only the 'rnn' cell is stepped here;
    for 'lstm' the hidden state is never updated.
    """
    N = X.shape[0]
    out = np.zeros((N, max_length), dtype=np.int32)
    # Unpack parameters
    Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
    W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
    h = np.zeros((N, Wh.shape[0]))  # zero initial hidden state
    out[:,0] = X[:,0,0]
    for i in xrange(max_length):
      if self.cell_type=='rnn':
        h, _ = rnn_step_forward(out[:,i,None], h, Wx, Wh, b)
      scores, _ = temporal_affine_forward(h[:,None,:], W_vocab, b_vocab)
      out[:,i] = scores[:, 0, :].argmax(axis=1)
      if i+1<max_length:
        out[:,i+1] = X[:,i+1,0]
    return out
| gpl-3.0 |
stevezilla/u-boot-amherst | tools/buildman/kconfiglib.py | 13 | 139461 | #
# SPDX-License-Identifier: ISC
#
# Author: Ulf Magnusson
# https://github.com/ulfalizer/Kconfiglib
# This is Kconfiglib, a Python library for scripting, debugging, and extracting
# information from Kconfig-based configuration systems. To view the
# documentation, run
#
# $ pydoc kconfiglib
#
# or, if you prefer HTML,
#
# $ pydoc -w kconfiglib
#
# The examples/ subdirectory contains examples, to be run with e.g.
#
# $ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
#
# Look in testsuite.py for the test suite.
"""
Kconfiglib is a Python library for scripting and extracting information from
Kconfig-based configuration systems. Features include the following:
- Symbol values and properties can be looked up and values assigned
programmatically.
- .config files can be read and written.
- Expressions can be evaluated in the context of a Kconfig configuration.
- Relations between symbols can be quickly determined, such as finding all
symbols that reference a particular symbol.
- Highly compatible with the scripts/kconfig/*conf utilities. The test suite
automatically compares outputs between Kconfiglib and the C implementation
for a large number of cases.
For the Linux kernel, scripts are run using
$ make scriptconfig SCRIPT=<path to script> [SCRIPT_ARG=<arg>]
Running scripts via the 'scriptconfig' target ensures that required environment
variables (SRCARCH, ARCH, srctree, KERNELVERSION, etc.) are set up correctly.
Alternative architectures can be specified like for other 'make *config'
targets:
$ make scriptconfig ARCH=mips SCRIPT=<path to script> [SCRIPT_ARG=<arg>]
The script will receive the name of the Kconfig file to load in sys.argv[1].
(As of Linux 3.7.0-rc8 this is always "Kconfig" from the kernel top-level
directory.) If an argument is provided with SCRIPT_ARG, it will appear in
sys.argv[2].
To get an interactive Python prompt with Kconfiglib preloaded and a Config
object 'c' created, use
$ make iscriptconfig [ARCH=<architecture>]
Kconfiglib requires Python 2. For (i)scriptconfig the command to run the Python
interpreter can be passed in the environment variable PYTHONCMD (defaults to
'python'; PyPy works too and is a bit faster).
Look in the examples/ subdirectory for examples, which can be run with e.g.
$ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
or
$ make scriptconfig SCRIPT=Kconfiglib/examples/help_grep.py SCRIPT_ARG="kernel"
Look in testsuite.py for the test suite.
Credits: Written by Ulf "Ulfalizer" Magnusson
Send bug reports, suggestions and other feedback to kconfiglib@gmail.com .
Don't wrestle with internal APIs. Tell me what you need and I might add it in a
safe way as a client API instead."""
# If you have Psyco installed (32-bit installations, Python <= 2.6 only),
# setting this to True (right here, not at runtime) might give a nice speedup.
# (22% faster for parsing arch/x86/Kconfig and 58% faster for evaluating all
# symbols in it without a .config on my Core Duo.)
# Read once at import time further down; not a runtime switch.
use_psyco = False
import os
import re
import string
import sys
class Config():
"""Represents a Kconfig configuration, e.g. for i386 or ARM. This is the
set of symbols and other items appearing in the configuration together with
their values. Creating any number of Config objects -- including for
different architectures -- is safe; Kconfiglib has no global state."""
#
# Public interface
#
def __init__(self,
             filename = "Kconfig",
             base_dir = "$srctree",
             print_warnings = True,
             print_undef_assign = False):
    """Creates a new Config object, representing a Kconfig configuration.
    Raises Kconfig_Syntax_Error on syntax errors.

    filename (default: "Kconfig") -- The base Kconfig file of the
              configuration. For the Linux kernel, this should usually be
              "Kconfig" from the top-level directory, as environment
              variables will make sure the right Kconfig is included from
              there (usually arch/<architecture>/Kconfig). If you are using
              kconfiglib via 'make scriptconfig' the filename of the
              correct Kconfig will be in sys.argv[1].

    base_dir (default: "$srctree") -- The base directory relative to which
              'source' statements within Kconfig files will work. For the
              Linux kernel this should be the top-level directory of the
              kernel tree. $-references to environment variables will be
              expanded. The environment variable 'srctree' is set by the
              Linux makefiles to the top-level kernel directory. A default
              of "." would not work if an alternative build directory is
              used.

    print_warnings (default: True) -- Set to True if warnings related to
              this configuration should be printed to stderr. This can
              be changed later with Config.set_print_warnings(). It is
              provided as a constructor argument since warnings might
              be generated during parsing.

    print_undef_assign (default: False) -- Set to True if informational
              messages related to assignments to undefined symbols
              should be printed to stderr for this configuration.
              Can be changed later with Config.set_print_undef_assign()."""
    # The set of all symbols, indexed by name (a string)
    self.syms = {}
    # The set of all defined symbols in the configuration in the order they
    # appear in the Kconfig files. This excludes the special symbols n, m,
    # and y as well as symbols that are referenced but never defined.
    self.kconfig_syms = []
    # The set of all named choices (yes, choices can have names), indexed
    # by name (a string)
    self.named_choices = {}
    def register_special_symbol(type, name, value):
        # Helper: create a constant, always-defined Symbol with a
        # pre-cached value and register it under 'name'.
        sym = Symbol()
        sym.is_special_ = True
        sym.is_defined_ = True
        sym.config = self
        sym.name = name
        sym.type = type
        sym.cached_value = value
        self.syms[name] = sym
        return sym
    # The special symbols n, m and y, used as shorthand for "n", "m" and
    # "y"
    self.n = register_special_symbol(TRISTATE, "n", "n")
    self.m = register_special_symbol(TRISTATE, "m", "m")
    self.y = register_special_symbol(TRISTATE, "y", "y")
    # DEFCONFIG_LIST uses this
    register_special_symbol(STRING, "UNAME_RELEASE", os.uname()[2])
    # The symbol with "option defconfig_list" set, containing a list of
    # default .config files
    self.defconfig_sym = None
    # See Symbol.get_(src)arch()
    self.arch = os.environ.get("ARCH")
    self.srcarch = os.environ.get("SRCARCH")
    # See Config.__init__(). We need this for get_defconfig_filename().
    self.srctree = os.environ.get("srctree")
    if self.srctree is None:
        self.srctree = "."
    self.filename = filename
    self.base_dir = _strip_trailing_slash(os.path.expandvars(base_dir))
    # The 'mainmenu' text
    self.mainmenu_text = None
    # The filename of the most recently loaded .config file
    self.config_filename = None
    # The textual header of the most recently loaded .config, uncommented
    self.config_header = None
    self.print_warnings = print_warnings
    self.print_undef_assign = print_undef_assign
    # Lists containing all choices, menus and comments in the configuration
    self.choices = []
    self.menus = []
    self.comments = []
    # For parsing routines that stop when finding a line belonging to a
    # different construct, these hold that line and the tokenized version
    # of that line. The purpose is to avoid having to re-tokenize the line,
    # which is inefficient and causes problems when recording references to
    # symbols.
    self.end_line = None
    self.end_line_tokens = None
    # See the comment in _parse_expr().
    self.parse_expr_cur_sym_or_choice = None
    self.parse_expr_line = None
    self.parse_expr_filename = None
    self.parse_expr_linenr = None
    self.parse_expr_transform_m = None
    # Parse the Kconfig files
    self.top_block = self._parse_file(filename, None, None, None)
    # Build Symbol.dep for all symbols
    self._build_dep()
def load_config(self, filename, replace = True):
    """Loads symbol values from a file in the familiar .config format.
    Equivalent to calling Symbol.set_user_value() to set each of the
    values.

    filename -- The .config file to load. $-references to environment
                variables will be expanded. For scripts to work even
                when an alternative build directory is used with the
                Linux kernel, you need to refer to the top-level kernel
                directory with "$srctree".

    replace (default: True) -- True if the configuration should replace
             the old configuration; False if it should add to it."""
    def warn_override(filename, linenr, name, old_user_val, new_user_val):
        # Warn when a symbol appears twice in the .config with different
        # values; the later assignment wins.
        self._warn("overriding the value of {0}. "
                   'Old value: "{1}", new value: "{2}".'
                   .format(name, old_user_val, new_user_val),
                   filename,
                   linenr)
    filename = os.path.expandvars(filename)
    # Put this first so that a missing file doesn't screw up our state
    line_feeder = _FileFeed(_get_lines(filename), filename)
    self.config_filename = filename
    # Invalidate everything. This is usually faster than finding the
    # minimal set of symbols that needs to be invalidated, as nearly all
    # symbols will tend to be affected anyway.
    if replace:
        self.unset_user_values()
    else:
        self._invalidate_all()
    # Read header
    self.config_header = None
    def is_header_line(line):
        # Header lines are comments, except "# CONFIG_FOO is not set"
        # lines, which are value assignments.
        return line.startswith("#") and \
               not unset_re.match(line)
    first_line = line_feeder.get_next()
    if first_line is None:
        return
    if not is_header_line(first_line):
        line_feeder.go_back()
    else:
        self.config_header = first_line[1:]
        # Read remaining header lines
        while 1:
            line = line_feeder.get_next()
            if line is None:
                break
            if not is_header_line(line):
                line_feeder.go_back()
                break
            self.config_header += line[1:]
        # Remove trailing newline
        if self.config_header.endswith("\n"):
            self.config_header = self.config_header[:-1]
    # Read assignments
    filename = line_feeder.get_filename()
    while 1:
        line = line_feeder.get_next()
        if line is None:
            return
        linenr = line_feeder.get_linenr()
        line = line.strip()
        set_re_match = set_re.match(line)
        if set_re_match:
            name, val = set_re_match.groups()
            # The unescaping procedure below should be safe since " can
            # only appear as \" inside the string
            val = _strip_quotes(val, line, filename, linenr)\
                  .replace('\\"', '"').replace("\\\\", "\\")
            if name in self.syms:
                sym = self.syms[name]
                old_user_val = sym.user_val
                if old_user_val is not None:
                    warn_override(filename, linenr, name, old_user_val, val)
                if sym.is_choice_symbol_:
                    user_mode = sym.parent.user_mode
                    if user_mode is not None and user_mode != val:
                        self._warn("assignment to {0} changes mode of containing "
                                   'choice from "{1}" to "{2}".'
                                   .format(name, val, user_mode),
                                   filename,
                                   linenr)
                # Invalidation already happened above, so skip the
                # per-symbol invalidation pass.
                sym._set_user_value_no_invalidate(val, True)
            else:
                self._undef_assign('attempt to assign the value "{0}" to the '
                                   "undefined symbol {1}."
                                   .format(val, name),
                                   filename,
                                   linenr)
        else:
            # Not "CONFIG_FOO=val"; check for "# CONFIG_FOO is not set"
            unset_re_match = unset_re.match(line)
            if unset_re_match:
                name = unset_re_match.group(1)
                if name in self.syms:
                    sym = self.syms[name]
                    old_user_val = sym.user_val
                    if old_user_val is not None:
                        warn_override(filename, linenr, name, old_user_val, "n")
                    sym._set_user_value_no_invalidate("n", True)
def write_config(self, filename, header = None):
    """Writes out symbol values in the familiar .config format.

    filename -- The filename under which to save the configuration.

    header (default: None) -- A textual header that will appear at the
            beginning of the file, with each line commented out
            automatically. None means no header."""
    # already_written is set when _make_conf() is called on a symbol, so
    # that symbols defined in multiple locations only get one entry in the
    # .config. We need to reset it prior to writing out a new .config.
    for sym in self.syms.itervalues():
        sym.already_written = False
    with open(filename, "w") as f:
        # Write header
        if header is not None:
            f.write(_comment(header))
            f.write("\n")
        # Write configuration.
        # (You'd think passing a list around to all the nodes and appending
        # to it to avoid copying would be faster, but it's actually a lot
        # slower with PyPy, and about as fast with Python. Passing the file
        # around is slower too.)
        f.write("\n".join(self.top_block._make_conf()))
        f.write("\n")
def get_kconfig_filename(self):
"""Returns the name of the (base) kconfig file this configuration was
loaded from."""
return self.filename
def get_arch(self):
"""Returns the value the environment variable ARCH had at the time the
Config instance was created, or None if ARCH was not set. For the
kernel, this corresponds to the architecture being built for, with
values such as "i386" or "mips"."""
return self.arch
def get_srcarch(self):
"""Returns the value the environment variable SRCARCH had at the time
the Config instance was created, or None if SRCARCH was not set. For
the kernel, this corresponds to the arch/ subdirectory containing
architecture-specific source code."""
return self.srcarch
def get_srctree(self):
"""Returns the value the environment variable srctree had at the time
the Config instance was created, or None if srctree was not defined.
This variable points to the source directory and is used when building
in a separate directory."""
return self.srctree
def get_config_filename(self):
"""Returns the name of the most recently loaded configuration file, or
None if no configuration has been loaded."""
return self.config_filename
def get_mainmenu_text(self):
"""Returns the text of the 'mainmenu' statement (with $-references to
symbols replaced by symbol values), or None if the configuration has no
'mainmenu' statement."""
return None if self.mainmenu_text is None else \
self._expand_sym_refs(self.mainmenu_text)
def get_defconfig_filename(self):
"""Returns the name of the defconfig file, which is the first existing
file in the list given in a symbol having 'option defconfig_list' set.
$-references to symbols will be expanded ("$FOO bar" -> "foo bar" if
FOO has the value "foo"). Returns None in case of no defconfig file.
Setting 'option defconfig_list' on multiple symbols currently results
in undefined behavior.
If the environment variable 'srctree' was set when the Config was
created, get_defconfig_filename() will first look relative to that
directory before looking in the current directory; see
Config.__init__()."""
if self.defconfig_sym is None:
return None
for (filename, cond_expr) in self.defconfig_sym.def_exprs:
if self._eval_expr(cond_expr) == "y":
filename = self._expand_sym_refs(filename)
# We first look in $srctree. os.path.join() won't work here as
# an absolute path in filename would override $srctree.
srctree_filename = os.path.normpath(self.srctree + "/" + filename)
if os.path.exists(srctree_filename):
return srctree_filename
if os.path.exists(filename):
return filename
return None
def get_symbol(self, name):
"""Returns the symbol with name 'name', or None if no such symbol
appears in the configuration. An alternative shorthand is conf[name],
where conf is a Config instance, though that will instead raise
KeyError if the symbol does not exist."""
return self.syms.get(name)
def get_top_level_items(self):
"""Returns a list containing the items (symbols, menus, choice
statements and comments) at the top level of the configuration -- that
is, all items that do not appear within a menu or choice. The items
appear in the same order as within the configuration."""
return self.top_block.get_items()
def get_symbols(self, all_symbols = True):
"""Returns a list of symbols from the configuration. An alternative for
iterating over all defined symbols (in the order of definition) is
for sym in config:
...
which relies on Config implementing __iter__() and is equivalent to
for sym in config.get_symbols(False):
...
all_symbols (default: True) -- If True, all symbols - including special
and undefined symbols - will be included in the result, in
an undefined order. If False, only symbols actually defined
and not merely referred to in the configuration will be
included in the result, and will appear in the order that
they are defined within the Kconfig configuration files."""
return self.syms.values() if all_symbols else self.kconfig_syms
def get_choices(self):
"""Returns a list containing all choice statements in the
configuration, in the order they appear in the Kconfig files."""
return self.choices
def get_menus(self):
"""Returns a list containing all menus in the configuration, in the
order they appear in the Kconfig files."""
return self.menus
def get_comments(self):
"""Returns a list containing all comments in the configuration, in the
order they appear in the Kconfig files."""
return self.comments
def eval(self, s):
    """Evaluate the expression string 's' in the context of this
    configuration and return its tristate value ("n", "m" or "y").
    Raises Kconfig_Syntax_Error if 's' is malformed.

    Example: with tristate symbols FOO and BAR where at least one is "y",
    config.eval("y && (FOO || BAR)") => "y".

    Evaluation matches the conditional-expression semantics of the
    Kconfig files and the C tools: "m" and m are rewritten to
    '"m" && MODULES' and 'm && MODULES', and an "m" result is promoted
    to "y" when running without modules. Always yields a tristate; use
    Symbol.get_value() for non-bool/tristate symbols."""
    # Tokenize in eval mode (True): no special handling of the first
    # token and no registration of new symbols.
    tokens = self._tokenize(s, True)
    # No current symbol/choice; 's' doubles as the error-reporting line.
    parsed_expr = self._parse_expr(tokens, None, s)
    return self._eval_expr(parsed_expr)
def get_config_header(self):
"""Returns the (uncommented) textual header of the .config file most
recently loaded with load_config(). Returns None if no .config file has
been loaded or if the most recently loaded .config file has no header.
The header comprises all lines up to but not including the first line
that either
1. Does not start with "#"
2. Has the form "# CONFIG_FOO is not set."
"""
return self.config_header
def get_base_dir(self):
"""Returns the base directory relative to which 'source' statements
will work, passed as an argument to Config.__init__()."""
return self.base_dir
def set_print_warnings(self, print_warnings):
"""Determines whether warnings related to this configuration (for
things like attempting to assign illegal values to symbols with
Symbol.set_user_value()) should be printed to stderr.
print_warnings -- True if warnings should be
printed, otherwise False."""
self.print_warnings = print_warnings
def set_print_undef_assign(self, print_undef_assign):
"""Determines whether informational messages related to assignments to
undefined symbols should be printed to stderr for this configuration.
print_undef_assign -- If True, such messages will be printed."""
self.print_undef_assign = print_undef_assign
def __getitem__(self, key):
"""Returns the symbol with name 'name'. Raises KeyError if the symbol
does not appear in the configuration."""
return self.syms[key]
def __iter__(self):
"""Convenience function for iterating over the set of all defined
symbols in the configuration, used like
for sym in conf:
...
The iteration happens in the order of definition within the Kconfig
configuration files. Symbols only referred to but not defined will not
be included, nor will the special symbols n, m, and y. If you want to
include such symbols as well, see config.get_symbols()."""
return iter(self.kconfig_syms)
def unset_user_values(self):
    """Resets the values of all symbols, as if Config.load_config() or
    Symbol.set_user_value() had never been called."""
    # NOTE(review): the no-recursive-invalidate variant appears sufficient
    # here since every symbol is visited once anyway -- confirm against
    # Symbol's invalidation logic.
    for sym in self.syms.itervalues():
        sym._unset_user_value_no_recursive_invalidate()
def __str__(self):
    """Returns a string containing various information about the Config."""
    # _sep_lines joins its arguments with newlines; bool_str maps a
    # boolean to its printable form (both are module-level helpers).
    return _sep_lines("Configuration",
                      "File : " + self.filename,
                      "Base directory : " + self.base_dir,
                      "Value of $ARCH at creation time : " +
                      ("(not set)" if self.arch is None else self.arch),
                      "Value of $SRCARCH at creation time : " +
                      ("(not set)" if self.srcarch is None else self.srcarch),
                      "Source tree (derived from $srctree;",
                      "defaults to '.' if $srctree isn't set) : " + self.srctree,
                      "Most recently loaded .config : " +
                      ("(no .config loaded)" if self.config_filename is None else
                       self.config_filename),
                      "Print warnings : " +
                      bool_str[self.print_warnings],
                      "Print assignments to undefined symbols : " +
                      bool_str[self.print_undef_assign])
#
# Private methods
#
def _invalidate_all(self):
    # Drop the cached value of every symbol so it gets recalculated on
    # the next access.
    for sym in self.syms.itervalues():
        sym._invalidate()
def _tokenize(self,
              s,
              for_eval = False,
              filename = None,
              linenr = None):
    """Returns a _Feed instance containing tokens derived from the string
    's'. Registers any new symbols encountered (via _sym_lookup()).

    (I experimented with a pure regular expression implementation, but it
    came out slower, less readable, and wouldn't have been as flexible.)

    for_eval -- True when parsing an expression for a call to
                Config.eval(), in which case we should not treat the first
                token specially nor register new symbols.

    filename/linenr (default: None) -- Location used in error messages and
                recorded on symbol definitions/references."""
    s = s.lstrip()
    # Blank lines and comment-only lines produce no tokens.
    if s == "" or s[0] == "#":
        return _Feed([])
    if for_eval:
        i = 0 # The current index in the string being tokenized
        previous = None # The previous token seen
        tokens = []
    else:
        # The initial word on a line is parsed specially. Let
        # command_chars = [A-Za-z0-9_]. Then
        #  - leading non-command_chars characters on the line are ignored, and
        #  - the first token consists the following one or more command_chars
        #    characters.
        # This is why things like "----help--" are accepted.
        initial_token_match = initial_token_re.match(s)
        if initial_token_match is None:
            return _Feed([])
        # The current index in the string being tokenized
        i = initial_token_match.end()
        keyword = keywords.get(initial_token_match.group(1))
        if keyword is None:
            # We expect a keyword as the first token
            _tokenization_error(s, len(s), filename, linenr)
        if keyword == T_HELP:
            # Avoid junk after "help", e.g. "---", being registered as a
            # symbol
            return _Feed([T_HELP])
        tokens = [keyword]
        previous = keyword
    # _tokenize() is a hotspot during parsing, and this speeds things up a
    # bit
    strlen = len(s)
    append = tokens.append
    # Main tokenization loop. (Handles tokens past the first one.)
    while i < strlen:
        # Test for an identifier/keyword preceded by whitespace first; this
        # is the most common case.
        id_keyword_match = id_keyword_re.match(s, i)
        if id_keyword_match:
            # We have an identifier or keyword. The above also stripped any
            # whitespace for us.
            name = id_keyword_match.group(1)
            # Jump past it
            i = id_keyword_match.end()
            # Keyword?
            keyword = keywords.get(name)
            if keyword is not None:
                append(keyword)
            # What would ordinarily be considered a name is treated as a
            # string after certain tokens.
            elif previous in string_lex:
                append(name)
            else:
                # We're dealing with a symbol. _sym_lookup() will take care
                # of allocating a new Symbol instance if it's the first
                # time we see it.
                sym = self._sym_lookup(name, not for_eval)
                if previous == T_CONFIG or previous == T_MENUCONFIG:
                    # If the previous token is T_(MENU)CONFIG
                    # ("(menu)config"), we're tokenizing the first line of
                    # a symbol definition, and should remember this as a
                    # location where the symbol is defined.
                    sym.def_locations.append((filename, linenr))
                else:
                    # Otherwise, it's a reference to the symbol
                    sym.ref_locations.append((filename, linenr))
                append(sym)
        else:
            # This restrips whitespace that could have been stripped in the
            # regex above, but it's worth it since identifiers/keywords are
            # more common
            s = s[i:].lstrip()
            if s == "":
                break
            strlen = len(s)
            i = 0
            c = s[0]
            # String literal (constant symbol)
            if c == '"' or c == "'":
                i += 1
                if "\\" in s:
                    # Slow path: This could probably be sped up, but it's a
                    # very unusual case anyway.
                    quote = c
                    value = ""
                    while 1:
                        if i >= strlen:
                            _tokenization_error(s, strlen, filename,
                                                linenr)
                        c = s[i]
                        if c == quote:
                            break
                        if c == "\\":
                            if i + 1 >= strlen:
                                _tokenization_error(s, strlen, filename,
                                                    linenr)
                            value += s[i + 1]
                            i += 2
                        else:
                            value += c
                            i += 1
                    i += 1
                    append(value)
                else:
                    # Fast path: If the string contains no backslashes (almost
                    # always) we can simply look for the matching quote.
                    end = s.find(c, i)
                    if end == -1:
                        _tokenization_error(s, strlen, filename, linenr)
                    append(s[i:end])
                    i = end + 1
            elif c == "&":
                if i + 1 >= strlen:
                    # Invalid characters are ignored
                    continue
                if s[i + 1] != "&":
                    # Invalid characters are ignored
                    i += 1
                    continue
                append(T_AND)
                i += 2
            elif c == "|":
                if i + 1 >= strlen:
                    # Invalid characters are ignored
                    continue
                if s[i + 1] != "|":
                    # Invalid characters are ignored
                    i += 1
                    continue
                append(T_OR)
                i += 2
            elif c == "!":
                # "!" alone is NOT; "!=" is the inequality operator.
                if i + 1 >= strlen:
                    _tokenization_error(s, strlen, filename, linenr)
                if s[i + 1] == "=":
                    append(T_UNEQUAL)
                    i += 2
                else:
                    append(T_NOT)
                    i += 1
            elif c == "=":
                append(T_EQUAL)
                i += 1
            elif c == "(":
                append(T_OPEN_PAREN)
                i += 1
            elif c == ")":
                append(T_CLOSE_PAREN)
                i += 1
            elif c == "#":
                # Rest of the line is a comment
                break
            else:
                # Invalid characters are ignored
                i += 1
                continue
        previous = tokens[-1]
    return _Feed(tokens)
#
# Parsing
#
# Expression grammar:
#
# <expr> -> <symbol>
# <symbol> '=' <symbol>
# <symbol> '!=' <symbol>
# '(' <expr> ')'
# '!' <expr>
# <expr> '&&' <expr>
# <expr> '||' <expr>
def _parse_expr(self,
                feed,
                cur_sym_or_choice,
                line,
                filename = None,
                linenr = None,
                transform_m = True):
    """Parse an expression from the tokens in 'feed' using a simple
    top-down approach. The result has the form (<operator>, <list
    containing parsed operands>).

    feed -- _Feed instance containing the tokens for the expression.

    cur_sym_or_choice -- The symbol or choice currently being parsed, or
                         None if we're not parsing a symbol or choice.
                         Used for recording references to symbols.

    line -- The line containing the expression being parsed.

    filename (default: None) -- The file containing the expression.

    linenr (default: None) -- The line number containing the expression.

    transform_m (default: True) -- Determines if 'm' should be rewritten
                                   to 'm && MODULES' -- see
                                   parse_val_and_cond()."""
    # Use instance variables to avoid having to pass these as arguments
    # through the top-down parser in _parse_expr_2(), which is tedious and
    # obfuscates the code. A profiler run shows no noticeable performance
    # difference.
    self.parse_expr_cur_sym_or_choice = cur_sym_or_choice
    self.parse_expr_line = line
    self.parse_expr_filename = filename
    self.parse_expr_linenr = linenr
    self.parse_expr_transform_m = transform_m
    return self._parse_expr_2(feed)
def _parse_expr_2(self, feed):
    # An expression is one or more OR-terms separated by '||'. A single
    # term is returned as-is; multiple terms become an (OR, [terms]) node.
    terms = [self._parse_or_term(feed)]
    while feed.check(T_OR):
        terms.append(self._parse_or_term(feed))
    if len(terms) == 1:
        return terms[0]
    return (OR, terms)
def _parse_or_term(self, feed):
    # An OR-term is one or more factors separated by '&&'. A single
    # factor is returned as-is; multiple become an (AND, [factors]) node.
    factors = [self._parse_factor(feed)]
    while feed.check(T_AND):
        factors.append(self._parse_factor(feed))
    if len(factors) == 1:
        return factors[0]
    return (AND, factors)
def _parse_factor(self, feed):
    # Parses a single factor of an expression: a parenthesized
    # subexpression, a negation, a bare symbol/string, or an (in)equality
    # between two symbols/strings.
    if feed.check(T_OPEN_PAREN):
        expr_parse = self._parse_expr_2(feed)
        if not feed.check(T_CLOSE_PAREN):
            _parse_error(self.parse_expr_line,
                         "missing end parenthesis.",
                         self.parse_expr_filename,
                         self.parse_expr_linenr)
        return expr_parse
    if feed.check(T_NOT):
        return (NOT, self._parse_factor(feed))
    sym_or_string = feed.get_next()
    if not isinstance(sym_or_string, (Symbol, str)):
        _parse_error(self.parse_expr_line,
                     "malformed expression.",
                     self.parse_expr_filename,
                     self.parse_expr_linenr)
    # Record the symbol reference on the symbol/choice being parsed
    if self.parse_expr_cur_sym_or_choice is not None and \
       isinstance(sym_or_string, Symbol):
        self.parse_expr_cur_sym_or_choice.referenced_syms.add(sym_or_string)
    next_token = feed.peek_next()
    # For conditional expressions ('depends on <expr>', '... if <expr>',
    # etc.), "m" and m are rewritten to "m" && MODULES.
    if next_token != T_EQUAL and next_token != T_UNEQUAL:
        if self.parse_expr_transform_m and (sym_or_string is self.m or
                                            sym_or_string == "m"):
            return (AND, ["m", self._sym_lookup("MODULES")])
        return sym_or_string
    # Equality / inequality: consume the operator and right-hand operand
    relation = EQUAL if (feed.get_next() == T_EQUAL) else UNEQUAL
    sym_or_string_2 = feed.get_next()
    if self.parse_expr_cur_sym_or_choice is not None and \
       isinstance(sym_or_string_2, Symbol):
        self.parse_expr_cur_sym_or_choice.referenced_syms.add(sym_or_string_2)
    # Normalize the special m symbol to the plain string "m" inside
    # (in)equalities
    if sym_or_string is self.m:
        sym_or_string = "m"
    if sym_or_string_2 is self.m:
        sym_or_string_2 = "m"
    return (relation, sym_or_string, sym_or_string_2)
def _parse_file(self, filename, parent, deps, visible_if_deps, res = None):
    """Parse the Kconfig file 'filename'. The result is a _Block with all
    items from the file. See _parse_block() for the meaning of the
    parameters."""
    # Wrap the file's lines in a _FileFeed so the block parser can push
    # lines back with go_back() when it overshoots a construct.
    line_feeder = _FileFeed(_get_lines(filename), filename)
    return self._parse_block(line_feeder, None, parent, deps, visible_if_deps, res)
    def _parse_block(self, line_feeder, end_marker, parent, deps,
                     visible_if_deps = None, res = None):
        """Parses a block, which is the contents of either a file or an if,
        menu, or choice statement. The result is a _Block with the items from
        the block.

        end_marker -- The token that ends the block, e.g. T_ENDIF ("endif") for
                      if's. None for files.
        parent -- The enclosing menu, choice or if, or None if we're at the top
                  level.
        deps -- Dependencies from enclosing menus, choices and if's.
        visible_if_deps (default: None) -- 'visible if' dependencies from
                        enclosing menus.
        res (default: None) -- The _Block to add items to. If None, a new
                               _Block is created to hold the items."""
        block = _Block() if res is None else res
        filename = line_feeder.get_filename()
        while 1:
            # Do we already have a tokenized line that we determined wasn't
            # part of whatever we were parsing earlier? See comment in
            # Config.__init__().
            if self.end_line is not None:
                assert self.end_line_tokens is not None
                tokens = self.end_line_tokens
                tokens.go_to_start()
                line = self.end_line
                linenr = line_feeder.get_linenr()
                self.end_line = None
                self.end_line_tokens = None
            else:
                line = line_feeder.get_next()
                if line is None:
                    # End of input: legal only for files, not for unterminated
                    # if/menu/choice blocks
                    if end_marker is not None:
                        raise Kconfig_Syntax_Error, (
                                "Unexpected end of file {0}."
                                .format(line_feeder.get_filename()))
                    return block
                linenr = line_feeder.get_linenr()
                tokens = self._tokenize(line, False, filename, linenr)
                if tokens.is_empty():
                    continue
            # The first token selects which construct we are looking at
            t0 = tokens.get_next()
            # Have we reached the end of the block?
            if t0 == end_marker:
                return block
            if t0 == T_CONFIG or t0 == T_MENUCONFIG:
                # The tokenizer will automatically allocate a new Symbol object
                # for any new names it encounters, so we don't need to worry
                # about that here.
                sym = tokens.get_next()
                # Symbols defined in multiple places get the parent of their
                # first definition. However, for symbols whose parents are choice
                # statements, the choice statement takes precedence.
                if not sym.is_defined_ or isinstance(parent, Choice):
                    sym.parent = parent
                sym.is_defined_ = True
                self.kconfig_syms.append(sym)
                block.add_item(sym)
                self._parse_properties(line_feeder, sym, deps, visible_if_deps)
            elif t0 == T_MENU:
                menu = Menu()
                self.menus.append(menu)
                menu.config = self
                menu.parent = parent
                menu.title = tokens.get_next()
                menu.filename = filename
                menu.linenr = linenr
                # Parse properties and contents
                self._parse_properties(line_feeder, menu, deps, visible_if_deps)
                menu.block = self._parse_block(line_feeder,
                                               T_ENDMENU,
                                               menu,
                                               menu.dep_expr,
                                               _make_and(visible_if_deps,
                                                         menu.visible_if_expr))
                block.add_item(menu)
            elif t0 == T_IF:
                # If statements are treated as syntactic sugar for adding
                # dependencies to enclosed items and do not have an explicit
                # object representation.
                dep_expr = self._parse_expr(tokens, None, line, filename, linenr)
                self._parse_block(line_feeder,
                                  T_ENDIF,
                                  parent,
                                  _make_and(dep_expr, deps),
                                  visible_if_deps,
                                  block) # Add items to the same block
            elif t0 == T_CHOICE:
                # We support named choices
                already_defined = False
                name = None
                if len(tokens) > 1 and isinstance(tokens[1], str):
                    name = tokens[1]
                    already_defined = name in self.named_choices
                if already_defined:
                    choice = self.named_choices[name]
                else:
                    choice = Choice()
                    self.choices.append(choice)
                    if name is not None:
                        choice.name = name
                        self.named_choices[name] = choice
                choice.config = self
                choice.parent = parent
                choice.def_locations.append((filename, linenr))
                # Parse properties and contents
                self._parse_properties(line_feeder, choice, deps, visible_if_deps)
                choice.block = self._parse_block(line_feeder,
                                                 T_ENDCHOICE,
                                                 choice,
                                                 None,
                                                 visible_if_deps)
                choice._determine_actual_symbols()
                # If no type is set for the choice, its type is that of the first
                # choice item
                if choice.type == UNKNOWN:
                    for item in choice.get_symbols():
                        if item.type != UNKNOWN:
                            choice.type = item.type
                            break
                # Each choice item of UNKNOWN type gets the type of the choice
                for item in choice.get_symbols():
                    if item.type == UNKNOWN:
                        item.type = choice.type
                # For named choices defined in multiple locations, only record
                # at the first definition
                if not already_defined:
                    block.add_item(choice)
            elif t0 == T_COMMENT:
                comment = Comment()
                comment.config = self
                comment.parent = parent
                comment.filename = filename
                comment.linenr = linenr
                comment.text = tokens.get_next()
                self._parse_properties(line_feeder, comment, deps, visible_if_deps)
                block.add_item(comment)
                self.comments.append(comment)
            elif t0 == T_SOURCE:
                kconfig_file = tokens.get_next()
                exp_kconfig_file = self._expand_sym_refs(kconfig_file)
                f = os.path.join(self.base_dir, exp_kconfig_file)
                if not os.path.exists(f):
                    raise IOError, ('{0}:{1}: sourced file "{2}" (expands to\n'
                                    '"{3}") not found. Perhaps base_dir\n'
                                    '(argument to Config.__init__(), currently\n'
                                    '"{4}") is set to the wrong value.'
                                    .format(filename,
                                            linenr,
                                            kconfig_file,
                                            exp_kconfig_file,
                                            self.base_dir))
                # Add items to the same block
                self._parse_file(f, parent, deps, visible_if_deps, block)
            elif t0 == T_MAINMENU:
                text = tokens.get_next()
                if self.mainmenu_text is not None:
                    self._warn("overriding 'mainmenu' text. "
                               'Old value: "{0}", new value: "{1}".'
                               .format(self.mainmenu_text, text),
                               filename,
                               linenr)
                self.mainmenu_text = text
            else:
                _parse_error(line, "unrecognized construct.", filename, linenr)
    def _parse_properties(self, line_feeder, stmt, deps, visible_if_deps):
        """Parsing of properties for symbols, menus, choices, and comments.

        Reads property lines from 'line_feeder' until a non-property line is
        seen (handed back via self.end_line/self.end_line_tokens) or input
        runs out, then propagates 'deps' (dependencies from enclosing menus
        and if's) and 'visible_if_deps' ('visible if' conditions from
        enclosing menus) into the parsed properties of 'stmt'."""
        def parse_val_and_cond(tokens, line, filename, linenr):
            """Parses '<expr1> if <expr2>' constructs, where the 'if' part is
            optional. Returns a tuple containing the parsed expressions, with
            None as the second element if the 'if' part is missing."""
            val = self._parse_expr(tokens, stmt, line, filename, linenr, False)
            if tokens.check(T_IF):
                return (val, self._parse_expr(tokens, stmt, line, filename, linenr))
            return (val, None)
        # In case the symbol is defined in multiple locations, we need to
        # remember what prompts, defaults, and selects are new for this
        # definition, as "depends on" should only apply to the local
        # definition.
        new_prompt = None
        new_def_exprs = []
        new_selects = []
        # Dependencies from 'depends on' statements
        depends_on_expr = None
        while 1:
            line = line_feeder.get_next()
            if line is None:
                break
            filename = line_feeder.get_filename()
            linenr = line_feeder.get_linenr()
            tokens = self._tokenize(line, False, filename, linenr)
            if tokens.is_empty():
                continue
            t0 = tokens.get_next()
            if t0 == T_HELP:
                # Find first non-empty line and get its indentation
                line_feeder.remove_while(str.isspace)
                line = line_feeder.get_next()
                if line is None:
                    stmt.help = ""
                    break
                indent = _indentation(line)
                # If the first non-empty lines has zero indent, there is no
                # help text
                if indent == 0:
                    stmt.help = ""
                    line_feeder.go_back()
                    break
                help_lines = [_deindent(line, indent)]
                # The help text goes on till the first non-empty line with less
                # indent
                while 1:
                    line = line_feeder.get_next()
                    if (line is None) or \
                       (not line.isspace() and _indentation(line) < indent):
                        stmt.help = "".join(help_lines)
                        break
                    help_lines.append(_deindent(line, indent))
                if line is None:
                    break
                line_feeder.go_back()
            elif t0 == T_PROMPT:
                # 'prompt' properties override each other within a single
                # definition of a symbol, but additional prompts can be added
                # by defining the symbol multiple times; hence 'new_prompt'
                # instead of 'prompt'.
                new_prompt = parse_val_and_cond(tokens, line, filename, linenr)
            elif t0 == T_DEFAULT:
                new_def_exprs.append(parse_val_and_cond(tokens, line, filename, linenr))
            elif t0 == T_DEPENDS:
                if not tokens.check(T_ON):
                    _parse_error(line, 'expected "on" after "depends".', filename, linenr)
                parsed_deps = self._parse_expr(tokens, stmt, line, filename, linenr)
                # Menus and comments get 'depends on' folded directly into
                # their dependency expression; for symbols/choices it is
                # collected and propagated below
                if isinstance(stmt, (Menu, Comment)):
                    stmt.dep_expr = _make_and(stmt.dep_expr, parsed_deps)
                else:
                    depends_on_expr = _make_and(depends_on_expr, parsed_deps)
            elif t0 == T_VISIBLE:
                if not tokens.check(T_IF):
                    _parse_error(line, 'expected "if" after "visible".', filename, linenr)
                if not isinstance(stmt, Menu):
                    _parse_error(line,
                                 "'visible if' is only valid for menus.",
                                 filename,
                                 linenr)
                parsed_deps = self._parse_expr(tokens, stmt, line, filename, linenr)
                stmt.visible_if_expr = _make_and(stmt.visible_if_expr, parsed_deps)
            elif t0 == T_SELECT:
                target = tokens.get_next()
                stmt.referenced_syms.add(target)
                stmt.selected_syms.add(target)
                if tokens.check(T_IF):
                    new_selects.append((target,
                                        self._parse_expr(tokens, stmt, line, filename, linenr)))
                else:
                    new_selects.append((target, None))
            elif t0 in (T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING):
                stmt.type = token_to_type[t0]
                # A prompt may follow the type on the same line
                if len(tokens) > 1:
                    new_prompt = parse_val_and_cond(tokens, line, filename, linenr)
            elif t0 == T_RANGE:
                lower = tokens.get_next()
                upper = tokens.get_next()
                stmt.referenced_syms.add(lower)
                stmt.referenced_syms.add(upper)
                if tokens.check(T_IF):
                    stmt.ranges.append((lower, upper,
                                        self._parse_expr(tokens, stmt, line, filename, linenr)))
                else:
                    stmt.ranges.append((lower, upper, None))
            elif t0 == T_DEF_BOOL:
                stmt.type = BOOL
                if len(tokens) > 1:
                    new_def_exprs.append(parse_val_and_cond(tokens, line, filename, linenr))
            elif t0 == T_DEF_TRISTATE:
                stmt.type = TRISTATE
                if len(tokens) > 1:
                    new_def_exprs.append(parse_val_and_cond(tokens, line, filename, linenr))
            elif t0 == T_OPTIONAL:
                if not isinstance(stmt, Choice):
                    _parse_error(line,
                                 '"optional" is only valid for choices.',
                                 filename,
                                 linenr)
                stmt.optional = True
            elif t0 == T_OPTION:
                if tokens.check(T_ENV) and tokens.check(T_EQUAL):
                    env_var = tokens.get_next()
                    stmt.is_special_ = True
                    stmt.is_from_env = True
                    if env_var not in os.environ:
                        self._warn("""
The symbol {0} references the non-existent environment variable {1} and will
get the empty string as its value.
If you're using kconfiglib via 'make (i)scriptconfig' it should have set up the
environment correctly for you. If you still got this message, that might be an
error, and you should e-mail kconfiglib@gmail.com.
.""" .format(stmt.name, env_var),
                                   filename,
                                   linenr)
                        stmt.cached_value = ""
                    else:
                        stmt.cached_value = os.environ[env_var]
                elif tokens.check(T_DEFCONFIG_LIST):
                    self.defconfig_sym = stmt
                elif tokens.check(T_MODULES):
                    self._warn("the 'modules' option is not supported. "
                               "Let me know if this is a problem for you; "
                               "it shouldn't be that hard to implement.",
                               filename,
                               linenr)
                else:
                    _parse_error(line, "unrecognized option.", filename, linenr)
            else:
                # Not a property line: hand it back to the caller.
                # See comment in Config.__init__()
                self.end_line = line
                self.end_line_tokens = tokens
                break
        # Propagate dependencies from enclosing menus and if's.
        # For menus and comments..
        if isinstance(stmt, (Menu, Comment)):
            stmt.orig_deps = stmt.dep_expr
            stmt.deps_from_containing = deps
            stmt.dep_expr = _make_and(stmt.dep_expr, deps)
            stmt.all_referenced_syms = \
              stmt.referenced_syms | _get_expr_syms(deps)
        # For symbols and choices..
        else:
            # See comment for 'menu_dep'
            stmt.menu_dep = depends_on_expr
            # Propagate dependencies specified with 'depends on' to any new
            # default expressions, prompts, and selections. ("New" since a
            # symbol might be defined in multiple places and the dependencies
            # should only apply to the local definition.)
            new_def_exprs = [(val_expr, _make_and(cond_expr, depends_on_expr))
                             for (val_expr, cond_expr) in new_def_exprs]
            new_selects = [(target, _make_and(cond_expr, depends_on_expr))
                           for (target, cond_expr) in new_selects]
            if new_prompt is not None:
                prompt, cond_expr = new_prompt
                # 'visible if' dependencies from enclosing menus get propagated
                # to prompts
                if visible_if_deps is not None:
                    cond_expr = _make_and(cond_expr, visible_if_deps)
                new_prompt = (prompt, _make_and(cond_expr, depends_on_expr))
            # We save the original expressions -- before any menu and if
            # conditions have been propagated -- so these can be retrieved
            # later.
            stmt.orig_def_exprs.extend(new_def_exprs)
            if new_prompt is not None:
                stmt.orig_prompts.append(new_prompt)
            # Only symbols can select
            if isinstance(stmt, Symbol):
                stmt.orig_selects.extend(new_selects)
            # Save dependencies from enclosing menus and if's
            stmt.deps_from_containing = deps
            # The set of symbols referenced directly by the symbol/choice plus
            # all symbols referenced by enclosing menus and if's.
            stmt.all_referenced_syms = \
              stmt.referenced_syms | _get_expr_syms(deps)
            # Propagate dependencies from enclosing menus and if's
            stmt.def_exprs.extend([(val_expr, _make_and(cond_expr, deps))
                                   for (val_expr, cond_expr) in new_def_exprs])
            for (target, cond) in new_selects:
                target.rev_dep = _make_or(target.rev_dep,
                                          _make_and(stmt,
                                                    _make_and(cond, deps)))
            if new_prompt is not None:
                prompt, cond_expr = new_prompt
                stmt.prompts.append((prompt, _make_and(cond_expr, deps)))
#
# Symbol table manipulation
#
def _sym_lookup(self, name, add_sym_if_not_exists = True):
"""Fetches the symbol 'name' from the symbol table, optionally adding
it if it does not exist (this is usually what we want)."""
if name in self.syms:
return self.syms[name]
new_sym = Symbol()
new_sym.config = self
new_sym.name = name
if add_sym_if_not_exists:
self.syms[name] = new_sym
else:
# This warning is generated while evaluating an expression
# containing undefined symbols using Config.eval()
self._warn("no symbol {0} in configuration".format(name))
return new_sym
#
# Evaluation of symbols and expressions
#
def _eval_expr(self, expr):
"""Evaluates an expression and returns one of the tristate values "n",
"m" or "y"."""
res = self._eval_expr_2(expr)
# Promote "m" to "y" if we're running without modules. Internally, "m"
# is often rewritten to "m" && MODULES by both the C implementation and
# kconfiglib, which takes care of cases where "m" should be false if
# we're running without modules.
if res == "m" and not self._has_modules():
return "y"
return res
    def _eval_expr_2(self, expr):
        """Recursive worker for _eval_expr(). Returns the tristate value of
        'expr' without the final "m"-to-"y" promotion that _eval_expr()
        applies when modules are disabled."""
        if expr is None:
            # None stands for "y" throughout the expression machinery
            return "y"
        if isinstance(expr, Symbol):
            # Non-bool/tristate symbols are always "n" in a tristate sense,
            # regardless of their value
            if expr.type != BOOL and expr.type != TRISTATE:
                return "n"
            return expr.get_value()
        if isinstance(expr, str):
            return expr if (expr == "y" or expr == "m") else "n"
        first_expr = expr[0]
        if first_expr == OR:
            res = "n"
            for subexpr in expr[1]:
                ev = self._eval_expr_2(subexpr)
                # Return immediately upon discovering a "y" term
                if ev == "y":
                    return "y"
                if ev == "m":
                    res = "m"
            # 'res' is either "n" or "m" here; we already handled the
            # short-circuiting "y" case in the loop.
            return res
        if first_expr == AND:
            res = "y"
            for subexpr in expr[1]:
                ev = self._eval_expr_2(subexpr)
                # Return immediately upon discovering an "n" term
                if ev == "n":
                    return "n"
                if ev == "m":
                    res = "m"
            # 'res' is either "m" or "y" here; we already handled the
            # short-circuiting "n" case in the loop.
            return res
        if first_expr == NOT:
            ev = self._eval_expr_2(expr[1])
            if ev == "y":
                return "n"
            # !"n" is "y"; !"m" stays "m"
            return "y" if (ev == "n") else "m"
        if first_expr == EQUAL:
            return "y" if (self._get_str_value(expr[1]) ==
                           self._get_str_value(expr[2])) else "n"
        if first_expr == UNEQUAL:
            return "y" if (self._get_str_value(expr[1]) !=
                           self._get_str_value(expr[2])) else "n"
        _internal_error("Internal error while evaluating expression: "
                        "unknown operation {0}.".format(first_expr))
def _get_str_value(self, obj):
if isinstance(obj, str):
return obj
# obj is a Symbol
return obj.get_value()
def _eval_min(self, e1, e2):
e1_eval = self._eval_expr(e1)
e2_eval = self._eval_expr(e2)
return e1_eval if tri_less(e1_eval, e2_eval) else e2_eval
def _eval_max(self, e1, e2):
e1_eval = self._eval_expr(e1)
e2_eval = self._eval_expr(e2)
return e1_eval if tri_greater(e1_eval, e2_eval) else e2_eval
#
# Methods related to the MODULES symbol
#
def _has_modules(self):
modules_sym = self.syms.get("MODULES")
return (modules_sym is not None) and (modules_sym.get_value() == "y")
#
# Dependency tracking
#
    def _build_dep(self):
        """Populates the Symbol.dep sets, linking the symbol to the symbols
        that immediately depend on it in the sense that changing the value of
        the symbol might affect the values of those other symbols. This is used
        for caching/invalidation purposes. The calculated sets might be larger
        than necessary as we don't do any complicated analysis of the
        expressions."""
        # Start every symbol from a clean slate
        for sym in self.syms.itervalues():
            sym.dep = set()
        # Adds 'sym' as a directly dependent symbol to all symbols that appear
        # in the expression 'e'
        def add_expr_deps(e, sym):
            for s in _get_expr_syms(e):
                s.dep.add(sym)
        # The directly dependent symbols of a symbol are:
        #  - Any symbols whose prompts, default values, rev_dep (select
        #    condition), or ranges depend on the symbol
        #  - Any symbols that belong to the same choice statement as the symbol
        #    (these won't be included in 'dep' as that makes the dependency
        #    graph unwieldy, but Symbol._get_dependent() will include them)
        #  - Any symbols in a choice statement that depends on the symbol
        for sym in self.syms.itervalues():
            for (_, e) in sym.prompts:
                add_expr_deps(e, sym)
            for (v, e) in sym.def_exprs:
                add_expr_deps(v, sym)
                add_expr_deps(e, sym)
            add_expr_deps(sym.rev_dep, sym)
            for (l, u, e) in sym.ranges:
                add_expr_deps(l, sym)
                add_expr_deps(u, sym)
                add_expr_deps(e, sym)
            if sym.is_choice_symbol_:
                # Choice symbols also depend on the prompts and defaults of
                # their enclosing choice
                choice = sym.parent
                for (_, e) in choice.prompts:
                    add_expr_deps(e, sym)
                for (_, e) in choice.def_exprs:
                    add_expr_deps(e, sym)
def _expr_val_str(self, expr, no_value_str = "(none)", get_val_instead_of_eval = False):
# Since values are valid expressions, _expr_to_str() will get a nice
# string representation for those as well.
if expr is None:
return no_value_str
if get_val_instead_of_eval:
if isinstance(expr, str):
return _expr_to_str(expr)
val = expr.get_value()
else:
val = self._eval_expr(expr)
return "{0} (value: {1})".format(_expr_to_str(expr), _expr_to_str(val))
def _expand_sym_refs(self, s):
"""Expands $-references to symbols in 's' to symbol values, or to the
empty string for undefined symbols."""
while 1:
sym_ref_re_match = sym_ref_re.search(s)
if sym_ref_re_match is None:
return s
sym_name = sym_ref_re_match.group(0)[1:]
sym = self.syms.get(sym_name)
expansion = "" if sym is None else sym.get_value()
s = s[:sym_ref_re_match.start()] + \
expansion + \
s[sym_ref_re_match.end():]
    def _get_sym_or_choice_str(self, sc):
        """Symbols and choices have many properties in common, so we factor out
        common __str__() stuff here. "sc" is short for "symbol or choice"."""
        # As we deal a lot with string representations here, use some
        # convenient shorthand:
        s = _expr_to_str
        #
        # Common symbol/choice properties
        #
        user_value_str = "(no user value)" if sc.user_val is None else s(sc.user_val)
        visibility_str = s(sc.get_visibility())
        # Build prompts string
        if sc.prompts == []:
            prompts_str = " (no prompts)"
        else:
            prompts_str_rows = []
            for (prompt, cond_expr) in sc.orig_prompts:
                if cond_expr is None:
                    prompts_str_rows.append(' "{0}"'.format(prompt))
                else:
                    prompts_str_rows.append(' "{0}" if '.format(prompt) +
                                            self._expr_val_str(cond_expr))
            prompts_str = "\n".join(prompts_str_rows)
        # Build locations string
        if sc.def_locations == []:
            locations_str = "(no locations)"
        else:
            locations_str = " ".join(["{0}:{1}".format(filename, linenr) for
                                      (filename, linenr) in sc.def_locations])
        # Build additional-dependencies-from-menus-and-if's string
        additional_deps_str = " " + self._expr_val_str(sc.deps_from_containing,
                                                       "(no additional dependencies)")
        #
        # Symbol-specific stuff
        #
        if isinstance(sc, Symbol):
            # Build value string
            value_str = s(sc.get_value())
            # Build ranges string
            # NOTE(review): this inner isinstance() check is redundant -- we
            # are already inside the 'isinstance(sc, Symbol)' branch above.
            if isinstance(sc, Symbol):
                if sc.ranges == []:
                    ranges_str = " (no ranges)"
                else:
                    ranges_str_rows = []
                    for (l, u, cond_expr) in sc.ranges:
                        if cond_expr is None:
                            ranges_str_rows.append(" [{0}, {1}]".format(s(l), s(u)))
                        else:
                            ranges_str_rows.append(" [{0}, {1}] if {2}"
                                                   .format(s(l), s(u), self._expr_val_str(cond_expr)))
                    ranges_str = "\n".join(ranges_str_rows)
            # Build default values string
            if sc.def_exprs == []:
                defaults_str = " (no default values)"
            else:
                defaults_str_rows = []
                for (val_expr, cond_expr) in sc.orig_def_exprs:
                    row_str = " " + self._expr_val_str(val_expr, "(none)", sc.type == STRING)
                    defaults_str_rows.append(row_str)
                    defaults_str_rows.append(" Condition: " + self._expr_val_str(cond_expr))
                defaults_str = "\n".join(defaults_str_rows)
            # Build selects string
            if sc.orig_selects == []:
                selects_str = " (no selects)"
            else:
                selects_str_rows = []
                for (target, cond_expr) in sc.orig_selects:
                    if cond_expr is None:
                        selects_str_rows.append(" {0}".format(target.name))
                    else:
                        selects_str_rows.append(" {0} if ".format(target.name) +
                                                self._expr_val_str(cond_expr))
                selects_str = "\n".join(selects_str_rows)
            # Build reverse dependencies string
            if sc.rev_dep == "n":
                rev_dep_str = " (no reverse dependencies)"
            else:
                rev_dep_str = " " + self._expr_val_str(sc.rev_dep)
            res = _sep_lines("Symbol " + (sc.name if sc.name is not None else "(no name)"),
                             "Type : " + typename[sc.type],
                             "Value : " + value_str,
                             "User value : " + user_value_str,
                             "Visibility : " + visibility_str,
                             "Is choice item : " + bool_str[sc.is_choice_symbol_],
                             "Is defined : " + bool_str[sc.is_defined_],
                             "Is from env. : " + bool_str[sc.is_from_env],
                             "Is special : " + bool_str[sc.is_special_] + "\n")
            if sc.ranges != []:
                res += _sep_lines("Ranges:",
                                  ranges_str + "\n")
            res += _sep_lines("Prompts:",
                              prompts_str,
                              "Default values:",
                              defaults_str,
                              "Selects:",
                              selects_str,
                              "Reverse dependencies:",
                              rev_dep_str,
                              "Additional dependencies from enclosing menus and if's:",
                              additional_deps_str,
                              "Locations: " + locations_str)
            return res
        #
        # Choice-specific stuff
        #
        # Build name string (for named choices)
        if sc.name is None:
            name_str = "(no name)"
        else:
            name_str = sc.name
        # Build selected symbol string
        sel = sc.get_selection()
        if sel is None:
            sel_str = "(no selection)"
        else:
            sel_str = sel.name
        # Build mode string
        mode_str = s(sc.get_mode())
        # Build default values string
        if sc.def_exprs == []:
            defaults_str = " (no default values)"
        else:
            defaults_str_rows = []
            for (sym, cond_expr) in sc.orig_def_exprs:
                if cond_expr is None:
                    defaults_str_rows.append(" {0}".format(sym.name))
                else:
                    defaults_str_rows.append(" {0} if ".format(sym.name) +
                                             self._expr_val_str(cond_expr))
            defaults_str = "\n".join(defaults_str_rows)
        # Build contained symbols string
        names = [sym.name for sym in sc.get_symbols()]
        if names == []:
            syms_string = "(empty)"
        else:
            syms_string = " ".join(names)
        return _sep_lines("Choice",
                          "Name (for named choices): " + name_str,
                          "Type : " + typename[sc.type],
                          "Selected symbol : " + sel_str,
                          "User value : " + user_value_str,
                          "Mode : " + mode_str,
                          "Visibility : " + visibility_str,
                          "Optional : " + bool_str[sc.optional],
                          "Prompts:",
                          prompts_str,
                          "Defaults:",
                          defaults_str,
                          "Choice symbols:",
                          " " + syms_string,
                          "Additional dependencies from enclosing menus and if's:",
                          additional_deps_str,
                          "Locations: " + locations_str)
def _expr_depends_on(self, expr, sym):
"""Reimplementation of expr_depends_symbol() from mconf.c. Used to
determine if a submenu should be implicitly created, which influences what
items inside choice statements are considered choice items."""
if expr is None:
return False
def rec(expr):
if isinstance(expr, str):
return False
if isinstance(expr, Symbol):
return expr is sym
e0 = expr[0]
if e0 == EQUAL or e0 == UNEQUAL:
return self._eq_to_sym(expr) is sym
if e0 == AND:
for and_expr in expr[1]:
if rec(and_expr):
return True
return False
return rec(expr)
def _eq_to_sym(self, eq):
"""_expr_depends_on() helper. For (in)equalities of the form sym = y/m
or sym != n, returns sym. For other (in)equalities, returns None."""
relation, left, right = eq
left = self._transform_n_m_y(left)
right = self._transform_n_m_y(right)
# Make sure the symbol (if any) appears to the left
if not isinstance(left, Symbol):
left, right = right, left
if not isinstance(left, Symbol):
return None
if (relation == EQUAL and (right == "m" or right == "y")) or \
(relation == UNEQUAL and right == "n"):
return left
return None
def _transform_n_m_y(self, item):
"""_eq_to_sym() helper. Translates the symbols n, m, and y to their
string equivalents."""
if item is self.n:
return "n"
if item is self.m:
return "m"
if item is self.y:
return "y"
return item
def _warn(self, msg, filename = None, linenr = None):
"""For printing warnings to stderr."""
if self.print_warnings:
self._warn_or_undef_assign(msg, WARNING, filename, linenr)
def _undef_assign(self, msg, filename = None, linenr = None):
"""For printing informational messages related to assignments
to undefined variables to stderr."""
if self.print_undef_assign:
self._warn_or_undef_assign(msg, UNDEF_ASSIGN, filename, linenr)
def _warn_or_undef_assign(self, msg, msg_type, filename, linenr):
if filename is not None:
sys.stderr.write("{0}:".format(_clean_up_path(filename)))
if linenr is not None:
sys.stderr.write("{0}:".format(linenr))
if msg_type == WARNING:
sys.stderr.write("warning: ")
elif msg_type == UNDEF_ASSIGN:
sys.stderr.write("info: ")
else:
_internal_error('Internal error while printing warning: unknown warning type "{0}".'
.format(msg_type))
sys.stderr.write(msg + "\n")
def _get_expr_syms(expr):
"""Returns the set() of symbols appearing in expr."""
res = set()
if expr is None:
return res
def rec(expr):
if isinstance(expr, Symbol):
res.add(expr)
return
if isinstance(expr, str):
return
e0 = expr[0]
if e0 == OR or e0 == AND:
for term in expr[1]:
rec(term)
elif e0 == NOT:
rec(expr[1])
elif e0 == EQUAL or e0 == UNEQUAL:
_, v1, v2 = expr
if isinstance(v1, Symbol):
res.add(v1)
if isinstance(v2, Symbol):
res.add(v2)
else:
_internal_error("Internal error while fetching symbols from an "
"expression with token stream {0}.".format(expr))
rec(expr)
return res
#
# Construction of expressions
#
# These functions as well as the _eval_min/max() functions above equate
# None with "y", which is usually what we want, but needs to be kept in
# mind.
def _make_or(e1, e2):
# Perform trivial simplification and avoid None's (which
# correspond to y's)
if e1 is None or e2 is None or \
e1 == "y" or e2 == "y":
return "y"
if e1 == "n":
return e2
if e2 == "n":
return e1
# Prefer to merge/update argument list if possible instead of creating
# a new OR node
if isinstance(e1, tuple) and e1[0] == OR:
if isinstance(e2, tuple) and e2[0] == OR:
return (OR, e1[1] + e2[1])
return (OR, e1[1] + [e2])
if isinstance(e2, tuple) and e2[0] == OR:
return (OR, e2[1] + [e1])
return (OR, [e1, e2])
# Note: returns None if e1 == e2 == None
def _make_and(e1, e2):
if e1 == "n" or e2 == "n":
return "n"
if e1 is None or e1 == "y":
return e2
if e2 is None or e2 == "y":
return e1
# Prefer to merge/update argument list if possible instead of creating
# a new AND node
if isinstance(e1, tuple) and e1[0] == AND:
if isinstance(e2, tuple) and e2[0] == AND:
return (AND, e1[1] + e2[1])
return (AND, e1[1] + [e2])
if isinstance(e2, tuple) and e2[0] == AND:
return (AND, e2[1] + [e1])
return (AND, [e1, e2])
#
# Constants and functions related to types, parsing, evaluation and printing,
# put globally to unclutter the Config class a bit.
#
# Tokens
# (The tuple order fixes the integer value of each token constant; it must
# stay in sync with range(0, 38).)
(T_OR, T_AND, T_NOT,
 T_OPEN_PAREN, T_CLOSE_PAREN,
 T_EQUAL, T_UNEQUAL,
 T_MAINMENU, T_MENU, T_ENDMENU,
 T_SOURCE, T_CHOICE, T_ENDCHOICE,
 T_COMMENT, T_CONFIG, T_MENUCONFIG,
 T_HELP, T_IF, T_ENDIF, T_DEPENDS, T_ON,
 T_OPTIONAL, T_PROMPT, T_DEFAULT,
 T_BOOL, T_TRISTATE, T_HEX, T_INT, T_STRING,
 T_DEF_BOOL, T_DEF_TRISTATE,
 T_SELECT, T_RANGE, T_OPTION, T_ENV,
 T_DEFCONFIG_LIST, T_MODULES, T_VISIBLE) = range(0, 38)
# Keyword to token map
keywords = {
        "mainmenu" : T_MAINMENU,
        "menu" : T_MENU,
        "endmenu" : T_ENDMENU,
        "endif" : T_ENDIF,
        "endchoice" : T_ENDCHOICE,
        "source" : T_SOURCE,
        "choice" : T_CHOICE,
        "config" : T_CONFIG,
        "comment" : T_COMMENT,
        "menuconfig" : T_MENUCONFIG,
        "help" : T_HELP,
        "if" : T_IF,
        "depends" : T_DEPENDS,
        "on" : T_ON,
        "optional" : T_OPTIONAL,
        "prompt" : T_PROMPT,
        "default" : T_DEFAULT,
        "bool" : T_BOOL,
        "boolean" : T_BOOL,
        "tristate" : T_TRISTATE,
        "int" : T_INT,
        "hex" : T_HEX,
        "def_bool" : T_DEF_BOOL,
        "def_tristate" : T_DEF_TRISTATE,
        "string" : T_STRING,
        "select" : T_SELECT,
        "range" : T_RANGE,
        "option" : T_OPTION,
        "env" : T_ENV,
        "defconfig_list" : T_DEFCONFIG_LIST,
        "modules" : T_MODULES,
        "visible" : T_VISIBLE }
# Strings to use for True and False
bool_str = { False : "false", True : "true" }
# Tokens after which identifier-like lexemes are treated as strings. T_CHOICE
# is included to avoid symbols being registered for named choices.
string_lex = frozenset((T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING, T_CHOICE,
                        T_PROMPT, T_MENU, T_COMMENT, T_SOURCE, T_MAINMENU))
# Matches the initial token on a line; see _tokenize().
initial_token_re = re.compile(r"[^\w]*(\w+)")
# Matches an identifier/keyword optionally preceded by whitespace
id_keyword_re = re.compile(r"\s*([\w./-]+)")
# Regular expressions for parsing .config files
set_re = re.compile(r"CONFIG_(\w+)=(.*)")
unset_re = re.compile(r"# CONFIG_(\w+) is not set")
# Regular expression for finding $-references to symbols in strings
sym_ref_re = re.compile(r"\$[A-Za-z_]+")
# Integers representing symbol types
UNKNOWN, BOOL, TRISTATE, STRING, HEX, INT = range(0, 6)
# Strings to use for types
typename = {
        UNKNOWN : "unknown",
        BOOL : "bool",
        TRISTATE : "tristate",
        STRING : "string",
        HEX : "hex",
        INT : "int" }
# Token to type mapping
token_to_type = { T_BOOL : BOOL,
                  T_TRISTATE : TRISTATE,
                  T_STRING : STRING,
                  T_INT : INT,
                  T_HEX : HEX }
# Default values for symbols of different types (the value the symbol gets if
# it is not assigned a user value and none of its 'default' clauses kick in)
default_value = { BOOL : "n",
                  TRISTATE : "n",
                  STRING : "",
                  INT : "",
                  HEX : "" }
# Indicates that no item is selected in a choice statement
NO_SELECTION = 0
# Integers representing expression types
OR, AND, NOT, EQUAL, UNEQUAL = range(0, 5)
# Map from tristate values to integers
tri_to_int = { "n" : 0, "m" : 1, "y" : 2 }
# Printing-related stuff
op_to_str = { AND : " && ",
              OR : " || ",
              EQUAL : " = ",
              UNEQUAL : " != " }
precedence = { OR : 0, AND : 1, NOT : 2 }
# Types of informational messages
WARNING = 0
UNDEF_ASSIGN = 1
def _intersperse(lst, op):
"""_expr_to_str() helper. Gets the string representation of each expression in lst
and produces a list where op has been inserted between the elements."""
if lst == []:
return ""
res = []
def handle_sub_expr(expr):
no_parens = isinstance(expr, (str, Symbol)) or \
expr[0] in (EQUAL, UNEQUAL) or \
precedence[op] <= precedence[expr[0]]
if not no_parens:
res.append("(")
res.extend(_expr_to_str_rec(expr))
if not no_parens:
res.append(")")
op_str = op_to_str[op]
handle_sub_expr(lst[0])
for expr in lst[1:]:
res.append(op_str)
handle_sub_expr(expr)
return res
def _expr_to_str(expr):
    """Returns the string representation of the expression 'expr'."""
    return "".join(_expr_to_str_rec(expr))
def _sym_str_string(sym_or_str):
if isinstance(sym_or_str, str):
return '"{0}"'.format(sym_or_str)
return sym_or_str.name
def _expr_to_str_rec(expr):
    """_expr_to_str() helper. Returns a list of string fragments that, when
    joined, form the string representation of 'expr'."""
    if expr is None:
        return [""]
    if isinstance(expr, (Symbol, str)):
        # Symbols and string constants are the leaves of the expression tree
        return [_sym_str_string(expr)]
    e0 = expr[0]
    if e0 == OR or e0 == AND:
        return _intersperse(expr[1], expr[0])
    if e0 == NOT:
        # Parenthesize anything that isn't a plain symbol/string
        need_parens = not isinstance(expr[1], (str, Symbol))
        res = ["!"]
        if need_parens:
            res.append("(")
        res.extend(_expr_to_str_rec(expr[1]))
        if need_parens:
            res.append(")")
        return res
    if e0 == EQUAL or e0 == UNEQUAL:
        return [_sym_str_string(expr[1]),
                op_to_str[expr[0]],
                _sym_str_string(expr[2])]
class _Block:
"""Represents a list of items (symbols, menus, choice statements and
comments) appearing at the top-level of a file or witin a menu, choice or
if statement."""
def __init__(self):
self.items = []
def get_items(self):
return self.items
def add_item(self, item):
self.items.append(item)
def _make_conf(self):
# Collect the substrings in a list and later use join() instead of +=
# to build the final .config contents. With older Python versions, this
# yields linear instead of quadratic complexity.
strings = []
for item in self.items:
strings.extend(item._make_conf())
return strings
def add_depend_expr(self, expr):
for item in self.items:
item.add_depend_expr(expr)
class Item():
    """Base class for symbols and other Kconfig constructs. Subclasses are
    Symbol, Choice, Menu, and Comment."""
    # Convenience predicates equivalent to isinstance() checks against the
    # concrete subclasses.
    def is_symbol(self):
        """Returns True if the item is a symbol, otherwise False. Short for
        isinstance(item, kconfiglib.Symbol)."""
        return isinstance(self, Symbol)
    def is_choice(self):
        """Returns True if the item is a choice, otherwise False. Short for
        isinstance(item, kconfiglib.Choice)."""
        return isinstance(self, Choice)
    def is_menu(self):
        """Returns True if the item is a menu, otherwise False. Short for
        isinstance(item, kconfiglib.Menu)."""
        return isinstance(self, Menu)
    def is_comment(self):
        """Returns True if the item is a comment, otherwise False. Short for
        isinstance(item, kconfiglib.Comment)."""
        return isinstance(self, Comment)
class _HasVisibility():
"""Base class for elements that have a "visibility" that acts as an upper
limit on the values a user can set for them. Subclasses are Symbol and
Choice (which supply some of the attributes)."""
def __init__(self):
self.cached_visibility = None
self.prompts = []
def _invalidate(self):
self.cached_visibility = None
def _get_visibility(self):
if self.cached_visibility is None:
vis = "n"
for (prompt, cond_expr) in self.prompts:
vis = self.config._eval_max(vis, cond_expr)
if isinstance(self, Symbol) and self.is_choice_symbol_:
vis = self.config._eval_min(vis, self.parent._get_visibility())
# Promote "m" to "y" if we're dealing with a non-tristate
if vis == "m" and self.type != TRISTATE:
vis = "y"
self.cached_visibility = vis
return self.cached_visibility
class Symbol(Item, _HasVisibility):
    """Represents a configuration symbol - e.g. FOO for
    config FOO
    ..."""
    #
    # Public interface
    #
    def get_value(self):
        """Calculate and return the value of the symbol. See also
        Symbol.set_user_value()."""
        # The value is memoized; _invalidate() clears the cache whenever
        # something the value depends on changes
        if self.cached_value is not None:
            return self.cached_value
        self.write_to_conf = False
        # As a quirk of Kconfig, undefined symbols get their name as their
        # value. This is why things like "FOO = bar" work for seeing if FOO has
        # the value "bar".
        if self.type == UNKNOWN:
            self.cached_value = self.name
            return self.name
        # Start from the type's default value and refine it below
        new_val = default_value[self.type]
        vis = self._get_visibility()
        if self.type == BOOL or self.type == TRISTATE:
            # The visibility and mode (modules-only or single-selection) of
            # choice items will be taken into account in self._get_visibility()
            if self.is_choice_symbol_:
                if vis != "n":
                    choice = self.parent
                    mode = choice.get_mode()
                    self.write_to_conf = (mode != "n")
                    if mode == "y":
                        new_val = "y" if (choice.get_selection() is self) else "n"
                    elif mode == "m":
                        if self.user_val == "m" or self.user_val == "y":
                            new_val = "m"
            else:
                use_defaults = True
                if vis != "n":
                    # If the symbol is visible and has a user value, use that.
                    # Otherwise, look at defaults.
                    self.write_to_conf = True
                    if self.user_val is not None:
                        new_val = self.config._eval_min(self.user_val, vis)
                        use_defaults = False
                if use_defaults:
                    # Only the first 'default' whose condition holds applies
                    for (val_expr, cond_expr) in self.def_exprs:
                        cond_eval = self.config._eval_expr(cond_expr)
                        if cond_eval != "n":
                            self.write_to_conf = True
                            new_val = self.config._eval_min(val_expr, cond_eval)
                            break
                # Reverse dependencies take precedence
                rev_dep_val = self.config._eval_expr(self.rev_dep)
                if rev_dep_val != "n":
                    self.write_to_conf = True
                    new_val = self.config._eval_max(new_val, rev_dep_val)
            # Promote "m" to "y" for booleans
            if new_val == "m" and self.type == BOOL:
                new_val = "y"
        elif self.type == STRING:
            use_defaults = True
            if vis != "n":
                self.write_to_conf = True
                if self.user_val is not None:
                    new_val = self.user_val
                    use_defaults = False
            if use_defaults:
                for (val_expr, cond_expr) in self.def_exprs:
                    if self.config._eval_expr(cond_expr) != "n":
                        self.write_to_conf = True
                        new_val = self.config._get_str_value(val_expr)
                        break
        elif self.type == HEX or self.type == INT:
            has_active_range = False
            low = None
            high = None
            use_defaults = True
            base = 16 if self.type == HEX else 10
            # Only the first range whose condition evaluates to non-"n" is in
            # effect; bounds that fail to parse in the symbol's base become 0
            for(l, h, cond_expr) in self.ranges:
                if self.config._eval_expr(cond_expr) != "n":
                    has_active_range = True
                    low_str = self.config._get_str_value(l)
                    high_str = self.config._get_str_value(h)
                    low = int(low_str, base) if \
                      _is_base_n(low_str, base) else 0
                    high = int(high_str, base) if \
                      _is_base_n(high_str, base) else 0
                    break
            if vis != "n":
                self.write_to_conf = True
                if self.user_val is not None and \
                   _is_base_n(self.user_val, base) and \
                   (not has_active_range or
                    low <= int(self.user_val, base) <= high):
                    # If the user value is OK, it is stored in exactly the same
                    # form as specified in the assignment (with or without
                    # "0x", etc).
                    use_defaults = False
                    new_val = self.user_val
            if use_defaults:
                for (val_expr, cond_expr) in self.def_exprs:
                    if self.config._eval_expr(cond_expr) != "n":
                        self.write_to_conf = True
                        # If the default value is OK, it is stored in exactly
                        # the same form as specified. Otherwise, it is clamped
                        # to the range, and the output has "0x" as appropriate
                        # for the type.
                        new_val = self.config._get_str_value(val_expr)
                        if _is_base_n(new_val, base):
                            new_val_num = int(new_val, base)
                            if has_active_range:
                                clamped_val = None
                                if new_val_num < low:
                                    clamped_val = low
                                elif new_val_num > high:
                                    clamped_val = high
                                if clamped_val is not None:
                                    new_val = (hex(clamped_val) if \
                                      self.type == HEX else str(clamped_val))
                        break
                else: # For the for loop
                    # If no user value or default kicks in but the hex/int has
                    # an active range, then the low end of the range is used,
                    # provided it's > 0, with "0x" prepended as appropriate.
                    if has_active_range and low > 0:
                        new_val = (hex(low) if self.type == HEX else str(low))
        self.cached_value = new_val
        return new_val
    def set_user_value(self, v):
        """Sets the user value of the symbol.
        Equal in effect to assigning the value to the symbol within a .config
        file. Use get_lower/upper_bound() or get_assignable_values() to find
        the range of currently assignable values for bool and tristate symbols;
        setting values outside this range will cause the user value to differ
        from the result of Symbol.get_value() (be truncated). Values that are
        invalid for the type (such as a_bool.set_user_value("foo")) are
        ignored, and a warning is emitted if an attempt is made to assign such
        a value.
        For any type of symbol, is_modifiable() can be used to check if a user
        value will currently have any effect on the symbol, as determined by
        its visibility and range of assignable values. Any value that is valid
        for the type (bool, tristate, etc.) will end up being reflected in
        get_user_value() though, and might have an effect later if conditions
        change. To get rid of the user value, use unset_user_value().
        Any symbols dependent on the symbol are (recursively) invalidated, so
        things will just work with regards to dependencies.
        v -- The user value to give to the symbol."""
        self._set_user_value_no_invalidate(v, False)
        # There might be something more efficient you could do here, but play
        # it safe.
        if self.name == "MODULES":
            self.config._invalidate_all()
            return
        self._invalidate()
        self._invalidate_dependent()
    def unset_user_value(self):
        """Resets the user value of the symbol, as if the symbol had never
        gotten a user value via Config.load_config() or
        Symbol.set_user_value()."""
        self._unset_user_value_no_recursive_invalidate()
        self._invalidate_dependent()
    def get_user_value(self):
        """Returns the value assigned to the symbol in a .config or via
        Symbol.set_user_value() (provided the value was valid for the type of the
        symbol). Returns None in case of no user value."""
        return self.user_val
    def get_name(self):
        """Returns the name of the symbol."""
        return self.name
    def get_prompts(self):
        """Returns a list of prompts defined for the symbol, in the order they
        appear in the configuration files. Returns the empty list for symbols
        with no prompt.
        This list will have a single entry for the vast majority of symbols
        having prompts, but having multiple prompts for a single symbol is
        possible through having multiple 'config' entries for it."""
        return [prompt for prompt, _ in self.orig_prompts]
    def get_upper_bound(self):
        """For string/hex/int symbols and for bool and tristate symbols that
        cannot be modified (see is_modifiable()), returns None.
        Otherwise, returns the highest value the symbol can be set to with
        Symbol.set_user_value() (that will not be truncated): one of "m" or "y",
        arranged from lowest to highest. This corresponds to the highest value
        the symbol could be given in e.g. the 'make menuconfig' interface.
        See also the tri_less*() and tri_greater*() functions, which could come
        in handy."""
        if self.type != BOOL and self.type != TRISTATE:
            return None
        rev_dep = self.config._eval_expr(self.rev_dep)
        # A bool selected to "m" gets promoted to "y"
        if self.type == BOOL and rev_dep == "m":
            rev_dep = "y"
        vis = self._get_visibility()
        # Modifiable only if visibility is strictly above the select floor
        if (tri_to_int[vis] - tri_to_int[rev_dep]) > 0:
            return vis
        return None
    def get_lower_bound(self):
        """For string/hex/int symbols and for bool and tristate symbols that
        cannot be modified (see is_modifiable()), returns None.
        Otherwise, returns the lowest value the symbol can be set to with
        Symbol.set_user_value() (that will not be truncated): one of "n" or "m",
        arranged from lowest to highest. This corresponds to the lowest value
        the symbol could be given in e.g. the 'make menuconfig' interface.
        See also the tri_less*() and tri_greater*() functions, which could come
        in handy."""
        if self.type != BOOL and self.type != TRISTATE:
            return None
        rev_dep = self.config._eval_expr(self.rev_dep)
        # A bool selected to "m" gets promoted to "y"
        if self.type == BOOL and rev_dep == "m":
            rev_dep = "y"
        if (tri_to_int[self._get_visibility()] - tri_to_int[rev_dep]) > 0:
            return rev_dep
        return None
    def get_assignable_values(self):
        """For string/hex/int symbols and for bool and tristate symbols that
        cannot be modified (see is_modifiable()), returns the empty list.
        Otherwise, returns a list containing the user values that can be
        assigned to the symbol (that won't be truncated). Usage example:
        if "m" in sym.get_assignable_values():
            sym.set_user_value("m")
        This is basically a more convenient interface to
        get_lower/upper_bound() when wanting to test if a particular tristate
        value can be assigned."""
        if self.type != BOOL and self.type != TRISTATE:
            return []
        rev_dep = self.config._eval_expr(self.rev_dep)
        # A bool selected to "m" gets promoted to "y"
        if self.type == BOOL and rev_dep == "m":
            rev_dep = "y"
        # Slice of the tristate ladder between the select floor (inclusive)
        # and the visibility (inclusive)
        res = ["n", "m", "y"][tri_to_int[rev_dep] :
                              tri_to_int[self._get_visibility()] + 1]
        return res if len(res) > 1 else []
    def get_type(self):
        """Returns the type of the symbol: one of UNKNOWN, BOOL, TRISTATE,
        STRING, HEX, or INT. These are defined at the top level of the module,
        so you'd do something like
        if sym.get_type() == kconfiglib.STRING:
            ..."""
        return self.type
    def get_visibility(self):
        """Returns the visibility of the symbol: one of "n", "m" or "y". For
        bool and tristate symbols, this is an upper bound on the value users
        can set for the symbol. For other types of symbols, a visibility of "n"
        means the user value will be ignored. A visibility of "n" corresponds
        to not being visible in the 'make *config' interfaces.
        Example (assuming we're running with modules enabled -- i.e., MODULES
        set to 'y'):
        # Assume this has been assigned 'n'
        config N_SYM
            tristate "N_SYM"
        # Assume this has been assigned 'm'
        config M_SYM
            tristate "M_SYM"
        # Has visibility 'n'
        config A
            tristate "A"
            depends on N_SYM
        # Has visibility 'm'
        config B
            tristate "B"
            depends on M_SYM
        # Has visibility 'y'
        config C
            tristate "C"
        # Has no prompt, and hence visibility 'n'
        config D
            tristate
        Having visibility be tri-valued ensures that e.g. a symbol cannot be
        set to "y" by the user if it depends on a symbol with value "m", which
        wouldn't be safe.
        You should probably look at get_lower/upper_bound(),
        get_assignable_values() and is_modifiable() before using this."""
        return self._get_visibility()
    def get_parent(self):
        """Returns the menu or choice statement that contains the symbol, or
        None if the symbol is at the top level. Note that if statements are
        treated as syntactic and do not have an explicit class
        representation."""
        return self.parent
    def get_referenced_symbols(self, refs_from_enclosing = False):
        """Returns the set() of all symbols referenced by this symbol. For
        example, the symbol defined by
        config FOO
            bool
            prompt "foo" if A && B
            default C if D
            depends on E
            select F if G
        references the symbols A through G.
        refs_from_enclosing (default: False) -- If True, the symbols
                       referenced by enclosing menus and if's will be
                       included in the result."""
        return self.all_referenced_syms if refs_from_enclosing else self.referenced_syms
    def get_selected_symbols(self):
        """Returns the set() of all symbols X for which this symbol has a
        'select X' or 'select X if Y' (regardless of whether Y is satisfied or
        not). This is a subset of the symbols returned by
        get_referenced_symbols()."""
        return self.selected_syms
    def get_help(self):
        """Returns the help text of the symbol, or None if the symbol has no
        help text."""
        return self.help
    def get_config(self):
        """Returns the Config instance this symbol is from."""
        return self.config
    def get_def_locations(self):
        """Returns a list of (filename, linenr) tuples, where filename (string)
        and linenr (int) represent a location where the symbol is defined. For
        the vast majority of symbols this list will only contain one element.
        For the following Kconfig, FOO would get two entries: the lines marked
        with *.
        config FOO *
            bool "foo prompt 1"
        config FOO *
            bool "foo prompt 2"
        """
        return self.def_locations
    def get_ref_locations(self):
        """Returns a list of (filename, linenr) tuples, where filename (string)
        and linenr (int) represent a location where the symbol is referenced in
        the configuration. For example, the lines marked by * would be included
        for FOO below:
        config A
            bool
            default BAR || FOO *
        config B
            tristate
            depends on FOO *
            default m if FOO *
        if FOO *
            config A
                bool "A"
        endif
        config FOO (definition not included)
            bool
        """
        return self.ref_locations
    def is_modifiable(self):
        """Returns True if the value of the symbol could be modified by calling
        Symbol.set_user_value() and False otherwise.
        For bools and tristates, this corresponds to the symbol being visible
        in the 'make menuconfig' interface and not already being pinned to a
        specific value (e.g. because it is selected by another symbol).
        For strings and numbers, this corresponds to just being visible. (See
        Symbol.get_visibility().)"""
        if self.is_special_:
            return False
        if self.type == BOOL or self.type == TRISTATE:
            rev_dep = self.config._eval_expr(self.rev_dep)
            # A bool selected to "m" gets promoted to "y"
            if self.type == BOOL and rev_dep == "m":
                rev_dep = "y"
            return (tri_to_int[self._get_visibility()] -
                    tri_to_int[rev_dep]) > 0
        return self._get_visibility() != "n"
    def is_defined(self):
        """Returns False if the symbol is referred to in the Kconfig but never
        actually defined, otherwise True."""
        return self.is_defined_
    def is_special(self):
        """Returns True if the symbol is one of the special symbols n, m, y, or
        UNAME_RELEASE, or gets its value from the environment. Otherwise,
        returns False."""
        return self.is_special_
    def is_from_environment(self):
        """Returns True if the symbol gets its value from the environment.
        Otherwise, returns False."""
        return self.is_from_env
    def has_ranges(self):
        """Returns True if the symbol is of type INT or HEX and has ranges that
        limits what values it can take on, otherwise False."""
        return self.ranges != []
    def is_choice_symbol(self):
        """Returns True if the symbol is in a choice statement and is an actual
        choice symbol (see Choice.get_symbols()); otherwise, returns
        False."""
        return self.is_choice_symbol_
    def is_choice_selection(self):
        """Returns True if the symbol is contained in a choice statement and is
        the selected item, otherwise False. Equivalent to 'sym.is_choice_symbol()
        and sym.get_parent().get_selection() is sym'."""
        return self.is_choice_symbol_ and self.parent.get_selection() is self
    def __str__(self):
        """Returns a string containing various information about the symbol."""
        return self.config._get_sym_or_choice_str(self)
    #
    # Private methods
    #
    def __init__(self):
        """Symbol constructor -- not intended to be called directly by
        kconfiglib clients."""
        # Set default values
        _HasVisibility.__init__(self)
        self.config = None
        self.parent = None
        self.name = None
        self.type = UNKNOWN
        self.def_exprs = []
        self.ranges = []
        self.rev_dep = "n"
        # The prompt, default value and select conditions without any
        # dependencies from menus or if's propagated to them
        self.orig_prompts = []
        self.orig_def_exprs = []
        self.orig_selects = []
        # Dependencies inherited from containing menus and if's
        self.deps_from_containing = None
        self.help = None
        # The set of symbols referenced by this symbol (see
        # get_referenced_symbols())
        self.referenced_syms = set()
        # The set of symbols selected by this symbol (see
        # get_selected_symbols())
        self.selected_syms = set()
        # Like 'referenced_syms', but includes symbols from
        # dependencies inherited from enclosing menus and if's
        self.all_referenced_syms = set()
        # This is set to True for "actual" choice symbols. See
        # Choice._determine_actual_symbols(). The trailing underscore avoids a
        # collision with is_choice_symbol().
        self.is_choice_symbol_ = False
        # This records only dependencies specified with 'depends on'. Needed
        # when determining actual choice items (hrrrr...). See also
        # Choice._determine_actual_symbols().
        self.menu_dep = None
        # See Symbol.get_ref/def_locations().
        self.def_locations = []
        self.ref_locations = []
        self.user_val = None
        # Flags
        # Should the symbol get an entry in .config?
        self.write_to_conf = False
        # Caches the calculated value
        self.cached_value = None
        # Note: An instance variable 'self.dep' gets set on the Symbol in
        # Config._build_dep(), linking the symbol to the symbols that
        # immediately depend on it (in a caching/invalidation sense). The total
        # set of dependent symbols for the symbol (the transitive closure) is
        # calculated on an as-needed basis in _get_dependent().
        # Caches the total list of dependent symbols. Calculated in
        # _get_dependent().
        self.cached_deps = None
        # Does the symbol have an entry in the Kconfig file? The trailing
        # underscore avoids a collision with is_defined().
        self.is_defined_ = False
        # Does the symbol get its value in some special way, e.g. from the
        # environment or by being one of the special symbols n, m, and y? If
        # so, the value is stored in self.cached_value, which is never
        # invalidated. The trailing underscore avoids a collision with
        # is_special().
        self.is_special_ = False
        # Does the symbol get its value from the environment?
        self.is_from_env = False
    def _invalidate(self):
        # Special symbols (n/m/y, environment-derived) keep their cached
        # value permanently -- see the comment in __init__
        if self.is_special_:
            return
        if self.is_choice_symbol_:
            self.parent._invalidate()
        _HasVisibility._invalidate(self)
        self.write_to_conf = False
        self.cached_value = None
    def _invalidate_dependent(self):
        for sym in self._get_dependent():
            sym._invalidate()
    def _set_user_value_no_invalidate(self, v, suppress_load_warnings):
        """Like set_user_value(), but does not invalidate any symbols.
        suppress_load_warnings --
        some warnings are annoying when loading a .config that can be helpful
        when manually invoking set_user_value(). This flag is set to True to
        suppress such warnings.
        Perhaps this could be made optional for load_config() instead."""
        if self.is_special_:
            if self.is_from_env:
                self.config._warn('attempt to assign the value "{0}" to the '
                                  'symbol {1}, which gets its value from the '
                                  'environment. Assignment ignored.'
                                  .format(v, self.name))
            else:
                self.config._warn('attempt to assign the value "{0}" to the '
                                  'special symbol {1}. Assignment ignored.'
                                  .format(v, self.name))
            return
        if not self.is_defined_:
            filename, linenr = self.ref_locations[0]
            self.config._undef_assign('attempt to assign the value "{0}" to {1}, '
                                      "which is referenced at {2}:{3} but never "
                                      "defined. Assignment ignored."
                                      .format(v, self.name, filename, linenr))
            return
        # Check if the value is valid for our type
        if not (( self.type == BOOL and (v == "n" or v == "y") ) or
                ( self.type == TRISTATE and (v == "n" or v == "m" or
                                             v == "y") ) or
                ( self.type == STRING ) or
                ( self.type == INT and _is_base_n(v, 10) ) or
                ( self.type == HEX and _is_base_n(v, 16) )):
            self.config._warn('the value "{0}" is invalid for {1}, which has type {2}. '
                              "Assignment ignored."
                              .format(v, self.name, typename[self.type]))
            return
        if self.prompts == [] and not suppress_load_warnings:
            self.config._warn('assigning "{0}" to the symbol {1} which '
                              'lacks prompts and thus has visibility "n". '
                              'The assignment will have no effect.'
                              .format(v, self.name))
        self.user_val = v
        # Assigning "y"/"m" to a choice symbol also records the selection and
        # mode on the containing choice
        if self.is_choice_symbol_ and (self.type == BOOL or
                                       self.type == TRISTATE):
            choice = self.parent
            if v == "y":
                choice.user_val = self
                choice.user_mode = "y"
            elif v == "m":
                choice.user_val = None
                choice.user_mode = "m"
    def _unset_user_value_no_recursive_invalidate(self):
        self._invalidate()
        self.user_val = None
        if self.is_choice_symbol_:
            self.parent._unset_user_value()
    def _make_conf(self):
        # NOTE(review): self.already_written is not initialized in __init__;
        # it appears to be managed externally (presumably reset by Config
        # before each .config write) -- confirm before relying on it.
        if self.already_written:
            return []
        self.already_written = True
        # Note: write_to_conf is determined in get_value()
        val = self.get_value()
        if not self.write_to_conf:
            return []
        if self.type == BOOL or self.type == TRISTATE:
            if val == "m" or val == "y":
                return ["CONFIG_{0}={1}".format(self.name, val)]
            return ["# CONFIG_{0} is not set".format(self.name)]
        elif self.type == STRING:
            # Escape \ and "
            return ['CONFIG_{0}="{1}"'
                    .format(self.name,
                            val.replace("\\", "\\\\").replace('"', '\\"'))]
        elif self.type == INT or self.type == HEX:
            return ["CONFIG_{0}={1}".format(self.name, val)]
        else:
            _internal_error('Internal error while creating .config: unknown type "{0}".'
                            .format(self.type))
    def _get_dependent(self):
        """Returns the set of symbols that should be invalidated if the value
        of the symbol changes, because they might be affected by the change.
        Note that this is an internal API -- it's probably of limited
        usefulness to clients."""
        if self.cached_deps is not None:
            return self.cached_deps
        res = set()
        self._add_dependent_ignore_siblings(res)
        if self.is_choice_symbol_:
            # Sibling choice symbols are also affected, since the selection
            # within the choice can change
            for s in self.parent.get_symbols():
                if s is not self:
                    res.add(s)
                    s._add_dependent_ignore_siblings(res)
        self.cached_deps = res
        return res
    def _add_dependent_ignore_siblings(self, to):
        """Calculating dependencies gets a bit tricky for choice items as they
        all depend on each other, potentially leading to infinite recursion.
        This helper function calculates dependencies ignoring the other symbols
        in the choice. It also works fine for symbols that are not choice
        items."""
        for s in self.dep:
            to.add(s)
            to |= s._get_dependent()
    def _has_auto_menu_dep_on(self, on):
        """See Choice._determine_actual_symbols()."""
        if not isinstance(self.parent, Choice):
            _internal_error("Attempt to determine auto menu dependency for symbol ouside of choice.")
        if self.prompts == []:
            # If we have no prompt, use the menu dependencies instead (what was
            # specified with 'depends on')
            return self.menu_dep is not None and \
                   self.config._expr_depends_on(self.menu_dep, on)
        for (_, cond_expr) in self.prompts:
            if self.config._expr_depends_on(cond_expr, on):
                return True
        return False
class Menu(Item):
    """Represents a menu statement."""
    #
    # Public interface
    #
    def get_config(self):
        """Returns the Config instance that this menu belongs to."""
        return self.config
    def get_visibility(self):
        """Returns the visibility of the menu, which also caps the visibility
        of its subitems. See also Symbol.get_visibility()."""
        return self.config._eval_expr(self.dep_expr)
    def get_visible_if_visibility(self):
        """Returns the visibility contributed by the menu's 'visible if'
        condition; "y" when the menu has no 'visible if' condition."""
        return self.config._eval_expr(self.visible_if_expr)
    def get_items(self, recursive = False):
        """Returns a list of the items (symbols, menus, choice statements and
        comments) in the menu, in the order they appear within it.
        recursive (default: False) -- True if items contained in items within
                                      the menu should be included
                                      recursively (preorder)."""
        if not recursive:
            return self.block.get_items()
        items = []
        for child in self.block.get_items():
            items.append(child)
            if isinstance(child, Menu):
                items.extend(child.get_items(True))
            elif isinstance(child, Choice):
                items.extend(child.get_items())
        return items
    def get_symbols(self, recursive = False):
        """Returns a list of the symbols in the menu, in the order they appear
        within it.
        recursive (default: False) -- True if symbols contained in items within
                                      the menu should be included
                                      recursively."""
        return [it for it in self.get_items(recursive) if isinstance(it, Symbol)]
    def get_title(self):
        """Returns the title text of the menu."""
        return self.title
    def get_parent(self):
        """Returns the menu or choice statement containing this menu, or None
        for a top-level menu. If statements are treated as syntactic sugar
        and have no explicit class representation."""
        return self.parent
    def get_referenced_symbols(self, refs_from_enclosing = False):
        """See Symbol.get_referenced_symbols()."""
        if refs_from_enclosing:
            return self.all_referenced_syms
        return self.referenced_syms
    def get_location(self):
        """Returns the location of the menu as a (filename, linenr) tuple,
        where filename is a string and linenr an int."""
        return (self.filename, self.linenr)
    def __str__(self):
        """Returns a string containing various information about the menu."""
        deps_str = self.config._expr_val_str(self.orig_deps,
                                             "(no dependencies)")
        vis_if_str = self.config._expr_val_str(self.visible_if_expr,
                                               "(no dependencies)")
        extra_deps_str = " " + self.config._expr_val_str(self.deps_from_containing,
                                                         "(no additional dependencies)")
        return _sep_lines("Menu",
                          "Title : " + self.title,
                          "'depends on' dependencies : " + deps_str,
                          "'visible if' dependencies : " + vis_if_str,
                          "Additional dependencies from enclosing menus and if's:",
                          extra_deps_str,
                          "Location: {0}:{1}".format(self.filename, self.linenr))
    #
    # Private methods
    #
    def __init__(self):
        """Menu constructor -- not intended to be called directly by
        kconfiglib clients."""
        self.config = None
        self.parent = None
        self.title = None
        self.block = None
        self.dep_expr = None
        # Dependency expression as written, before dependencies from enclosing
        # menus and if's get propagated to it
        self.orig_deps = None
        # Dependencies inherited from enclosing menus and if's
        self.deps_from_containing = None
        # The 'visible if' expression
        self.visible_if_expr = None
        # Symbols referenced by this menu (see get_referenced_symbols())
        self.referenced_syms = set()
        # As 'referenced_syms', but also includes symbols from dependencies
        # inherited from enclosing menus and if's
        self.all_referenced_syms = None
        self.filename = None
        self.linenr = None
    def _make_conf(self):
        # The menu header comment is only emitted when the menu is visible.
        # The contained items are written out either way.
        sub_conf = self.block._make_conf()
        if self.config._eval_expr(self.dep_expr) != "n" and \
           self.config._eval_expr(self.visible_if_expr) != "n":
            return ["\n#\n# {0}\n#".format(self.title)] + sub_conf
        return sub_conf
class Choice(Item, _HasVisibility):
"""Represents a choice statement. A choice can be in one of three modes:
"n" - The choice is not visible and no symbols can be selected.
"m" - Any number of symbols can be set to "m". The rest will be "n". This
is safe since potentially conflicting options don't actually get
compiled into the kernel simultaneously with "m".
"y" - One symbol will be "y" while the rest are "n".
Only tristate choices can be in "m" mode, and the visibility of the choice
is an upper bound on the mode, so that e.g. a choice that depends on a
symbol with value "m" will be in "m" mode.
The mode changes automatically when a value is assigned to a symbol within
the choice.
See Symbol.get_visibility() too."""
#
# Public interface
#
def get_selection(self):
"""Returns the symbol selected (either by the user or through
defaults), or None if either no symbol is selected or the mode is not
"y"."""
if self.cached_selection is not None:
if self.cached_selection == NO_SELECTION:
return None
return self.cached_selection
if self.get_mode() != "y":
return self._cache_ret(None)
# User choice available?
if self.user_val is not None and \
self.user_val._get_visibility() == "y":
return self._cache_ret(self.user_val)
if self.optional:
return self._cache_ret(None)
return self._cache_ret(self.get_selection_from_defaults())
def get_selection_from_defaults(self):
"""Like Choice.get_selection(), but acts as if no symbol has been
selected by the user and no 'optional' flag is in effect."""
if self.actual_symbols == []:
return None
for (symbol, cond_expr) in self.def_exprs:
if self.config._eval_expr(cond_expr) != "n":
chosen_symbol = symbol
break
else:
chosen_symbol = self.actual_symbols[0]
# Is the chosen symbol visible?
if chosen_symbol._get_visibility() != "n":
return chosen_symbol
# Otherwise, pick the first visible symbol
for sym in self.actual_symbols:
if sym._get_visibility() != "n":
return sym
return None
def get_user_selection(self):
"""If the choice is in "y" mode and has a user-selected symbol, returns
that symbol. Otherwise, returns None."""
return self.user_val
def get_config(self):
"""Returns the Config instance this choice is from."""
return self.config
def get_name(self):
"""For named choices, returns the name. Returns None for unnamed
choices. No named choices appear anywhere in the kernel Kconfig files
as of Linux 3.7.0-rc8."""
return self.name
def get_prompts(self):
"""Returns a list of prompts defined for the choice, in the order they
appear in the configuration files. Returns the empty list for choices
with no prompt.
This list will have a single entry for the vast majority of choices
having prompts, but having multiple prompts for a single choice is
possible through having multiple 'choice' entries for it (though I'm
not sure if that ever happens in practice)."""
return [prompt for prompt, _ in self.orig_prompts]
def get_help(self):
"""Returns the help text of the choice, or None if the choice has no
help text."""
return self.help
def get_type(self):
"""Returns the type of the choice. See Symbol.get_type()."""
return self.type
def get_items(self):
"""Gets all items contained in the choice in the same order as within
the configuration ("items" instead of "symbols" since choices and
comments might appear within choices. This only happens in one place as
of Linux 3.7.0-rc8, in drivers/usb/gadget/Kconfig)."""
return self.block.get_items()
def get_symbols(self):
"""Returns a list containing the choice's symbols.
A quirk (perhaps a bug) of Kconfig is that you can put items within a
choice that will not be considered members of the choice insofar as
selection is concerned. This happens for example if one symbol within a
choice 'depends on' the symbol preceding it, or if you put non-symbol
items within choices.
As of Linux 3.7.0-rc8, this seems to be used intentionally in one
place: drivers/usb/gadget/Kconfig.
This function returns the "proper" symbols of the choice in the order
they appear in the choice, excluding such items. If you want all items
in the choice, use get_items()."""
return self.actual_symbols
def get_parent(self):
"""Returns the menu or choice statement that contains the choice, or
None if the choice is at the top level. Note that if statements are
treated as syntactic sugar and do not have an explicit class
representation."""
return self.parent
def get_referenced_symbols(self, refs_from_enclosing = False):
"""See Symbol.get_referenced_symbols()."""
return self.all_referenced_syms if refs_from_enclosing else self.referenced_syms
def get_def_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the choice is defined. For
the vast majority of choices (all of them as of Linux 3.7.0-rc8) this
list will only contain one element, but its possible for named choices
to be defined in multiple locations."""
return self.def_locations
def get_visibility(self):
"""Returns the visibility of the choice statement: one of "n", "m" or
"y". This acts as an upper limit on the mode of the choice (though bool
choices can only have the mode "y"). See the class documentation for an
explanation of modes."""
return self._get_visibility()
def get_mode(self):
"""Returns the mode of the choice. See the class documentation for
an explanation of modes."""
minimum_mode = "n" if self.optional else "m"
mode = self.user_mode if self.user_mode is not None else minimum_mode
mode = self.config._eval_min(mode, self._get_visibility())
# Promote "m" to "y" for boolean choices
if mode == "m" and self.type == BOOL:
return "y"
return mode
def is_optional(self):
"""Returns True if the symbol has the optional flag set (and so will default
to "n" mode). Otherwise, returns False."""
return self.optional
def __str__(self):
"""Returns a string containing various information about the choice
statement."""
return self.config._get_sym_or_choice_str(self)
#
# Private methods
#
    def __init__(self):
        """Choice constructor -- not intended to be called directly by
        kconfiglib clients."""
        _HasVisibility.__init__(self)
        self.config = None   # Owning Config instance
        self.parent = None   # Enclosing menu/choice, or None at top level
        self.name = None # Yes, choices can be named
        self.type = UNKNOWN
        self.def_exprs = []  # Default-value expressions
        self.help = None     # Help text, or None if there is none
        self.optional = False
        self.block = None    # The block of items contained in the choice
        # The prompts and default values without any dependencies from
        # enclosing menus or if's propagated
        self.orig_prompts = []
        self.orig_def_exprs = []
        # Dependencies inherited from containing menus and if's
        self.deps_from_containing = None
        # We need to filter out symbols that appear within the choice block but
        # are not considered choice items (see
        # Choice._determine_actual_symbols()) This list holds the "actual" choice
        # items.
        self.actual_symbols = []
        # The set of symbols referenced by this choice (see
        # get_referenced_symbols())
        self.referenced_syms = set()
        # Like 'referenced_syms', but includes symbols from
        # dependencies inherited from enclosing menus and if's
        self.all_referenced_syms = set()
        # See Choice.get_def_locations()
        self.def_locations = []
        # User-assigned value and mode (None until the user sets them)
        self.user_val = None
        self.user_mode = None
        # Cached result of get_selection(); None means "nothing cached"
        self.cached_selection = None
    def _determine_actual_symbols(self):
        """If a symbol's visibility depends on the preceding symbol within a
        choice, it is no longer viewed as a choice item (quite possibly a bug,
        but some things consciously use it.. ugh. It stems from automatic
        submenu creation). In addition, it's possible to have choices and
        comments within choices, and those shouldn't be considered as choice
        items either. Only drivers/usb/gadget/Kconfig seems to depend on any of
        this. This method computes the "actual" items in the choice and sets
        the is_choice_symbol_ flag on them (retrieved via is_choice_symbol()).
        Don't let this scare you: an earlier version simply checked for a
        sequence of symbols where all symbols after the first appeared in the
        'depends on' expression of the first, and that worked fine. The added
        complexity is to be future-proof in the event that
        drivers/usb/gadget/Kconfig turns even more sinister. It might very well
        be overkilling things (especially if that file is refactored ;)."""
        items = self.block.get_items()
        # Items might depend on each other in a tree structure, so we need a
        # stack to keep track of the current tentative parent
        stack = []
        for item in items:
            if not isinstance(item, Symbol):
                # Nested choices/comments are not choice items, and they also
                # break any chain of auto-menu dependencies
                stack = []
                continue
            while stack != []:
                if item._has_auto_menu_dep_on(stack[-1]):
                    # The item should not be viewed as a choice item, so don't
                    # set item.is_choice_symbol_.
                    stack.append(item)
                    break
                else:
                    stack.pop()
            else:
                # while/else: runs only if the stack emptied without a break,
                # i.e. the item has no auto-menu parent -- a proper choice item
                item.is_choice_symbol_ = True
                self.actual_symbols.append(item)
                stack.append(item)
def _cache_ret(self, selection):
# As None is used to indicate the lack of a cached value we can't use
# that to cache the fact that the choice has no selection. Instead, we
# use the symbolic constant NO_SELECTION.
if selection is None:
self.cached_selection = NO_SELECTION
else:
self.cached_selection = selection
return selection
    def _invalidate(self):
        # Drop the visibility caches maintained by _HasVisibility as well as
        # our own cached selection
        _HasVisibility._invalidate(self)
        self.cached_selection = None
    def _unset_user_value(self):
        # Invalidate cached state first, then forget the user's value/mode
        self._invalidate()
        self.user_val = None
        self.user_mode = None
    def _make_conf(self):
        # Delegate .config output generation to the items inside the choice
        return self.block._make_conf()
class Comment(Item):
    """Represents a comment statement."""

    #
    # Public interface
    #

    def get_config(self):
        """Returns the Config instance this comment is from."""
        return self.config

    def get_visibility(self):
        """Returns the visibility of the comment. See also
        Symbol.get_visibility()."""
        return self.config._eval_expr(self.dep_expr)

    def get_text(self):
        """Returns the text of the comment."""
        return self.text

    def get_parent(self):
        """Returns the menu or choice statement that contains the comment, or
        None if the comment is at the top level. Note that if statements are
        treated as syntactic sugar and do not have an explicit class
        representation."""
        return self.parent

    def get_referenced_symbols(self, refs_from_enclosing = False):
        """See Symbol.get_referenced_symbols()."""
        return self.all_referenced_syms if refs_from_enclosing else self.referenced_syms

    def get_location(self):
        """Returns the location of the comment as a (filename, linenr) tuple,
        where filename is a string and linenr an int."""
        return (self.filename, self.linenr)

    def __str__(self):
        """Returns a string containing various information about the comment."""
        dep_str = self.config._expr_val_str(self.orig_deps, "(no dependencies)")
        additional_deps_str = " " + self.config._expr_val_str(self.deps_from_containing,
                                                              "(no additional dependencies)")
        return _sep_lines("Comment",
                          "Text: " + str(self.text),
                          "Dependencies: " + dep_str,
                          "Additional dependencies from enclosing menus and if's:",
                          additional_deps_str,
                          "Location: {0}:{1}".format(self.filename, self.linenr))

    #
    # Private methods
    #

    def __init__(self):
        """Comment constructor -- not intended to be called directly by
        kconfiglib clients."""
        self.config = None   # Owning Config instance
        self.parent = None   # Enclosing menu/choice, or None at top level
        self.text = None     # The comment text
        self.dep_expr = None # Full dependency expression
        # Dependency expression without dependencies from enclosing menus and
        # if's propagated
        self.orig_deps = None
        # Dependencies inherited from containing menus and if's
        self.deps_from_containing = None
        # The set of symbols referenced by this comment (see
        # get_referenced_symbols())
        self.referenced_syms = set()
        # Like 'referenced_syms', but includes symbols from
        # dependencies inherited from enclosing menus and if's
        # NOTE(review): initialized to None here but to set() in Choice --
        # presumably assigned a real set during parsing; confirm
        self.all_referenced_syms = None
        self.filename = None
        self.linenr = None

    def _make_conf(self):
        # Emit a comment banner into the .config output unless the comment
        # is invisible ("n")
        if self.config._eval_expr(self.dep_expr) != "n":
            return ["\n#\n# {0}\n#".format(self.text)]
        return []
class _Feed:
"""Class for working with sequences in a stream-like fashion; handy for tokens."""
def __init__(self, items):
self.items = items
self.length = len(self.items)
self.i = 0
def get_next(self):
if self.i >= self.length:
return None
item = self.items[self.i]
self.i += 1
return item
def peek_next(self):
return None if self.i >= self.length else self.items[self.i]
def go_to_start(self):
self.i = 0
def __getitem__(self, index):
return self.items[index]
def __len__(self):
return len(self.items)
def is_empty(self):
return self.items == []
def check(self, token):
"""Check if the next token is 'token'. If so, remove it from the token
feed and return True. Otherwise, leave it in and return False."""
if self.i >= self.length:
return None
if self.items[self.i] == token:
self.i += 1
return True
return False
def remove_while(self, pred):
while self.i < self.length and pred(self.items[self.i]):
self.i += 1
def go_back(self):
if self.i <= 0:
_internal_error("Attempt to move back in Feed while already at the beginning.")
self.i -= 1
class _FileFeed(_Feed):
    """_Feed over the lines of a file that additionally records which file
    the lines came from and exposes the current line number."""

    def __init__(self, lines, filename):
        self.filename = _clean_up_path(filename)
        _Feed.__init__(self, lines)

    def get_filename(self):
        """Returns the (cleaned-up) name of the file being fed."""
        return self.filename

    def get_linenr(self):
        """Returns the number of the most recently fetched line."""
        return self.i
#
# Misc. public global utility functions
#
def tri_less(v1, v2):
    """True when tristate 'v1' orders strictly below tristate 'v2' under the
    ordering "n" < "m" < "y"; False otherwise."""
    lhs = tri_to_int[v1]
    rhs = tri_to_int[v2]
    return lhs < rhs
def tri_less_eq(v1, v2):
    """True when tristate 'v1' orders at or below tristate 'v2' under the
    ordering "n" < "m" < "y"; False otherwise."""
    lhs = tri_to_int[v1]
    rhs = tri_to_int[v2]
    return lhs <= rhs
def tri_greater(v1, v2):
    """True when tristate 'v1' orders strictly above tristate 'v2' under the
    ordering "n" < "m" < "y"; False otherwise."""
    lhs = tri_to_int[v1]
    rhs = tri_to_int[v2]
    return lhs > rhs
def tri_greater_eq(v1, v2):
    """True when tristate 'v1' orders at or above tristate 'v2' under the
    ordering "n" < "m" < "y"; False otherwise."""
    lhs = tri_to_int[v1]
    rhs = tri_to_int[v2]
    return lhs >= rhs
#
# Helper functions, mostly related to text processing
#
def _strip_quotes(s, line, filename, linenr):
"""Removes any quotes surrounding 's' if it has them; otherwise returns 's'
unmodified."""
s = s.strip()
if not s:
return ""
if s[0] == '"' or s[0] == "'":
if len(s) < 2 or s[-1] != s[0]:
_parse_error(line,
"malformed string literal",
filename,
linenr)
return s[1:-1]
return s
def _indentation(line):
"""Returns the indentation of the line, treating tab stops as being spaced
8 characters apart."""
if line.isspace():
_internal_error("Attempt to take indentation of blank line.")
indent = 0
for c in line:
if c == " ":
indent += 1
elif c == "\t":
# Go to the next tab stop
indent = (indent + 8) & ~7
else:
return indent
def _deindent(line, indent):
"""Deindent 'line' by 'indent' spaces."""
line = line.expandtabs()
if len(line) <= indent:
return line
return line[indent:]
def _is_base_n(s, n):
try:
int(s, n)
return True
except ValueError:
return False
def _sep_lines(*args):
"""Returns a string comprised of all arguments, with newlines inserted
between them."""
return "\n".join(args)
def _comment(s):
"""Returns a new string with "#" inserted before each line in 's'."""
if not s:
return "#"
res = "".join(["#" + line for line in s.splitlines(True)])
if s.endswith("\n"):
return res + "#"
return res
def _get_lines(filename):
"""Returns a list of lines from 'filename', joining any line ending in \\
with the following line."""
with open(filename, "r") as f:
lines = []
accum = ""
while 1:
line = f.readline()
if line == "":
return lines
if line.endswith("\\\n"):
accum += line[:-2]
else:
accum += line
lines.append(accum)
accum = ""
def _strip_trailing_slash(path):
"""Removes any trailing slash from 'path'."""
return path[:-1] if path.endswith("/") else path
def _clean_up_path(path):
"""Strips any initial "./" and trailing slash from 'path'."""
if path.startswith("./"):
path = path[2:]
return _strip_trailing_slash(path)
#
# Error handling
#
class Kconfig_Syntax_Error(Exception):
    """Exception raised for syntax errors found while tokenizing or parsing
    Kconfig files."""
    pass
class Internal_Error(Exception):
    """Exception raised for internal errors (bugs in kconfiglib itself)."""
    pass
def _tokenization_error(s, index, filename, linenr):
    """Raises Kconfig_Syntax_Error for a tokenization failure at offset
    'index' in the line 's', pointing out the offending position with a
    caret. 'filename'/'linenr' (when not None) are written to stderr first."""
    if filename is not None:
        assert linenr is not None
        sys.stderr.write("{0}:{1}:\n".format(filename, linenr))
    if s.endswith("\n"):
        s = s[:-1]
    # Calculate the visual offset corresponding to index 'index' in 's'
    # assuming tabstops are spaced 8 characters apart
    vis_index = 0
    for c in s[:index]:
        if c == "\t":
            vis_index = (vis_index + 8) & ~7
        else:
            vis_index += 1
    # Don't output actual tabs to be independent of how the terminal renders
    # them
    s = s.expandtabs()
    # Bug fix: call-form raise instead of the Python 2-only "raise E, arg"
    # statement -- identical on Python 2 and valid syntax on Python 3.
    raise Kconfig_Syntax_Error(
        _sep_lines("Error during tokenization at location indicated by caret.\n",
                   s,
                   " " * vis_index + "^\n"))
def _parse_error(s, msg, filename, linenr):
    """Raises Kconfig_Syntax_Error for a parse failure on the line 's', with
    the optional message 'msg'. 'filename'/'linenr' (when not None) are
    included in the error text."""
    error_str = ""
    if filename is not None:
        assert linenr is not None
        error_str += "{0}:{1}: ".format(filename, linenr)
    if s.endswith("\n"):
        s = s[:-1]
    error_str += 'Error while parsing "{0}"'.format(s) + \
                 ("." if msg is None else ": " + msg)
    # Bug fix: call-form raise -- works on Python 2 and Python 3, unlike the
    # old "raise E, arg" statement.
    raise Kconfig_Syntax_Error(error_str)
def _internal_error(msg):
    """Raises Internal_Error with 'msg' plus a request to report the bug."""
    msg += "\nSorry! You may want to send an email to kconfiglib@gmail.com " \
           "to tell me about this. Include the message above and the stack " \
           "trace and describe what you were doing."
    # Bug fix: call-form raise -- works on Python 2 and Python 3, unlike the
    # old "raise E, arg" statement.
    raise Internal_Error(msg)
# Optionally wrap the hottest code paths with the (long-obsolete, Python 2
# only) psyco JIT when the user asked for it via 'use_psyco'
if use_psyco:
    import psyco
    Config._tokenize = psyco.proxy(Config._tokenize)
    Config._eval_expr = psyco.proxy(Config._eval_expr)
    _indentation = psyco.proxy(_indentation)
    _get_lines = psyco.proxy(_get_lines)
| gpl-2.0 |
onesfreedom/pybuilder | src/unittest/python/plugins/python/cram_plugin_tests.py | 7 | 8281 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch, Mock, call
from pybuilder.core import Project
from pybuilder.errors import BuildFailedException
from pybuilder.plugins.python.cram_plugin import (
_cram_command_for,
_find_files,
_report_file,
run_cram_tests,
)
class CramPluginTests(unittest.TestCase):
    """Unit tests for the cram plugin helpers and the run_cram_tests task.

    Fix: uses ``assertEqual`` throughout -- ``assertEquals`` is a deprecated
    alias that was removed in Python 3.12.
    """

    def test_command_respects_no_verbose(self):
        project = Project('.')
        project.set_property('verbose', False)
        expected = ['cram']
        received = _cram_command_for(project)
        self.assertEqual(expected, received)

    def test_command_respects_verbose(self):
        project = Project('.')
        project.set_property('verbose', True)
        expected = ['cram', '--verbose']
        received = _cram_command_for(project)
        self.assertEqual(expected, received)

    @patch('pybuilder.plugins.python.cram_plugin.discover_files_matching')
    def test_find_files(self, discover_mock):
        project = Project('.')
        project.set_property('dir_source_cmdlinetest', '/any/dir')
        project.set_property('cram_test_file_glob', '*.t')
        expected = ['/any/dir/test.cram']
        discover_mock.return_value = expected
        received = _find_files(project)
        self.assertEqual(expected, received)
        discover_mock.assert_called_once_with('/any/dir', '*.t')

    def test_report(self):
        project = Project('.')
        project.set_property('dir_reports', '/any/dir')
        expected = './any/dir/cram.err'
        received = _report_file(project)
        self.assertEqual(expected, received)

    @patch('pybuilder.plugins.python.cram_plugin._cram_command_for')
    @patch('pybuilder.plugins.python.cram_plugin._find_files')
    @patch('pybuilder.plugins.python.cram_plugin._report_file')
    @patch('os.environ')
    @patch('pybuilder.plugins.python.cram_plugin.read_file')
    @patch('pybuilder.plugins.python.cram_plugin.execute_command')
    def test_running_plugin(self,
                            execute_mock,
                            read_file_mock,
                            os_mock,
                            report_mock,
                            find_files_mock,
                            command_mock
                            ):
        """Happy path: cram exits 0 and results are logged at info level."""
        project = Project('.')
        project.set_property('verbose', False)
        project.set_property('dir_source_main_python', 'python')
        project.set_property('dir_source_main_scripts', 'scripts')
        logger = Mock()
        command_mock.return_value = ['cram']
        find_files_mock.return_value = ['test1.cram', 'test2.cram']
        report_mock.return_value = 'report_file'
        os_mock.copy.return_value = {}
        read_file_mock.return_value = ['test failes for file', '# results']
        execute_mock.return_value = 0
        run_cram_tests(project, logger)
        execute_mock.assert_called_once_with(
            ['cram', 'test1.cram', 'test2.cram'], 'report_file',
            error_file_name='report_file',
            env={'PYTHONPATH': './python:', 'PATH': './scripts:'}
        )
        expected_info_calls = [call('Running Cram command line tests'),
                               call('Cram tests were fine'),
                               call('results'),
                               ]
        self.assertEqual(expected_info_calls, logger.info.call_args_list)

    @patch('pybuilder.plugins.python.cram_plugin._cram_command_for')
    @patch('pybuilder.plugins.python.cram_plugin._find_files')
    @patch('pybuilder.plugins.python.cram_plugin._report_file')
    @patch('os.environ')
    @patch('pybuilder.plugins.python.cram_plugin.read_file')
    @patch('pybuilder.plugins.python.cram_plugin.execute_command')
    def test_running_plugin_fails(self,
                                  execute_mock,
                                  read_file_mock,
                                  os_mock,
                                  report_mock,
                                  find_files_mock,
                                  command_mock
                                  ):
        """Non-zero cram exit raises BuildFailedException; terse error log."""
        project = Project('.')
        project.set_property('verbose', False)
        project.set_property('dir_source_main_python', 'python')
        project.set_property('dir_source_main_scripts', 'scripts')
        logger = Mock()
        command_mock.return_value = ['cram']
        find_files_mock.return_value = ['test1.cram', 'test2.cram']
        report_mock.return_value = 'report_file'
        os_mock.copy.return_value = {}
        read_file_mock.return_value = ['test failes for file', '# results']
        execute_mock.return_value = 1
        self.assertRaises(BuildFailedException, run_cram_tests, project, logger)
        execute_mock.assert_called_once_with(
            ['cram', 'test1.cram', 'test2.cram'], 'report_file',
            error_file_name='report_file',
            env={'PYTHONPATH': './python:', 'PATH': './scripts:'}
        )
        expected_info_calls = [call('Running Cram command line tests'),
                               ]
        expected_error_calls = [call('Cram tests failed!'),
                                call('results'),
                                call("See: 'report_file' for details"),
                                ]
        self.assertEqual(expected_info_calls, logger.info.call_args_list)
        self.assertEqual(expected_error_calls, logger.error.call_args_list)

    @patch('pybuilder.plugins.python.cram_plugin._cram_command_for')
    @patch('pybuilder.plugins.python.cram_plugin._find_files')
    @patch('pybuilder.plugins.python.cram_plugin._report_file')
    @patch('os.environ')
    @patch('pybuilder.plugins.python.cram_plugin.read_file')
    @patch('pybuilder.plugins.python.cram_plugin.execute_command')
    def test_running_plugin_fails_with_verbose(self,
                                               execute_mock,
                                               read_file_mock,
                                               os_mock,
                                               report_mock,
                                               find_files_mock,
                                               command_mock
                                               ):
        """With verbose on, the whole report content is echoed on failure."""
        project = Project('.')
        project.set_property('verbose', True)
        project.set_property('dir_source_main_python', 'python')
        project.set_property('dir_source_main_scripts', 'scripts')
        logger = Mock()
        command_mock.return_value = ['cram']
        find_files_mock.return_value = ['test1.cram', 'test2.cram']
        report_mock.return_value = 'report_file'
        os_mock.copy.return_value = {}
        read_file_mock.return_value = ['test failes for file', '# results']
        execute_mock.return_value = 1
        self.assertRaises(BuildFailedException, run_cram_tests, project, logger)
        execute_mock.assert_called_once_with(
            ['cram', 'test1.cram', 'test2.cram'], 'report_file',
            error_file_name='report_file',
            env={'PYTHONPATH': './python:', 'PATH': './scripts:'}
        )
        expected_info_calls = [call('Running Cram command line tests'),
                               ]
        expected_error_calls = [call('Cram tests failed!'),
                                call('test failes for file'),
                                call('# results'),
                                call("See: 'report_file' for details"),
                                ]
        self.assertEqual(expected_info_calls, logger.info.call_args_list)
        self.assertEqual(expected_error_calls, logger.error.call_args_list)
| apache-2.0 |
dbckz/ansible | test/units/modules/network/ios/test_ios_system.py | 59 | 5262 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.ios import ios_system
from .ios_module import TestIosModule, load_fixture, set_module_args
class TestIosSystemModule(TestIosModule):
    """Unit tests for the ios_system module.

    Fixes over the original: the complex name-servers test was misspelled
    ``rest_ios_system_name_servers_complex`` and therefore never collected
    or run by the test runner -- renamed to ``test_...``; the ``commnads``
    parameter of load_fixtures was corrected to ``commands``.
    """

    module = ios_system

    def setUp(self):
        self.mock_get_config = patch('ansible.modules.network.ios.ios_system.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch('ansible.modules.network.ios.ios_system.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None):
        # Parameter was misspelled 'commnads'; the base class passes it
        # positionally, so renaming it is safe for callers.
        self.get_config.return_value = load_fixture('ios_system_config.cfg')
        self.load_config.return_value = None

    def test_ios_system_hostname_changed(self):
        set_module_args(dict(hostname='foo'))
        commands = ['hostname foo']
        self.execute_module(changed=True, commands=commands)

    def test_ios_system_domain_name(self):
        set_module_args(dict(domain_name=['test.com']))
        commands = ['ip domain name test.com',
                    'no ip domain name eng.example.net',
                    'no ip domain name vrf management eng.example.net']
        self.execute_module(changed=True, commands=commands)

    def test_ios_system_domain_name_complex(self):
        set_module_args(dict(domain_name=[{'name': 'test.com', 'vrf': 'test'},
                                          {'name': 'eng.example.net'}]))
        commands = ['ip domain name vrf test test.com',
                    'no ip domain name vrf management eng.example.net']
        self.execute_module(changed=True, commands=commands)

    def test_ios_system_domain_search(self):
        set_module_args(dict(domain_search=['ansible.com', 'redhat.com']))
        commands = ['no ip domain list vrf management example.net',
                    'no ip domain list example.net',
                    'no ip domain list example.com',
                    'ip domain list ansible.com',
                    'ip domain list redhat.com']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_system_domain_search_complex(self):
        set_module_args(dict(domain_search=[{'name': 'ansible.com', 'vrf': 'test'}]))
        commands = ['no ip domain list vrf management example.net',
                    'no ip domain list example.net',
                    'no ip domain list example.com',
                    'ip domain list vrf test ansible.com']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_system_lookup_source(self):
        set_module_args(dict(lookup_source='Ethernet1'))
        commands = ['ip domain lookup source-interface Ethernet1']
        self.execute_module(changed=True, commands=commands)

    def test_ios_system_name_servers(self):
        name_servers = ['8.8.8.8', '8.8.4.4']
        set_module_args(dict(name_servers=name_servers))
        commands = ['no ip name-server vrf management 8.8.8.8',
                    'ip name-server 8.8.4.4']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_system_name_servers_complex(self):
        # Was 'rest_ios_system_name_servers_complex' -- the typo prevented
        # the runner from ever executing this test. NOTE(review): verify the
        # expected commands still match the module's current behavior.
        name_servers = dict(server='8.8.8.8', vrf='test')
        set_module_args(dict(name_servers=name_servers))
        commands = ['no name-server 8.8.8.8',
                    'no name-server vrf management 8.8.8.8',
                    'ip name-server vrf test 8.8.8.8']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ios_system_state_absent(self):
        set_module_args(dict(state='absent'))
        commands = ['no hostname',
                    'no ip domain lookup source-interface GigabitEthernet0/0',
                    'no ip domain list vrf management', 'no ip domain list',
                    'no ip domain name vrf management', 'no ip domain name',
                    'no ip name-server vrf management', 'no ip name-server']
        self.execute_module(changed=True, commands=commands)

    def test_ios_system_no_change(self):
        set_module_args(dict(hostname='ios01'))
        self.execute_module(commands=[])

    def test_ios_system_missing_vrf(self):
        name_servers = dict(server='8.8.8.8', vrf='missing')
        set_module_args(dict(name_servers=name_servers))
        self.execute_module(failed=True)
| gpl-3.0 |
tony/kivy | examples/widgets/codeinput.py | 21 | 7046 | from kivy.app import App
from kivy.extras.highlight import KivyLexer
from kivy.uix.spinner import Spinner, SpinnerOption
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.codeinput import CodeInput
from kivy.uix.behaviors import EmacsBehavior
from kivy.uix.popup import Popup
from kivy.properties import ListProperty
from kivy.core.window import Window
from kivy.core.text import LabelBase
from pygments import lexers
import codecs
import os
example_text = '''
---------------------Python----------------------------------
import kivy
kivy.require('1.0.6') # replace with your current kivy version !
from kivy.app import App
from kivy.uix.button import Button
class MyApp(App):
def build(self):
return Button(text='Hello World')
if __name__ == '__main__':
MyApp().run()
----------------------Java-----------------------------------
public static byte toUnsignedByte(int intVal) {
byte byteVal;
return (byte)(intVal & 0xFF);
}
---------------------kv lang---------------------------------
#:kivy 1.0
<YourWidget>:
canvas:
Color:
rgb: .5, .5, .5
Rectangle:
pos: self.pos
size: self.size
---------------------HTML------------------------------------
<!-- Place this tag where you want the +1 button to render. -->
<div class="g-plusone" data-annotation="inline" data-width="300"></div>
<!-- Place this tag after the last +1 button tag. -->
<script type="text/javascript">
(function() {
var po = document.createElement('script');
po.type = 'text/javascript';
po.async = true;
po.src = 'https://apis.google.com/js/plusone.js';
var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(po, s);
})();
</script>
----------------------Emacs key bindings---------------------
This CodeInput inherits from EmacsBehavior, so you can use Emacs key bindings
if you want! To try out Emacs key bindings, set the "Key bindings" option to
"Emacs". Experiment with the shortcuts below on some of the text in this window
(just be careful not to delete the cheat sheet before you have made note of the
commands!)
Shortcut Description
-------- -----------
Control + a Move cursor to the beginning of the line
Control + e Move cursor to the end of the line
Control + f Move cursor one character to the right
Control + b Move cursor one character to the left
Alt + f Move cursor to the end of the word to the right
Alt + b Move cursor to the start of the word to the left
Alt + Backspace Delete text left of the cursor to the beginning of word
Alt + d Delete text right of the cursor to the end of the word
Alt + w Copy selection
Control + w Cut selection
Control + y Paste selection
'''
class Fnt_SpinnerOption(SpinnerOption):
    """SpinnerOption subclass used by the font spinner; presumably styled in
    the accompanying kv file -- no behavior is added here."""
    pass
class LoadDialog(Popup):
    """File-chooser popup for opening a file (layout defined in kv)."""

    def load(self, path, selection):
        """Accept the chooser 'selection', retitle the window with the chosen
        file's basename, and close the popup."""
        # NOTE(review): the first assignment looks redundant -- possibly done
        # to force a Kivy property change event on re-selection; confirm
        self.choosen_file = [None, ]
        self.choosen_file = selection
        Window.title = selection[0][selection[0].rfind(os.sep) + 1:]
        self.dismiss()

    def cancel(self):
        """Close the popup without choosing a file."""
        self.dismiss()
class SaveDialog(Popup):
    """File-chooser popup for saving the editor text (layout defined in kv).
    The text to write is stored on the instance as 'self.text' by the
    caller before the popup is opened."""

    def save(self, path, selection):
        """Write 'self.text' to the chosen 'selection' path (UTF-8), retitle
        the window with its basename, and close the popup."""
        _file = codecs.open(selection, 'w', encoding='utf8')
        _file.write(self.text)
        Window.title = selection[selection.rfind(os.sep) + 1:]
        _file.close()
        self.dismiss()

    def cancel(self):
        """Close the popup without saving."""
        self.dismiss()
class CodeInputWithBindings(EmacsBehavior, CodeInput):
    '''CodeInput with keybindings.

    EmacsBehavior precedes CodeInput in the MRO so its key handling runs
    first. To add more bindings, add the behavior before CodeInput in the
    class definition.
    '''
    pass
class CodeInputTest(App):
    """Demo app: a CodeInput editor with spinners for language, font, font
    size, key bindings, and a simple File menu."""

    # Currently opened file; [None] means no file is open
    files = ListProperty([None, ])

    def build(self):
        """Assemble the root layout: a menu bar of spinners on top of the
        CodeInput editor."""
        b = BoxLayout(orientation='vertical')
        languages = Spinner(
            text='language',
            values=sorted(['KvLexer', ] + list(lexers.LEXERS.keys())))
        languages.bind(text=self.change_lang)
        menu = BoxLayout(
            size_hint_y=None,
            height='30pt')
        fnt_size = Spinner(
            text='12',
            values=list(map(str, list(range(5, 40)))))
        fnt_size.bind(text=self._update_size)
        # Only offer .ttf fonts registered with kivy's LabelBase
        fonts = [
            file for file in LabelBase._font_dirs_files
            if file.endswith('.ttf')]
        fnt_name = Spinner(
            text='RobotoMono',
            option_cls=Fnt_SpinnerOption,
            values=fonts)
        fnt_name.bind(text=self._update_font)
        mnu_file = Spinner(
            text='File',
            values=('Open', 'SaveAs', 'Save', 'Close'))
        mnu_file.bind(text=self._file_menu_selected)
        key_bindings = Spinner(
            text='Key bindings',
            values=('Default key bindings', 'Emacs key bindings'))
        key_bindings.bind(text=self._bindings_selected)
        menu.add_widget(mnu_file)
        menu.add_widget(fnt_size)
        menu.add_widget(fnt_name)
        menu.add_widget(languages)
        menu.add_widget(key_bindings)
        b.add_widget(menu)
        self.codeinput = CodeInputWithBindings(
            lexer=KivyLexer(),
            font_size=12,
            text=example_text,
            key_bindings='default',
        )
        b.add_widget(self.codeinput)
        return b

    def _update_size(self, instance, size):
        """Apply a new font size (spinner delivers it as a string)."""
        self.codeinput.font_size = float(size)

    def _update_font(self, instance, fnt_name):
        """Apply a new font to both the spinner and the editor."""
        instance.font_name = self.codeinput.font_name = fnt_name

    def _file_menu_selected(self, instance, value):
        """Dispatch the File menu action; resets the spinner text so the
        same entry can be picked again."""
        if value == 'File':
            return
        instance.text = 'File'
        if value == 'Open':
            # Dialogs are created lazily and reused across invocations
            if not hasattr(self, 'load_dialog'):
                self.load_dialog = LoadDialog()
            self.load_dialog.open()
            self.load_dialog.bind(choosen_file=self.setter('files'))
        elif value == 'SaveAs':
            if not hasattr(self, 'saveas_dialog'):
                self.saveas_dialog = SaveDialog()
            self.saveas_dialog.text = self.codeinput.text
            self.saveas_dialog.open()
        elif value == 'Save':
            if self.files[0]:
                _file = codecs.open(self.files[0], 'w', encoding='utf8')
                _file.write(self.codeinput.text)
                _file.close()
        elif value == 'Close':
            if self.files[0]:
                self.codeinput.text = ''
                Window.title = 'untitled'

    def _bindings_selected(self, instance, value):
        """Switch between 'default' and 'emacs' key bindings (first word of
        the spinner entry, lowercased)."""
        value = value.split(' ')[0]
        self.codeinput.key_bindings = value.lower()

    def on_files(self, instance, values):
        """Kivy property observer: load the newly selected file into the
        editor."""
        if not values[0]:
            return
        _file = codecs.open(values[0], 'r', encoding='utf8')
        self.codeinput.text = _file.read()
        _file.close()

    def change_lang(self, instance, z):
        """Switch the syntax highlighter to the selected pygments lexer."""
        if z == 'KvLexer':
            lx = KivyLexer()
        else:
            lx = lexers.get_lexer_by_name(lexers.LEXERS[z][2][0])
        self.codeinput.lexer = lx
# Run the demo app when executed as a script
if __name__ == '__main__':
    CodeInputTest().run()
| mit |
yyjiang/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 129 | 7848 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
# Random symmetric positive semidefinite matrix.
# NOTE(review): V appears unused in this chunk -- presumably intended for
# metrics that need a covariance matrix (e.g. mahalanobis); confirm.
V = np.random.random((3, 3))
V = np.dot(V, V.T)

# Dimensionality of the test data
DIMENSION = 3

# Metrics under test, mapped to their extra DistanceMetric kwargs
METRICS = {'euclidean': {},
           'manhattan': {},
           'chebyshev': {},
           'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k-nearest-neighbors: full pairwise distance matrix from each
    row of Y to all rows of X, then an argsort to take the k closest."""
    dist_matrix = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    ind = np.argsort(dist_matrix, axis=1)[:, :k]
    rows = np.arange(Y.shape[0])[:, None]
    dist = dist_matrix[rows, ind]
    return dist, ind
def test_kd_tree_query():
    # Nose-style generator test: yields (check, args...) tuples that compare
    # KDTree queries against the brute-force reference for every metric,
    # k, and traversal strategy combination
    np.random.seed(0)
    X = np.random.random((40, DIMENSION))
    Y = np.random.random((10, DIMENSION))

    def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
        kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
        dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
                                breadth_first=breadth_first)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
        # don't check indices here: if there are any duplicate distances,
        # the indices may not match. Distances should not have this problem.
        assert_array_almost_equal(dist1, dist2)

    for (metric, kwargs) in METRICS.items():
        for k in (1, 3, 5):
            for dualtree in (True, False):
                for breadth_first in (True, False):
                    yield (check_neighbors,
                           dualtree, breadth_first,
                           k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
    # Radius queries around the origin must return exactly the points whose
    # precomputed distance is within the radius
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    for r in np.linspace(rad[0], rad[-1], 100):
        ind = kdt.query_radius(query_pt, r + eps)[0]
        i = np.where(rad <= r + eps)[0]
        # Sort both sides: neither result is guaranteed to be ordered
        ind.sort()
        i.sort()
        assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
    # With return_distance=True, the distances returned by query_radius must
    # match the directly computed Euclidean distances to the query point
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1E-15  # roundoff error can cause test to fail
    kdt = KDTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    for r in np.linspace(rad[0], rad[-1], 100):
        ind, dist = kdt.query_radius(query_pt, r + eps, return_distance=True)
        ind = ind[0]
        dist = dist[0]
        d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
        assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
    """Naive reference implementation of the kernel density sum at each row
    of Y over the points X, with bandwidth h. Raises ValueError for an
    unknown kernel name."""
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)
    if kernel == 'gaussian':
        unnormed = np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
    elif kernel == 'tophat':
        unnormed = (d < h).sum(-1)
    elif kernel == 'epanechnikov':
        unnormed = ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
    elif kernel == 'exponential':
        unnormed = np.exp(-d / h).sum(-1)
    elif kernel == 'linear':
        unnormed = ((1 - d / h) * (d < h)).sum(-1)
    elif kernel == 'cosine':
        unnormed = (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
    else:
        raise ValueError('kernel not recognized')
    return norm * unnormed
def test_kd_tree_kde(n_samples=100, n_features=3):
    # Generator test: KDTree.kernel_density vs. the slow reference for all
    # kernels, bandwidths, tolerances, and traversal orders
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    kdt = KDTree(X, leaf_size=10)
    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for h in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, h)

            # NOTE(review): check_results closes over dens_true, which is
            # rebound each iteration -- correct only if the runner consumes
            # each yield before the next rebind; confirm
            def check_results(kernel, h, atol, rtol, breadth_first):
                dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
                                          kernel=kernel,
                                          breadth_first=breadth_first)
                assert_allclose(dens, dens_true, atol=atol,
                                rtol=max(rtol, 1e-7))

            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, h, atol, rtol,
                               breadth_first)
def test_gaussian_kde(n_samples=1000):
    """Compare gaussian KDE results to scipy.stats.gaussian_kde."""
    from scipy.stats import gaussian_kde
    np.random.seed(0)
    x_in = np.random.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)
    for bandwidth in [0.01, 0.1, 1]:
        tree = KDTree(x_in[:, None])
        try:
            reference = gaussian_kde(x_in, bw_method=bandwidth / np.std(x_in))
        except TypeError:
            raise SkipTest("Old scipy, does not accept explicit bandwidth.")
        dens_tree = tree.kernel_density(x_out[:, None], bandwidth) / n_samples
        dens_reference = reference.evaluate(x_out)
        assert_array_almost_equal(dens_tree, dens_reference, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
    """Check two_point_correlation against brute-force pairwise counts."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    r = np.linspace(0, 1, 10)
    tree = KDTree(X, leaf_size=10)
    pairwise_dist = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    expected_counts = [(pairwise_dist <= radius).sum() for radius in r]

    def check_two_point(r, dualtree):
        observed = tree.two_point_correlation(Y, r=r, dualtree=dualtree)
        assert_array_almost_equal(observed, expected_counts)

    for dualtree in (True, False):
        yield check_two_point, r, dualtree
def test_kd_tree_pickle():
    """A pickled and restored KDTree must answer queries identically."""
    import pickle
    np.random.seed(0)
    X = np.random.random((10, 3))
    kdt1 = KDTree(X, leaf_size=1)
    ind1, dist1 = kdt1.query(X)

    def check_pickle_protocol(protocol):
        serialized = pickle.dumps(kdt1, protocol=protocol)
        restored = pickle.loads(serialized)
        ind2, dist2 = restored.query(X)
        assert_array_almost_equal(ind1, ind2)
        assert_array_almost_equal(dist1, dist2)

    for protocol in (0, 1, 2):
        yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
    """NeighborsHeap must keep the n_nbrs smallest of 2*n_nbrs pushed values."""
    heap = NeighborsHeap(n_pts, n_nbrs)
    for row in range(n_pts):
        d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
        i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
        for dist, idx in zip(d_in, i_in):
            heap.push(row, dist, idx)
        order = np.argsort(d_in)
        d_sorted = d_in[order]
        i_sorted = i_in[order]
        d_heap, i_heap = heap.get_arrays(sort=True)
        assert_array_almost_equal(d_sorted[:n_nbrs], d_heap[row])
        assert_array_almost_equal(i_sorted[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
    """nodeheap_sort must agree with numpy argsort ordering."""
    vals = np.random.random(n_nodes).astype(DTYPE)
    expected_order = np.argsort(vals)
    sorted_vals, order = nodeheap_sort(vals)
    assert_array_almost_equal(expected_order, order)
    assert_array_almost_equal(vals[expected_order], sorted_vals)
def test_simultaneous_sort(n_rows=10, n_pts=201):
    """simultaneous_sort must sort each distance row and permute ind alike."""
    dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
    dist_ref = dist.copy()
    ind_ref = ind.copy()

    # sort rows in place using the function under test
    simultaneous_sort(dist, ind)

    # reference: row-wise argsort plus fancy indexing
    order = np.argsort(dist_ref, axis=1)
    rows = np.arange(n_rows)[:, None]
    dist_ref = dist_ref[rows, order]
    ind_ref = ind_ref[rows, order]

    assert_array_almost_equal(dist, dist_ref)
    assert_array_almost_equal(ind, ind_ref)
| bsd-3-clause |
HailStorm32/Q.bo_stacks | qbo_webi/src/teleoperation/sip2rtmp/p2p-sip/src/std/rfc4566.py | 4 | 7850 | # Copyright (c) 2007, Kundan Singh. All rights reserved. See LICENSING for details.
# @implements RFC4566 (SDP)
import socket, time
class attrs(object):
    '''Hybrid attribute/item container.

    Names can be read either as attributes or as items; looking up a
    missing name returns None instead of raising.
    '''
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            self[key] = value

    def __getattr__(self, name):
        # Invoked only when normal attribute lookup fails; defer to items.
        return self[name]

    def __getitem__(self, name):
        return self.__dict__.get(name, None)

    def __setitem__(self, name, value):
        self.__dict__[name] = value

    def __contains__(self, name):
        return name in self.__dict__
# @implements RFC4566 P3L3-P3L21
class SDP(attrs):
    '''A SDP packet with dynamic properties.
    The header names can be accessed as attributes or items.
    Accessing an unavailable header gives None instead of exception.
    '''
    # header names that can appear multiple times.
    _multiple = 'tramb'

    def __init__(self, value=None):
        # Parse the textual SDP if given, otherwise start empty.
        if value:
            self._parse(value)

    # @implements RFC4566 P11L1-P12L10
    class originator(attrs):
        '''Represents a o= line with attributes username (str), sessionid (long),
        version (long), nettype (str), addrtype (str), address (str).'''
        def __init__(self, value=None):
            if value:
                # Parse "o=<username> <sess-id> <sess-version> <nettype> <addrtype> <address>".
                self.username, self.sessionid, self.version, self.nettype, self.addrtype, self.address = value.split(' ')
                self.sessionid = int(self.sessionid)
                self.version = int(self.version)
            else:
                # Synthesize an origin line for an outgoing session; falls
                # back to a DNS lookup when the hostname is not qualified.
                hostname = socket.gethostname()
                self.username, self.sessionid, self.version, self.nettype, self.addrtype, self.address = \
                    '-', int(time.time()), int(time.time()), 'IN', 'IP4', (hostname.find('.')>0 and hostname or socket.gethostbyname(hostname))
        def __repr__(self):
            return ' '.join(map(lambda x: str(x), [self.username, self.sessionid, self.version, self.nettype, self.addrtype, self.address]))

    # @implements RFC4566 P14L7-P16L9
    class connection(attrs):
        '''Represents a c= line with attributes nettype (str), addrtype (str), address (str)
        and optionally ttl (int) and count (int).'''
        def __init__(self, value=None, **kwargs):
            if value:
                # Parse "c=<nettype> <addrtype> <address>[/<ttl>[/<count>]]".
                self.nettype, self.addrtype, rest = value.split(' ')
                rest = rest.split('/')
                if len(rest) == 1: self.address = rest[0]
                elif len(rest) == 2: self.address, self.ttl = rest[0], int(rest[1])
                else: self.address, self.ttl, self.count = rest[0], int(rest[1]), int(rest[2])
            elif 'address' in kwargs:
                # Construct from keyword arguments with IN/IP4 defaults.
                self.address = kwargs.get('address')
                self.nettype = kwargs.get('nettype', 'IN')
                self.addrtype = kwargs.get('addrtype', 'IP4')
                if 'ttl' in kwargs: self.ttl = int(kwargs.get('ttl'))
                if 'count' in kwargs: self.count = int(kwargs.get('count'))
        def __repr__(self):
            return self.nettype + ' ' + self.addrtype + ' ' + self.address + ('/' + str(self.ttl) if self.ttl else '') + ('/' + str(self.count) if self.count else '')

    # @implements RFC4566 P22L17-P24L33
    class media(attrs):
        '''Represents a m= line and all subsequent lines until next m= or end.
        It has attributes such as media (str), port (int), proto (str), fmt (list).'''
        def __init__(self, value=None, **kwargs):
            if value:
                # Parse "m=<media> <port> <proto> <fmt> ...": each format
                # becomes an attrs with a numeric or string payload type.
                self.media, self.port, self.proto, rest = value.split(' ', 3)
                self.port = int(self.port)
                self.fmt = []
                for f in rest.split(' '):
                    a = attrs()
                    try: a.pt = int(f) # if payload type is numeric
                    except: a.pt = f
                    self.fmt.append(a)
            elif 'media' in kwargs:
                self.media = kwargs.get('media')
                self.port = int(kwargs.get('port', 0))
                self.proto = kwargs.get('proto', 'RTP/AVP')
                self.fmt = kwargs.get('fmt', [])
        def __repr__(self):
            # m= line first, then the media-level headers in fixed order,
            # then one a=rtpmap line per format that has a name.
            result = self.media + ' ' + str(self.port) + ' ' + self.proto + ' ' + ' '.join(map(lambda x: str(x.pt), self.fmt))
            for k in filter(lambda x: x in self, 'icbka'): # order is important
                if k not in SDP._multiple: # single header
                    result += '\r\n' + k + '=' + str(self[k])
                else:
                    for v in self[k]:
                        result += '\r\n' + k + '=' + str(v)
            for f in self.fmt:
                if f.name:
                    result += '\r\n' + 'a=rtpmap:' + str(f.pt) + ' ' + f.name + '/' + str(f.rate) + (f.params and ('/'+f.params) or '')
            return result
        def dup(self): # use this method instead of SDP.media(str(m)) to duplicate m. Otherwise, fmt will be incomplete
            return SDP.media(media=self.media, port=self.port, proto=self.proto, fmt=map(lambda f: attrs(pt=f.pt, name=f.name, rate=f.rate, params=f.params), self.fmt))

    # @implements RFC4566 P8L17-P10L5
    def _parse(self, text):
        # NOTE(review): g is assigned but never used; the global/media state
        # is actually tracked through self['m'] below.
        g = True # whether we are in global line or per media line?
        for line in text.replace('\r\n', '\n').split('\n'):
            k, sep, v = line.partition('=')
            if k == 'o': v = SDP.originator(v)
            elif k == 'c': v = SDP.connection(v)
            elif k == 'm': v = SDP.media(v)
            if k == 'm': # new m= line
                if not self['m']:
                    self['m'] = []
                self['m'].append(v)
                obj = self['m'][-1]
            elif self['m']: # not in global
                obj = self['m'][-1]
                # @implements RFC4566 P25L41-P27L7
                if k == 'a' and v.startswith('rtpmap:'):
                    # Attach name/rate/params from a=rtpmap to matching formats.
                    pt, rest = v[7:].split(' ', 1)
                    name, sep, rest = rest.partition('/')
                    rate, sep, params = rest.partition('/')
                    for f in filter(lambda x: str(x.pt) == str(pt), obj.fmt):
                        f.name = name; f.rate = int(rate); f.params = params or None
                else:
                    # Append to a list for repeatable headers, else store once.
                    obj[k] = (k in SDP._multiple and ((k in obj) and (obj[k]+[v]) or [v])) or v
            else: # global
                obj = self
                obj[k] = ((k in SDP._multiple) and ((k in obj) and (obj[k]+[v]) or [v])) or v

    def __repr__(self):
        # Session-level headers in RFC-recommended order; each media section
        # serializes itself (including its own headers) via media.__repr__.
        result = ''
        for k in filter(lambda x: x in self, 'vosiuepcbtam'): # order is important
            if k not in SDP._multiple: # single header
                result += k + '=' + str(self[k]) + '\r\n'
            else:
                for v in self[k]:
                    result += k + '=' + str(v) + '\r\n'
        return result
#--------------------------- Testing --------------------------------------
# @implements RFC4566 P10L7-P10L21
def testSDP():
    """Round-trip test: parsing the RFC 4566 example session description
    and re-serializing it must reproduce the input byte for byte.
    The fixture lines are intentionally flush-left inside the triple-quoted
    string and carry explicit \r before each newline."""
    s = '''v=0\r
o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5\r
s=SDP Seminar\r
i=A Seminar on the session description protocol\r
u=http://www.example.com/seminars/sdp.pdf\r
e=j.doe@example.com (Jane Doe)\r
c=IN IP4 224.2.17.12/127\r
t=2873397496 2873404696\r
a=recvonly\r
m=audio 49170 RTP/AVP 0\r
m=video 51372 RTP/AVP 99\r
a=rtpmap:99 h263-1998/90000\r
'''
    sdp = SDP(s)
    assert str(sdp) == s
if __name__ == '__main__':
    # Run any doctests in this module, then the round-trip self-test.
    import doctest
    doctest.testmod()
    testSDP()
| lgpl-2.1 |
felipenaselva/repo.felipe | plugin.video.superlistamilton/playtvfr.py | 297 | 8750 | import struct
import urllib2,urllib
import re
import json
import math
# Magic delta constant from the reference XXTEA (Corrected Block TEA) algorithm.
CRYPT_XXTEA_DELTA= 0x9E3779B9
# Default HTTP headers attached to every outgoing request.
headers = [('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),( 'Connection','Keep-Alive')]
class Crypt_XXTEA:
    """Pure-Python XXTEA (Corrected Block TEA) cipher, ported from PHP's
    Crypt_XXTEA package.

    Operates on byte strings or lists of 32-bit words.  This is Python 2
    era code (it relies on `basestring` and `str.decode("hex")` callers);
    invalid arguments are reported by printing a message, exactly as the
    original did, rather than raising.
    """

    # Four 32-bit words once setKey() has been called; None until then.
    _key = None

    def setKey(self, key):
        """Set the 128-bit secret key from a string (<= 16 chars) or a list
        of up to four 32-bit words; short keys are zero-padded."""
        if isinstance(key, basestring):
            k = self._str2long(key, False)
        elif isinstance(key, list):
            k = key
        else:
            # NOTE(review): falls through with `k` unbound, so the len(k)
            # checks below would raise NameError for an invalid key type.
            print("The secret key must be a string or long integer array")
        if (len(k) > 4):
            print("The secret key cannot be more than 16 characters or 4 long values")
        elif (len(k) == 0):
            print("The secret key cannot be empty")
        elif (len(k) < 4):
            # Zero-pad the key out to exactly four words.
            for i in range(len(k), 4):
                k.append(0)
        self._key = k

    def encrypt(self, plaintext):
        """Encrypt a string or word list; returns the same kind of value
        (None is returned for unsupported argument types)."""
        if (self._key == None):
            print("Secret key is undefined")
        if isinstance(plaintext, basestring):
            return self._encryptString(plaintext)
        elif isinstance(plaintext, list):
            return self._encryptArray(plaintext)
        else:
            print("The plain text must be a string or long integer array")

    def decrypt(self, ciphertext):
        """Decrypt a string or word list produced by encrypt()."""
        if (self._key == None):
            print("Secret key is undefined")
        if isinstance(ciphertext, basestring):
            return self._decryptString(ciphertext)
        elif isinstance(ciphertext, list):
            return self._decryptArray(ciphertext)
        else:
            print("The plain text must be a string or long integer array")

    def _encryptString(self, str):
        # Convert to words, encrypt in place, convert back to bytes.
        if (str == ''):
            return ''
        v = self._str2long(str, False)
        v = self._encryptArray(v)
        return self._long2str(v, False)

    def _encryptArray(self, v):
        """In-place XXTEA encryption of a word list; returns the list."""
        n = len(v) - 1
        z = v[n]
        y = v[0]
        q = math.floor(6 + 52 / (n + 1))
        sum = 0
        # NOTE(review): as in the PHP original, a single-word input (n == 0)
        # leaves `p` unbound before the final mixing line.
        while (0 < q):
            q -= 1
            sum = self._int32(sum + CRYPT_XXTEA_DELTA)
            e = sum >> 2 & 3
            for p in range(0, n):
                y = v[p + 1]
                mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z))
                z = v[p] = self._int32(v[p] + mx)
                p += 1  # mimic the PHP for-loop: p ends one past the last index
            y = v[0]
            mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z))
            z = v[n] = self._int32(v[n] + mx)
        return v

    def _decryptString(self, str):
        # Convert to words, decrypt in place, convert back to bytes.
        if (str == ''):
            return ''
        v = self._str2long(str, False)
        v = self._decryptArray(v)
        return self._long2str(v, False)

    def _decryptArray(self, v):
        """In-place XXTEA decryption of a word list; returns the list."""
        n = len(v) - 1
        z = v[n]
        y = v[0]
        q = math.floor(6 + 52 / (n + 1))
        sum = self._int32(q * CRYPT_XXTEA_DELTA)
        while (sum != 0):
            e = sum >> 2 & 3
            for p in range(n, 0, -1):
                z = v[p - 1]
                mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z))
                y = v[p] = self._int32(v[p] - mx)
                p = p - 1  # mimic the PHP for-loop: p ends at 0
            z = v[n]
            mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z))
            y = v[0] = self._int32(v[0] - mx)
            sum = self._int32(sum - CRYPT_XXTEA_DELTA)
        return v

    def _long2str(self, v, w):
        """Pack a word list into a little-endian byte string; if *w*, trim
        the result to the original length stored in the last word."""
        ln = len(v)
        s = ''
        for i in range(0, ln):
            s += struct.pack('<I', v[i] & 0xFFFFFFFF)
        if (w):
            # BUG FIX: the original called an undefined substr() here,
            # which raised NameError whenever w was true.
            return s[0:v[ln - 1]]
        else:
            return s

    def _str2long(self, s, w):
        """Unpack a byte string into little-endian 32-bit words, zero-padding
        to a multiple of 4 bytes; if *w*, record len(s) in the first word."""
        # Word count: floor(len/4) plus one extra word for any remainder
        # (relies on Python 2 integer division).
        i = int(math.ceil((len(s) / 4)))
        if (len(s) % 4) > 0:
            i += 1
        v = list(struct.unpack(('I' * i), (s + ("\0" * ((4 - len(s) % 4) & 3)))))
        if (w):
            # NOTE(review): reference XXTEA stores the length in the LAST
            # word; the original's '#prb' comment flagged this too.
            v[0] = len(s)
        return v

    def _int32(self, n):
        """Wrap an arbitrary Python integer into signed 32-bit range."""
        while (n >= 2147483648):
            n -= 4294967296
        while (n <= -2147483649):
            n += 4294967296
        return int(n)
def getUrl(url, cookieJar=None, post=None, timeout=20, headers=None):
    """Fetch *url* with urllib2 and return the response body.

    cookieJar -- optional cookielib jar used for the request
    post      -- optional POST payload (None issues a GET)
    headers   -- optional iterable of (name, value) extra header pairs
    """
    opener = urllib2.build_opener(
        urllib2.HTTPCookieProcessor(cookieJar),
        urllib2.HTTPBasicAuthHandler(),
        urllib2.HTTPHandler(),
    )
    request = urllib2.Request(url)
    request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    if headers:
        for name, value in headers:
            request.add_header(name, value)
    response = opener.open(request, post, timeout=timeout)
    body = response.read()
    response.close()
    return body
def HexToByte(hexStr):
    """Convert a hex-digit string into the corresponding byte string.

    The hex byte values may or may not be space separated.
    """
    compact = ''.join(hexStr.split(" "))
    return ''.join(chr(int(compact[i:i + 2], 16))
                   for i in range(0, len(compact), 2))
def get_url(player_id):
    """Resolve the playable stream URL for a playtv.fr player id.

    Protocol: fetch the obfuscated player JS, XXTEA-decrypt the embedded
    parameters to obtain the channel id + API endpoint, then query the API
    (with an encrypted request) and build an http or rtmp URL from the
    advertised streams.  Python 2 only (`<>`, print statement, str.decode("hex")).
    """
    v=Crypt_XXTEA()
    import time
    # Retrieve channel id and primary key
    timestamp = time.time();
    #player_id = '69T7MabZ47';
    init = getUrl("http://tvplayer.playtv.fr/js/"+player_id+".js?_="+str(timestamp),headers=headers);
    #print init
    # Extract the JSON blob holding the two encrypted payloads a and b.
    pat="b:(\{\"a.*\"})}"
    init =re.compile(pat).findall(init)[0]
    init = json.loads(init);
    from binascii import unhexlify
    from binascii import hexlify
    a = init['a'];
    b = init['b'];
    b=b.decode("hex")
    a=a.decode("hex")
    bb=""
    # Stage 1: payload b decrypts under the fixed key "object" and yields
    # the real key k used for payload a.
    v.setKey("object");
    #b=v._long2str(b,False)
    b_s=v.decrypt(b).rstrip('\0')
    params = json.loads(b_s)
    pack_k=params['k'].decode("hex")# pack("H*", params['k'])#init['a']
    key = v.decrypt(pack_k).rstrip('\0');
    v.setKey(key);
    # Stage 2: payload a gives the channel id and the stream API endpoint.
    a_d=v.decrypt(a).rstrip('\0')
    params = json.loads(a_d);
    channel_id = params['i'];
    api_url = params['u'];
    # Build and encrypt the API request document.
    req={"i": channel_id, "t": timestamp,"h":"playtv.fr","a":5}
    req = json.dumps(req)
    req_en=v.encrypt(req)
    req_en=req_en.encode("hex");# struct.unpack("H"*(len(req_en)/4),req_en);
    if not req_en.endswith( '/'):
        req_en += '/';
    # NOTE(review): list.append returns None, so headers2 is always None
    # here and the extra Referer/x-flash-version headers are sent only via
    # the mutated module-level `headers` list — confirm this is intended.
    headers2 =headers.append( [('Referer','http://static.playtv.fr/swf/tvplayer.swf?r=22'),( 'x-flash-version','11,6,602,180')])
    init = getUrl(api_url+req_en,headers=headers2);
    init=init.decode("hex")
    params = json.loads(v.decrypt(init).rstrip('\0'));
    # Pick the higher-bitrate stream when two are advertised.
    if params['s'][1] and params['s'][1] <>'' :
        streams =params['s'][0] if params['s'][0]['bitrate'] > params['s'][1]['bitrate'] else params['s'][1];
    else:
        streams = params['s'][0];
    scheme = streams['scheme'];
    host = streams['host'];
    port = streams['port'];
    app = streams['application'];
    playpath = streams['stream'];
    token = streams['token'];
    title = streams['title'];
    # Re-encrypt the API-supplied token t under key k for rtmp auth.
    t = params['j']['t'];
    k = params['j']['k'];
    v.setKey("object");
    key=v.decrypt(k.decode("hex"))# pack("H*", k));
    v.setKey(key);
    auth = v.encrypt(t).encode("hex") #unpack("H*", $xxtea->encrypt($t));
    if (scheme == "http"):
        final_url = scheme+"://"+host + ( ":" +port if port and len(port)>0 else "") + "/" + playpath
    else:
        final_url = scheme + "://" + host +( ":" +port if port and len(port)>0 else "") + "/" + app +" app=" + app +" swfUrl=http://static.playtv.fr/swf/tvplayer.swf pageUrl=http://playtv.fr/television Conn=S:" + auth + (" token=" + token if token and len(token)>0 else "") + " playpath=" + playpath +' live=1 timeout=20'
    print final_url
    return final_url
#print get_url('69T7MabZ47')
| gpl-2.0 |
byshen/pyspider | pyspider/libs/beanstalk.py | 58 | 3652 | #!/usr/bin/env python
# coding:utf-8
"""beanstalk queue - queue based on beanstalk
Setting: you need to set max-job-size bigger(default 65535)
DAEMON_OPTS="-l $BEANSTALKD_LISTEN_ADDR -p $BEANSTALKD_LISTEN_PORT -z 524288"
"""
import time
import umsgpack
import beanstalkc
import threading
import logging
from six.moves import queue as BaseQueue
class BeanstalkQueue(object):
    """Queue-like wrapper over a beanstalkd tube.

    Mirrors the blocking/timeout semantics of the stdlib Queue API
    (raises Queue.Empty / Queue.Full) while serializing payloads with
    umsgpack.  Python 2 code (old except syntax).
    """
    # Polling granularity for the blocking put/get retry loops, in seconds.
    max_timeout = 0.3
    Empty = BaseQueue.Empty
    Full = BaseQueue.Full

    def __init__(self, name, host='localhost:11300', maxsize=0):
        """
        Constructor for a BeanstalkdQueue.
        """
        self.name = name
        config = host.split(':')
        self.host = config[0] if len(config) else 'localhost'
        self.port = int(config[1]) if len(config) > 1 else 11300
        # beanstalkc connections are not thread-safe; serialize access.
        self.lock = threading.RLock()
        self.maxsize = maxsize
        self.reconnect()

    def stats(self):
        """Return the tube's stats as a dict of strings ({} if the tube
        does not exist yet)."""
        try:
            with self.lock:
                stats = self.connection.stats_tube(self.name)
        except beanstalkc.CommandFailed, err:
            # tube is empty
            if err[1] == 'NOT_FOUND':
                return {}
        # Parse the YAML-ish "key: value" lines by hand (parse_yaml=False).
        # NOTE(review): `item.find(':')` is -1 (truthy) for lines WITHOUT a
        # colon and 0 (falsy) for lines starting with one — the len == 2
        # check below is what actually filters malformed lines; confirm.
        stats = [item.split(': ') for item in stats.split('\n') if item.find(':')]
        stats = [(item[0], item[1]) for item in stats if len(item) == 2]
        return dict(stats)

    def reconnect(self):
        # (Re)open the connection and bind both produce and consume sides
        # to this queue's tube.
        self.connection = beanstalkc.Connection(host=self.host, port=self.port, parse_yaml=False)
        self.connection.use(self.name)
        self.connection.watch(self.name)

    def qsize(self):
        # Number of jobs ready to be reserved.
        stats = self.stats()
        return int(stats.get('current-jobs-ready', 0))

    def empty(self):
        if self.qsize() == 0:
            return True
        else:
            return False

    def full(self):
        # maxsize == 0 means unbounded, matching stdlib Queue semantics.
        if self.maxsize and self.qsize() >= self.maxsize:
            return True
        else:
            return False

    def put(self, obj, block=True, timeout=None):
        """Enqueue *obj*; when blocking, poll until space or timeout."""
        if not block:
            return self.put_nowait(obj)
        start_time = time.time()
        while True:
            try:
                return self.put_nowait(obj)
            except BaseQueue.Full:
                if timeout:
                    lasted = time.time() - start_time
                    if timeout > lasted:
                        time.sleep(min(self.max_timeout, timeout - lasted))
                    else:
                        raise
                else:
                    time.sleep(self.max_timeout)

    def put_nowait(self, obj):
        """Enqueue immediately or raise Queue.Full."""
        if self.full():
            raise BaseQueue.Full
        with self.lock:
            return self.connection.put(umsgpack.packb(obj))

    def get(self, block=True, timeout=None):
        """Dequeue one object; when blocking, poll until available or timeout."""
        if not block:
            return self.get_nowait()
        start_time = time.time()
        while True:
            try:
                return self.get_nowait()
            except BaseQueue.Empty:
                if timeout:
                    lasted = time.time() - start_time
                    if timeout > lasted:
                        time.sleep(min(self.max_timeout, timeout - lasted))
                    else:
                        raise
                else:
                    time.sleep(self.max_timeout)

    def get_nowait(self):
        """Reserve, unpack and delete one job, or raise Queue.Empty."""
        try:
            with self.lock:
                job = self.connection.reserve(0)
                if not job:
                    raise BaseQueue.Empty
                else:
                    body = umsgpack.unpackb(job.body)
                    job.delete()
                    return body
        except beanstalkc.DeadlineSoon:
            raise BaseQueue.Empty


# Export under the generic name expected by pyspider's queue loader.
Queue = BeanstalkQueue
| apache-2.0 |
apocquet/django | tests/template_tests/syntax_tests/test_filter_tag.py | 521 | 1795 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class FilterTagTests(SimpleTestCase):
    """Tests for the {% filter %} block tag, including the rule that the
    'safe' and 'escape' filters may not be used with it (alone or in a
    filter chain)."""

    @setup({'filter01': '{% filter upper %}{% endfilter %}'})
    def test_filter01(self):
        # Empty block renders to the empty string.
        output = self.engine.render_to_string('filter01')
        self.assertEqual(output, '')

    @setup({'filter02': '{% filter upper %}django{% endfilter %}'})
    def test_filter02(self):
        output = self.engine.render_to_string('filter02')
        self.assertEqual(output, 'DJANGO')

    @setup({'filter03': '{% filter upper|lower %}django{% endfilter %}'})
    def test_filter03(self):
        # Chained filters are applied left to right.
        output = self.engine.render_to_string('filter03')
        self.assertEqual(output, 'django')

    @setup({'filter04': '{% filter cut:remove %}djangospam{% endfilter %}'})
    def test_filter04(self):
        # Filter arguments may come from the rendering context.
        output = self.engine.render_to_string('filter04', {'remove': 'spam'})
        self.assertEqual(output, 'django')

    @setup({'filter05': '{% filter safe %}fail{% endfilter %}'})
    def test_filter05(self):
        # 'safe' is forbidden inside {% filter %}.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter05')

    @setup({'filter05bis': '{% filter upper|safe %}fail{% endfilter %}'})
    def test_filter05bis(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter05bis')

    @setup({'filter06': '{% filter escape %}fail{% endfilter %}'})
    def test_filter06(self):
        # 'escape' is forbidden inside {% filter %}.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter06')

    @setup({'filter06bis': '{% filter upper|escape %}fail{% endfilter %}'})
    def test_filter06bis(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('filter06bis')
| bsd-3-clause |
x303597316/hue | desktop/core/ext-py/Django-1.6.10/tests/comment_tests/tests/test_templatetags.py | 53 | 8243 | from __future__ import absolute_import
from django.contrib.comments.forms import CommentForm
from django.contrib.comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.template import Template, Context, Library, libraries
from ..models import Article, Author
from . import CommentTestCase
# Template-tag library used only by these tests.
register = Library()


@register.filter
def noop(variable, param=None):
    """Identity filter: returns its input unchanged.  Used by the tests to
    exercise whitespace handling in filter arguments."""
    return variable

# Register under the name the test templates {% load %}.
libraries['comment_testtags'] = register
class CommentTemplateTagTests(CommentTestCase):
    """Tests for the django.contrib.comments template tags: form rendering,
    comment counts/lists, permalinks, and the content-type caching that
    keeps the tags' query counts low."""

    def render(self, t, **c):
        # Render template source *t* with context **c; return both the
        # context (so tests can inspect variables set by tags) and output.
        ctx = Context(c)
        out = Template(t).render(ctx)
        return ctx, out

    def testCommentFormTarget(self):
        ctx, out = self.render("{% load comments %}{% comment_form_target %}")
        self.assertEqual(out, "/post/")

    def testGetCommentForm(self, tag=None):
        # The default tag targets the article by app.model + pk expression;
        # variants below pass a literal pk or the object itself.
        t = "{% load comments %}" + (tag or "{% get_comment_form for comment_tests.article a.id as form %}")
        ctx, out = self.render(t, a=Article.objects.get(pk=1))
        self.assertEqual(out, "")
        self.assertIsInstance(ctx["form"], CommentForm)

    def testGetCommentFormFromLiteral(self):
        self.testGetCommentForm("{% get_comment_form for comment_tests.article 1 as form %}")

    def testGetCommentFormFromObject(self):
        self.testGetCommentForm("{% get_comment_form for a as form %}")

    def testWhitespaceInGetCommentFormTag(self):
        # Spaces inside a filter argument must not break tag parsing.
        self.testGetCommentForm("{% load comment_testtags %}{% get_comment_form for a|noop:'x y' as form %}")

    def testRenderCommentForm(self, tag=None):
        t = "{% load comments %}" + (tag or "{% render_comment_form for comment_tests.article a.id %}")
        ctx, out = self.render(t, a=Article.objects.get(pk=1))
        self.assertTrue(out.strip().startswith("<form action="))
        self.assertTrue(out.strip().endswith("</form>"))

    def testRenderCommentFormFromLiteral(self):
        self.testRenderCommentForm("{% render_comment_form for comment_tests.article 1 %}")

    def testRenderCommentFormFromObject(self):
        self.testRenderCommentForm("{% render_comment_form for a %}")

    def testWhitespaceInRenderCommentFormTag(self):
        self.testRenderCommentForm("{% load comment_testtags %}{% render_comment_form for a|noop:'x y' %}")

    def testRenderCommentFormFromObjectWithQueryCount(self):
        with self.assertNumQueries(1):
            self.testRenderCommentFormFromObject()

    def verifyGetCommentCount(self, tag=None):
        t = "{% load comments %}" + (tag or "{% get_comment_count for comment_tests.article a.id as cc %}") + "{{ cc }}"
        ctx, out = self.render(t, a=Article.objects.get(pk=1))
        self.assertEqual(out, "2")

    def testGetCommentCount(self):
        self.createSomeComments()
        self.verifyGetCommentCount("{% get_comment_count for comment_tests.article a.id as cc %}")

    def testGetCommentCountFromLiteral(self):
        self.createSomeComments()
        self.verifyGetCommentCount("{% get_comment_count for comment_tests.article 1 as cc %}")

    def testGetCommentCountFromObject(self):
        self.createSomeComments()
        self.verifyGetCommentCount("{% get_comment_count for a as cc %}")

    def testWhitespaceInGetCommentCountTag(self):
        self.createSomeComments()
        self.verifyGetCommentCount("{% load comment_testtags %}{% get_comment_count for a|noop:'x y' as cc %}")

    def verifyGetCommentList(self, tag=None):
        # Only c2 targets the author object, so the list must contain it alone.
        c1, c2, c3, c4 = Comment.objects.all()[:4]
        t = "{% load comments %}" + (tag or "{% get_comment_list for comment_tests.author a.id as cl %}")
        ctx, out = self.render(t, a=Author.objects.get(pk=1))
        self.assertEqual(out, "")
        self.assertEqual(list(ctx["cl"]), [c2])

    def testGetCommentList(self):
        self.createSomeComments()
        self.verifyGetCommentList("{% get_comment_list for comment_tests.author a.id as cl %}")

    def testGetCommentListFromLiteral(self):
        self.createSomeComments()
        self.verifyGetCommentList("{% get_comment_list for comment_tests.author 1 as cl %}")

    def testGetCommentListFromObject(self):
        self.createSomeComments()
        self.verifyGetCommentList("{% get_comment_list for a as cl %}")

    def testWhitespaceInGetCommentListTag(self):
        self.createSomeComments()
        self.verifyGetCommentList("{% load comment_testtags %}{% get_comment_list for a|noop:'x y' as cl %}")

    def testGetCommentPermalink(self):
        c1, c2, c3, c4 = self.createSomeComments()
        t = "{% load comments %}{% get_comment_list for comment_tests.author author.id as cl %}"
        t += "{% get_comment_permalink cl.0 %}"
        ct = ContentType.objects.get_for_model(Author)
        author = Author.objects.get(pk=1)
        ctx, out = self.render(t, author=author)
        self.assertEqual(out, "/cr/%s/%s/#c%s" % (ct.id, author.id, c2.id))

    def testGetCommentPermalinkFormatted(self):
        # The optional second argument is a %-style anchor format string.
        c1, c2, c3, c4 = self.createSomeComments()
        t = "{% load comments %}{% get_comment_list for comment_tests.author author.id as cl %}"
        t += "{% get_comment_permalink cl.0 '#c%(id)s-by-%(user_name)s' %}"
        ct = ContentType.objects.get_for_model(Author)
        author = Author.objects.get(pk=1)
        ctx, out = self.render(t, author=author)
        self.assertEqual(out, "/cr/%s/%s/#c%s-by-Joe Somebody" % (ct.id, author.id, c2.id))

    def testWhitespaceInGetCommentPermalinkTag(self):
        c1, c2, c3, c4 = self.createSomeComments()
        t = "{% load comments comment_testtags %}{% get_comment_list for comment_tests.author author.id as cl %}"
        t += "{% get_comment_permalink cl.0|noop:'x y' %}"
        ct = ContentType.objects.get_for_model(Author)
        author = Author.objects.get(pk=1)
        ctx, out = self.render(t, author=author)
        self.assertEqual(out, "/cr/%s/%s/#c%s" % (ct.id, author.id, c2.id))

    def testRenderCommentList(self, tag=None):
        t = "{% load comments %}" + (tag or "{% render_comment_list for comment_tests.article a.id %}")
        ctx, out = self.render(t, a=Article.objects.get(pk=1))
        self.assertTrue(out.strip().startswith("<dl id=\"comments\">"))
        self.assertTrue(out.strip().endswith("</dl>"))

    def testRenderCommentListFromLiteral(self):
        self.testRenderCommentList("{% render_comment_list for comment_tests.article 1 %}")

    def testRenderCommentListFromObject(self):
        self.testRenderCommentList("{% render_comment_list for a %}")

    def testWhitespaceInRenderCommentListTag(self):
        self.testRenderCommentList("{% load comment_testtags %}{% render_comment_list for a|noop:'x y' %}")

    def testNumberQueries(self):
        """
        Ensure that the template tags use cached content types to reduce the
        number of DB queries.
        Refs #16042.
        """
        self.createSomeComments()

        # {% render_comment_list %} -----------------
        # Clear CT cache
        ContentType.objects.clear_cache()
        with self.assertNumQueries(4):
            self.testRenderCommentListFromObject()

        # CT's should be cached
        with self.assertNumQueries(3):
            self.testRenderCommentListFromObject()

        # {% get_comment_list %} --------------------
        ContentType.objects.clear_cache()
        with self.assertNumQueries(4):
            self.verifyGetCommentList()

        with self.assertNumQueries(3):
            self.verifyGetCommentList()

        # {% render_comment_form %} -----------------
        ContentType.objects.clear_cache()
        with self.assertNumQueries(3):
            self.testRenderCommentForm()

        with self.assertNumQueries(2):
            self.testRenderCommentForm()

        # {% get_comment_form %} --------------------
        ContentType.objects.clear_cache()
        with self.assertNumQueries(3):
            self.testGetCommentForm()

        with self.assertNumQueries(2):
            self.testGetCommentForm()

        # {% get_comment_count %} -------------------
        ContentType.objects.clear_cache()
        with self.assertNumQueries(3):
            self.verifyGetCommentCount()

        with self.assertNumQueries(2):
            self.verifyGetCommentCount()
| apache-2.0 |
CoDEmanX/ArangoDB | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/idle/FormatParagraph.py | 20 | 5735 | # Extension to format a paragraph
# Does basic, standard text formatting, and also understands Python
# comment blocks. Thus, for editing Python source code, this
# extension is really only suitable for reformatting these comment
# blocks or triple-quoted strings.
# Known problems with comment reformatting:
# * If there is a selection marked, and the first line of the
# selection is not complete, the block will probably not be detected
# as comments, and will have the normal "text formatting" rules
# applied.
# * If a comment block has leading whitespace that mixes tabs and
# spaces, they will not be considered part of the same block.
# * Fancy comments, like this bulleted list, arent handled :-)
import string
import re
class FormatParagraph:
    """IDLE editor extension that reformats the paragraph around the cursor
    (or the selection), with special handling for '#' comment blocks."""

    # Menu entry and key bindings consumed by IDLE's extension loader.
    menudefs = [
        ('edit', [
            ('Format Paragraph', '<<format-paragraph>>'),
        ])
    ]

    keydefs = {
        '<<format-paragraph>>': ['<Alt-q>'],
    }
    unix_keydefs = {
        '<<format-paragraph>>': ['<Meta-q>'],
    }

    def __init__(self, editwin):
        self.editwin = editwin

    def close(self):
        # Drop the editor reference so the window can be garbage collected.
        self.editwin = None

    def format_paragraph_event(self, event):
        """Reformat the selected text, or the paragraph under the cursor."""
        text = self.editwin.text
        first, last = self.editwin.get_selection_indices()
        if first and last:
            data = text.get(first, last)
            comment_header = ''
        else:
            first, last, comment_header, data = \
                find_paragraph(text, text.index("insert"))
        if comment_header:
            # Reformat the comment lines - convert to text sans header.
            lines = string.split(data, "\n")
            lines = map(lambda st, l=len(comment_header): st[l:], lines)
            data = string.join(lines, "\n")
            # Reformat to 70 chars or a 20 char width, whichever is greater.
            format_width = max(70-len(comment_header), 20)
            newdata = reformat_paragraph(data, format_width)
            # re-split and re-insert the comment header.
            newdata = string.split(newdata, "\n")
            # If the block ends in a \n, we dont want the comment
            # prefix inserted after it. (Im not sure it makes sense to
            # reformat a comment block that isnt made of complete
            # lines, but whatever!) Can't think of a clean soltution,
            # so we hack away
            block_suffix = ""
            if not newdata[-1]:
                block_suffix = "\n"
                newdata = newdata[:-1]
            builder = lambda item, prefix=comment_header: prefix+item
            newdata = string.join(map(builder, newdata), '\n') + block_suffix
        else:
            # Just a normal text format
            newdata = reformat_paragraph(data)
        text.tag_remove("sel", "1.0", "end")
        if newdata != data:
            # Replace the paragraph as one undoable operation.
            text.mark_set("insert", first)
            text.undo_block_start()
            text.delete(first, last)
            text.insert(first, newdata)
            text.undo_block_stop()
        else:
            text.mark_set("insert", last)
        text.see("insert")
def find_paragraph(text, mark):
    """Locate the paragraph containing Tk index *mark* in *text*.

    Returns (first, last, comment_header, data): Tk line indices bounding
    the paragraph, the shared leading-whitespace-plus-'#' prefix (empty
    for plain text), and the raw paragraph text.
    """
    lineno, col = map(int, string.split(mark, "."))
    line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
    # Skip blank lines below the mark to reach paragraph content.
    while text.compare("%d.0" % lineno, "<", "end") and is_all_white(line):
        lineno = lineno + 1
        line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
    first_lineno = lineno
    comment_header = get_comment_header(line)
    comment_header_len = len(comment_header)
    # Scan forward while lines share the same comment header and have content.
    while get_comment_header(line)==comment_header and \
          not is_all_white(line[comment_header_len:]):
        lineno = lineno + 1
        line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
    last = "%d.0" % lineno
    # Search back to beginning of paragraph
    lineno = first_lineno - 1
    line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
    while lineno > 0 and \
          get_comment_header(line)==comment_header and \
          not is_all_white(line[comment_header_len:]):
        lineno = lineno - 1
        line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
    first = "%d.0" % (lineno+1)
    return first, last, comment_header, text.get(first, last)
def reformat_paragraph(data, limit=70):
    """Reword the first paragraph of *data* to fit in *limit* columns.

    *data* is a "\n"-joined block of lines.  Leading blank lines and the
    first paragraph's indentation are preserved; the indentation of the
    second line (if non-blank) is used for continuation lines.  Text after
    the first paragraph is passed through unchanged.
    """
    # NOTE: the original called string.join/split/rstrip/expandtabs from the
    # long-deprecated ``string`` module, which no longer exist in Python 3;
    # the equivalent str methods below work on both Python 2 and 3.
    lines = data.split("\n")
    i = 0
    n = len(lines)
    while i < n and is_all_white(lines[i]):
        i = i + 1
    if i >= n:
        # Nothing but blank lines: nothing to reformat.
        return data
    indent1 = get_indent(lines[i])
    if i + 1 < n and not is_all_white(lines[i + 1]):
        indent2 = get_indent(lines[i + 1])
    else:
        indent2 = indent1
    new = lines[:i]
    partial = indent1
    while i < n and not is_all_white(lines[i]):
        # XXX Should take double space after period (etc.) into account.
        # Raw string here: "(\s+)" is an invalid escape sequence in modern
        # Python; r"(\s+)" keeps the separators as odd-indexed split items.
        words = re.split(r"(\s+)", lines[i])
        for j in range(0, len(words), 2):
            word = words[j]
            if not word:
                continue  # Can happen when line ends in whitespace
            if len((partial + word).expandtabs()) > limit and \
               partial != indent1:
                new.append(partial.rstrip())
                partial = indent2
            partial = partial + word + " "
            # Preserve extra spacing (e.g. double space) after a word when
            # the original separator was not a single blank.
            if j + 1 < len(words) and words[j + 1] != " ":
                partial = partial + " "
        i = i + 1
    new.append(partial.rstrip())
    # XXX Should reformat remaining paragraphs as well
    new.extend(lines[i:])
    return "\n".join(new)
def is_all_white(line):
    """Return True if *line* is empty or consists solely of whitespace."""
    return bool(re.match(r"\s*$", line))
def get_indent(line):
    """Return the leading whitespace of *line* (the empty string if none)."""
    leading = re.match(r"\s*", line)
    return leading.group(0)
def get_comment_header(line):
    """Return the leading whitespace-and-'#' prefix of *line*.

    For a comment line this is its indentation plus the '#' run (e.g.
    "  #"); for an indented code line it is just the indentation; for an
    unindented non-comment line it is "".
    """
    # r"^(\s*#*)" can match the empty string, so re.match() always succeeds;
    # the former "if m is None: return ''" guard was unreachable dead code.
    return re.match(r"^(\s*#*)", line).group(1)
| apache-2.0 |
40223125/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/xml/sax/__init__.py | 637 | 3505 | """Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
expatreader -- Driver that allows use of the Expat parser with SAX.
"""
from .xmlreader import InputSource
from .handler import ContentHandler, ErrorHandler
from ._exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=None):
    """Create a default SAX parser and use it to parse *source*.

    *source* is a file-like object or system identifier; SAX events are
    delivered to *handler*, a ContentHandler.  *errorHandler* defaults to
    a fresh ErrorHandler instance, whose default behaviour is to raise
    exceptions for errors.
    """
    # Build the default handler lazily instead of as a default argument so
    # a single instance created at import time is not shared by every call;
    # this also matches parseString(), which already normalizes None.
    if errorHandler is None:
        errorHandler = ErrorHandler()
    parser = make_parser()
    parser.setContentHandler(handler)
    parser.setErrorHandler(errorHandler)
    parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
    """Parse an XML document held in the bytes object *string*.

    Events are delivered to *handler*; a plain ErrorHandler (which raises
    on errors) is substituted when *errorHandler* is None.
    """
    from io import BytesIO

    if errorHandler is None:
        errorHandler = ErrorHandler()

    # Wrap the in-memory document as an InputSource byte stream.
    source = InputSource()
    source.setByteStream(BytesIO(string))

    reader = make_parser()
    reader.setContentHandler(handler)
    reader.setErrorHandler(errorHandler)
    reader.parse(source)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.expatreader"]

# tell modulefinder that importing sax potentially imports expatreader
# (the import is never executed: _false is 0, so the branch is dead at
# runtime and exists only for static import scanners)
_false = 0
if _false:
    import xml.sax.expatreader

import os, sys
# NOTE(review): the PY_SAX_PARSER environment override from the CPython
# original is commented out in this copy, so default_parser_list cannot be
# changed via the environment here.
#if "PY_SAX_PARSER" in os.environ:
#    default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os  # os is only needed above; drop it from the module namespace

# On Jython, allow the registry property "python.xml.sax.parser" to supply
# a comma-separated parser list instead of the built-in default.
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
    default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list=None):
    """Create and return a SAX parser.

    Creates the first parser it is able to instantiate of the ones given in
    the list created by doing parser_list + default_parser_list.  The lists
    must contain the names of Python modules containing both a SAX parser
    and a create_parser function.

    Raises SAXReaderNotAvailable if no parser could be created.
    """
    # The module-level "sys" binding is deleted at the end of this module,
    # so it must be (re-)imported locally; hoisted out of the except clause
    # where the original re-imported it on every failure.
    import sys

    # None replaces the original mutable default argument ([]); it also
    # keeps explicit make_parser(None) working.
    if parser_list is None:
        parser_list = []
    for parser_name in parser_list + default_parser_list:
        try:
            return _create_parser(parser_name)
        except ImportError:
            if parser_name in sys.modules:
                # The parser module was found, but importing it
                # failed unexpectedly, pass this exception through
                raise
        except SAXReaderNotAvailable:
            # The parser module detected that it won't work properly,
            # so try the next one
            pass
    raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser

if sys.platform[ : 4] == "java":
    # Jython: load the driver module through the JVM-aware importer so
    # that Java-implemented SAX drivers can be located.
    def _create_parser(parser_name):
        """Import *parser_name* via Jython's importer and build its parser."""
        from org.python.core import imp
        drv_module = imp.importName(parser_name, 0, globals())
        return drv_module.create_parser()

else:
    # CPython: a plain __import__ suffices; the non-empty fromlist makes
    # __import__ return the submodule itself rather than the top package.
    def _create_parser(parser_name):
        """Import *parser_name* and return the parser its factory creates."""
        drv_module = __import__(parser_name,{},{},['create_parser'])
        return drv_module.create_parser()

del sys  # keep the module namespace clean; sys was only needed at import time
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.