repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
XPRIZE/GLEXP-Team-SlideSpeech | appengine-try-python-flask-master/lib/werkzeug/contrib/profiler.py | 315 | 4920 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.profiler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a simple WSGI profiler middleware for finding
bottlenecks in web application. It uses the :mod:`profile` or
:mod:`cProfile` module to do the profiling and writes the stats to the
stream provided (defaults to stderr).
Example usage::
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys, time, os.path
try:
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
available = True
except ImportError:
available = False
class MergeStream(object):
    """An object that redirects `write` calls to multiple streams.

    Use this to log to both `sys.stdout` and a file::

        f = open('profiler.log', 'w')
        stream = MergeStream(sys.stdout, f)
        profiler = ProfilerMiddleware(app, stream)
    """

    def __init__(self, *streams):
        # Refuse to build a merge target that would silently drop all data.
        if not streams:
            raise TypeError('at least one stream must be given')
        self.streams = streams

    def write(self, data):
        # Fan the payload out to every registered stream, in order.
        for target in self.streams:
            target.write(data)
class ProfilerMiddleware(object):
    """Simple profiler middleware.  Wraps a WSGI application and profiles
    a request.  This intentionally buffers the response so that timings are
    more exact.

    By giving the `profile_dir` argument, pstat.Stats files are saved to that
    directory, one file per request.  Without it, a summary is printed to
    `stream` instead.

    For the exact meaning of `sort_by` and `restrictions` consult the
    :mod:`profile` documentation.

    .. versionadded:: 0.9
       Added support for `restrictions` and `profile_dir`.

    :param app: the WSGI application to profile.
    :param stream: the stream for the profiled stats.  Despite the module
                   docstring mentioning stderr, the code actually falls
                   back to ``sys.stdout`` when this is not given.
    :param sort_by: a tuple of columns to sort the result by.
    :param restrictions: a tuple of profiling restrictions, not used if
                         dumping to `profile_dir`.
    :param profile_dir: directory name to save pstat files
    """

    def __init__(self, app, stream=None,
                 sort_by=('time', 'calls'), restrictions=(), profile_dir=None):
        if not available:
            raise RuntimeError('the profiler is not available because '
                               'profile or pstat is not installed.')
        self._app = app
        # NOTE: falls back to stdout, not stderr (see docstring above).
        self._stream = stream or sys.stdout
        self._sort_by = sort_by
        self._restrictions = restrictions
        self._profile_dir = profile_dir

    def __call__(self, environ, start_response):
        response_body = []

        def catching_start_response(status, headers, exc_info=None):
            # Forward to the real start_response, but hand back our
            # buffer's append as the WSGI write() callable so any
            # imperative writes are captured too.
            start_response(status, headers, exc_info)
            return response_body.append

        def runapp():
            appiter = self._app(environ, catching_start_response)
            response_body.extend(appiter)
            # WSGI requires close() to be called on iterables that have it.
            if hasattr(appiter, 'close'):
                appiter.close()

        p = Profile()
        start = time.time()
        p.runcall(runapp)
        body = ''.join(response_body)
        elapsed = time.time() - start

        if self._profile_dir is not None:
            # One pstats dump file per request, named by method, dotted
            # path, elapsed milliseconds, and a timestamp for uniqueness.
            prof_filename = os.path.join(self._profile_dir,
                                         '%s.%s.%06dms.%d.prof' % (
                environ['REQUEST_METHOD'],
                environ.get('PATH_INFO').strip('/').replace('/', '.') or 'root',
                elapsed * 1000.0,
                time.time()
            ))
            p.dump_stats(prof_filename)
        else:
            # Print a human-readable summary to the configured stream.
            stats = Stats(p, stream=self._stream)
            stats.sort_stats(*self._sort_by)
            self._stream.write('-' * 80)
            self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
            stats.print_stats(*self._restrictions)
            self._stream.write('-' * 80 + '\n\n')

        return [body]
def make_action(app_factory, hostname='localhost', port=5000,
                threaded=False, processes=1, stream=None,
                sort_by=('time', 'calls'), restrictions=()):
    """Return a new callback for :mod:`werkzeug.script` that starts a local
    server with the profiler enabled.

    ::

        from werkzeug.contrib import profiler
        action_profile = profiler.make_action(make_app)
    """
    def action(hostname=('h', hostname), port=('p', port),
               threaded=threaded, processes=processes):
        """Start a new development server."""
        # NOTE(review): the ('h', hostname) / ('p', port) default tuples
        # look like the werkzeug.script convention for declaring short
        # options with defaults -- confirm against werkzeug.script before
        # changing them.
        from werkzeug.serving import run_simple
        app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
        run_simple(hostname, port, app, False, None, threaded, processes)
    return action
| apache-2.0 |
AICP/external_chromium_org | tools/perf/metrics/chrome_proxy_unittest.py | 10 | 8323 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import unittest
from metrics import chrome_proxy
from metrics import network_unittest
from metrics import test_page_measurement_results
# Timeline events used in tests.

# An HTML not via proxy.
EVENT_HTML_PROXY = network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
    url='http://test.html1',
    response_headers={
        'Content-Type': 'text/html',
        'Content-Length': str(len(network_unittest.HTML_BODY)),
    },
    body=network_unittest.HTML_BODY)

# An HTML via proxy with the deprecated Via header.
EVENT_HTML_PROXY_DEPRECATED_VIA = (
    network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
        url='http://test.html2',
        response_headers={
            'Content-Type': 'text/html',
            'Content-Encoding': 'gzip',
            'X-Original-Content-Length': str(len(network_unittest.HTML_BODY)),
            'Via': (chrome_proxy.CHROME_PROXY_VIA_HEADER_DEPRECATED +
                    ',other-via'),
        },
        body=network_unittest.HTML_BODY))

# An image via proxy with Via header and it is cached.
EVENT_IMAGE_PROXY_CACHED = (
    network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
        url='http://test.image',
        response_headers={
            'Content-Type': 'image/jpeg',
            'Content-Encoding': 'gzip',
            'X-Original-Content-Length': str(network_unittest.IMAGE_OCL),
            'Via': '1.1 ' + chrome_proxy.CHROME_PROXY_VIA_HEADER,
        },
        body=base64.b64encode(network_unittest.IMAGE_BODY),
        base64_encoded_body=True,
        served_from_cache=True))

# An image fetched directly (no Via header at all).
EVENT_IMAGE_DIRECT = (
    network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
        url='http://test.image',
        response_headers={
            'Content-Type': 'image/jpeg',
            'Content-Encoding': 'gzip',
        },
        body=base64.b64encode(network_unittest.IMAGE_BODY),
        base64_encoded_body=True))

# A safe-browsing malware response (307 redirect back to itself).
EVENT_MALWARE_PROXY = (
    network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
        url='http://test.malware',
        response_headers={
            'X-Malware-Url': '1',
            'Via': '1.1 ' + chrome_proxy.CHROME_PROXY_VIA_HEADER,
            'Location': 'http://test.malware',
        },
        status=307))
class ChromeProxyMetricTest(unittest.TestCase):
  """Tests for chrome_proxy.ChromeProxyResponse / ChromeProxyMetric."""

  _test_proxy_info = {}

  def _StubGetProxyInfo(self, info):
    """Monkey-patch GetProxyInfoFromNetworkInternals to return `info`."""
    def stub(unused_tab, unused_url=''):  # pylint: disable=W0613
      return ChromeProxyMetricTest._test_proxy_info
    chrome_proxy.GetProxyInfoFromNetworkInternals = stub
    ChromeProxyMetricTest._test_proxy_info = info

  def testChromeProxyResponse(self):
    # An https non-proxy response.
    resp = chrome_proxy.ChromeProxyResponse(
        network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
            url='https://test.url',
            response_headers={
                'Content-Type': 'text/html',
                'Content-Length': str(len(network_unittest.HTML_BODY)),
                'Via': 'some other via',
            },
            body=network_unittest.HTML_BODY))
    self.assertFalse(resp.ShouldHaveChromeProxyViaHeader())
    self.assertFalse(resp.HasChromeProxyViaHeader())
    self.assertTrue(resp.IsValidByViaHeader())

    # A proxied JPEG image response
    resp = chrome_proxy.ChromeProxyResponse(
        network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
            url='http://test.image',
            response_headers={
                'Content-Type': 'image/jpeg',
                'Content-Encoding': 'gzip',
                'Via': '1.1 ' + chrome_proxy.CHROME_PROXY_VIA_HEADER,
                'X-Original-Content-Length': str(network_unittest.IMAGE_OCL),
            },
            body=base64.b64encode(network_unittest.IMAGE_BODY),
            base64_encoded_body=True))
    self.assertTrue(resp.ShouldHaveChromeProxyViaHeader())
    self.assertTrue(resp.HasChromeProxyViaHeader())
    self.assertTrue(resp.IsValidByViaHeader())

  def testChromeProxyMetricForDataSaving(self):
    metric = chrome_proxy.ChromeProxyMetric()
    events = [
        EVENT_HTML_PROXY,
        EVENT_HTML_PROXY_DEPRECATED_VIA,
        EVENT_IMAGE_PROXY_CACHED,
        EVENT_IMAGE_DIRECT]
    metric.SetEvents(events)
    # BUG FIX: the original used assertTrue(a, b); unittest treats the
    # second argument as the failure *message*, so the two lengths were
    # never actually compared.  assertEqual performs the comparison.
    self.assertEqual(len(events), len(list(metric.IterResponses(None))))
    results = test_page_measurement_results.TestPageMeasurementResults(self)
    metric.AddResultsForDataSaving(None, results)
    results.AssertHasPageSpecificScalarValue('resources_via_proxy', 'count', 2)
    results.AssertHasPageSpecificScalarValue('resources_from_cache', 'count', 1)
    results.AssertHasPageSpecificScalarValue('resources_direct', 'count', 2)

  def testChromeProxyMetricForHeaderValidation(self):
    metric = chrome_proxy.ChromeProxyMetric()
    metric.SetEvents([
        EVENT_HTML_PROXY,
        EVENT_HTML_PROXY_DEPRECATED_VIA,
        EVENT_IMAGE_PROXY_CACHED,
        EVENT_IMAGE_DIRECT])
    results = test_page_measurement_results.TestPageMeasurementResults(self)
    missing_via_exception = False
    try:
      metric.AddResultsForHeaderValidation(None, results)
    except chrome_proxy.ChromeProxyMetricException:
      missing_via_exception = True
    # Only the HTTP image response does not have a valid Via header.
    self.assertTrue(missing_via_exception)

    # Two events with valid Via headers.
    metric.SetEvents([
        EVENT_HTML_PROXY_DEPRECATED_VIA,
        EVENT_IMAGE_PROXY_CACHED])
    metric.AddResultsForHeaderValidation(None, results)
    results.AssertHasPageSpecificScalarValue('checked_via_header', 'count', 2)

  def testChromeProxyMetricForBypass(self):
    metric = chrome_proxy.ChromeProxyMetric()
    metric.SetEvents([
        EVENT_HTML_PROXY,
        EVENT_HTML_PROXY_DEPRECATED_VIA,
        EVENT_IMAGE_PROXY_CACHED,
        EVENT_IMAGE_DIRECT])
    results = test_page_measurement_results.TestPageMeasurementResults(self)
    bypass_exception = False
    try:
      metric.AddResultsForBypass(None, results)
    except chrome_proxy.ChromeProxyMetricException:
      bypass_exception = True
    # Two of the first three events have Via headers.
    self.assertTrue(bypass_exception)

    # Use directly fetched image only. It is treated as bypassed.
    metric.SetEvents([EVENT_IMAGE_DIRECT])
    metric.AddResultsForBypass(None, results)
    results.AssertHasPageSpecificScalarValue('bypass', 'count', 1)

  def testChromeProxyMetricForHTTPFallback(self):
    metric = chrome_proxy.ChromeProxyMetric()
    metric.SetEvents([
        EVENT_HTML_PROXY,
        EVENT_HTML_PROXY_DEPRECATED_VIA])
    results = test_page_measurement_results.TestPageMeasurementResults(self)

    # Proxy disabled entirely -> fallback accounting must raise.
    fallback_exception = False
    info = {}
    info['enabled'] = False
    self._StubGetProxyInfo(info)
    try:
      metric.AddResultsForBypass(None, results)
    except chrome_proxy.ChromeProxyMetricException:
      fallback_exception = True
    self.assertTrue(fallback_exception)

    # Proxy enabled but pointing at an unexpected host -> must also raise.
    fallback_exception = False
    info['enabled'] = True
    info['proxies'] = [
        'something.else.com:80',
        chrome_proxy.PROXY_SETTING_DIRECT
    ]
    self._StubGetProxyInfo(info)
    try:
      metric.AddResultsForBypass(None, results)
    except chrome_proxy.ChromeProxyMetricException:
      fallback_exception = True
    self.assertTrue(fallback_exception)

    # Expected HTTP-fallback proxy configuration -> succeeds.
    info['enabled'] = True
    info['proxies'] = [
        chrome_proxy.PROXY_SETTING_HTTP,
        chrome_proxy.PROXY_SETTING_DIRECT
    ]
    self._StubGetProxyInfo(info)
    metric.AddResultsForHTTPFallback(None, results)

  def testChromeProxyMetricForSafebrowsing(self):
    metric = chrome_proxy.ChromeProxyMetric()
    metric.SetEvents([EVENT_MALWARE_PROXY])
    results = test_page_measurement_results.TestPageMeasurementResults(self)

    metric.AddResultsForSafebrowsing(None, results)
    results.AssertHasPageSpecificScalarValue('safebrowsing', 'boolean', True)

    # Clear results and metrics to test no response for safebrowsing
    results = test_page_measurement_results.TestPageMeasurementResults(self)
    metric.SetEvents([])
    metric.AddResultsForSafebrowsing(None, results)
    results.AssertHasPageSpecificScalarValue('safebrowsing', 'boolean', True)
| bsd-3-clause |
rmfitzpatrick/ansible | lib/ansible/modules/cloud/amazon/sts_assume_role.py | 22 | 5190 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: sts_assume_role
short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
description:
- Assume a role using AWS Security Token Service and obtain temporary credentials
version_added: "2.0"
author: Boris Ekelchik (@bekelchik)
options:
role_arn:
description:
- The Amazon Resource Name (ARN) of the role that the caller is
assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs)
required: true
role_session_name:
description:
- Name of the role's session - will be used by CloudTrail
required: true
policy:
description:
- Supplemental policy to use in addition to assumed role's policies.
required: false
default: null
duration_seconds:
description:
- The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour).
By default, the value is set to 3600 seconds.
required: false
default: null
external_id:
description:
- A unique identifier that is used by third parties to assume a role in their customers' accounts.
required: false
default: null
mfa_serial_number:
description:
- The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
required: false
default: null
mfa_token:
description:
- The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
required: false
default: null
notes:
- In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
sts_assume_role:
role_arn: "arn:aws:iam::123456789012:role/someRole"
role_session_name: "someRoleSession"
register: assumed_role
# Use the assumed role above to tag an instance in account 123456789012
ec2_tag:
aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
security_token: "{{ assumed_role.sts_creds.session_token }}"
resource: i-xyzxyz01
state: present
tags:
MyNewTag: value
'''
try:
import boto.sts
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
def assume_role_policy(connection, module):
    """Call STS AssumeRole with the module parameters and exit the module.

    On success, exits the module with the temporary credentials and the
    assumed-role user; on a BotoServerError, fails the module with the
    error message.
    """
    role_arn = module.params.get('role_arn')
    role_session_name = module.params.get('role_session_name')
    policy = module.params.get('policy')
    duration_seconds = module.params.get('duration_seconds')
    external_id = module.params.get('external_id')
    mfa_serial_number = module.params.get('mfa_serial_number')
    mfa_token = module.params.get('mfa_token')
    changed = False

    try:
        assumed_role = connection.assume_role(role_arn, role_session_name, policy,
                                              duration_seconds, external_id,
                                              mfa_serial_number, mfa_token)
        changed = True
    except BotoServerError as e:
        # BUG FIX: fail_json expects a serializable message string, not the
        # raw exception object.
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed,
                     sts_creds=assumed_role.credentials.__dict__,
                     sts_user=assumed_role.user.__dict__)
def main():
    """Ansible entry point: validate parameters, connect to STS, assume role."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            # Required parameters take no default (a default on a required
            # option is contradictory and ignored by AnsibleModule).
            role_arn=dict(required=True),
            role_session_name=dict(required=True),
            duration_seconds=dict(required=False, default=None, type='int'),
            external_id=dict(required=False, default=None),
            policy=dict(required=False, default=None),
            mfa_serial_number=dict(required=False, default=None),
            mfa_token=dict(required=False, default=None)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.sts, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    try:
        assume_role_policy(connection, module)
    except BotoServerError as e:
        # BUG FIX: serialize the exception for fail_json (was msg=e).
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| gpl-3.0 |
russellkondaveti/jira-dependency-graph | jira-dependency-graph.py | 1 | 6921 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import json
import sys
import requests
# Using REST is pretty simple. The vast majority of this code is about the "other stuff": dealing with
# command line options, formatting graphviz, calling Google Charts, etc. The actual JIRA REST-specific code
# is only about 5 lines.
GOOGLE_CHART_URL = 'http://chart.apis.google.com/chart?'
def log(*args):
    """Write diagnostic output to stderr, keeping stdout free for graph data."""
    sys.stderr.write(' '.join(str(arg) for arg in args) + '\n')
class JiraSearch(object):
    """ This factory will create the actual method used to fetch issues from JIRA. This is really just a closure that
        saves us having to pass a bunch of parameters all over the place all the time. """

    def __init__(self, url, auth):
        # `auth` is either a JSESSIONID cookie value (str) or a
        # (user, password) tuple for HTTP basic auth.
        self.url = url + '/rest/api/latest'
        self.auth = auth
        # Restrict responses to the fields the graph builder cares about.
        self.fields = ','.join(['key', 'issuetype', 'issuelinks', 'subtasks'])

    def get(self, uri, params=None):
        """Perform an authenticated GET against the JIRA REST endpoint."""
        # BUG FIX: a mutable dict default argument (params={}) is shared
        # between calls; use None as the sentinel instead.
        if params is None:
            params = {}
        headers = {'Content-Type' : 'application/json'}
        url = self.url + uri

        if isinstance(self.auth, str):
            return requests.get(url, params=params,
                                cookies={'JSESSIONID': self.auth},
                                headers=headers)
        return requests.get(url, params=params, auth=self.auth, headers=headers)

    def get_issue(self, key):
        """ Given an issue key (i.e. JRA-9) return the JSON representation of it. This is the only place where we deal
            with JIRA's REST API. """
        log('Fetching ' + key)
        # we need to expand subtasks and links since that's what we care about here.
        response = self.get('/issue/%s' % key, params={'fields': self.fields})
        return response.json()

    def query(self, query):
        log('Querying ' + query)
        response = self.get('/search', params={'jql': query, 'fields': self.fields})
        content = response.json()
        return content['issues']
def build_graph_data(start_issue_key, jira, excludes):
    """ Given a starting image key and the issue-fetching function build up the GraphViz data representing relationships
        between issues. This will consider both subtasks and issue links.
    """
    def get_key(issue):
        return issue['key']

    def process_link(issue_key, link):
        # A link names its peer under 'outwardIssue' or 'inwardIssue'
        # depending on which side of the relationship we are looking at.
        # BUG FIX: dict.has_key() was removed in Python 3; the `in`
        # operator is equivalent and also works on Python 2.
        if 'outwardIssue' in link:
            direction = 'outward'
        elif 'inwardIssue' in link:
            direction = 'inward'
        else:
            return

        linked_issue = link[direction + 'Issue']
        linked_issue_key = get_key(linked_issue)
        link_type = link['type'][direction]

        # Excluded link types still report the linked key (so it is walked)
        # but contribute no edge to the graph.
        if link_type in excludes:
            return linked_issue_key, None

        if direction == 'outward':
            log(issue_key + ' => ' + link_type + ' => ' + linked_issue_key)
        else:
            log(issue_key + ' <= ' + link_type + ' <= ' + linked_issue_key)

        node = '"%s"->"%s"[label="%s"]' % (issue_key, linked_issue_key, link_type)
        return linked_issue_key, node

    # since the graph can be cyclic we need to prevent infinite recursion
    seen = []

    def walk(issue_key, graph):
        """ issue is the JSON representation of the issue """
        issue = jira.get_issue(issue_key)
        seen.append(issue_key)
        children = []
        fields = issue['fields']

        if fields['issuetype']['name'] == 'Epic':
            issues = jira.query('"Epic Link" = "%s"' % issue_key)
            for subtask in issues:
                subtask_key = get_key(subtask)
                log(subtask_key + ' => references epic => ' + issue_key)
                node = '"%s"->"%s"[color=orange]' % (issue_key, subtask_key)
                graph.append(node)
                children.append(subtask_key)

        if 'subtasks' in fields:
            for subtask in fields['subtasks']:
                subtask_key = get_key(subtask)
                log(issue_key + ' => has subtask => ' + subtask_key)
                node = '"%s"->"%s"[color=blue][label="subtask"]' % (issue_key, subtask_key)
                graph.append(node)
                children.append(subtask_key)

        if 'issuelinks' in fields:
            for other_link in fields['issuelinks']:
                result = process_link(issue_key, other_link)
                if result is not None:
                    children.append(result[0])
                    if result[1] is not None:
                        graph.append(result[1])

        # now construct graph data for all subtasks and links of this issue
        for child in (x for x in children if x not in seen):
            walk(child, graph)
        return graph

    return walk(start_issue_key, [])
def create_graph_image(graph_data, image_file):
    """ Given a formatted blob of graphviz chart data[1], make the actual request to Google
        and store the resulting image to disk.

    [1]: http://code.google.com/apis/chart/docs/gallery/graphviz.html
    """
    chart_url = GOOGLE_CHART_URL + 'cht=gv&chl=digraph{%s}' % ';'.join(graph_data)

    print('Google Chart request:')
    print(chart_url)

    response = requests.get(chart_url)

    # BUG FIX: the chart is a binary PNG -- write in binary mode.  The
    # original text-mode 'w+' corrupts the image on Windows and fails on
    # Python 3 where response.content is bytes.  `with` also guarantees
    # the file handle is closed.
    with open(image_file, 'wb') as image:
        print('Writing to ' + image_file)
        image.write(response.content)

    return image_file
def print_graph(graph_data):
    """Write the assembled graphviz digraph source to stdout."""
    edges = ';'.join(graph_data)
    print('digraph{' + edges + '}')
def parse_args():
    """Define and parse the command line interface for the script."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-u', '--user', dest='user', default='admin', help='Username to access JIRA')
    arg_parser.add_argument('-p', '--password', dest='password', default='admin', help='Password to access JIRA')
    arg_parser.add_argument('-c', '--cookie', dest='cookie', default=None, help='JSESSIONID session cookie value')
    arg_parser.add_argument('-j', '--jira', dest='jira_url', default='http://jira.example.com', help='JIRA Base URL')
    arg_parser.add_argument('-f', '--file', dest='image_file', default='issue_graph.png', help='Filename to write image to')
    arg_parser.add_argument('-l', '--local', action='store_true', default=False, help='Render graphviz code to stdout')
    arg_parser.add_argument('-x', '--exclude-link', dest='excludes', default=[], action='append', help='Exclude link type(s)')
    arg_parser.add_argument('issue', nargs='?', help='The issue key (e.g. JRADEV-1107, JRADEV-1391)')
    return arg_parser.parse_args()
def main():
    """Entry point: fetch the issue graph and render it locally or via Google Charts."""
    options = parse_args()

    if options.cookie is not None:
        # Log in with browser and use --cookie=ABCDEF012345 commandline argument
        auth = options.cookie
    else:
        # Basic Auth is usually easier for scripts like this to deal with than Cookies.
        auth = (options.user, options.password)

    jira = JiraSearch(options.jira_url, auth)

    graph = build_graph_data(options.issue, jira, options.excludes)

    if options.local:
        # Print graphviz source for local rendering instead of calling Google.
        print_graph(graph)
    else:
        create_graph_image(graph, options.image_file)


if __name__ == '__main__':
    main()
| mit |
notro/linux-staging | scripts/gdb/linux/modules.py | 774 | 2718 | #
# gdb helper commands and functions for Linux kernel debugging
#
# module tools
#
# Copyright (c) Siemens AG, 2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import cpus, utils
module_type = utils.CachedType("struct module")
def module_list():
    """Yield a `struct module *` for every loaded kernel module.

    Walks the target kernel's global `modules` list via its embedded
    `list` member, stopping when the circular list returns to its head.
    """
    global module_type
    module_ptr_type = module_type.get_type().pointer()
    modules = gdb.parse_and_eval("modules")
    entry = modules['next']
    end_of_list = modules.address

    while entry != end_of_list:
        # Convert the list_head pointer back to the containing module.
        yield utils.container_of(entry, module_ptr_type, "list")
        entry = entry['next']
def find_module_by_name(name):
    """Return the loaded module whose name equals *name*, or None."""
    matches = (module for module in module_list()
               if module['name'].string() == name)
    return next(matches, None)
class LxModule(gdb.Function):
    """Find module by name and return the module variable.

    $lx_module("MODULE"): Given the name MODULE, iterate over all loaded
    modules of the target and return that module variable which MODULE
    matches."""

    def __init__(self):
        super(LxModule, self).__init__("lx_module")

    def invoke(self, mod_name):
        # gdb passes the argument as a gdb.Value; convert to a Python str.
        mod_name = mod_name.string()
        module = find_module_by_name(mod_name)
        if module:
            return module.dereference()
        else:
            raise gdb.GdbError("Unable to find MODULE " + mod_name)


# Instantiating registers the $lx_module convenience function with gdb.
LxModule()
class LxLsmod(gdb.Command):
    """List currently loaded modules."""

    # Cached "struct module_use" type, resolved against the target.
    _module_use_type = utils.CachedType("struct module_use")

    def __init__(self):
        super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # Header: widen the address column on 64-bit targets.
        gdb.write(
            "Address{0} Module Size Used by\n".format(
                " " if utils.get_long_type().sizeof == 8 else ""))

        for module in module_list():
            gdb.write("{address} {name:<19} {size:>8} {ref}".format(
                address=str(module['module_core']).split()[0],
                name=module['name'].string(),
                size=str(module['core_size']),
                ref=str(module['refcnt']['counter'])))

            # Walk source_list to print the comma-separated users of
            # this module, mirroring /proc/modules output.
            source_list = module['source_list']
            t = self._module_use_type.get_type().pointer()
            entry = source_list['next']
            first = True
            while entry != source_list.address:
                use = utils.container_of(entry, t, "source_list")
                gdb.write("{separator}{name}".format(
                    separator=" " if first else ",",
                    name=use['source']['name'].string()))
                first = False
                entry = entry['next']
            gdb.write("\n")


# Instantiating registers the lx-lsmod command with gdb.
LxLsmod()
| gpl-2.0 |
Srisai85/scikit-learn | sklearn/utils/sparsetools/tests/test_traversal.py | 315 | 2001 | from __future__ import division, print_function, absolute_import
from nose import SkipTest
import numpy as np
from numpy.testing import assert_array_almost_equal
try:
from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\
csgraph_to_dense, csgraph_from_dense
except ImportError:
# Oldish versions of scipy don't have that
csgraph_from_dense = None
def test_graph_breadth_first():
    """BFS tree of a small undirected weighted graph, both directed modes."""
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")

    dense = np.array([[0, 1, 2, 0, 0],
                      [1, 0, 0, 0, 3],
                      [2, 0, 0, 7, 0],
                      [0, 0, 7, 0, 1],
                      [0, 3, 0, 1, 0]])
    graph = csgraph_from_dense(dense, null_value=0)

    # Expected breadth-first tree rooted at node 0.
    expected = np.array([[0, 1, 2, 0, 0],
                         [0, 0, 0, 0, 3],
                         [0, 0, 0, 7, 0],
                         [0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0]])

    for directed in (True, False):
        tree = breadth_first_tree(graph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
def test_graph_depth_first():
    """DFS tree of a small undirected weighted graph, both directed modes."""
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")

    dense = np.array([[0, 1, 2, 0, 0],
                      [1, 0, 0, 0, 3],
                      [2, 0, 0, 7, 0],
                      [0, 0, 7, 0, 1],
                      [0, 3, 0, 1, 0]])
    graph = csgraph_from_dense(dense, null_value=0)

    # Expected depth-first tree rooted at node 0.
    expected = np.array([[0, 1, 0, 0, 0],
                         [0, 0, 0, 0, 3],
                         [0, 0, 0, 0, 0],
                         [0, 0, 7, 0, 0],
                         [0, 0, 0, 1, 0]])

    for directed in (True, False):
        tree = depth_first_tree(graph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
| bsd-3-clause |
pjryan126/solid-start-careers | store/api/glassdoor/venv/lib/python2.7/site-packages/setuptools/tests/test_packageindex.py | 377 | 7625 | """Package Index Tests
"""
import sys
import os
import unittest
import pkg_resources
from setuptools.compat import urllib2, httplib, HTTPError, unicode, pathname2url
import distutils.errors
import setuptools.package_index
from setuptools.tests.server import IndexServer
class TestPackageIndex(unittest.TestCase):
    """Behavioural tests for setuptools.package_index.PackageIndex."""

    def test_bad_url_bad_port(self):
        # Port 0 is never connectable; the error must mention the URL.
        index = setuptools.package_index.PackageIndex()
        url = 'http://127.0.0.1:0/nonesuch/test_package_index'
        try:
            v = index.open_url(url)
        except Exception:
            v = sys.exc_info()[1]
            self.assertTrue(url in str(v))
        else:
            self.assertTrue(isinstance(v, HTTPError))

    def test_bad_url_typo(self):
        # issue 16
        # easy_install inquant.contentmirror.plone breaks because of a typo
        # in its home URL
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
        try:
            v = index.open_url(url)
        except Exception:
            v = sys.exc_info()[1]
            self.assertTrue(url in str(v))
        else:
            self.assertTrue(isinstance(v, HTTPError))

    def test_bad_url_bad_status_line(self):
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        # Simulate a malformed HTTP response from the server.
        def _urlopen(*args):
            raise httplib.BadStatusLine('line')

        index.opener = _urlopen
        url = 'http://example.com'
        try:
            v = index.open_url(url)
        except Exception:
            v = sys.exc_info()[1]
            self.assertTrue('line' in str(v))
        else:
            raise AssertionError('Should have raise here!')

    def test_bad_url_double_scheme(self):
        """
        A bad URL with a double scheme should raise a DistutilsError.
        """
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        # issue 20
        url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
        try:
            index.open_url(url)
        except distutils.errors.DistutilsError:
            error = sys.exc_info()[1]
            msg = unicode(error)
            # Accept any of the platform-dependent resolver error texts.
            assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg
            return
        raise RuntimeError("Did not raise")

    def test_bad_url_screwy_href(self):
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )

        # issue #160
        if sys.version_info[0] == 2 and sys.version_info[1] == 7:
            # this should not fail
            url = 'http://example.com'
            page = ('<a href="http://www.famfamfam.com]('
                    'http://www.famfamfam.com/">')
            index.process_index(url, page)

    def test_url_ok(self):
        index = setuptools.package_index.PackageIndex(
            hosts=('www.example.com',)
        )
        url = 'file:///tmp/test_package_index'
        self.assertTrue(index.url_ok(url, True))

    def test_links_priority(self):
        """
        Download links from the pypi simple index should be used before
        external download links.
        https://bitbucket.org/tarek/distribute/issue/163

        Usecase :
        - someone uploads a package on pypi, a md5 is generated
        - someone manually copies this link (with the md5 in the url) onto an
          external page accessible from the package page.
        - someone reuploads the package (with a different md5)
        - while easy_installing, an MD5 error occurs because the external link
          is used
        -> Setuptools should use the link from pypi, not the external one.
        """
        if sys.platform.startswith('java'):
            # Skip this test on jython because binding to :0 fails
            return

        # start an index server
        server = IndexServer()
        server.start()
        index_url = server.base_url() + 'test_links_priority/simple/'

        # scan a test index
        pi = setuptools.package_index.PackageIndex(index_url)
        requirement = pkg_resources.Requirement.parse('foobar')
        pi.find_packages(requirement)
        server.stop()

        # the distribution has been found
        self.assertTrue('foobar' in pi)
        # we have only one link, because links are compared without md5
        self.assertTrue(len(pi['foobar'])==1)
        # the link should be from the index
        self.assertTrue('correct_md5' in pi['foobar'][0].location)

    def test_parse_bdist_wininst(self):
        # Windows installer filenames decompose into (base, py_version, plat).
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win32-py2.4.exe'), ('reportlab-2.5', '2.4', 'win32'))
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win32.exe'), ('reportlab-2.5', None, 'win32'))
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win-amd64-py2.7.exe'), ('reportlab-2.5', '2.7', 'win-amd64'))
        self.assertEqual(setuptools.package_index.parse_bdist_wininst(
            'reportlab-2.5.win-amd64.exe'), ('reportlab-2.5', None, 'win-amd64'))

    def test__vcs_split_rev_from_url(self):
        """
        Test the basic usage of _vcs_split_rev_from_url
        """
        vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url
        url, rev = vsrfu('https://example.com/bar@2995')
        self.assertEqual(url, 'https://example.com/bar')
        self.assertEqual(rev, '2995')

    def test_local_index(self):
        """
        local_open should be able to read an index from the file system.
        """
        f = open('index.html', 'w')
        f.write('<div>content</div>')
        f.close()
        try:
            url = 'file:' + pathname2url(os.getcwd()) + '/'
            res = setuptools.package_index.local_open(url)
        finally:
            os.remove('index.html')
        assert 'content' in res.read()
class TestContentCheckers(unittest.TestCase):
    """Tests for the URL-fragment based download hash checking."""

    def test_md5(self):
        checker = setuptools.package_index.HashChecker.from_url(
            'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
        checker.feed('You should probably not be using MD5'.encode('ascii'))
        self.assertEqual(checker.hash.hexdigest(),
                         'f12895fdffbd45007040d2e44df98478')
        self.assertTrue(checker.is_valid())

    def test_other_fragment(self):
        "Content checks should succeed silently if no hash is present"
        checker = setuptools.package_index.HashChecker.from_url(
            'http://foo/bar#something%20completely%20different')
        checker.feed('anything'.encode('ascii'))
        self.assertTrue(checker.is_valid())

    def test_blank_md5(self):
        "Content checks should succeed if a hash is empty"
        checker = setuptools.package_index.HashChecker.from_url(
            'http://foo/bar#md5=')
        checker.feed('anything'.encode('ascii'))
        self.assertTrue(checker.is_valid())

    def test_get_hash_name_md5(self):
        checker = setuptools.package_index.HashChecker.from_url(
            'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
        self.assertEqual(checker.hash_name, 'md5')

    def test_report(self):
        checker = setuptools.package_index.HashChecker.from_url(
            'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
        rep = checker.report(lambda x: x, 'My message about %s')
        self.assertEqual(rep, 'My message about md5')
| gpl-2.0 |
af001/dpkt | dpkt/tcp.py | 17 | 3179 | # $Id$
"""Transmission Control Protocol."""
import dpkt
# TCP control flags (bit values OR'ed into the TCP.flags header field)
TH_FIN = 0x01 # end of data
TH_SYN = 0x02 # synchronize sequence numbers
TH_RST = 0x04 # reset connection
TH_PUSH = 0x08 # push
TH_ACK = 0x10 # acknowledgment number set
TH_URG = 0x20 # urgent pointer set
TH_ECE = 0x40 # ECN echo, RFC 3168
TH_CWR = 0x80 # congestion window reduced
TCP_PORT_MAX = 65535 # maximum port
TCP_WIN_MAX = 65535 # maximum (unscaled) window
class TCP(dpkt.Packet):
    """TCP segment (RFC 793): fixed 20-byte header + raw options + payload.

    ``__hdr__`` describes the fixed header; ``opts`` holds the raw option
    bytes (if any) and ``self.data`` the payload.  Python 2 code: strings
    are byte strings throughout.
    """
    __hdr__ = (
        ('sport', 'H', 0xdead),  # source port
        ('dport', 'H', 0),  # destination port
        ('seq', 'I', 0xdeadbeefL),  # sequence number
        ('ack', 'I', 0),  # acknowledgment number
        ('off_x2', 'B', ((5 << 4) | 0)),  # data offset (high nibble) | reserved
        ('flags', 'B', TH_SYN),  # control flags (TH_* bitmask)
        ('win', 'H', TCP_WIN_MAX),  # window size
        ('sum', 'H', 0),  # checksum
        ('urp', 'H', 0)  # urgent pointer
        )
    opts = ''  # raw option bytes following the fixed header
    # 'off' exposes the data offset (header length in 32-bit words) kept in
    # the high nibble of off_x2; the setter preserves the reserved low nibble.
    def _get_off(self): return self.off_x2 >> 4
    def _set_off(self, off): self.off_x2 = (off << 4) | (self.off_x2 & 0xf)
    off = property(_get_off, _set_off)
    def __len__(self):
        # Total segment length: fixed header + options + payload.
        return self.__hdr_len__ + len(self.opts) + len(self.data)
    def __str__(self):
        # Serialize: packed fixed header, then options, then payload.
        return self.pack_hdr() + self.opts + str(self.data)
    def unpack(self, buf):
        """Split buf into header fields, option bytes and payload.

        Raises dpkt.UnpackError when the encoded data offset is smaller
        than the fixed header size.
        """
        dpkt.Packet.unpack(self, buf)
        # Option byte count: (data offset in words) * 4 minus the fixed header.
        ol = ((self.off_x2 >> 4) << 2) - self.__hdr_len__
        if ol < 0:
            raise dpkt.UnpackError, 'invalid header length'
        self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
        self.data = buf[self.__hdr_len__ + ol:]
# Options (opt_type) - http://www.iana.org/assignments/tcp-parameters
TCP_OPT_EOL = 0 # end of option list
TCP_OPT_NOP = 1 # no operation
TCP_OPT_MSS = 2 # maximum segment size
TCP_OPT_WSCALE = 3 # window scale factor, RFC 1072
TCP_OPT_SACKOK = 4 # SACK permitted, RFC 2018
TCP_OPT_SACK = 5 # SACK, RFC 2018
TCP_OPT_ECHO = 6 # echo (obsolete), RFC 1072
TCP_OPT_ECHOREPLY = 7 # echo reply (obsolete), RFC 1072
TCP_OPT_TIMESTAMP = 8 # timestamp, RFC 1323
TCP_OPT_POCONN = 9 # partial order conn, RFC 1693
TCP_OPT_POSVC = 10 # partial order service, RFC 1693
TCP_OPT_CC = 11 # connection count, RFC 1644
TCP_OPT_CCNEW = 12 # CC.NEW, RFC 1644
TCP_OPT_CCECHO = 13 # CC.ECHO, RFC 1644
TCP_OPT_ALTSUM = 14 # alt checksum request, RFC 1146
TCP_OPT_ALTSUMDATA = 15 # alt checksum data, RFC 1146
TCP_OPT_SKEETER = 16 # Skeeter
TCP_OPT_BUBBA = 17 # Bubba
TCP_OPT_TRAILSUM = 18 # trailer checksum
TCP_OPT_MD5 = 19 # MD5 signature, RFC 2385
TCP_OPT_SCPS = 20 # SCPS capabilities
TCP_OPT_SNACK = 21 # selective negative acks
TCP_OPT_REC = 22 # record boundaries
TCP_OPT_CORRUPT = 23 # corruption experienced
TCP_OPT_SNAP = 24 # SNAP
TCP_OPT_TCPCOMP = 26 # TCP compression filter
TCP_OPT_MAX = 27

def parse_opts(buf):
    """Parse TCP option buffer into a list of (option, data) tuples.

    EOL/NOP options carry no length or data; every other option is
    encoded as <kind><length><data>, where length counts the kind and
    length octets themselves (RFC 793).

    Malformed input is handled instead of misbehaving:
    - a truncated option (kind present but no length octet) appends a
      trailing None entry and stops (the bare ``except ValueError`` in
      the original missed the IndexError this raises);
    - a bogus length of 0 or 1 is clamped to 2 so the buffer always
      advances and the loop cannot spin forever.
    """
    opts = []
    while buf:
        o = ord(buf[0])
        if o > TCP_OPT_NOP:
            try:
                # Always consume at least the 2 bytes (kind + length)
                # already read, even if the length field lies.
                l = max(2, ord(buf[1]))
                d, buf = buf[2:l], buf[l:]
            except (IndexError, ValueError):
                # Truncated option: no length octet available.
                opts.append(None)  # XXX
                break
        else:
            # EOL / NOP: single octet, no data.
            d, buf = '', buf[1:]
        opts.append((o, d))
    return opts
| bsd-3-clause |
theflofly/tensorflow | tensorflow/python/keras/datasets/reuters.py | 12 | 4891 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reuters topic classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numpy as np
from tensorflow.python.keras.preprocessing.sequence import _remove_long_seq
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.datasets.reuters.load_data')
def load_data(path='reuters.npz',
              num_words=None,
              skip_top=0,
              maxlen=None,
              test_split=0.2,
              seed=113,
              start_char=1,
              oov_char=2,
              index_from=3,
              **kwargs):
  """Loads the Reuters newswire classification dataset.

  Arguments:
      path: where to cache the data (relative to `~/.keras/dataset`).
      num_words: max number of words to include. Words are ranked
          by how often they occur (in the training set) and only
          the most frequent words are kept
      skip_top: skip the top N most frequently occurring words
          (which may not be informative).
      maxlen: truncate sequences after this length.
      test_split: Fraction of the dataset to be used as test data.
      seed: random seed for sample shuffling.
      start_char: The start of a sequence will be marked with this character.
          Set to 1 because 0 is usually the padding character.
      oov_char: words that were cut out because of the `num_words`
          or `skip_top` limit will be replaced with this character.
      index_from: index actual words with this index and higher.
      **kwargs: Used for backwards compatibility.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

  Raises:
      TypeError: in case an unrecognized keyword argument is passed.

  Note that the 'out of vocabulary' character is only used for
  words that were present in the training set but are not included
  because they're not making the `num_words` cut here.
  Words that were not seen in the training set but are in the test set
  have simply been skipped.
  """
  # Legacy support: `nb_words` was renamed `num_words`.
  if 'nb_words' in kwargs:
    logging.warning('The `nb_words` argument in `load_data` '
                    'has been renamed `num_words`.')
    num_words = kwargs.pop('nb_words')
  if kwargs:
    raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
  origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
  path = get_file(
      path,
      origin=origin_folder + 'reuters.npz',
      file_hash='87aedbeb0cb229e378797a632c1997b6')
  with np.load(path) as f:
    xs, labels = f['x'], f['y']
  # Shuffle samples and labels with the same permutation so the
  # train/test split is random yet reproducible for a given seed.
  # A local RandomState is used instead of np.random.seed() so the
  # global NumPy RNG is not reseeded as a hidden side effect; for the
  # same seed it yields the exact same permutation as before.
  rng = np.random.RandomState(seed)
  indices = np.arange(len(xs))
  rng.shuffle(indices)
  xs = xs[indices]
  labels = labels[indices]
  # Prepend the start marker and/or shift word indices up by index_from
  # to reserve the low indices for special tokens.
  if start_char is not None:
    xs = [[start_char] + [w + index_from for w in x] for x in xs]
  elif index_from:
    xs = [[w + index_from for w in x] for x in xs]
  if maxlen:
    xs, labels = _remove_long_seq(maxlen, xs, labels)
  if not num_words:
    num_words = max([max(x) for x in xs])
  # by convention, use 2 as OOV word
  # reserve 'index_from' (=3 by default) characters:
  # 0 (padding), 1 (start), 2 (OOV)
  if oov_char is not None:
    xs = [[w if skip_top <= w < num_words else oov_char for w in x] for x in xs]
  else:
    xs = [[w for w in x if skip_top <= w < num_words] for x in xs]
  idx = int(len(xs) * (1 - test_split))
  x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])
  x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:])
  return (x_train, y_train), (x_test, y_test)
@keras_export('keras.datasets.reuters.get_word_index')
def get_word_index(path='reuters_word_index.json'):
  """Retrieves the dictionary mapping word indices back to words.

  Arguments:
      path: where to cache the data (relative to `~/.keras/dataset`).

  Returns:
      The word index dictionary.
  """
  origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
  # Download (or reuse the cached copy of) the index file, then decode it.
  cached = get_file(
      path,
      origin=origin_folder + 'reuters_word_index.json',
      file_hash='4d44cc38712099c9e383dc6e5f11a921')
  with open(cached) as handle:
    return json.load(handle)
| apache-2.0 |
NewpTone/stacklab-cinder | cinder/openstack/common/notifier/rabbit_notifier.py | 5 | 1709 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.openstack.common import cfg
from cinder.openstack.common import context as req_context
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import rpc
LOG = logging.getLogger(__name__)
# Config option naming the AMQP topics notifications are published to;
# registered on the global CONF so deployments can override it.
notification_topic_opt = cfg.ListOpt(
    'notification_topics', default=['notifications', ],
    help='AMQP topic used for openstack notifications')
CONF = cfg.CONF
CONF.register_opt(notification_topic_opt)
def notify(context, message):
    """Sends a notification to the RabbitMQ"""
    # Fall back to an admin context so the RPC layer always has one.
    if not context:
        context = req_context.get_admin_context()
    # Priority comes from the message itself, defaulting to the
    # configured notification level; topics are lowercase by convention.
    priority = message.get('priority',
                           CONF.default_notification_level)
    priority = priority.lower()
    # Fan the message out to every configured topic, qualified by
    # priority (e.g. 'notifications.info').  A failure on one topic is
    # logged and swallowed so it cannot block delivery to the others.
    for topic in CONF.notification_topics:
        topic = '%s.%s' % (topic, priority)
        try:
            rpc.notify(context, topic, message)
        except Exception, e:
            LOG.exception(_("Could not send notification to %(topic)s. "
                            "Payload=%(message)s"), locals())
| apache-2.0 |
hetajen/vnpy161 | vn.demo/ctpdemo/ctp_data_type.py | 129 | 257031 | # encoding: UTF-8
defineDict = {}
typedefDict = {}
#//////////////////////////////////////////////////////////////////////
#@system 新一代交易所系统
#@company 上海期货信息技术有限公司
#@file ThostFtdcUserApiDataType.h
#@brief 定义了客户端接口使用的业务数据类型
#@history
#20060106 赵鸿昊 创建该文件
#//////////////////////////////////////////////////////////////////////
#//////////////////////////////////////////////////////////////////////
#TFtdcTraderIDType是一个交易所交易员代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTraderIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvestorIDType是一个投资者代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInvestorIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerIDType是一个经纪公司代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBrokerIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerAbbrType是一个经纪公司简称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBrokerAbbrType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerNameType是一个经纪公司名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBrokerNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeInstIDType是一个合约在交易所的代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcExchangeInstIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderRefType是一个报单引用类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOrderRefType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcParticipantIDType是一个会员代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcParticipantIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserIDType是一个用户代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUserIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPasswordType是一个密码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPasswordType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcClientIDType是一个交易编码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcClientIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInstrumentIDType是一个合约代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInstrumentIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMarketIDType是一个市场代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcMarketIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcProductNameType是一个产品名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcProductNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeIDType是一个交易所代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcExchangeIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeNameType是一个交易所名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcExchangeNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeAbbrType是一个交易所简称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcExchangeAbbrType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeFlagType是一个交易所标志类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcExchangeFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMacAddressType是一个Mac地址类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcMacAddressType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSystemIDType是一个系统编号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSystemIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangePropertyType是一个交易所属性类型
#//////////////////////////////////////////////////////////////////////
#正常
defineDict["THOST_FTDC_EXP_Normal"] = '0'
#根据成交生成报单
defineDict["THOST_FTDC_EXP_GenOrderByTrade"] = '1'
typedefDict["TThostFtdcExchangePropertyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDateType是一个日期类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDateType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTimeType是一个时间类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTimeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcLongTimeType是一个长时间类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcLongTimeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInstrumentNameType是一个合约名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInstrumentNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSettlementGroupIDType是一个结算组代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSettlementGroupIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderSysIDType是一个报单编号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOrderSysIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeIDType是一个成交编号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTradeIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCommandTypeType是一个DB命令类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCommandTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIPAddressType是一个IP地址类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcIPAddressType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIPPortType是一个IP端口类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcIPPortType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcProductInfoType是一个产品信息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcProductInfoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcProtocolInfoType是一个协议信息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcProtocolInfoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBusinessUnitType是一个业务单元类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBusinessUnitType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDepositSeqNoType是一个出入金流水号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDepositSeqNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIdentifiedCardNoType是一个证件号码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcIdentifiedCardNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIdCardTypeType是一个证件类型类型
#//////////////////////////////////////////////////////////////////////
#组织机构代码
defineDict["THOST_FTDC_ICT_EID"] = '0'
#中国公民身份证
defineDict["THOST_FTDC_ICT_IDCard"] = '1'
#军官证
defineDict["THOST_FTDC_ICT_OfficerIDCard"] = '2'
#警官证
defineDict["THOST_FTDC_ICT_PoliceIDCard"] = '3'
#士兵证
defineDict["THOST_FTDC_ICT_SoldierIDCard"] = '4'
#户口簿
defineDict["THOST_FTDC_ICT_HouseholdRegister"] = '5'
#护照
defineDict["THOST_FTDC_ICT_Passport"] = '6'
#台胞证
defineDict["THOST_FTDC_ICT_TaiwanCompatriotIDCard"] = '7'
#回乡证
defineDict["THOST_FTDC_ICT_HomeComingCard"] = '8'
#营业执照号
defineDict["THOST_FTDC_ICT_LicenseNo"] = '9'
#税务登记号/当地纳税ID
defineDict["THOST_FTDC_ICT_TaxNo"] = 'A'
#港澳居民来往内地通行证
defineDict["THOST_FTDC_ICT_HMMainlandTravelPermit"] = 'B'
#台湾居民来往大陆通行证
defineDict["THOST_FTDC_ICT_TwMainlandTravelPermit"] = 'C'
#驾照
defineDict["THOST_FTDC_ICT_DrivingLicense"] = 'D'
#当地社保ID
defineDict["THOST_FTDC_ICT_SocialID"] = 'F'
#当地身份证
defineDict["THOST_FTDC_ICT_LocalID"] = 'G'
#商业登记证
defineDict["THOST_FTDC_ICT_BusinessRegistration"] = 'H'
#港澳永久性居民身份证
defineDict["THOST_FTDC_ICT_HKMCIDCard"] = 'I'
#其他证件
defineDict["THOST_FTDC_ICT_OtherCard"] = 'x'
typedefDict["TThostFtdcIdCardTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderLocalIDType是一个本地报单编号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOrderLocalIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserNameType是一个用户名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUserNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPartyNameType是一个参与人名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPartyNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcErrorMsgType是一个错误信息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcErrorMsgType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFieldNameType是一个字段名类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFieldNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFieldContentType是一个字段内容类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFieldContentType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSystemNameType是一个系统名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSystemNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcContentType是一个消息正文类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcContentType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvestorRangeType是一个投资者范围类型
#//////////////////////////////////////////////////////////////////////
#所有
defineDict["THOST_FTDC_IR_All"] = '1'
#投资者组
defineDict["THOST_FTDC_IR_Group"] = '2'
#单一投资者
defineDict["THOST_FTDC_IR_Single"] = '3'
typedefDict["TThostFtdcInvestorRangeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDepartmentRangeType是一个投资者范围类型
#//////////////////////////////////////////////////////////////////////
#所有
defineDict["THOST_FTDC_DR_All"] = '1'
#组织架构
defineDict["THOST_FTDC_DR_Group"] = '2'
#单一投资者
defineDict["THOST_FTDC_DR_Single"] = '3'
typedefDict["TThostFtdcDepartmentRangeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDataSyncStatusType是一个数据同步状态类型
#//////////////////////////////////////////////////////////////////////
#未同步
defineDict["THOST_FTDC_DS_Asynchronous"] = '1'
#同步中
defineDict["THOST_FTDC_DS_Synchronizing"] = '2'
#已同步
defineDict["THOST_FTDC_DS_Synchronized"] = '3'
typedefDict["TThostFtdcDataSyncStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerDataSyncStatusType是一个经纪公司数据同步状态类型
#//////////////////////////////////////////////////////////////////////
#已同步
defineDict["THOST_FTDC_BDS_Synchronized"] = '1'
#同步中
defineDict["THOST_FTDC_BDS_Synchronizing"] = '2'
typedefDict["TThostFtdcBrokerDataSyncStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeConnectStatusType是一个交易所连接状态类型
#//////////////////////////////////////////////////////////////////////
#没有任何连接
defineDict["THOST_FTDC_ECS_NoConnection"] = '1'
#已经发出合约查询请求
defineDict["THOST_FTDC_ECS_QryInstrumentSent"] = '2'
#已经获取信息
defineDict["THOST_FTDC_ECS_GotInformation"] = '9'
typedefDict["TThostFtdcExchangeConnectStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTraderConnectStatusType是一个交易所交易员连接状态类型
#//////////////////////////////////////////////////////////////////////
#没有任何连接
defineDict["THOST_FTDC_TCS_NotConnected"] = '1'
#已经连接
defineDict["THOST_FTDC_TCS_Connected"] = '2'
#已经发出合约查询请求
defineDict["THOST_FTDC_TCS_QryInstrumentSent"] = '3'
#订阅私有流
defineDict["THOST_FTDC_TCS_SubPrivateFlow"] = '4'
typedefDict["TThostFtdcTraderConnectStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFunctionCodeType是一个功能代码类型
#//////////////////////////////////////////////////////////////////////
#数据异步化
defineDict["THOST_FTDC_FC_DataAsync"] = '1'
#强制用户登出
defineDict["THOST_FTDC_FC_ForceUserLogout"] = '2'
#变更管理用户口令
defineDict["THOST_FTDC_FC_UserPasswordUpdate"] = '3'
#变更经纪公司口令
defineDict["THOST_FTDC_FC_BrokerPasswordUpdate"] = '4'
#变更投资者口令
defineDict["THOST_FTDC_FC_InvestorPasswordUpdate"] = '5'
#报单插入
defineDict["THOST_FTDC_FC_OrderInsert"] = '6'
#报单操作
defineDict["THOST_FTDC_FC_OrderAction"] = '7'
#同步系统数据
defineDict["THOST_FTDC_FC_SyncSystemData"] = '8'
#同步经纪公司数据
defineDict["THOST_FTDC_FC_SyncBrokerData"] = '9'
#批量同步经纪公司数据
defineDict["THOST_FTDC_FC_BachSyncBrokerData"] = 'A'
#超级查询
defineDict["THOST_FTDC_FC_SuperQuery"] = 'B'
#预埋报单插入
defineDict["THOST_FTDC_FC_ParkedOrderInsert"] = 'C'
#预埋报单操作
defineDict["THOST_FTDC_FC_ParkedOrderAction"] = 'D'
#同步动态令牌
defineDict["THOST_FTDC_FC_SyncOTP"] = 'E'
#删除未知单
defineDict["THOST_FTDC_FC_DeleteOrder"] = 'F'
typedefDict["TThostFtdcFunctionCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerFunctionCodeType是一个经纪公司功能代码类型
#//////////////////////////////////////////////////////////////////////
#强制用户登出
defineDict["THOST_FTDC_BFC_ForceUserLogout"] = '1'
#变更用户口令
defineDict["THOST_FTDC_BFC_UserPasswordUpdate"] = '2'
#同步经纪公司数据
defineDict["THOST_FTDC_BFC_SyncBrokerData"] = '3'
#批量同步经纪公司数据
defineDict["THOST_FTDC_BFC_BachSyncBrokerData"] = '4'
#报单插入
defineDict["THOST_FTDC_BFC_OrderInsert"] = '5'
#报单操作
defineDict["THOST_FTDC_BFC_OrderAction"] = '6'
#全部查询
defineDict["THOST_FTDC_BFC_AllQuery"] = '7'
#系统功能:登入/登出/修改密码等
defineDict["THOST_FTDC_BFC_log"] = 'a'
#基本查询:查询基础数据,如合约,交易所等常量
defineDict["THOST_FTDC_BFC_BaseQry"] = 'b'
#交易查询:如查成交,委托
defineDict["THOST_FTDC_BFC_TradeQry"] = 'c'
#交易功能:报单,撤单
defineDict["THOST_FTDC_BFC_Trade"] = 'd'
#银期转账
defineDict["THOST_FTDC_BFC_Virement"] = 'e'
#风险监控
defineDict["THOST_FTDC_BFC_Risk"] = 'f'
#查询/管理:查询会话,踢人等
defineDict["THOST_FTDC_BFC_Session"] = 'g'
#风控通知控制
defineDict["THOST_FTDC_BFC_RiskNoticeCtl"] = 'h'
#风控通知发送
defineDict["THOST_FTDC_BFC_RiskNotice"] = 'i'
#察看经纪公司资金权限
defineDict["THOST_FTDC_BFC_BrokerDeposit"] = 'j'
#资金查询
defineDict["THOST_FTDC_BFC_QueryFund"] = 'k'
#报单查询
defineDict["THOST_FTDC_BFC_QueryOrder"] = 'l'
#成交查询
defineDict["THOST_FTDC_BFC_QueryTrade"] = 'm'
#持仓查询
defineDict["THOST_FTDC_BFC_QueryPosition"] = 'n'
#行情查询
defineDict["THOST_FTDC_BFC_QueryMarketData"] = 'o'
#用户事件查询
defineDict["THOST_FTDC_BFC_QueryUserEvent"] = 'p'
#风险通知查询
defineDict["THOST_FTDC_BFC_QueryRiskNotify"] = 'q'
#出入金查询
defineDict["THOST_FTDC_BFC_QueryFundChange"] = 'r'
#投资者信息查询
defineDict["THOST_FTDC_BFC_QueryInvestor"] = 's'
#交易编码查询
defineDict["THOST_FTDC_BFC_QueryTradingCode"] = 't'
#强平
defineDict["THOST_FTDC_BFC_ForceClose"] = 'u'
#压力测试
defineDict["THOST_FTDC_BFC_PressTest"] = 'v'
#权益反算
defineDict["THOST_FTDC_BFC_RemainCalc"] = 'w'
#净持仓保证金指标
defineDict["THOST_FTDC_BFC_NetPositionInd"] = 'x'
#风险预算
defineDict["THOST_FTDC_BFC_RiskPredict"] = 'y'
#数据导出
defineDict["THOST_FTDC_BFC_DataExport"] = 'z'
#风控指标设置
defineDict["THOST_FTDC_BFC_RiskTargetSetup"] = 'A'
#行情预警
defineDict["THOST_FTDC_BFC_MarketDataWarn"] = 'B'
#业务通知查询
defineDict["THOST_FTDC_BFC_QryBizNotice"] = 'C'
#业务通知模板设置
defineDict["THOST_FTDC_BFC_CfgBizNotice"] = 'D'
#同步动态令牌
defineDict["THOST_FTDC_BFC_SyncOTP"] = 'E'
#发送业务通知
defineDict["THOST_FTDC_BFC_SendBizNotice"] = 'F'
#风险级别标准设置
defineDict["THOST_FTDC_BFC_CfgRiskLevelStd"] = 'G'
#交易终端应急功能
defineDict["THOST_FTDC_BFC_TbCommand"] = 'H'
#删除未知单
defineDict["THOST_FTDC_BFC_DeleteOrder"] = 'J'
#预埋报单插入
defineDict["THOST_FTDC_BFC_ParkedOrderInsert"] = 'K'
#预埋报单操作
defineDict["THOST_FTDC_BFC_ParkedOrderAction"] = 'L'
typedefDict["TThostFtdcBrokerFunctionCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderActionStatusType是一个报单操作状态类型
#//////////////////////////////////////////////////////////////////////
#已经提交
defineDict["THOST_FTDC_OAS_Submitted"] = 'a'
#已经接受
defineDict["THOST_FTDC_OAS_Accepted"] = 'b'
#已经被拒绝
defineDict["THOST_FTDC_OAS_Rejected"] = 'c'
typedefDict["TThostFtdcOrderActionStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderStatusType是一个报单状态类型
#//////////////////////////////////////////////////////////////////////
#全部成交
defineDict["THOST_FTDC_OST_AllTraded"] = '0'
#部分成交还在队列中
defineDict["THOST_FTDC_OST_PartTradedQueueing"] = '1'
#部分成交不在队列中
defineDict["THOST_FTDC_OST_PartTradedNotQueueing"] = '2'
#未成交还在队列中
defineDict["THOST_FTDC_OST_NoTradeQueueing"] = '3'
#未成交不在队列中
defineDict["THOST_FTDC_OST_NoTradeNotQueueing"] = '4'
#撤单
defineDict["THOST_FTDC_OST_Canceled"] = '5'
#未知
defineDict["THOST_FTDC_OST_Unknown"] = 'a'
#尚未触发
defineDict["THOST_FTDC_OST_NotTouched"] = 'b'
#已触发
defineDict["THOST_FTDC_OST_Touched"] = 'c'
typedefDict["TThostFtdcOrderStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrderSubmitStatusType是一个报单提交状态类型
#//////////////////////////////////////////////////////////////////////
#已经提交
defineDict["THOST_FTDC_OSS_InsertSubmitted"] = '0'
#撤单已经提交
defineDict["THOST_FTDC_OSS_CancelSubmitted"] = '1'
#修改已经提交
defineDict["THOST_FTDC_OSS_ModifySubmitted"] = '2'
#已经接受
defineDict["THOST_FTDC_OSS_Accepted"] = '3'
#报单已经被拒绝
defineDict["THOST_FTDC_OSS_InsertRejected"] = '4'
#撤单已经被拒绝
defineDict["THOST_FTDC_OSS_CancelRejected"] = '5'
#改单已经被拒绝
defineDict["THOST_FTDC_OSS_ModifyRejected"] = '6'
typedefDict["TThostFtdcOrderSubmitStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPositionDateType是一个持仓日期类型
#//////////////////////////////////////////////////////////////////////
#今日持仓
defineDict["THOST_FTDC_PSD_Today"] = '1'
#历史持仓
defineDict["THOST_FTDC_PSD_History"] = '2'
typedefDict["TThostFtdcPositionDateType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPositionDateTypeType是一个持仓日期类型类型
#//////////////////////////////////////////////////////////////////////
#使用历史持仓
defineDict["THOST_FTDC_PDT_UseHistory"] = '1'
#不使用历史持仓
defineDict["THOST_FTDC_PDT_NoUseHistory"] = '2'
typedefDict["TThostFtdcPositionDateTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradingRoleType是一个交易角色类型
#//////////////////////////////////////////////////////////////////////
# Trading role: broker / proprietary / market maker
defineDict.update({
    "THOST_FTDC_ER_Broker": '1',  # broker (agency)
    "THOST_FTDC_ER_Host": '2',    # proprietary trading
    "THOST_FTDC_ER_Maker": '3',   # market maker
})
typedefDict["TThostFtdcTradingRoleType"] = "string"

# TThostFtdcProductClassType: product class
defineDict.update({
    "THOST_FTDC_PC_Futures": '1',      # futures
    "THOST_FTDC_PC_Options": '2',      # options on futures
    "THOST_FTDC_PC_Combination": '3',  # combination
    "THOST_FTDC_PC_Spot": '4',         # spot
    "THOST_FTDC_PC_EFP": '5',          # exchange for physicals
    "THOST_FTDC_PC_SpotOption": '6',   # spot option
})
typedefDict["TThostFtdcProductClassType"] = "string"

# TThostFtdcInstLifePhaseType: instrument life-cycle phase
defineDict.update({
    "THOST_FTDC_IP_NotStart": '0',  # not yet listed
    "THOST_FTDC_IP_Started": '1',   # listed
    "THOST_FTDC_IP_Pause": '2',     # trading suspended
    "THOST_FTDC_IP_Expired": '3',   # expired
})
typedefDict["TThostFtdcInstLifePhaseType"] = "string"

# TThostFtdcDirectionType: buy/sell direction
defineDict.update({
    "THOST_FTDC_D_Buy": '0',   # buy
    "THOST_FTDC_D_Sell": '1',  # sell
})
typedefDict["TThostFtdcDirectionType"] = "string"

# TThostFtdcPositionTypeType: position type
defineDict.update({
    "THOST_FTDC_PT_Net": '1',    # net position
    "THOST_FTDC_PT_Gross": '2',  # gross position
})
typedefDict["TThostFtdcPositionTypeType"] = "string"

# TThostFtdcPosiDirectionType: position direction
defineDict.update({
    "THOST_FTDC_PD_Net": '1',    # net
    "THOST_FTDC_PD_Long": '2',   # long
    "THOST_FTDC_PD_Short": '3',  # short
})
typedefDict["TThostFtdcPosiDirectionType"] = "string"

# TThostFtdcSysSettlementStatusType: system settlement status
defineDict.update({
    "THOST_FTDC_SS_NonActive": '1',           # inactive
    "THOST_FTDC_SS_Startup": '2',             # starting up
    "THOST_FTDC_SS_Operating": '3',           # operating
    "THOST_FTDC_SS_Settlement": '4',          # settling
    "THOST_FTDC_SS_SettlementFinished": '5',  # settlement finished
})
typedefDict["TThostFtdcSysSettlementStatusType"] = "string"

# TThostFtdcRatioAttrType: fee-ratio attribute
defineDict.update({
    "THOST_FTDC_RA_Trade": '0',       # trading fee ratio
    "THOST_FTDC_RA_Settlement": '1',  # settlement fee ratio
})
typedefDict["TThostFtdcRatioAttrType"] = "string"

# TThostFtdcHedgeFlagType: speculation/hedge flag
defineDict.update({
    "THOST_FTDC_HF_Speculation": '1',  # speculation
    "THOST_FTDC_HF_Arbitrage": '2',    # arbitrage
    "THOST_FTDC_HF_Hedge": '3',        # hedge
})
typedefDict["TThostFtdcHedgeFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TThostFtdcBillHedgeFlagType: speculation/hedge flag (billing)
defineDict.update({
    "THOST_FTDC_BHF_Speculation": '1',  # speculation
    "THOST_FTDC_BHF_Arbitrage": '2',    # arbitrage
    "THOST_FTDC_BHF_Hedge": '3',        # hedge
})
typedefDict["TThostFtdcBillHedgeFlagType"] = "string"

# TThostFtdcClientIDTypeType: trading-code type
defineDict.update({
    "THOST_FTDC_CIDT_Speculation": '1',  # speculation
    "THOST_FTDC_CIDT_Arbitrage": '2',    # arbitrage
    "THOST_FTDC_CIDT_Hedge": '3',        # hedge
})
typedefDict["TThostFtdcClientIDTypeType"] = "string"

# TThostFtdcOrderPriceTypeType: order price condition
defineDict.update({
    "THOST_FTDC_OPT_AnyPrice": '1',                 # any price (market order)
    "THOST_FTDC_OPT_LimitPrice": '2',               # limit price
    "THOST_FTDC_OPT_BestPrice": '3',                # best price
    "THOST_FTDC_OPT_LastPrice": '4',                # last price
    "THOST_FTDC_OPT_LastPricePlusOneTicks": '5',    # last price + 1 tick
    "THOST_FTDC_OPT_LastPricePlusTwoTicks": '6',    # last price + 2 ticks
    "THOST_FTDC_OPT_LastPricePlusThreeTicks": '7',  # last price + 3 ticks
    "THOST_FTDC_OPT_AskPrice1": '8',                # ask price 1
    "THOST_FTDC_OPT_AskPrice1PlusOneTicks": '9',    # ask 1 + 1 tick
    "THOST_FTDC_OPT_AskPrice1PlusTwoTicks": 'A',    # ask 1 + 2 ticks
    "THOST_FTDC_OPT_AskPrice1PlusThreeTicks": 'B',  # ask 1 + 3 ticks
    "THOST_FTDC_OPT_BidPrice1": 'C',                # bid price 1
    "THOST_FTDC_OPT_BidPrice1PlusOneTicks": 'D',    # bid 1 + 1 tick
    "THOST_FTDC_OPT_BidPrice1PlusTwoTicks": 'E',    # bid 1 + 2 ticks
    "THOST_FTDC_OPT_BidPrice1PlusThreeTicks": 'F',  # bid 1 + 3 ticks
})
typedefDict["TThostFtdcOrderPriceTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TThostFtdcOffsetFlagType: open/close flag
defineDict.update({
    "THOST_FTDC_OF_Open": '0',             # open position
    "THOST_FTDC_OF_Close": '1',            # close position
    "THOST_FTDC_OF_ForceClose": '2',       # forced liquidation
    "THOST_FTDC_OF_CloseToday": '3',       # close today's position
    "THOST_FTDC_OF_CloseYesterday": '4',   # close yesterday's position
    "THOST_FTDC_OF_ForceOff": '5',         # forced reduction
    "THOST_FTDC_OF_LocalForceClose": '6',  # local forced liquidation
})
typedefDict["TThostFtdcOffsetFlagType"] = "string"

# TThostFtdcForceCloseReasonType: forced-liquidation reason
defineDict.update({
    "THOST_FTDC_FCC_NotForceClose": '0',            # not a forced close
    "THOST_FTDC_FCC_LackDeposit": '1',              # insufficient funds
    "THOST_FTDC_FCC_ClientOverPositionLimit": '2',  # client over position limit
    "THOST_FTDC_FCC_MemberOverPositionLimit": '3',  # member over position limit
    "THOST_FTDC_FCC_NotMultiple": '4',              # position not an integer multiple
    "THOST_FTDC_FCC_Violation": '5',                # rule violation
    "THOST_FTDC_FCC_Other": '6',                    # other
    "THOST_FTDC_FCC_PersonDeliv": '7',              # individual investor near delivery
})
typedefDict["TThostFtdcForceCloseReasonType"] = "string"

# TThostFtdcOrderTypeType: order type
defineDict.update({
    "THOST_FTDC_ORDT_Normal": '0',                 # normal order
    "THOST_FTDC_ORDT_DeriveFromQuote": '1',        # derived from a quote
    "THOST_FTDC_ORDT_DeriveFromCombination": '2',  # derived from a combination
    "THOST_FTDC_ORDT_Combination": '3',            # combination order
    "THOST_FTDC_ORDT_ConditionalOrder": '4',       # conditional order
    "THOST_FTDC_ORDT_Swap": '5',                   # swap order
})
typedefDict["TThostFtdcOrderTypeType"] = "string"

# TThostFtdcTimeConditionType: time-in-force
defineDict.update({
    "THOST_FTDC_TC_IOC": '1',  # immediate or cancel
    "THOST_FTDC_TC_GFS": '2',  # good for this trading section
    "THOST_FTDC_TC_GFD": '3',  # good for day
    "THOST_FTDC_TC_GTD": '4',  # good till a specified date
    "THOST_FTDC_TC_GTC": '5',  # good till cancelled
    "THOST_FTDC_TC_GFA": '6',  # good for auction
})
typedefDict["TThostFtdcTimeConditionType"] = "string"

# TThostFtdcVolumeConditionType: volume condition
defineDict.update({
    "THOST_FTDC_VC_AV": '1',  # any volume
    "THOST_FTDC_VC_MV": '2',  # minimum volume
    "THOST_FTDC_VC_CV": '3',  # complete (all) volume
})
typedefDict["TThostFtdcVolumeConditionType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TThostFtdcContingentConditionType: trigger condition
defineDict.update({
    "THOST_FTDC_CC_Immediately": '1',                     # trigger immediately
    "THOST_FTDC_CC_Touch": '2',                           # stop loss
    "THOST_FTDC_CC_TouchProfit": '3',                     # take profit
    "THOST_FTDC_CC_ParkedOrder": '4',                     # parked order
    "THOST_FTDC_CC_LastPriceGreaterThanStopPrice": '5',   # last price >  stop price
    "THOST_FTDC_CC_LastPriceGreaterEqualStopPrice": '6',  # last price >= stop price
    "THOST_FTDC_CC_LastPriceLesserThanStopPrice": '7',    # last price <  stop price
    "THOST_FTDC_CC_LastPriceLesserEqualStopPrice": '8',   # last price <= stop price
    "THOST_FTDC_CC_AskPriceGreaterThanStopPrice": '9',    # ask 1 >  stop price
    "THOST_FTDC_CC_AskPriceGreaterEqualStopPrice": 'A',   # ask 1 >= stop price
    "THOST_FTDC_CC_AskPriceLesserThanStopPrice": 'B',     # ask 1 <  stop price
    "THOST_FTDC_CC_AskPriceLesserEqualStopPrice": 'C',    # ask 1 <= stop price
    "THOST_FTDC_CC_BidPriceGreaterThanStopPrice": 'D',    # bid 1 >  stop price
    "THOST_FTDC_CC_BidPriceGreaterEqualStopPrice": 'E',   # bid 1 >= stop price
    "THOST_FTDC_CC_BidPriceLesserThanStopPrice": 'F',     # bid 1 <  stop price
    "THOST_FTDC_CC_BidPriceLesserEqualStopPrice": 'H',    # bid 1 <= stop price ('G' is skipped in the upstream header)
})
typedefDict["TThostFtdcContingentConditionType"] = "string"

# TThostFtdcActionFlagType: action flag
defineDict.update({
    "THOST_FTDC_AF_Delete": '0',  # delete
    "THOST_FTDC_AF_Modify": '3',  # modify
})
typedefDict["TThostFtdcActionFlagType"] = "string"

# TThostFtdcTradingRightType: trading right
defineDict.update({
    "THOST_FTDC_TR_Allow": '0',      # trading allowed
    "THOST_FTDC_TR_CloseOnly": '1',  # close-only
    "THOST_FTDC_TR_Forbidden": '2',  # trading forbidden
})
typedefDict["TThostFtdcTradingRightType"] = "string"

# TThostFtdcOrderSourceType: order source
defineDict.update({
    "THOST_FTDC_OSRC_Participant": '0',    # from a participant
    "THOST_FTDC_OSRC_Administrator": '1',  # from an administrator
})
typedefDict["TThostFtdcOrderSourceType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TThostFtdcTradeTypeType: trade type
defineDict.update({
    # combination position split into single legs; should not appear at initialization
    "THOST_FTDC_TRDT_SplitCombination": '#',
    "THOST_FTDC_TRDT_Common": '0',              # common trade
    "THOST_FTDC_TRDT_OptionsExecution": '1',    # option exercise
    "THOST_FTDC_TRDT_OTC": '2',                 # OTC trade
    "THOST_FTDC_TRDT_EFPDerived": '3',          # derived from exchange-for-physicals
    "THOST_FTDC_TRDT_CombinationDerived": '4',  # derived from a combination
})
typedefDict["TThostFtdcTradeTypeType"] = "string"

# TThostFtdcPriceSourceType: trade price source
defineDict.update({
    "THOST_FTDC_PSRC_LastPrice": '0',  # previous (last) price
    "THOST_FTDC_PSRC_Buy": '1',        # buy order price
    "THOST_FTDC_PSRC_Sell": '2',       # sell order price
})
typedefDict["TThostFtdcPriceSourceType"] = "string"

# TThostFtdcInstrumentStatusType: instrument trading status
defineDict.update({
    "THOST_FTDC_IS_BeforeTrading": '0',    # before trading
    "THOST_FTDC_IS_NoTrading": '1',        # not trading
    "THOST_FTDC_IS_Continous": '2',        # continuous trading
    "THOST_FTDC_IS_AuctionOrdering": '3',  # auction: order entry
    "THOST_FTDC_IS_AuctionBalance": '4',   # auction: price balancing
    "THOST_FTDC_IS_AuctionMatch": '5',     # auction: matching
    "THOST_FTDC_IS_Closed": '6',           # closed
})
typedefDict["TThostFtdcInstrumentStatusType"] = "string"

# TThostFtdcInstStatusEnterReasonType: reason for entering a trading status
defineDict.update({
    "THOST_FTDC_IER_Automatic": '1',  # automatic switch
    "THOST_FTDC_IER_Manual": '2',     # manual switch
    "THOST_FTDC_IER_Fuse": '3',       # circuit breaker
})
typedefDict["TThostFtdcInstStatusEnterReasonType"] = "string"
#//////////////////////////////////////////////////////////////////////
# Scalar typedefs with no enumerated values (insertion order matches
# the upstream ThostFtdcUserApiDataType.h header).
typedefDict.update({
    "TThostFtdcOrderActionRefType": "int",     # order action reference
    "TThostFtdcInstallCountType": "int",       # installation count
    "TThostFtdcInstallIDType": "int",          # installation id
    "TThostFtdcErrorIDType": "int",            # error code
    "TThostFtdcSettlementIDType": "int",       # settlement id
    "TThostFtdcVolumeType": "int",             # volume
    "TThostFtdcFrontIDType": "int",            # front id
    "TThostFtdcSessionIDType": "int",          # session id
    "TThostFtdcSequenceNoType": "int",         # sequence number
    "TThostFtdcCommandNoType": "int",          # DB command number
    "TThostFtdcMillisecType": "int",           # time in milliseconds
    "TThostFtdcVolumeMultipleType": "int",     # contract volume multiple
    "TThostFtdcTradingSegmentSNType": "int",   # trading segment number
    "TThostFtdcRequestIDType": "int",          # request id
    "TThostFtdcYearType": "int",               # year
    "TThostFtdcMonthType": "int",              # month
    "TThostFtdcBoolType": "int",               # boolean
    "TThostFtdcPriceType": "float",            # price
    "TThostFtdcCombOffsetFlagType": "string",  # combined open/close flag
    "TThostFtdcCombHedgeFlagType": "string",   # combined hedge flag
    "TThostFtdcRatioType": "float",            # ratio
    "TThostFtdcMoneyType": "float",            # money amount
    "TThostFtdcLargeVolumeType": "float",      # large volume
    "TThostFtdcSequenceSeriesType": "int",     # sequence series number
    "TThostFtdcCommPhaseNoType": "int",        # communication phase number
})
#//////////////////////////////////////////////////////////////////////
# Contact / descriptive typedefs.
typedefDict.update({
    "TThostFtdcSequenceLabelType": "string",      # sequence label
    "TThostFtdcUnderlyingMultipleType": "float",  # underlying multiple
    "TThostFtdcPriorityType": "int",              # priority
    "TThostFtdcContractCodeType": "string",       # contract code
    "TThostFtdcCityType": "string",               # city
    "TThostFtdcIsStockType": "string",            # whether a stock investor
    "TThostFtdcChannelType": "string",            # channel
    "TThostFtdcAddressType": "string",            # mailing address
    "TThostFtdcZipCodeType": "string",            # postal code
    "TThostFtdcTelephoneType": "string",          # telephone
    "TThostFtdcFaxType": "string",                # fax
    "TThostFtdcMobileType": "string",             # mobile phone
    "TThostFtdcEMailType": "string",              # e-mail
    "TThostFtdcMemoType": "string",               # memo
    "TThostFtdcCompanyCodeType": "string",        # company code
    "TThostFtdcWebsiteType": "string",            # website
    "TThostFtdcTaxNoType": "string",              # tax registration number
})

# TThostFtdcBatchStatusType: batch processing status
defineDict.update({
    "THOST_FTDC_BS_NoUpload": '1',  # not uploaded
    "THOST_FTDC_BS_Uploaded": '2',  # uploaded
    "THOST_FTDC_BS_Failed": '3',    # audit failed
})
typedefDict["TThostFtdcBatchStatusType"] = "string"

# Property / agent typedefs.
typedefDict.update({
    "TThostFtdcPropertyIDType": "string",      # property code
    "TThostFtdcPropertyNameType": "string",    # property name
    "TThostFtdcLicenseNoType": "string",       # business licence number
    "TThostFtdcAgentIDType": "string",         # agent code
    "TThostFtdcAgentNameType": "string",       # agent name
    "TThostFtdcAgentGroupIDType": "string",    # agent group code
    "TThostFtdcAgentGroupNameType": "string",  # agent group name
})
#//////////////////////////////////////////////////////////////////////
# TThostFtdcReturnStyleType: fee-return style by product
defineDict.update({
    "THOST_FTDC_RS_All": '1',        # over all products
    "THOST_FTDC_RS_ByProduct": '2',  # per product
})
typedefDict["TThostFtdcReturnStyleType"] = "string"

# TThostFtdcReturnPatternType: fee-return pattern
defineDict.update({
    "THOST_FTDC_RP_ByVolume": '1',     # by traded volume
    "THOST_FTDC_RP_ByFeeOnHand": '2',  # by retained fee
})
typedefDict["TThostFtdcReturnPatternType"] = "string"

# TThostFtdcReturnLevelType: fee-return level (1-9)
defineDict.update({
    "THOST_FTDC_RL_Level1": '1',
    "THOST_FTDC_RL_Level2": '2',
    "THOST_FTDC_RL_Level3": '3',
    "THOST_FTDC_RL_Level4": '4',
    "THOST_FTDC_RL_Level5": '5',
    "THOST_FTDC_RL_Level6": '6',
    "THOST_FTDC_RL_Level7": '7',
    "THOST_FTDC_RL_Level8": '8',
    "THOST_FTDC_RL_Level9": '9',
})
typedefDict["TThostFtdcReturnLevelType"] = "string"

# TThostFtdcReturnStandardType: fee-return standard
defineDict.update({
    "THOST_FTDC_RSD_ByPeriod": '1',    # returned in stages
    "THOST_FTDC_RSD_ByStandard": '2',  # by a fixed standard
})
typedefDict["TThostFtdcReturnStandardType"] = "string"

# TThostFtdcMortgageTypeType: pledge direction
defineDict.update({
    "THOST_FTDC_MT_Out": '0',  # pledge out
    "THOST_FTDC_MT_In": '1',   # pledge in
})
typedefDict["TThostFtdcMortgageTypeType"] = "string"

# TThostFtdcInvestorSettlementParamIDType: investor settlement parameter id
defineDict.update({
    "THOST_FTDC_ISPI_MortgageRatio": '4',  # pledge ratio
    "THOST_FTDC_ISPI_MarginWay": '5',      # margin algorithm
    "THOST_FTDC_ISPI_BillDeposit": '9',    # whether bill balance includes pledge
})
typedefDict["TThostFtdcInvestorSettlementParamIDType"] = "string"

# TThostFtdcExchangeSettlementParamIDType: exchange settlement parameter id
defineDict.update({
    "THOST_FTDC_ESPI_MortgageRatio": '1',       # pledge ratio
    "THOST_FTDC_ESPI_OtherFundItem": '2',       # itemized-fund import item
    "THOST_FTDC_ESPI_OtherFundImport": '3',     # itemized funds into exchange deposits/withdrawals
    "THOST_FTDC_ESPI_CFFEXMinPrepa": '6',       # CFFEX minimum available amount for account opening
    "THOST_FTDC_ESPI_CZCESettlementType": '7',  # CZCE settlement method
    "THOST_FTDC_ESPI_ExchDelivFeeMode": '9',    # exchange delivery fee collection mode
    "THOST_FTDC_ESPI_DelivFeeMode": '0',        # investor delivery fee collection mode
    "THOST_FTDC_ESPI_CZCEComMarginType": 'A',   # CZCE combination-position margin mode
    "THOST_FTDC_ESPI_DceComMarginType": 'B',    # DCE arbitrage margin discount flag
    "THOST_FTDC_ESPI_OptOutDisCountRate": 'a',  # out-of-the-money option margin discount rate
    "THOST_FTDC_ESPI_OptMiniGuarantee": 'b',    # minimum guarantee coefficient
})
typedefDict["TThostFtdcExchangeSettlementParamIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TThostFtdcSystemParamIDType: system parameter id
defineDict.update({
    "THOST_FTDC_SPI_InvestorIDMinLength": '1',      # minimum investor-id length
    "THOST_FTDC_SPI_AccountIDMinLength": '2',       # minimum account-id length
    "THOST_FTDC_SPI_UserRightLogon": '3',           # default logon right on account opening
    "THOST_FTDC_SPI_SettlementBillTrade": '4',      # settlement bill trade-summary mode
    "THOST_FTDC_SPI_TradingCode": '5',              # unified-account trading-code update mode
    "THOST_FTDC_SPI_CheckFund": '6',                # settlement checks unreviewed fund records
    "THOST_FTDC_SPI_CommModelRight": '7',           # enable commission-model data rights
    "THOST_FTDC_SPI_MarginModelRight": '9',         # enable margin-model data rights
    "THOST_FTDC_SPI_IsStandardActive": '8',         # only standard users may be activated
    "THOST_FTDC_SPI_UploadSettlementFile": 'U',     # uploaded exchange settlement file path
    "THOST_FTDC_SPI_DownloadCSRCFile": 'D',         # margin-monitoring-center report file path
    "THOST_FTDC_SPI_SettlementBillFile": 'S',       # generated settlement bill file path
    "THOST_FTDC_SPI_CSRCOthersFile": 'C',           # CSRC file flag
    "THOST_FTDC_SPI_InvestorPhoto": 'P',            # investor photo path
    "THOST_FTDC_SPI_CSRCData": 'R',                 # broker upload file path for full settlement
    "THOST_FTDC_SPI_InvestorPwdModel": 'I',         # account-opening password entry mode
    "THOST_FTDC_SPI_CFFEXInvestorSettleFile": 'F',  # CFFEX investor settlement file download path
    "THOST_FTDC_SPI_InvestorIDType": 'a',           # investor-id encoding mode
    "THOST_FTDC_SPI_FreezeMaxReMain": 'r',          # maximum equity of a dormant account
    "THOST_FTDC_SPI_IsSync": 'A',                   # realtime sync switch for fee operations
    "THOST_FTDC_SPI_RelieveOpenLimit": 'O',         # lift open-position limit
    "THOST_FTDC_SPI_IsStandardFreeze": 'X',         # only standard users may go dormant
    "THOST_FTDC_SPI_CZCENormalProductHedge": 'B',   # CZCE hedge trading open for all products
})
typedefDict["TThostFtdcSystemParamIDType"] = "string"

# TThostFtdcTradeParamIDType: trading-system parameter id
defineDict.update({
    "THOST_FTDC_TPID_EncryptionStandard": 'E',       # system encryption algorithm
    "THOST_FTDC_TPID_RiskMode": 'R',                 # system risk algorithm
    "THOST_FTDC_TPID_RiskModeGlobal": 'G',           # risk algorithm global flag: 0-no 1-yes
    "THOST_FTDC_TPID_modeEncode": 'P',               # password encryption algorithm
    "THOST_FTDC_TPID_tickMode": 'T',                 # price decimal-places parameter
    "THOST_FTDC_TPID_SingleUserSessionMaxNum": 'S',  # max sessions per user
    "THOST_FTDC_TPID_LoginFailMaxNum": 'L',          # max consecutive login failures
    "THOST_FTDC_TPID_IsAuthForce": 'A',              # force authentication
})
typedefDict["TThostFtdcTradeParamIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# Settlement / warehouse typedefs.
typedefDict.update({
    "TThostFtdcSettlementParamValueType": "string",  # parameter value
    "TThostFtdcCounterIDType": "string",             # counter id
    "TThostFtdcInvestorGroupNameType": "string",     # investor group name
    "TThostFtdcBrandCodeType": "string",             # brand code
    "TThostFtdcWarehouseType": "string",             # warehouse
    "TThostFtdcProductDateType": "string",           # production date
    "TThostFtdcGradeType": "string",                 # grade
    "TThostFtdcClassifyType": "string",              # classification
    "TThostFtdcPositionType": "string",              # storage location
    "TThostFtdcYieldlyType": "string",               # place of origin
    "TThostFtdcWeightType": "string",                # certified weight
    "TThostFtdcSubEntryFundNoType": "int",           # itemized-fund serial number
})

# TThostFtdcFileIDType: file identifier
defineDict.update({
    "THOST_FTDC_FI_SettlementFund": 'F',             # fund data
    "THOST_FTDC_FI_Trade": 'T',                      # trade data
    "THOST_FTDC_FI_InvestorPosition": 'P',           # investor position data
    "THOST_FTDC_FI_SubEntryFund": 'O',               # investor itemized-fund data
    "THOST_FTDC_FI_CZCECombinationPos": 'C',         # combination position data
    "THOST_FTDC_FI_CSRCData": 'R',                   # data reported to the margin monitoring center
    "THOST_FTDC_FI_CZCEClose": 'L',                  # CZCE close-out data
    "THOST_FTDC_FI_CZCENoClose": 'N',                # CZCE non-close-out data
    "THOST_FTDC_FI_PositionDtl": 'D',                # position detail data
    "THOST_FTDC_FI_OptionStrike": 'S',               # option exercise file
    "THOST_FTDC_FI_SettlementPriceComparison": 'M',  # settlement price comparison file
    "THOST_FTDC_FI_NonTradePosChange": 'B',          # SHFE non-trade position change detail
})
typedefDict["TThostFtdcFileIDType"] = "string"

# TThostFtdcFileNameType: file name
typedefDict["TThostFtdcFileNameType"] = "string"

# TThostFtdcFileTypeType: file upload type
defineDict.update({
    "THOST_FTDC_FUT_Settlement": '0',  # settlement
    "THOST_FTDC_FUT_Check": '1',       # verification
})
typedefDict["TThostFtdcFileTypeType"] = "string"

# TThostFtdcFileFormatType: file format
defineDict.update({
    "THOST_FTDC_FFT_Txt": '0',  # text file (.txt)
    "THOST_FTDC_FFT_Zip": '1',  # compressed file (.zip)
    "THOST_FTDC_FFT_DBF": '2',  # DBF file (.dbf)
})
typedefDict["TThostFtdcFileFormatType"] = "string"

# TThostFtdcFileUploadStatusType: file status
defineDict.update({
    "THOST_FTDC_FUS_SucceedUpload": '1',    # upload succeeded
    "THOST_FTDC_FUS_FailedUpload": '2',     # upload failed
    "THOST_FTDC_FUS_SucceedLoad": '3',      # import succeeded
    "THOST_FTDC_FUS_PartSucceedLoad": '4',  # import partly succeeded
    "THOST_FTDC_FUS_FailedLoad": '5',       # import failed
})
typedefDict["TThostFtdcFileUploadStatusType"] = "string"

# TThostFtdcTransferDirectionType: position transfer direction
defineDict.update({
    "THOST_FTDC_TD_Out": '0',  # transfer out
    "THOST_FTDC_TD_In": '1',   # transfer in
})
typedefDict["TThostFtdcTransferDirectionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUploadModeType是一个上传文件类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUploadModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAccountIDType是一个投资者帐号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAccountIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankFlagType是一个银行统一标识类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankAccountType是一个银行账户类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankAccountType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOpenNameType是一个银行账户的开户人名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOpenNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOpenBankType是一个银行账户的开户行类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOpenBankType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankNameType是一个银行名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPublishPathType是一个发布路径类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPublishPathType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOperatorIDType是一个操作员代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOperatorIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMonthCountType是一个月份数量类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcMonthCountType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcAdvanceMonthArrayType是一个月份提前数组类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAdvanceMonthArrayType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDateExprType是一个日期表达式类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDateExprType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInstrumentIDExprType是一个合约代码表达式类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInstrumentIDExprType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInstrumentNameExprType是一个合约名称表达式类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInstrumentNameExprType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSpecialCreateRuleType是一个特殊的创建规则类型
#//////////////////////////////////////////////////////////////////////
#没有特殊创建规则
defineDict["THOST_FTDC_SC_NoSpecialRule"] = '0'
#不包含春节
defineDict["THOST_FTDC_SC_NoSpringFestival"] = '1'
typedefDict["TThostFtdcSpecialCreateRuleType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcBasisPriceTypeType is a listing basis price type
#//////////////////////////////////////////////////////////////////////
# Settlement price of the previous contract
defineDict["THOST_FTDC_IPT_LastSettlement"] = '1'
# Close price of the previous contract
# NOTE: "LaseClose" (sic) is the spelling used by the official CTP header; do not "fix" the key.
defineDict["THOST_FTDC_IPT_LaseClose"] = '2'
typedefDict["TThostFtdcBasisPriceTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcProductLifePhaseType is a product life-cycle phase type
#//////////////////////////////////////////////////////////////////////
# Active
defineDict["THOST_FTDC_PLP_Active"] = '1'
# Non-active
defineDict["THOST_FTDC_PLP_NonActive"] = '2'
# Canceled
defineDict["THOST_FTDC_PLP_Canceled"] = '3'
typedefDict["TThostFtdcProductLifePhaseType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcDeliveryModeType is a delivery mode type
#//////////////////////////////////////////////////////////////////////
# Cash delivery
defineDict["THOST_FTDC_DM_CashDeliv"] = '1'
# Physical (commodity) delivery
defineDict["THOST_FTDC_DM_CommodityDeliv"] = '2'
typedefDict["TThostFtdcDeliveryModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcLogLevelType is a log level type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcLogLevelType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcProcessNameType is a stored procedure name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcProcessNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcOperationMemoType is an operation memo type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOperationMemoType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFundIOTypeType is a fund in/out type
#//////////////////////////////////////////////////////////////////////
# Fund deposit/withdrawal
defineDict["THOST_FTDC_FIOT_FundIO"] = '1'
# Bank-futures transfer
defineDict["THOST_FTDC_FIOT_Transfer"] = '2'
# Bank-futures currency exchange
defineDict["THOST_FTDC_FIOT_SwapCurrency"] = '3'
typedefDict["TThostFtdcFundIOTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFundTypeType is a fund type
#//////////////////////////////////////////////////////////////////////
# Bank deposit
defineDict["THOST_FTDC_FT_Deposite"] = '1'
# Sub-entry fund
defineDict["THOST_FTDC_FT_ItemFund"] = '2'
# Company adjustment
defineDict["THOST_FTDC_FT_Company"] = '3'
# Internal fund transfer
defineDict["THOST_FTDC_FT_InnerTransfer"] = '4'
typedefDict["TThostFtdcFundTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFundDirectionType is a fund in/out direction type
#//////////////////////////////////////////////////////////////////////
# Deposit (fund in)
defineDict["THOST_FTDC_FD_In"] = '1'
# Withdrawal (fund out)
defineDict["THOST_FTDC_FD_Out"] = '2'
typedefDict["TThostFtdcFundDirectionType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFundStatusType is a fund status type
#//////////////////////////////////////////////////////////////////////
# Recorded
defineDict["THOST_FTDC_FS_Record"] = '1'
# Checked (reviewed)
defineDict["THOST_FTDC_FS_Check"] = '2'
# Charged off (reversed)
defineDict["THOST_FTDC_FS_Charge"] = '3'
typedefDict["TThostFtdcFundStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcBillNoType is a bill number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBillNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcBillNameType is a bill name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBillNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcPublishStatusType is a publish status type
#//////////////////////////////////////////////////////////////////////
# Not published
defineDict["THOST_FTDC_PS_None"] = '1'
# Publishing
defineDict["THOST_FTDC_PS_Publishing"] = '2'
# Published
defineDict["THOST_FTDC_PS_Published"] = '3'
typedefDict["TThostFtdcPublishStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcEnumValueIDType is an enum value code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcEnumValueIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcEnumValueTypeType is an enum value type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcEnumValueTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcEnumValueLabelType is an enum value label type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcEnumValueLabelType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcEnumValueResultType is an enum value result type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcEnumValueResultType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcSystemStatusType is a system status type
#//////////////////////////////////////////////////////////////////////
# Non-active
defineDict["THOST_FTDC_ES_NonActive"] = '1'
# Startup
defineDict["THOST_FTDC_ES_Startup"] = '2'
# Trading initialization started
defineDict["THOST_FTDC_ES_Initialize"] = '3'
# Trading initialization finished
defineDict["THOST_FTDC_ES_Initialized"] = '4'
# Market close started
defineDict["THOST_FTDC_ES_Close"] = '5'
# Market close finished
defineDict["THOST_FTDC_ES_Closed"] = '6'
# Settlement
defineDict["THOST_FTDC_ES_Settlement"] = '7'
typedefDict["TThostFtdcSystemStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcSettlementStatusType is a settlement status type
#//////////////////////////////////////////////////////////////////////
# Initial
defineDict["THOST_FTDC_STS_Initialize"] = '0'
# Settling
defineDict["THOST_FTDC_STS_Settlementing"] = '1'
# Settled
defineDict["THOST_FTDC_STS_Settlemented"] = '2'
# Settlement finished
defineDict["THOST_FTDC_STS_Finished"] = '3'
typedefDict["TThostFtdcSettlementStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcRangeIntTypeType is a bounded value type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRangeIntTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcRangeIntFromType is a bounded value lower-limit type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRangeIntFromType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcRangeIntToType is a bounded value upper-limit type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRangeIntToType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFunctionIDType is a function code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFunctionIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFunctionValueCodeType is a function value code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFunctionValueCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFunctionNameType is a function name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFunctionNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcRoleIDType is a role ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRoleIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcRoleNameType is a role name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRoleNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcDescriptionType is a description type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDescriptionType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcCombineIDType is a combination ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCombineIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcCombineTypeType is a combination type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCombineTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcInvestorTypeType is an investor type
#//////////////////////////////////////////////////////////////////////
# Natural person
defineDict["THOST_FTDC_CT_Person"] = '0'
# Legal entity (company)
defineDict["THOST_FTDC_CT_Company"] = '1'
# Investment fund
defineDict["THOST_FTDC_CT_Fund"] = '2'
# Special institution
defineDict["THOST_FTDC_CT_SpecialOrgan"] = '3'
# Asset management account
defineDict["THOST_FTDC_CT_Asset"] = '4'
typedefDict["TThostFtdcInvestorTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcBrokerTypeType is a broker (futures company) type
#//////////////////////////////////////////////////////////////////////
# Trading member
defineDict["THOST_FTDC_BT_Trade"] = '0'
# Trading and settlement member
defineDict["THOST_FTDC_BT_TradeSettle"] = '1'
typedefDict["TThostFtdcBrokerTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcRiskLevelType is a risk level type
#//////////////////////////////////////////////////////////////////////
# Low-risk client
defineDict["THOST_FTDC_FAS_Low"] = '1'
# Normal client
defineDict["THOST_FTDC_FAS_Normal"] = '2'
# Watch-listed client
defineDict["THOST_FTDC_FAS_Focus"] = '3'
# Risk client
defineDict["THOST_FTDC_FAS_Risk"] = '4'
typedefDict["TThostFtdcRiskLevelType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFeeAcceptStyleType is a fee collection style type
#//////////////////////////////////////////////////////////////////////
# Collected per trade
defineDict["THOST_FTDC_FAS_ByTrade"] = '1'
# Collected per delivery
defineDict["THOST_FTDC_FAS_ByDeliv"] = '2'
# Not collected
defineDict["THOST_FTDC_FAS_None"] = '3'
# Collected as a specified fixed fee
defineDict["THOST_FTDC_FAS_FixFee"] = '4'
typedefDict["TThostFtdcFeeAcceptStyleType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcPasswordTypeType is a password type
#//////////////////////////////////////////////////////////////////////
# Trading password
defineDict["THOST_FTDC_PWDT_Trade"] = '1'
# Fund (account) password
defineDict["THOST_FTDC_PWDT_Account"] = '2'
typedefDict["TThostFtdcPasswordTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcAlgorithmType is a profit/loss algorithm type
#//////////////////////////////////////////////////////////////////////
# Count both floating profit and floating loss
defineDict["THOST_FTDC_AG_All"] = '1'
# Ignore floating profit, count floating loss
defineDict["THOST_FTDC_AG_OnlyLost"] = '2'
# Count floating profit, ignore floating loss
defineDict["THOST_FTDC_AG_OnlyGain"] = '3'
# Count neither floating profit nor floating loss
defineDict["THOST_FTDC_AG_None"] = '4'
typedefDict["TThostFtdcAlgorithmType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcIncludeCloseProfitType is a whether-to-include-close-profit type
#//////////////////////////////////////////////////////////////////////
# Include close profit
defineDict["THOST_FTDC_ICP_Include"] = '0'
# Do not include close profit
defineDict["THOST_FTDC_ICP_NotInclude"] = '2'
typedefDict["TThostFtdcIncludeCloseProfitType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcAllWithoutTradeType is a whether-limited-by-withdrawal-ratio type
#//////////////////////////////////////////////////////////////////////
# No position and no trade: not limited by withdrawal ratio
defineDict["THOST_FTDC_AWT_Enable"] = '0'
# Limited by withdrawal ratio
defineDict["THOST_FTDC_AWT_Disable"] = '2'
# No position: not limited by withdrawal ratio
defineDict["THOST_FTDC_AWT_NoHoldEnable"] = '3'
typedefDict["TThostFtdcAllWithoutTradeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcCommentType is a profit/loss algorithm description type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCommentType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcVersionType is a version number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcVersionType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcTradeCodeType is a transaction code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTradeCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcTradeDateType is a transaction date type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTradeDateType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcTradeTimeType is a transaction time type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTradeTimeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcTradeSerialType is an initiator serial number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTradeSerialType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcTradeSerialNoType is an initiator serial number type (numeric)
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTradeSerialNoType"] = "int"
#//////////////////////////////////////////////////////////////////////
# TFtdcFutureIDType is a futures company code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFutureIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcBankIDType is a bank code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcBankBrchIDType is a bank branch-center code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankBrchIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcBankBranchIDType is a branch-center code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankBranchIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcOperNoType is a transaction teller type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOperNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcDeviceIDType is a channel flag type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDeviceIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcRecordNumType is a record count type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRecordNumType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFutureAccountType is a futures fund account type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFutureAccountType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFuturePwdFlagType is a fund password verification flag type
#//////////////////////////////////////////////////////////////////////
# Do not verify
defineDict["THOST_FTDC_FPWD_UnCheck"] = '0'
# Verify
defineDict["THOST_FTDC_FPWD_Check"] = '1'
typedefDict["TThostFtdcFuturePwdFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcTransferTypeType is a bank-futures transfer type
#//////////////////////////////////////////////////////////////////////
# Bank to futures
defineDict["THOST_FTDC_TT_BankToFuture"] = '0'
# Futures to bank
defineDict["THOST_FTDC_TT_FutureToBank"] = '1'
typedefDict["TThostFtdcTransferTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFutureAccPwdType is a futures fund password type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFutureAccPwdType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcCurrencyCodeType is a currency type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCurrencyCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcRetCodeType is a response code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRetCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcRetInfoType is a response message type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRetInfoType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcTradeAmtType is a bank total balance type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTradeAmtType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcUseAmtType is a bank available balance type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUseAmtType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFetchAmtType is a bank withdrawable balance type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFetchAmtType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcTransferValidFlagType is a transfer validity flag type
#//////////////////////////////////////////////////////////////////////
# Invalid or failed
defineDict["THOST_FTDC_TVF_Invalid"] = '0'
# Valid
defineDict["THOST_FTDC_TVF_Valid"] = '1'
# Reversed (correction)
defineDict["THOST_FTDC_TVF_Reverse"] = '2'
typedefDict["TThostFtdcTransferValidFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcCertCodeType is a certificate (ID document) number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCertCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcReasonType is a reason type
#//////////////////////////////////////////////////////////////////////
# Erroneous order
defineDict["THOST_FTDC_RN_CD"] = '0'
# Funds in transit
defineDict["THOST_FTDC_RN_ZT"] = '1'
# Other
defineDict["THOST_FTDC_RN_QT"] = '2'
typedefDict["TThostFtdcReasonType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFundProjectIDType is a fund project ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFundProjectIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcSexType is a gender type
#//////////////////////////////////////////////////////////////////////
# Unknown
defineDict["THOST_FTDC_SEX_None"] = '0'
# Male
defineDict["THOST_FTDC_SEX_Man"] = '1'
# Female
defineDict["THOST_FTDC_SEX_Woman"] = '2'
typedefDict["TThostFtdcSexType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcProfessionType is an occupation type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcProfessionType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcNationalType is a nationality type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcNationalType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcProvinceType is a province type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcProvinceType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcRegionType is a district type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRegionType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcCountryType is a country type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCountryType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcLicenseNOType is a business license number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcLicenseNOType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcCompanyTypeType is an enterprise nature type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCompanyTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcBusinessScopeType is a business scope type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBusinessScopeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcCapitalCurrencyType is a registered capital currency type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCapitalCurrencyType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcUserTypeType is a user type
#//////////////////////////////////////////////////////////////////////
# Investor
defineDict["THOST_FTDC_UT_Investor"] = '0'
# Operator
defineDict["THOST_FTDC_UT_Operator"] = '1'
# Administrator
defineDict["THOST_FTDC_UT_SuperUser"] = '2'
typedefDict["TThostFtdcUserTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcRateTypeType is a rate type
#//////////////////////////////////////////////////////////////////////
# Margin rate
defineDict["THOST_FTDC_RATETYPE_MarginRate"] = '2'
typedefDict["TThostFtdcRateTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcNoteTypeType is a notice type
#//////////////////////////////////////////////////////////////////////
# Trade settlement bill
defineDict["THOST_FTDC_NOTETYPE_TradeSettleBill"] = '1'
# Monthly trade settlement report
defineDict["THOST_FTDC_NOTETYPE_TradeSettleMonth"] = '2'
# Margin call notice
defineDict["THOST_FTDC_NOTETYPE_CallMarginNotes"] = '3'
# Forced liquidation notice
defineDict["THOST_FTDC_NOTETYPE_ForceCloseNotes"] = '4'
# Trade confirmation notice
defineDict["THOST_FTDC_NOTETYPE_TradeNotes"] = '5'
# Delivery notice
defineDict["THOST_FTDC_NOTETYPE_DelivNotes"] = '6'
typedefDict["TThostFtdcNoteTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcSettlementStyleType is a settlement bill style type
#//////////////////////////////////////////////////////////////////////
# Daily mark-to-market
defineDict["THOST_FTDC_SBS_Day"] = '1'
# Trade-by-trade offset
defineDict["THOST_FTDC_SBS_Volume"] = '2'
typedefDict["TThostFtdcSettlementStyleType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcBrokerDNSType is a domain name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBrokerDNSType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcSentenceType is a statement type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSentenceType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcSettlementBillTypeType is a settlement bill type
#//////////////////////////////////////////////////////////////////////
# Daily report
defineDict["THOST_FTDC_ST_Day"] = '0'
# Monthly report
defineDict["THOST_FTDC_ST_Month"] = '1'
typedefDict["TThostFtdcSettlementBillTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcUserRightTypeType is a client right type
#//////////////////////////////////////////////////////////////////////
# Logon
defineDict["THOST_FTDC_URT_Logon"] = '1'
# Bank-futures transfer
defineDict["THOST_FTDC_URT_Transfer"] = '2'
# Settlement bill by e-mail
defineDict["THOST_FTDC_URT_EMail"] = '3'
# Settlement bill by fax
defineDict["THOST_FTDC_URT_Fax"] = '4'
# Condition order
defineDict["THOST_FTDC_URT_ConditionOrder"] = '5'
typedefDict["TThostFtdcUserRightTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcMarginPriceTypeType is a margin price type
#//////////////////////////////////////////////////////////////////////
# Previous settlement price
defineDict["THOST_FTDC_MPT_PreSettlementPrice"] = '1'
# Latest price
defineDict["THOST_FTDC_MPT_SettlementPrice"] = '2'
# Average trade price
defineDict["THOST_FTDC_MPT_AveragePrice"] = '3'
# Open price
defineDict["THOST_FTDC_MPT_OpenPrice"] = '4'
typedefDict["TThostFtdcMarginPriceTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcBillGenStatusType is a settlement bill generation status type
#//////////////////////////////////////////////////////////////////////
# Not generated
defineDict["THOST_FTDC_BGS_None"] = '0'
# Generating
defineDict["THOST_FTDC_BGS_NoGenerated"] = '1'
# Generated
defineDict["THOST_FTDC_BGS_Generated"] = '2'
typedefDict["TThostFtdcBillGenStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcAlgoTypeType is an algorithm type
#//////////////////////////////////////////////////////////////////////
# Position-handling algorithm
defineDict["THOST_FTDC_AT_HandlePositionAlgo"] = '1'
# Margin-rate lookup algorithm
defineDict["THOST_FTDC_AT_FindMarginRateAlgo"] = '2'
typedefDict["TThostFtdcAlgoTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcHandlePositionAlgoIDType is a position-handling algorithm ID type
#//////////////////////////////////////////////////////////////////////
# Basic
defineDict["THOST_FTDC_HPA_Base"] = '1'
# Dalian Commodity Exchange (DCE)
defineDict["THOST_FTDC_HPA_DCE"] = '2'
# Zhengzhou Commodity Exchange (CZCE)
defineDict["THOST_FTDC_HPA_CZCE"] = '3'
typedefDict["TThostFtdcHandlePositionAlgoIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcFindMarginRateAlgoIDType is a margin-rate lookup algorithm ID type
#//////////////////////////////////////////////////////////////////////
# Basic
defineDict["THOST_FTDC_FMRA_Base"] = '1'
# Dalian Commodity Exchange (DCE)
defineDict["THOST_FTDC_FMRA_DCE"] = '2'
# Zhengzhou Commodity Exchange (CZCE)
defineDict["THOST_FTDC_FMRA_CZCE"] = '3'
typedefDict["TThostFtdcFindMarginRateAlgoIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcHandleTradingAccountAlgoIDType is a fund-handling algorithm ID type
#//////////////////////////////////////////////////////////////////////
# Basic
defineDict["THOST_FTDC_HTAA_Base"] = '1'
# Dalian Commodity Exchange (DCE)
defineDict["THOST_FTDC_HTAA_DCE"] = '2'
# Zhengzhou Commodity Exchange (CZCE)
defineDict["THOST_FTDC_HTAA_CZCE"] = '3'
typedefDict["TThostFtdcHandleTradingAccountAlgoIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcPersonTypeType is a contact person type
#//////////////////////////////////////////////////////////////////////
# Designated order placer
defineDict["THOST_FTDC_PST_Order"] = '1'
# Account-opening authorizer
defineDict["THOST_FTDC_PST_Open"] = '2'
# Fund allocator
defineDict["THOST_FTDC_PST_Fund"] = '3'
# Settlement bill confirmer
defineDict["THOST_FTDC_PST_Settlement"] = '4'
# Legal entity
defineDict["THOST_FTDC_PST_Company"] = '5'
# Legal representative
defineDict["THOST_FTDC_PST_Corporation"] = '6'
# Investor contact person
defineDict["THOST_FTDC_PST_LinkMan"] = '7'
# Sub-account asset manager
defineDict["THOST_FTDC_PST_Ledger"] = '8'
# Trustee (custodian)
defineDict["THOST_FTDC_PST_Trustee"] = '9'
# Custodian institution legal representative
defineDict["THOST_FTDC_PST_TrusteeCorporation"] = 'A'
# Custodian institution account-opening authorizer
defineDict["THOST_FTDC_PST_TrusteeOpen"] = 'B'
# Custodian institution contact person
defineDict["THOST_FTDC_PST_TrusteeContact"] = 'C'
# Foreign natural person reference document
defineDict["THOST_FTDC_PST_ForeignerRefer"] = 'D'
# Legal representative reference document
defineDict["THOST_FTDC_PST_CorporationRefer"] = 'E'
typedefDict["TThostFtdcPersonTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcQueryInvestorRangeType is a query range type
#//////////////////////////////////////////////////////////////////////
# All
defineDict["THOST_FTDC_QIR_All"] = '1'
# Query by group
defineDict["THOST_FTDC_QIR_Group"] = '2'
# Single investor
defineDict["THOST_FTDC_QIR_Single"] = '3'
typedefDict["TThostFtdcQueryInvestorRangeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcInvestorRiskStatusType is an investor risk status type
#//////////////////////////////////////////////////////////////////////
# Normal
defineDict["THOST_FTDC_IRS_Normal"] = '1'
# Warning
defineDict["THOST_FTDC_IRS_Warn"] = '2'
# Margin call
defineDict["THOST_FTDC_IRS_Call"] = '3'
# Forced liquidation
defineDict["THOST_FTDC_IRS_Force"] = '4'
# Exception
defineDict["THOST_FTDC_IRS_Exception"] = '5'
typedefDict["TThostFtdcInvestorRiskStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcLegIDType is a single-leg ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcLegIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
# TFtdcLegMultipleType is a single-leg multiplier type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcLegMultipleType"] = "int"
#//////////////////////////////////////////////////////////////////////
# TFtdcImplyLevelType is a derivation depth type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcImplyLevelType"] = "int"
#//////////////////////////////////////////////////////////////////////
# TFtdcClearAccountType is a clearing account type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcClearAccountType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcOrganNOType is a clearing account (organization number) type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOrganNOType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcClearbarchIDType is a clearing account interbank number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcClearbarchIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcUserEventTypeType is a user event type
#//////////////////////////////////////////////////////////////////////
# Login
defineDict["THOST_FTDC_UET_Login"] = '1'
# Logout
defineDict["THOST_FTDC_UET_Logout"] = '2'
# Trading succeeded
defineDict["THOST_FTDC_UET_Trading"] = '3'
# Trading failed
defineDict["THOST_FTDC_UET_TradingError"] = '4'
# Password updated
defineDict["THOST_FTDC_UET_UpdatePassword"] = '5'
# Client authentication
defineDict["THOST_FTDC_UET_Authenticate"] = '6'
# Other
defineDict["THOST_FTDC_UET_Other"] = '9'
typedefDict["TThostFtdcUserEventTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
# TFtdcUserEventInfoType is a user event info type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUserEventInfoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCloseStyleType是一个平仓方式类型
#//////////////////////////////////////////////////////////////////////
#先开先平
defineDict["THOST_FTDC_ICS_Close"] = '0'
#先平今再平昨
defineDict["THOST_FTDC_ICS_CloseToday"] = '1'
typedefDict["TThostFtdcCloseStyleType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcStatModeType是一个统计方式类型
#//////////////////////////////////////////////////////////////////////
#----
defineDict["THOST_FTDC_SM_Non"] = '0'
#按合约统计
defineDict["THOST_FTDC_SM_Instrument"] = '1'
#按产品统计
defineDict["THOST_FTDC_SM_Product"] = '2'
#按投资者统计
defineDict["THOST_FTDC_SM_Investor"] = '3'
typedefDict["TThostFtdcStatModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcParkedOrderStatusType是一个预埋单状态类型
#//////////////////////////////////////////////////////////////////////
#未发送
defineDict["THOST_FTDC_PAOS_NotSend"] = '1'
#已发送
defineDict["THOST_FTDC_PAOS_Send"] = '2'
#已删除
defineDict["THOST_FTDC_PAOS_Deleted"] = '3'
typedefDict["TThostFtdcParkedOrderStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcParkedOrderIDType是一个预埋报单编号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcParkedOrderIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcParkedOrderActionIDType是一个预埋撤单编号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcParkedOrderActionIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVirDealStatusType是一个处理状态类型
#//////////////////////////////////////////////////////////////////////
#正在处理
defineDict["THOST_FTDC_VDS_Dealing"] = '1'
#处理成功
defineDict["THOST_FTDC_VDS_DeaclSucceed"] = '2'
typedefDict["TThostFtdcVirDealStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrgSystemIDType是一个原有系统代码类型
#//////////////////////////////////////////////////////////////////////
#综合交易平台
defineDict["THOST_FTDC_ORGS_Standard"] = '0'
#易盛系统
defineDict["THOST_FTDC_ORGS_ESunny"] = '1'
#金仕达V6系统
defineDict["THOST_FTDC_ORGS_KingStarV6"] = '2'
typedefDict["TThostFtdcOrgSystemIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVirTradeStatusType是一个交易状态类型
#//////////////////////////////////////////////////////////////////////
#正常处理中
defineDict["THOST_FTDC_VTS_NaturalDeal"] = '0'
#成功结束
defineDict["THOST_FTDC_VTS_SucceedEnd"] = '1'
#失败结束
defineDict["THOST_FTDC_VTS_FailedEND"] = '2'
#异常中
defineDict["THOST_FTDC_VTS_Exception"] = '3'
#已人工异常处理
defineDict["THOST_FTDC_VTS_ManualDeal"] = '4'
#通讯异常 ,请人工处理
defineDict["THOST_FTDC_VTS_MesException"] = '5'
#系统出错,请人工处理
defineDict["THOST_FTDC_VTS_SysException"] = '6'
typedefDict["TThostFtdcVirTradeStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVirBankAccTypeType是一个银行帐户类型类型
#//////////////////////////////////////////////////////////////////////
#存折
defineDict["THOST_FTDC_VBAT_BankBook"] = '1'
#储蓄卡
defineDict["THOST_FTDC_VBAT_BankCard"] = '2'
#信用卡
defineDict["THOST_FTDC_VBAT_CreditCard"] = '3'
typedefDict["TThostFtdcVirBankAccTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVirementStatusType是一个银行帐户类型类型
#//////////////////////////////////////////////////////////////////////
#正常
defineDict["THOST_FTDC_VMS_Natural"] = '0'
#销户
defineDict["THOST_FTDC_VMS_Canceled"] = '9'
typedefDict["TThostFtdcVirementStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVirementAvailAbilityType是一个有效标志类型
#//////////////////////////////////////////////////////////////////////
#未确认
defineDict["THOST_FTDC_VAA_NoAvailAbility"] = '0'
#有效
defineDict["THOST_FTDC_VAA_AvailAbility"] = '1'
#冲正
defineDict["THOST_FTDC_VAA_Repeal"] = '2'
typedefDict["TThostFtdcVirementAvailAbilityType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcVirementTradeCodeType是一个交易代码类型
#//////////////////////////////////////////////////////////////////////
#银行发起银行资金转期货
defineDict["THOST_FTDC_VTC_BankBankToFuture"] = '102001'
#银行发起期货资金转银行
defineDict["THOST_FTDC_VTC_BankFutureToBank"] = '102002'
#期货发起银行资金转期货
defineDict["THOST_FTDC_VTC_FutureBankToFuture"] = '202001'
#期货发起期货资金转银行
defineDict["THOST_FTDC_VTC_FutureFutureToBank"] = '202002'
typedefDict["TThostFtdcVirementTradeCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPhotoTypeNameType是一个影像类型名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPhotoTypeNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPhotoTypeIDType是一个影像类型代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPhotoTypeIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPhotoNameType是一个影像名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPhotoNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTopicIDType是一个主题代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTopicIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcReportTypeIDType是一个交易报告类型标识类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcReportTypeIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCharacterIDType是一个交易特征代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCharacterIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLParamIDType是一个参数代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLParamIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLInvestorTypeType是一个投资者类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLInvestorTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLIdCardTypeType是一个证件类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLIdCardTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLTradeDirectType是一个资金进出方向类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLTradeDirectType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLTradeModelType是一个资金进出方式类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLTradeModelType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLParamIDType是一个参数代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLParamIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLOpParamValueType是一个业务参数代码值类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLOpParamValueType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLCustomerCardTypeType是一个客户身份证件/证明文件类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLCustomerCardTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLInstitutionNameType是一个金融机构网点名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLInstitutionNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLDistrictIDType是一个金融机构网点所在地区行政区划代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLDistrictIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLRelationShipType是一个金融机构网点与大额交易的关系类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLRelationShipType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLInstitutionTypeType是一个金融机构网点代码类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLInstitutionTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLInstitutionIDType是一个金融机构网点代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLInstitutionIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLAccountTypeType是一个账户类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLAccountTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLTradingTypeType是一个交易方式类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLTradingTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLTransactClassType是一个涉外收支交易分类与代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLTransactClassType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLCapitalIOType是一个资金收付标识类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLCapitalIOType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLSiteType是一个交易地点类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLSiteType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLCapitalPurposeType是一个资金用途类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLCapitalPurposeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLReportTypeType是一个报文类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLReportTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLSerialNoType是一个编号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLSerialNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLStatusType是一个状态类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLGenStatusType是一个Aml生成方式类型
#//////////////////////////////////////////////////////////////////////
#程序生成
defineDict["THOST_FTDC_GEN_Program"] = '0'
#人工生成
defineDict["THOST_FTDC_GEN_HandWork"] = '1'
typedefDict["TThostFtdcAMLGenStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLSeqCodeType是一个业务标识号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLSeqCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLFileNameType是一个AML文件名类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLFileNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLMoneyType是一个反洗钱资金类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLMoneyType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLFileAmountType是一个反洗钱资金类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLFileAmountType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcCFMMCKeyType是一个密钥类型(保证金监管)类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCFMMCKeyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCFMMCKeyKindType是一个动态密钥类别(保证金监管)类型
#//////////////////////////////////////////////////////////////////////
#主动请求更新
defineDict["THOST_FTDC_CFMMCKK_REQUEST"] = 'R'
#CFMMC自动更新
defineDict["THOST_FTDC_CFMMCKK_AUTO"] = 'A'
#CFMMC手动更新
defineDict["THOST_FTDC_CFMMCKK_MANUAL"] = 'M'
typedefDict["TThostFtdcCFMMCKeyKindType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLReportNameType是一个报文名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAMLReportNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIndividualNameType是一个个人姓名类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcIndividualNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCurrencyIDType是一个币种代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCurrencyIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCustNumberType是一个客户编号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCustNumberType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrganCodeType是一个机构编码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOrganCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrganNameType是一个机构名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOrganNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSuperOrganCodeType是一个上级机构编码,即期货公司总部、银行总行类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSuperOrganCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSubBranchIDType是一个分支机构类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSubBranchIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSubBranchNameType是一个分支机构名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSubBranchNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBranchNetCodeType是一个机构网点号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBranchNetCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBranchNetNameType是一个机构网点名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBranchNetNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOrganFlagType是一个机构标识类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOrganFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankCodingForFutureType是一个银行对期货公司的编码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankCodingForFutureType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankReturnCodeType是一个银行对返回码的定义类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankReturnCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPlateReturnCodeType是一个银期转帐平台对返回码的定义类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPlateReturnCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankSubBranchIDType是一个银行分支机构编码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankSubBranchIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFutureBranchIDType是一个期货分支机构编码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFutureBranchIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcReturnCodeType是一个返回代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcReturnCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOperatorCodeType是一个操作员类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOperatorCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcClearDepIDType是一个机构结算帐户机构号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcClearDepIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcClearBrchIDType是一个机构结算帐户联行号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcClearBrchIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcClearNameType是一个机构结算帐户名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcClearNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankAccountNameType是一个银行帐户名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankAccountNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvDepIDType是一个机构投资人账号机构号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInvDepIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvBrchIDType是一个机构投资人联行号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInvBrchIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMessageFormatVersionType是一个信息格式版本类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcMessageFormatVersionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDigestType是一个摘要类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDigestType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAuthenticDataType是一个认证数据类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAuthenticDataType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPasswordKeyType是一个密钥类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPasswordKeyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFutureAccountNameType是一个期货帐户名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFutureAccountNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMobilePhoneType是一个手机类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcMobilePhoneType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFutureMainKeyType是一个期货公司主密钥类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFutureMainKeyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFutureWorkKeyType是一个期货公司工作密钥类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFutureWorkKeyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFutureTransKeyType是一个期货公司传输密钥类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFutureTransKeyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankMainKeyType是一个银行主密钥类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankMainKeyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankWorkKeyType是一个银行工作密钥类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankWorkKeyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankTransKeyType是一个银行传输密钥类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankTransKeyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankServerDescriptionType是一个银行服务器描述信息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankServerDescriptionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAddInfoType是一个附加信息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAddInfoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDescrInfoForReturnCodeType是一个返回码描述类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDescrInfoForReturnCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCountryCodeType是一个国家代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCountryCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSerialType是一个流水号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSerialType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcPlateSerialType是一个平台流水号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPlateSerialType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankSerialType是一个银行流水号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankSerialType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCorrectSerialType是一个被冲正交易流水号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCorrectSerialType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcFutureSerialType是一个期货公司流水号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFutureSerialType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcApplicationIDType是一个应用标识类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcApplicationIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankProxyIDType是一个银行代理标识类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankProxyIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcFBTCoreIDType是一个银期转帐核心系统标识类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFBTCoreIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcServerPortType是一个服务端口号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcServerPortType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcRepealedTimesType是一个已经冲正次数类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRepealedTimesType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcRepealTimeIntervalType是一个冲正时间间隔类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRepealTimeIntervalType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcTotalTimesType是一个每日累计转帐次数类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTotalTimesType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcFBTRequestIDType是一个请求ID类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFBTRequestIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcTIDType是一个交易ID类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeAmountType是一个交易金额(元)类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTradeAmountType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcCustFeeType是一个应收客户费用(元)类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCustFeeType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcFutureFeeType是一个应收期货公司费用(元)类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFutureFeeType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcSingleMaxAmtType是一个单笔最高限额类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSingleMaxAmtType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcSingleMinAmtType是一个单笔最低限额类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSingleMinAmtType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcTotalAmtType是一个每日累计转帐额度类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTotalAmtType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcCertificationTypeType是一个证件类型类型
#//////////////////////////////////////////////////////////////////////
#身份证
defineDict["THOST_FTDC_CFT_IDCard"] = '0'
#护照
defineDict["THOST_FTDC_CFT_Passport"] = '1'
#军官证
defineDict["THOST_FTDC_CFT_OfficerIDCard"] = '2'
#士兵证
defineDict["THOST_FTDC_CFT_SoldierIDCard"] = '3'
#回乡证
defineDict["THOST_FTDC_CFT_HomeComingCard"] = '4'
#户口簿
defineDict["THOST_FTDC_CFT_HouseholdRegister"] = '5'
#营业执照号
defineDict["THOST_FTDC_CFT_LicenseNo"] = '6'
#组织机构代码证
defineDict["THOST_FTDC_CFT_InstitutionCodeCard"] = '7'
#临时营业执照号
defineDict["THOST_FTDC_CFT_TempLicenseNo"] = '8'
#民办非企业登记证书
defineDict["THOST_FTDC_CFT_NoEnterpriseLicenseNo"] = '9'
#其他证件
defineDict["THOST_FTDC_CFT_OtherCard"] = 'x'
#主管部门批文
defineDict["THOST_FTDC_CFT_SuperDepAgree"] = 'a'
typedefDict["TThostFtdcCertificationTypeType"] = "string"
#----------------------------------------------------------------------
# TFtdcFileBusinessCodeType: file business-function code.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FBC_Others": '0',                          # other
    "THOST_FTDC_FBC_TransferDetails": '1',                 # transfer transaction detail reconciliation
    "THOST_FTDC_FBC_CustAccStatus": '2',                   # customer account status reconciliation
    "THOST_FTDC_FBC_AccountTradeDetails": '3',             # account transaction detail reconciliation
    "THOST_FTDC_FBC_FutureAccountChangeInfoDetails": '4',  # futures account change detail reconciliation
    "THOST_FTDC_FBC_CustMoneyDetail": '5',                 # customer fund ledger balance detail reconciliation
    "THOST_FTDC_FBC_CustCancelAccountInfo": '6',           # customer account-closing interest detail reconciliation
    "THOST_FTDC_FBC_CustMoneyResult": '7',                 # customer fund balance reconciliation result
    "THOST_FTDC_FBC_OthersExceptionResult": '8',           # other reconciliation-exception result file
    "THOST_FTDC_FBC_CustInterestNetMoneyDetails": '9',     # customer net interest detail
    "THOST_FTDC_FBC_CustMoneySendAndReceiveDetails": 'a',  # customer fund delivery detail
    "THOST_FTDC_FBC_CorporationMoneyTotal": 'b',           # corporate custodian-bank fund delivery summary
    "THOST_FTDC_FBC_MainbodyMoneyTotal": 'c',              # inter-entity fund delivery summary
    "THOST_FTDC_FBC_MainPartMonitorData": 'd',             # head/branch balance supervision data
    "THOST_FTDC_FBC_PreparationMoney": 'e',                # custodian-bank reserve balance
    "THOST_FTDC_FBC_BankMoneyMonitorData": 'f',            # co-custodian-bank fund supervision data
})
typedefDict["TThostFtdcFileBusinessCodeType"] = "string"

#----------------------------------------------------------------------
# TFtdcCashExchangeCodeType: remittance/cash flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_CEC_Exchange": '1',  # remittance
    "THOST_FTDC_CEC_Cash": '2',      # cash
})
typedefDict["TThostFtdcCashExchangeCodeType"] = "string"

#----------------------------------------------------------------------
# TFtdcYesNoIndicatorType: yes/no indicator.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_YNI_Yes": '0',  # yes
    "THOST_FTDC_YNI_No": '1',   # no
})
typedefDict["TThostFtdcYesNoIndicatorType"] = "string"

#----------------------------------------------------------------------
# TFtdcBanlanceTypeType: balance type.
# (Key spelling "Banlance" comes from the upstream C++ header; keep as-is.)
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_BLT_CurrentMoney": '0',    # current balance
    "THOST_FTDC_BLT_UsableMoney": '1',     # available balance
    "THOST_FTDC_BLT_FetchableMoney": '2',  # withdrawable balance
    "THOST_FTDC_BLT_FreezeMoney": '3',     # frozen balance
})
typedefDict["TThostFtdcBanlanceTypeType"] = "string"

#----------------------------------------------------------------------
# TFtdcGenderType: gender.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_GD_Unknown": '0',  # unknown
    "THOST_FTDC_GD_Male": '1',     # male
    "THOST_FTDC_GD_Female": '2',   # female
})
typedefDict["TThostFtdcGenderType"] = "string"

#----------------------------------------------------------------------
# TFtdcFeePayFlagType: fee payment flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FPF_BEN": '0',  # fee paid by beneficiary
    "THOST_FTDC_FPF_OUR": '1',  # fee paid by sender
    "THOST_FTDC_FPF_SHA": '2',  # sender pays sending fee, beneficiary pays receiving fee
})
typedefDict["TThostFtdcFeePayFlagType"] = "string"

#----------------------------------------------------------------------
# TFtdcPassWordKeyTypeType: cryptographic key type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_PWKT_ExchangeKey": '0',  # exchange key
    "THOST_FTDC_PWKT_PassWordKey": '1',  # password key
    "THOST_FTDC_PWKT_MACKey": '2',       # MAC key
    "THOST_FTDC_PWKT_MessageKey": '3',   # message key
})
typedefDict["TThostFtdcPassWordKeyTypeType"] = "string"

#----------------------------------------------------------------------
# TFtdcFBTPassWordTypeType: password type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_PWT_Query": '0',     # query
    "THOST_FTDC_PWT_Fetch": '1',     # withdrawal
    "THOST_FTDC_PWT_Transfer": '2',  # transfer
    "THOST_FTDC_PWT_Trade": '3',     # trade
})
typedefDict["TThostFtdcFBTPassWordTypeType"] = "string"
#----------------------------------------------------------------------
# TFtdcFBTEncryModeType: encryption mode.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_EM_NoEncry": '0',  # no encryption
    "THOST_FTDC_EM_DES": '1',      # DES
    "THOST_FTDC_EM_3DES": '2',     # 3DES
})
typedefDict["TThostFtdcFBTEncryModeType"] = "string"

#----------------------------------------------------------------------
# TFtdcBankRepealFlagType: bank reversal flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_BRF_BankNotNeedRepeal": '0',  # bank needs no automatic reversal
    "THOST_FTDC_BRF_BankWaitingRepeal": '1',  # bank pending automatic reversal
    "THOST_FTDC_BRF_BankBeenRepealed": '2',   # bank already auto-reversed
})
typedefDict["TThostFtdcBankRepealFlagType"] = "string"

#----------------------------------------------------------------------
# TFtdcBrokerRepealFlagType: futures-broker reversal flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_BRORF_BrokerNotNeedRepeal": '0',  # broker needs no automatic reversal
    "THOST_FTDC_BRORF_BrokerWaitingRepeal": '1',  # broker pending automatic reversal
    "THOST_FTDC_BRORF_BrokerBeenRepealed": '2',   # broker already auto-reversed
})
typedefDict["TThostFtdcBrokerRepealFlagType"] = "string"

#----------------------------------------------------------------------
# TFtdcInstitutionTypeType: institution category.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_TS_Bank": '0',    # bank
    "THOST_FTDC_TS_Future": '1',  # futures broker
    "THOST_FTDC_TS_Store": '2',   # securities broker
})
typedefDict["TThostFtdcInstitutionTypeType"] = "string"

#----------------------------------------------------------------------
# TFtdcLastFragmentType: last-fragment flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_LF_Yes": '0',  # is the last fragment
    "THOST_FTDC_LF_No": '1',   # is not the last fragment
})
typedefDict["TThostFtdcLastFragmentType"] = "string"

#----------------------------------------------------------------------
# TFtdcBankAccStatusType: bank account status.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_BAS_Normal": '0',      # normal
    "THOST_FTDC_BAS_Freeze": '1',      # frozen
    "THOST_FTDC_BAS_ReportLoss": '2',  # reported lost
})
typedefDict["TThostFtdcBankAccStatusType"] = "string"

#----------------------------------------------------------------------
# TFtdcMoneyAccountStatusType: fund account status.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_MAS_Normal": '0',  # normal
    "THOST_FTDC_MAS_Cancel": '1',  # closed
})
typedefDict["TThostFtdcMoneyAccountStatusType"] = "string"

#----------------------------------------------------------------------
# TFtdcManageStatusType: custody status.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_MSS_Point": '0',        # designated custody
    "THOST_FTDC_MSS_PrePoint": '1',     # pre-designated
    "THOST_FTDC_MSS_CancelPoint": '2',  # designation cancelled
})
typedefDict["TThostFtdcManageStatusType"] = "string"

#----------------------------------------------------------------------
# TFtdcSystemTypeType: application system type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_SYT_FutureBankTransfer": '0',  # bank-futures transfer
    "THOST_FTDC_SYT_StockBankTransfer": '1',   # bank-securities transfer
    "THOST_FTDC_SYT_TheThirdPartStore": '2',   # third-party custody
})
typedefDict["TThostFtdcSystemTypeType"] = "string"

#----------------------------------------------------------------------
# TFtdcTxnEndFlagType: bank-futures transfer result flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_TEF_NormalProcessing": '0',              # normal processing
    "THOST_FTDC_TEF_Success": '1',                       # ended successfully
    "THOST_FTDC_TEF_Failed": '2',                        # ended in failure
    "THOST_FTDC_TEF_Abnormal": '3',                      # in exception
    "THOST_FTDC_TEF_ManualProcessedForException": '4',   # exception handled manually
    "THOST_FTDC_TEF_CommuFailedNeedManualProcess": '5',  # communication failure, manual handling required
    "THOST_FTDC_TEF_SysErrorNeedManualProcess": '6',     # system error, manual handling required
})
typedefDict["TThostFtdcTxnEndFlagType"] = "string"

#----------------------------------------------------------------------
# TFtdcProcessStatusType: bank-futures transfer service processing status.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_PSS_NotProcess": '0',    # not processed
    "THOST_FTDC_PSS_StartProcess": '1',  # processing started
    "THOST_FTDC_PSS_Finished": '2',      # processing finished
})
typedefDict["TThostFtdcProcessStatusType"] = "string"
#----------------------------------------------------------------------
# TFtdcCustTypeType: customer type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_CUSTT_Person": '0',       # natural person
    "THOST_FTDC_CUSTT_Institution": '1',  # institution
})
typedefDict["TThostFtdcCustTypeType"] = "string"

#----------------------------------------------------------------------
# TFtdcFBTTransferDirectionType: bank-futures transfer direction.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FBTTD_FromBankToFuture": '1',  # deposit: bank to futures
    "THOST_FTDC_FBTTD_FromFutureToBank": '2',  # withdrawal: futures to bank
})
typedefDict["TThostFtdcFBTTransferDirectionType"] = "string"

#----------------------------------------------------------------------
# TFtdcOpenOrDestroyType: account open/close category.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_OOD_Open": '1',     # open account
    "THOST_FTDC_OOD_Destroy": '0',  # close account
})
typedefDict["TThostFtdcOpenOrDestroyType"] = "string"

#----------------------------------------------------------------------
# TFtdcAvailabilityFlagType: availability flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_AVAF_Invalid": '0',  # unconfirmed
    "THOST_FTDC_AVAF_Valid": '1',    # valid
    "THOST_FTDC_AVAF_Repeal": '2',   # reversed
})
typedefDict["TThostFtdcAvailabilityFlagType"] = "string"

#----------------------------------------------------------------------
# TFtdcOrganTypeType: organization type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_OT_Bank": '1',       # bank agent
    "THOST_FTDC_OT_Future": '2',     # trading front-end
    "THOST_FTDC_OT_PlateForm": '9',  # bank-futures transfer platform management
})
typedefDict["TThostFtdcOrganTypeType"] = "string"

#----------------------------------------------------------------------
# TFtdcOrganLevelType: organization level.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_OL_HeadQuarters": '1',  # bank head office or broker headquarters
    "THOST_FTDC_OL_Branch": '2',        # bank branch center or broker sales office
})
typedefDict["TThostFtdcOrganLevelType"] = "string"

#----------------------------------------------------------------------
# TFtdcProtocalIDType: protocol type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_PID_FutureProtocal": '0',        # futures-broker protocol
    "THOST_FTDC_PID_ICBCProtocal": '1',          # ICBC protocol
    "THOST_FTDC_PID_ABCProtocal": '2',           # Agricultural Bank of China protocol
    "THOST_FTDC_PID_CBCProtocal": '3',           # Bank of China protocol
    "THOST_FTDC_PID_CCBProtocal": '4',           # China Construction Bank protocol
    "THOST_FTDC_PID_BOCOMProtocal": '5',         # Bank of Communications protocol
    "THOST_FTDC_PID_FBTPlateFormProtocal": 'X',  # bank-futures transfer platform protocol
})
typedefDict["TThostFtdcProtocalIDType"] = "string"

#----------------------------------------------------------------------
# TFtdcConnectModeType: socket connection mode.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_CM_ShortConnect": '0',  # short-lived connection
    "THOST_FTDC_CM_LongConnect": '1',   # persistent connection
})
typedefDict["TThostFtdcConnectModeType"] = "string"

#----------------------------------------------------------------------
# TFtdcSyncModeType: socket communication mode.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_SRM_ASync": '0',  # asynchronous
    "THOST_FTDC_SRM_Sync": '1',   # synchronous
})
typedefDict["TThostFtdcSyncModeType"] = "string"

#----------------------------------------------------------------------
# TFtdcBankAccTypeType: bank account type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_BAT_BankBook": '1',    # passbook
    "THOST_FTDC_BAT_SavingCard": '2',  # debit card
    "THOST_FTDC_BAT_CreditCard": '3',  # credit card
})
typedefDict["TThostFtdcBankAccTypeType"] = "string"

#----------------------------------------------------------------------
# TFtdcFutureAccTypeType: futures-company account type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FAT_BankBook": '1',    # passbook
    "THOST_FTDC_FAT_SavingCard": '2',  # debit card
    "THOST_FTDC_FAT_CreditCard": '3',  # credit card
})
typedefDict["TThostFtdcFutureAccTypeType"] = "string"

#----------------------------------------------------------------------
# TFtdcOrganStatusType: connected-institution status.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_OS_Ready": '0',             # enabled
    "THOST_FTDC_OS_CheckIn": '1',           # signed in
    "THOST_FTDC_OS_CheckOut": '2',          # signed out
    "THOST_FTDC_OS_CheckFileArrived": '3',  # reconciliation file arrived
    "THOST_FTDC_OS_CheckDetail": '4',       # reconciling
    "THOST_FTDC_OS_DayEndClean": '5',       # day-end cleanup
    "THOST_FTDC_OS_Invalid": '9',           # deregistered
})
typedefDict["TThostFtdcOrganStatusType"] = "string"

#----------------------------------------------------------------------
# TFtdcCCBFeeModeType: CCB fee-charging mode.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_CCBFM_ByAmount": '1',  # charged by amount
    "THOST_FTDC_CCBFM_ByMonth": '2',   # charged by month
})
typedefDict["TThostFtdcCCBFeeModeType"] = "string"

#----------------------------------------------------------------------
# TFtdcCommApiTypeType: communication API type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_CAPIT_Client": '1',   # client
    "THOST_FTDC_CAPIT_Server": '2',   # server
    "THOST_FTDC_CAPIT_UserApi": '3',  # trading system UserApi
})
typedefDict["TThostFtdcCommApiTypeType"] = "string"
#----------------------------------------------------------------------
# Service identification types.
#----------------------------------------------------------------------
typedefDict["TThostFtdcServiceIDType"] = "int"          # service number
typedefDict["TThostFtdcServiceLineNoType"] = "int"      # service line number
typedefDict["TThostFtdcServiceNameType"] = "string"     # service name

#----------------------------------------------------------------------
# TFtdcLinkStatusType: link status.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_LS_Connected": '1',     # connected
    "THOST_FTDC_LS_Disconnected": '2',  # disconnected
})
typedefDict["TThostFtdcLinkStatusType"] = "string"

# Communication API pointer.
typedefDict["TThostFtdcCommApiPointerType"] = "int"

#----------------------------------------------------------------------
# TFtdcPwdFlagType: password verification flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_BPWDF_NoCheck": '0',       # no verification
    "THOST_FTDC_BPWDF_BlankCheck": '1',    # plaintext verification
    "THOST_FTDC_BPWDF_EncryptCheck": '2',  # ciphertext verification
})
typedefDict["TThostFtdcPwdFlagType"] = "string"

#----------------------------------------------------------------------
# TFtdcSecuAccTypeType: futures account type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_SAT_AccountID": '1',        # fund account
    "THOST_FTDC_SAT_CardID": '2',           # fund card number
    "THOST_FTDC_SAT_SHStockholderID": '3',  # Shanghai shareholder account
    "THOST_FTDC_SAT_SZStockholderID": '4',  # Shenzhen shareholder account
})
typedefDict["TThostFtdcSecuAccTypeType"] = "string"

#----------------------------------------------------------------------
# TFtdcTransferStatusType: transfer transaction status.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_TRFS_Normal": '0',    # normal
    "THOST_FTDC_TRFS_Repealed": '1',  # reversed
})
typedefDict["TThostFtdcTransferStatusType"] = "string"

#----------------------------------------------------------------------
# TFtdcSponsorTypeType: initiator type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_SPTYPE_Broker": '0',  # futures broker
    "THOST_FTDC_SPTYPE_Bank": '1',    # bank
})
typedefDict["TThostFtdcSponsorTypeType"] = "string"

#----------------------------------------------------------------------
# TFtdcReqRspTypeType: request/response category.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_REQRSP_Request": '0',   # request
    "THOST_FTDC_REQRSP_Response": '1',  # response
})
typedefDict["TThostFtdcReqRspTypeType"] = "string"

#----------------------------------------------------------------------
# TFtdcFBTUserEventTypeType: bank-futures transfer user event type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FBTUET_SignIn": '0',                  # sign-in
    "THOST_FTDC_FBTUET_FromBankToFuture": '1',        # bank to futures
    "THOST_FTDC_FBTUET_FromFutureToBank": '2',        # futures to bank
    "THOST_FTDC_FBTUET_OpenAccount": '3',             # open account
    "THOST_FTDC_FBTUET_CancelAccount": '4',           # close account
    "THOST_FTDC_FBTUET_ChangeAccount": '5',           # change bank account
    "THOST_FTDC_FBTUET_RepealFromBankToFuture": '6',  # reverse bank-to-futures
    "THOST_FTDC_FBTUET_RepealFromFutureToBank": '7',  # reverse futures-to-bank
    "THOST_FTDC_FBTUET_QueryBankAccount": '8',        # query bank account
    "THOST_FTDC_FBTUET_QueryFutureAccount": '9',      # query futures account
    "THOST_FTDC_FBTUET_SignOut": 'A',                 # sign-out
    "THOST_FTDC_FBTUET_SyncKey": 'B',                 # key synchronization
    "THOST_FTDC_FBTUET_Other": 'Z',                   # other
})
typedefDict["TThostFtdcFBTUserEventTypeType"] = "string"
#----------------------------------------------------------------------
# Bank / database bookkeeping scalar types.
#----------------------------------------------------------------------
typedefDict["TThostFtdcBankIDByBankType"] = "string"  # bank's own code
typedefDict["TThostFtdcBankOperNoType"] = "string"    # bank operator number
typedefDict["TThostFtdcBankCustNoType"] = "string"    # bank customer number
typedefDict["TThostFtdcDBOPSeqNoType"] = "int"        # increasing sequence number
typedefDict["TThostFtdcTableNameType"] = "string"     # FBT table name
typedefDict["TThostFtdcPKNameType"] = "string"        # FBT table primary-key name
typedefDict["TThostFtdcPKValueType"] = "string"       # FBT table primary-key value

#----------------------------------------------------------------------
# TFtdcDBOperationType: record operation type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_DBOP_Insert": '0',  # insert
    "THOST_FTDC_DBOP_Update": '1',  # update
    "THOST_FTDC_DBOP_Delete": '2',  # delete
})
typedefDict["TThostFtdcDBOperationType"] = "string"

#----------------------------------------------------------------------
# TFtdcSyncFlagType: synchronization flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_SYNF_Yes": '0',  # synchronized
    "THOST_FTDC_SYNF_No": '1',   # not synchronized
})
typedefDict["TThostFtdcSyncFlagType"] = "string"

# Synchronization target id.
typedefDict["TThostFtdcTargetIDType"] = "string"

#----------------------------------------------------------------------
# TFtdcSyncTypeType: synchronization type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_SYNT_OneOffSync": '0',     # one-off synchronization
    "THOST_FTDC_SYNT_TimerSync": '1',      # timed synchronization
    "THOST_FTDC_SYNT_TimerFullSync": '2',  # timed full synchronization
})
typedefDict["TThostFtdcSyncTypeType"] = "string"

#----------------------------------------------------------------------
# Foreign-exchange (FBE) scalar types.
#----------------------------------------------------------------------
typedefDict["TThostFtdcFBETimeType"] = "string"    # FX times of various kinds
typedefDict["TThostFtdcFBEBankNoType"] = "string"  # FX bank branch number
typedefDict["TThostFtdcFBECertNoType"] = "string"  # FX certificate number

#----------------------------------------------------------------------
# TFtdcExDirectionType: FX direction.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FBEDIR_Settlement": '0',  # FX settlement (sell foreign currency)
    "THOST_FTDC_FBEDIR_Sale": '1',        # FX sale (buy foreign currency)
})
typedefDict["TThostFtdcExDirectionType"] = "string"

typedefDict["TThostFtdcFBEBankAccountType"] = "string"      # FX bank account
typedefDict["TThostFtdcFBEBankAccountNameType"] = "string"  # FX bank account name
typedefDict["TThostFtdcFBEAmtType"] = "float"               # FX amounts of various kinds
typedefDict["TThostFtdcFBEBusinessTypeType"] = "string"     # FX business type
typedefDict["TThostFtdcFBEPostScriptType"] = "string"       # FX postscript
typedefDict["TThostFtdcFBERemarkType"] = "string"           # FX remark
typedefDict["TThostFtdcExRateType"] = "float"               # FX exchange rate

#----------------------------------------------------------------------
# TFtdcFBEResultFlagType: FX success flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FBERES_Success": '0',              # success
    "THOST_FTDC_FBERES_InsufficientBalance": '1',  # insufficient account balance
    "THOST_FTDC_FBERES_UnknownTrading": '8',       # transaction result unknown
    "THOST_FTDC_FBERES_Fail": 'x',                 # failure
})
typedefDict["TThostFtdcFBEResultFlagType"] = "string"

typedefDict["TThostFtdcFBERtnMsgType"] = "string"          # FX return message
typedefDict["TThostFtdcFBEExtendMsgType"] = "string"       # FX extended message
typedefDict["TThostFtdcFBEBusinessSerialType"] = "string"  # FX bookkeeping serial number
typedefDict["TThostFtdcFBESystemSerialType"] = "string"    # FX system serial number
typedefDict["TThostFtdcFBETotalExCntType"] = "int"         # FX total transaction count

#----------------------------------------------------------------------
# TFtdcFBEExchStatusType: FX transaction status.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FBEES_Normal": '0',      # normal
    "THOST_FTDC_FBEES_ReExchange": '1',  # transaction resend
})
typedefDict["TThostFtdcFBEExchStatusType"] = "string"

#----------------------------------------------------------------------
# TFtdcFBEFileFlagType: FX file flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FBEFG_DataPackage": '0',  # data package
    "THOST_FTDC_FBEFG_File": '1',         # file
})
typedefDict["TThostFtdcFBEFileFlagType"] = "string"
#----------------------------------------------------------------------
# TFtdcFBEAlreadyTradeType: FX already-traded flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FBEAT_NotTrade": '0',  # not traded
    "THOST_FTDC_FBEAT_Trade": '1',     # traded
})
typedefDict["TThostFtdcFBEAlreadyTradeType"] = "string"

# FX account opening bank.
typedefDict["TThostFtdcFBEOpenBankType"] = "string"

#----------------------------------------------------------------------
# TFtdcFBEUserEventTypeType: bank-futures FX user event type.
# (Key spelling "Detial" comes from the upstream C++ header; keep as-is.)
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FBEUET_SignIn": '0',            # sign-in
    "THOST_FTDC_FBEUET_Exchange": '1',          # exchange
    "THOST_FTDC_FBEUET_ReExchange": '2',        # exchange resend
    "THOST_FTDC_FBEUET_QueryBankAccount": '3',  # bank account query
    "THOST_FTDC_FBEUET_QueryExchDetial": '4',   # exchange detail query
    "THOST_FTDC_FBEUET_QueryExchSummary": '5',  # exchange summary query
    "THOST_FTDC_FBEUET_QueryExchRate": '6',     # exchange rate query
    "THOST_FTDC_FBEUET_CheckBankAccount": '7',  # reconciliation file notice
    "THOST_FTDC_FBEUET_SignOut": '8',           # sign-out
    "THOST_FTDC_FBEUET_Other": 'Z',             # other
})
typedefDict["TThostFtdcFBEUserEventTypeType"] = "string"

typedefDict["TThostFtdcFBEFileNameType"] = "string"     # FX-related file name
typedefDict["TThostFtdcFBEBatchSerialType"] = "string"  # FX batch number

#----------------------------------------------------------------------
# TFtdcFBEReqFlagType: FX send flag.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FBERF_UnProcessed": '0',  # unprocessed
    "THOST_FTDC_FBERF_WaitSend": '1',     # waiting to send
    "THOST_FTDC_FBERF_SendSuccess": '2',  # send succeeded
    "THOST_FTDC_FBERF_SendFailed": '3',   # send failed
    "THOST_FTDC_FBERF_WaitReSend": '4',   # waiting to resend
})
typedefDict["TThostFtdcFBEReqFlagType"] = "string"

#----------------------------------------------------------------------
# TFtdcNotifyClassType: risk notification class.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_NC_NOERROR": '0',    # normal
    "THOST_FTDC_NC_Warn": '1',       # warning
    "THOST_FTDC_NC_Call": '2',       # margin call
    "THOST_FTDC_NC_Force": '3',      # forced liquidation
    "THOST_FTDC_NC_CHUANCANG": '4',  # wiped out (negative equity)
    "THOST_FTDC_NC_Exception": '5',  # exception
})
typedefDict["TThostFtdcNotifyClassType"] = "string"

# Customer risk notification message.
# (Key spelling "Nofity" comes from the upstream C++ header; keep as-is.)
typedefDict["TThostFtdcRiskNofityInfoType"] = "string"
# Forced-liquidation scenario id.
typedefDict["TThostFtdcForceCloseSceneIdType"] = "string"

#----------------------------------------------------------------------
# TFtdcForceCloseTypeType: forced-liquidation order type.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_FCT_Manual": '0',  # manual forced liquidation
    "THOST_FTDC_FCT_Single": '1',  # single-investor assisted forced liquidation
    "THOST_FTDC_FCT_Group": '2',   # batch-investor assisted forced liquidation
})
typedefDict["TThostFtdcForceCloseTypeType"] = "string"

# Multiple product codes separated by '+', e.g. "cu+zn".
typedefDict["TThostFtdcInstrumentIDsType"] = "string"

#----------------------------------------------------------------------
# TFtdcRiskNotifyMethodType: risk notification channel.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_RNM_System": '0',  # system notification
    "THOST_FTDC_RNM_SMS": '1',     # SMS notification
    "THOST_FTDC_RNM_EMail": '2',   # e-mail notification
    "THOST_FTDC_RNM_Manual": '3',  # manual notification
})
typedefDict["TThostFtdcRiskNotifyMethodType"] = "string"

#----------------------------------------------------------------------
# TFtdcRiskNotifyStatusType: risk notification status.
#----------------------------------------------------------------------
defineDict.update({
    "THOST_FTDC_RNS_NotGen": '0',     # not generated
    "THOST_FTDC_RNS_Generated": '1',  # generated, not sent
    "THOST_FTDC_RNS_SendError": '2',  # send failed
    "THOST_FTDC_RNS_SendOk": '3',     # sent, not received
    "THOST_FTDC_RNS_Received": '4',   # received, not confirmed
    "THOST_FTDC_RNS_Confirmed": '5',  # confirmed
})
typedefDict["TThostFtdcRiskNotifyStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRiskUserEventType是一个风控用户操作事件类型
#//////////////////////////////////////////////////////////////////////
#导出数据
defineDict["THOST_FTDC_RUE_ExportData"] = '0'
typedefDict["TThostFtdcRiskUserEventType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcParamIDType是一个参数代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcParamIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcParamNameType是一个参数名类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcParamNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcParamValueType是一个参数值类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcParamValueType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcConditionalOrderSortTypeType是一个条件单索引条件类型
#//////////////////////////////////////////////////////////////////////
#使用最新价升序
defineDict["THOST_FTDC_COST_LastPriceAsc"] = '0'
#使用最新价降序
defineDict["THOST_FTDC_COST_LastPriceDesc"] = '1'
#使用卖价升序
defineDict["THOST_FTDC_COST_AskPriceAsc"] = '2'
#使用卖价降序
defineDict["THOST_FTDC_COST_AskPriceDesc"] = '3'
#使用买价升序
defineDict["THOST_FTDC_COST_BidPriceAsc"] = '4'
#使用买价降序
defineDict["THOST_FTDC_COST_BidPriceDesc"] = '5'
typedefDict["TThostFtdcConditionalOrderSortTypeType"] = "string"
# The sections below map CTP `THOST_FTDC_*` enum constants (into defineDict)
# and field typedefs (into typedefDict); they appear to be generated from the
# upstream Chinese CTP API header, so the code lines are kept byte-identical
# and only the comments are translated to English.
#//////////////////////////////////////////////////////////////////////
#TFtdcSendTypeType is a submission (reporting) status type
#//////////////////////////////////////////////////////////////////////
#Not sent
defineDict["THOST_FTDC_UOAST_NoSend"] = '0'
#Sent
defineDict["THOST_FTDC_UOAST_Sended"] = '1'
#Generated
defineDict["THOST_FTDC_UOAST_Generated"] = '2'
#Submission failed
defineDict["THOST_FTDC_UOAST_SendFail"] = '3'
#Received successfully
defineDict["THOST_FTDC_UOAST_Success"] = '4'
#Receive failed
defineDict["THOST_FTDC_UOAST_Fail"] = '5'
#Submission cancelled
defineDict["THOST_FTDC_UOAST_Cancel"] = '6'
typedefDict["TThostFtdcSendTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcClientIDStatusType is a trading-code (client ID) status type
#//////////////////////////////////////////////////////////////////////
#Not applied
defineDict["THOST_FTDC_UOACS_NoApply"] = '1'
#Application submitted
defineDict["THOST_FTDC_UOACS_Submited"] = '2'
#Application sent
defineDict["THOST_FTDC_UOACS_Sended"] = '3'
#Completed
defineDict["THOST_FTDC_UOACS_Success"] = '4'
#Refused
defineDict["THOST_FTDC_UOACS_Refuse"] = '5'
#Trading code cancelled
defineDict["THOST_FTDC_UOACS_Cancel"] = '6'
typedefDict["TThostFtdcClientIDStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIndustryIDType is an industry code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcIndustryIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcQuestionIDType is a specific-information number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcQuestionIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcQuestionContentType is a specific-information description type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcQuestionContentType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOptionIDType is an option (choice) number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOptionIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOptionContentType is an option (choice) description type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOptionContentType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcQuestionTypeType is a specific-information question type
#//////////////////////////////////////////////////////////////////////
#Single choice
defineDict["THOST_FTDC_QT_Radio"] = '1'
#Multiple choice
defineDict["THOST_FTDC_QT_Option"] = '2'
#Fill in the blank
defineDict["THOST_FTDC_QT_Blank"] = '3'
typedefDict["TThostFtdcQuestionTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcProcessIDType is a business flow number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcProcessIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSeqNoType is a sequence number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSeqNoType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcUOAProcessStatusType is a process status type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUOAProcessStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcProcessTypeType is a process function type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcProcessTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBusinessTypeType is a business type
#//////////////////////////////////////////////////////////////////////
#Request
defineDict["THOST_FTDC_BT_Request"] = '1'
#Response
defineDict["THOST_FTDC_BT_Response"] = '2'
#Notice
defineDict["THOST_FTDC_BT_Notice"] = '3'
typedefDict["TThostFtdcBusinessTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCfmmcReturnCodeType is a monitoring-center return code type
#//////////////////////////////////////////////////////////////////////
#Success
defineDict["THOST_FTDC_CRC_Success"] = '0'
#This client already has a process in progress
defineDict["THOST_FTDC_CRC_Working"] = '1'
#Client information check failed at the monitoring center
defineDict["THOST_FTDC_CRC_InfoFail"] = '2'
#Real-name (ID card) check failed at the monitoring center
defineDict["THOST_FTDC_CRC_IDCardFail"] = '3'
#Other error
defineDict["THOST_FTDC_CRC_OtherFail"] = '4'
typedefDict["TThostFtdcCfmmcReturnCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExReturnCodeType is an exchange return code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcExReturnCodeType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcClientTypeType is a client type
#//////////////////////////////////////////////////////////////////////
#All
defineDict["THOST_FTDC_CfMMCCT_All"] = '0'
#Individual
defineDict["THOST_FTDC_CfMMCCT_Person"] = '1'
#Institution (company)
defineDict["THOST_FTDC_CfMMCCT_Company"] = '2'
#Other
defineDict["THOST_FTDC_CfMMCCT_Other"] = '3'
#Special legal entity
defineDict["THOST_FTDC_CfMMCCT_SpecialOrgan"] = '4'
#Asset-management account
defineDict["THOST_FTDC_CfMMCCT_Asset"] = '5'
typedefDict["TThostFtdcClientTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeIDTypeType is an exchange identifier type
#//////////////////////////////////////////////////////////////////////
#Shanghai Futures Exchange
defineDict["THOST_FTDC_EIDT_SHFE"] = 'S'
#Zhengzhou Commodity Exchange
defineDict["THOST_FTDC_EIDT_CZCE"] = 'Z'
#Dalian Commodity Exchange
defineDict["THOST_FTDC_EIDT_DCE"] = 'D'
#China Financial Futures Exchange
defineDict["THOST_FTDC_EIDT_CFFEX"] = 'J'
#Shanghai International Energy Exchange Co., Ltd.
defineDict["THOST_FTDC_EIDT_INE"] = 'N'
typedefDict["TThostFtdcExchangeIDTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExClientIDTypeType is a trading-code category type
#//////////////////////////////////////////////////////////////////////
#Hedge
defineDict["THOST_FTDC_ECIDT_Hedge"] = '1'
#Arbitrage
defineDict["THOST_FTDC_ECIDT_Arbitrage"] = '2'
#Speculation
defineDict["THOST_FTDC_ECIDT_Speculation"] = '3'
typedefDict["TThostFtdcExClientIDTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcClientClassifyType is a client classification code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcClientClassifyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUOAOrganTypeType is an organization-nature type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUOAOrganTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUOACountryCodeType is a country code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUOACountryCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAreaCodeType is an area (dialing) code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAreaCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFuturesIDType is the code assigned to a client by the monitoring center
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFuturesIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCffmcDateType is a date type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCffmcDateType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCffmcTimeType is a time type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCffmcTimeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcNocIDType is an organization code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcNocIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUpdateFlagType is an update status type
#//////////////////////////////////////////////////////////////////////
#Not updated
defineDict["THOST_FTDC_UF_NoUpdate"] = '0'
#Full information update succeeded
defineDict["THOST_FTDC_UF_Success"] = '1'
#Full information update failed
defineDict["THOST_FTDC_UF_Fail"] = '2'
#Trading-code update succeeded
defineDict["THOST_FTDC_UF_TCSuccess"] = '3'
#Trading-code update failed
defineDict["THOST_FTDC_UF_TCFail"] = '4'
#Discarded
defineDict["THOST_FTDC_UF_Cancel"] = '5'
typedefDict["TThostFtdcUpdateFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcApplyOperateIDType is an application action type
#//////////////////////////////////////////////////////////////////////
#Open investor account
defineDict["THOST_FTDC_AOID_OpenInvestor"] = '1'
#Modify identity (ID card) information
defineDict["THOST_FTDC_AOID_ModifyIDCard"] = '2'
#Modify general (non-ID) information
defineDict["THOST_FTDC_AOID_ModifyNoIDCard"] = '3'
#Apply for trading code
defineDict["THOST_FTDC_AOID_ApplyTradingCode"] = '4'
#Cancel trading code
defineDict["THOST_FTDC_AOID_CancelTradingCode"] = '5'
#Close investor account
defineDict["THOST_FTDC_AOID_CancelInvestor"] = '6'
#Freeze (dormant) account
# NOTE(review): value '7' is unused here; this gap mirrors the upstream header.
defineDict["THOST_FTDC_AOID_FreezeAccount"] = '8'
#Activate a frozen (dormant) account
defineDict["THOST_FTDC_AOID_ActiveFreezeAccount"] = '9'
typedefDict["TThostFtdcApplyOperateIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcApplyStatusIDType is an application status type
#//////////////////////////////////////////////////////////////////////
#Incomplete
defineDict["THOST_FTDC_ASID_NoComplete"] = '1'
#Submitted
defineDict["THOST_FTDC_ASID_Submited"] = '2'
#Reviewed
defineDict["THOST_FTDC_ASID_Checked"] = '3'
#Refused
defineDict["THOST_FTDC_ASID_Refused"] = '4'
#Deleted
defineDict["THOST_FTDC_ASID_Deleted"] = '5'
typedefDict["TThostFtdcApplyStatusIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSendMethodType is a sending method type
#//////////////////////////////////////////////////////////////////////
#File sending
# NOTE(review): the file/electronic labels look swapped relative to the
# ByAPI/ByFile constant names, but this pairing mirrors the upstream CTP
# header — kept as-is; confirm against the vendor documentation.
defineDict["THOST_FTDC_UOASM_ByAPI"] = '1'
#Electronic sending
defineDict["THOST_FTDC_UOASM_ByFile"] = '2'
typedefDict["TThostFtdcSendMethodType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcEventTypeType is a business operation type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcEventTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcEventModeType is an operation method type
#//////////////////////////////////////////////////////////////////////
#Add
defineDict["THOST_FTDC_EvM_ADD"] = '1'
#Update
defineDict["THOST_FTDC_EvM_UPDATE"] = '2'
#Delete
defineDict["THOST_FTDC_EvM_DELETE"] = '3'
#Review (double-check)
defineDict["THOST_FTDC_EvM_CHECK"] = '4'
#Copy
defineDict["THOST_FTDC_EvM_COPY"] = '5'
#Cancel
defineDict["THOST_FTDC_EvM_CANCEL"] = '6'
#Reverse
defineDict["THOST_FTDC_EvM_Reverse"] = '7'
typedefDict["TThostFtdcEventModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUOAAutoSendType is a unified-account-opening auto-send type
#//////////////////////////////////////////////////////////////////////
#Auto send and auto receive
defineDict["THOST_FTDC_UOAA_ASR"] = '1'
#Auto send, no auto receive
defineDict["THOST_FTDC_UOAA_ASNR"] = '2'
#No auto send, auto receive
defineDict["THOST_FTDC_UOAA_NSAR"] = '3'
#No auto send, no auto receive
defineDict["THOST_FTDC_UOAA_NSR"] = '4'
typedefDict["TThostFtdcUOAAutoSendType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcQueryDepthType is a query depth type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcQueryDepthType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcDataCenterIDType is a data center identifier type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDataCenterIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcFlowIDType is a process (flow) ID type
#//////////////////////////////////////////////////////////////////////
#Investor-to-investor-group mapping setup
defineDict["THOST_FTDC_EvM_InvestorGroupFlow"] = '1'
#Investor commission rate setup
defineDict["THOST_FTDC_EvM_InvestorRate"] = '2'
#Investor commission-rate template relation setup
defineDict["THOST_FTDC_EvM_InvestorCommRateModel"] = '3'
typedefDict["TThostFtdcFlowIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCheckLevelType is a review level type
#//////////////////////////////////////////////////////////////////////
#Zero-level review
defineDict["THOST_FTDC_CL_Zero"] = '0'
#One-level review
defineDict["THOST_FTDC_CL_One"] = '1'
#Two-level review
defineDict["THOST_FTDC_CL_Two"] = '2'
typedefDict["TThostFtdcCheckLevelType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCheckNoType is an operation count type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCheckNoType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcCheckStatusType is a review status type
#//////////////////////////////////////////////////////////////////////
#Not reviewed
defineDict["THOST_FTDC_CHS_Init"] = '0'
#Under review
defineDict["THOST_FTDC_CHS_Checking"] = '1'
#Reviewed
defineDict["THOST_FTDC_CHS_Checked"] = '2'
#Refused
defineDict["THOST_FTDC_CHS_Refuse"] = '3'
#Voided
defineDict["THOST_FTDC_CHS_Cancel"] = '4'
typedefDict["TThostFtdcCheckStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUsedStatusType is an effectiveness status type
#//////////////////////////////////////////////////////////////////////
#Not yet in effect
defineDict["THOST_FTDC_CHU_Unused"] = '0'
#In effect
defineDict["THOST_FTDC_CHU_Used"] = '1'
#Activation failed
defineDict["THOST_FTDC_CHU_Fail"] = '2'
typedefDict["TThostFtdcUsedStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRateTemplateNameType is a (rate) template name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRateTemplateNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPropertyStringType is an investment-property field used for queries
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPropertyStringType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankAcountOriginType is an account origin type
#//////////////////////////////////////////////////////////////////////
#Manual entry
defineDict["THOST_FTDC_BAO_ByAccProperty"] = '0'
#Bank-futures transfer
defineDict["THOST_FTDC_BAO_ByFBTransfer"] = '1'
typedefDict["TThostFtdcBankAcountOriginType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMonthBillTradeSumType is a monthly-statement trade aggregation mode
#//////////////////////////////////////////////////////////////////////
#Same day, same instrument
defineDict["THOST_FTDC_MBTS_ByInstrument"] = '0'
#Same day, same instrument, same price
defineDict["THOST_FTDC_MBTS_ByDayInsPrc"] = '1'
#Same instrument
defineDict["THOST_FTDC_MBTS_ByDayIns"] = '2'
typedefDict["TThostFtdcMonthBillTradeSumType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFBTTradeCodeEnumType is a bank-futures transaction code enumeration
#//////////////////////////////////////////////////////////////////////
#Bank-initiated bank-to-broker transfer
defineDict["THOST_FTDC_FTC_BankLaunchBankToBroker"] = '102001'
#Broker-initiated bank-to-broker transfer
defineDict["THOST_FTDC_FTC_BrokerLaunchBankToBroker"] = '202001'
#Bank-initiated broker-to-bank transfer
defineDict["THOST_FTDC_FTC_BankLaunchBrokerToBank"] = '102002'
#Broker-initiated broker-to-bank transfer
defineDict["THOST_FTDC_FTC_BrokerLaunchBrokerToBank"] = '202002'
typedefDict["TThostFtdcFBTTradeCodeEnumType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRateTemplateIDType is a (rate) template code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRateTemplateIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRiskRateType is a risk ratio type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRiskRateType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTimestampType is a timestamp type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTimestampType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvestorIDRuleNameType is an ID-range rule name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInvestorIDRuleNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvestorIDRuleExprType is an ID-range rule expression type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInvestorIDRuleExprType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcLastDriftType is the previous OTP drift value type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcLastDriftType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcLastSuccessType is the previous OTP success value type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcLastSuccessType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcAuthKeyType is a token secret key type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAuthKeyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSerialNumberType is a serial number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSerialNumberType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOTPTypeType is a dynamic (one-time) token type
#//////////////////////////////////////////////////////////////////////
#No dynamic token
defineDict["THOST_FTDC_OTP_NONE"] = '0'
#Time-based token (TOTP)
defineDict["THOST_FTDC_OTP_TOTP"] = '1'
typedefDict["TThostFtdcOTPTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOTPVendorsIDType is a dynamic token vendor ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOTPVendorsIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOTPVendorsNameType is a dynamic token vendor name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOTPVendorsNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOTPStatusType is a dynamic token status type
#//////////////////////////////////////////////////////////////////////
#Unused
defineDict["THOST_FTDC_OTPS_Unused"] = '0'
#Used
defineDict["THOST_FTDC_OTPS_Used"] = '1'
#Disused (deregistered)
defineDict["THOST_FTDC_OTPS_Disuse"] = '2'
typedefDict["TThostFtdcOTPStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBrokerUserTypeType is a brokerage firm user type
#//////////////////////////////////////////////////////////////////////
#Investor
defineDict["THOST_FTDC_BUT_Investor"] = '1'
#Operator (broker user)
defineDict["THOST_FTDC_BUT_BrokerUser"] = '2'
typedefDict["TThostFtdcBrokerUserTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFutureTypeType is a futures category type
#//////////////////////////////////////////////////////////////////////
#Commodity futures
defineDict["THOST_FTDC_FUTT_Commodity"] = '1'
#Financial futures
defineDict["THOST_FTDC_FUTT_Financial"] = '2'
typedefDict["TThostFtdcFutureTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundEventTypeType is a fund-management operation type
#//////////////////////////////////////////////////////////////////////
#Transfer limit
defineDict["THOST_FTDC_FET_Restriction"] = '0'
#Same-day transfer limit
defineDict["THOST_FTDC_FET_TodayRestriction"] = '1'
#Broker transfer journal
defineDict["THOST_FTDC_FET_Transfer"] = '2'
#Fund freeze
defineDict["THOST_FTDC_FET_Credit"] = '3'
#Investor withdrawable-funds ratio
defineDict["THOST_FTDC_FET_InvestorWithdrawAlm"] = '4'
#Per-bank-account transfer limit
defineDict["THOST_FTDC_FET_BankRestriction"] = '5'
#Bank-futures registered account
defineDict["THOST_FTDC_FET_Accountregister"] = '6'
#Exchange fund deposit/withdrawal
defineDict["THOST_FTDC_FET_ExchangeFundIO"] = '7'
#Investor fund deposit/withdrawal
defineDict["THOST_FTDC_FET_InvestorFundIO"] = '8'
typedefDict["TThostFtdcFundEventTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAccountSourceTypeType is a fund account origin type
#//////////////////////////////////////////////////////////////////////
#Bank-futures synchronization
defineDict["THOST_FTDC_AST_FBTransfer"] = '0'
#Manual entry
defineDict["THOST_FTDC_AST_ManualEntry"] = '1'
typedefDict["TThostFtdcAccountSourceTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCodeSourceTypeType is a trading-code origin type
#//////////////////////////////////////////////////////////////////////
#Unified account opening (standardized)
defineDict["THOST_FTDC_CST_UnifyAccount"] = '0'
#Manual entry (non-standardized)
defineDict["THOST_FTDC_CST_ManualEntry"] = '1'
typedefDict["TThostFtdcCodeSourceTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserRangeType is an operator scope type
#//////////////////////////////////////////////////////////////////////
#All operators
defineDict["THOST_FTDC_UR_All"] = '0'
#A single operator
defineDict["THOST_FTDC_UR_Single"] = '1'
typedefDict["TThostFtdcUserRangeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTimeSpanType is a time span type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTimeSpanType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcImportSequenceIDType is a dynamic-token import batch number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcImportSequenceIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcByGroupType is the per-client aggregation mode for trade statistics
#//////////////////////////////////////////////////////////////////////
#Aggregate by investor
defineDict["THOST_FTDC_BG_Investor"] = '2'
#Aggregate by group
defineDict["THOST_FTDC_BG_Group"] = '1'
typedefDict["TThostFtdcByGroupType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeSumStatModeType is the per-scope aggregation mode for trade statistics
#//////////////////////////////////////////////////////////////////////
#Aggregate by instrument
defineDict["THOST_FTDC_TSSM_Instrument"] = '1'
#Aggregate by product
defineDict["THOST_FTDC_TSSM_Product"] = '2'
#Aggregate by exchange
defineDict["THOST_FTDC_TSSM_Exchange"] = '3'
typedefDict["TThostFtdcTradeSumStatModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcComTypeType is a combination trade type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcComTypeType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserProductIDType is a product identifier type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUserProductIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserProductNameType is a product name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUserProductNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUserProductMemoType is a product description type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUserProductMemoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCCancelFlagType is an add-or-change flag type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCCancelFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCDateType is a date type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCDateType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCInvestorNameType is a client name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCInvestorNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCOpenInvestorNameType is a client name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCOpenInvestorNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCInvestorIDType is a client code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCInvestorIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCIdentifiedCardNoType is an ID document number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCIdentifiedCardNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCClientIDType is a trading code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCClientIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCBankFlagType is a bank identifier type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCBankFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCBankAccountType is a bank account type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCBankAccountType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCOpenNameType is an account opener name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCOpenNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCMemoType is a remark/description type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCMemoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCTimeType is a time type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCTimeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCTradeIDType is a trade journal number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCTradeIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCExchangeInstIDType is an instrument code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCExchangeInstIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCMortgageNameType is a collateral (pledged item) name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCMortgageNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCReasonType is a reason type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCReasonType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcIsSettlementType is a non-clearing-member flag type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcIsSettlementType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCMoneyType is a money amount type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCMoneyType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCPriceType is a price type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCPriceType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCOptionsTypeType is an option category type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCOptionsTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCStrikePriceType is a strike price type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCStrikePriceType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCTargetProductIDType is an underlying product type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCTargetProductIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCTargetInstrIDType is an underlying instrument type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCTargetInstrIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCommModelNameType is a commission-rate template name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCommModelNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCommModelMemoType is a commission-rate template remark type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCommModelMemoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExprSetModeType is a date-expression setup mode type
#//////////////////////////////////////////////////////////////////////
#Set relative to existing rules
defineDict["THOST_FTDC_ESM_Relative"] = '1'
#Typical (preset) setup
defineDict["THOST_FTDC_ESM_Typical"] = '2'
typedefDict["TThostFtdcExprSetModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRateInvestorRangeType is an investor scope type
#//////////////////////////////////////////////////////////////////////
#Company standard
defineDict["THOST_FTDC_RIR_All"] = '1'
#Template (model)
defineDict["THOST_FTDC_RIR_Model"] = '2'
#A single investor
defineDict["THOST_FTDC_RIR_Single"] = '3'
typedefDict["TThostFtdcRateInvestorRangeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAgentBrokerIDType is an agent brokerage company code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAgentBrokerIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDRIdentityIDType is a trading center code type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDRIdentityIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcDRIdentityNameType is a trading center name type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDRIdentityNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDBLinkIDType is a DBLink identifier type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDBLinkIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSyncDataStatusType is a primary/secondary system data-sync status type
#//////////////////////////////////////////////////////////////////////
#Not synchronized
defineDict["THOST_FTDC_SDS_Initialize"] = '0'
#Synchronizing
defineDict["THOST_FTDC_SDS_Settlementing"] = '1'
#Synchronized
defineDict["THOST_FTDC_SDS_Settlemented"] = '2'
typedefDict["TThostFtdcSyncDataStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeSourceType is a trade source type
#//////////////////////////////////////////////////////////////////////
#From the exchange's normal trade return
defineDict["THOST_FTDC_TSRC_NORMAL"] = '0'
#From a query
defineDict["THOST_FTDC_TSRC_QUERY"] = '1'
typedefDict["TThostFtdcTradeSourceType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFlexStatModeType is a product/instrument statistics mode type
#//////////////////////////////////////////////////////////////////////
#Statistics by product
defineDict["THOST_FTDC_FSM_Product"] = '1'
#Statistics by exchange
defineDict["THOST_FTDC_FSM_Exchange"] = '2'
#Statistics over everything
defineDict["THOST_FTDC_FSM_All"] = '3'
typedefDict["TThostFtdcFlexStatModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcByInvestorRangeType is an investor-scope statistics mode type
#//////////////////////////////////////////////////////////////////////
#Statistics by property
defineDict["THOST_FTDC_BIR_Property"] = '1'
#Statistics over all investors
defineDict["THOST_FTDC_BIR_All"] = '2'
typedefDict["TThostFtdcByInvestorRangeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSRiskRateType is a risk ratio type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSRiskRateType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSequenceNo12Type is a sequence number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSequenceNo12Type"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcPropertyInvestorRangeType is an investor scope type
#//////////////////////////////////////////////////////////////////////
#All investors
defineDict["THOST_FTDC_PIR_All"] = '1'
#Investors with a given property
defineDict["THOST_FTDC_PIR_Property"] = '2'
#A single investor
defineDict["THOST_FTDC_PIR_Single"] = '3'
typedefDict["TThostFtdcPropertyInvestorRangeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFileStatusType is a file status type
#//////////////////////////////////////////////////////////////////////
#Not generated
defineDict["THOST_FTDC_FIS_NoCreate"] = '0'
#Generated
defineDict["THOST_FTDC_FIS_Created"] = '1'
#Generation failed
defineDict["THOST_FTDC_FIS_Failed"] = '2'
typedefDict["TThostFtdcFileStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFileGenStyleType is a file generation method type
#//////////////////////////////////////////////////////////////////////
#Distributed (file transmitted down)
defineDict["THOST_FTDC_FGS_FileTransmit"] = '0'
#Generated locally
defineDict["THOST_FTDC_FGS_FileGen"] = '1'
typedefDict["TThostFtdcFileGenStyleType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSysOperModeType is a system-log operation method type
#//////////////////////////////////////////////////////////////////////
#Add
defineDict["THOST_FTDC_SoM_Add"] = '1'
#Update
defineDict["THOST_FTDC_SoM_Update"] = '2'
#Delete
defineDict["THOST_FTDC_SoM_Delete"] = '3'
#Copy
defineDict["THOST_FTDC_SoM_Copy"] = '4'
#Activate
defineDict["THOST_FTDC_SoM_AcTive"] = '5'
#Cancel (deregister)
defineDict["THOST_FTDC_SoM_CanCel"] = '6'
#Reset
defineDict["THOST_FTDC_SoM_ReSet"] = '7'
typedefDict["TThostFtdcSysOperModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSysOperTypeType is a system-log operation type
#//////////////////////////////////////////////////////////////////////
#Change operator password
defineDict["THOST_FTDC_SoT_UpdatePassword"] = '0'
#Operator-to-department (org structure) relation
defineDict["THOST_FTDC_SoT_UserDepartment"] = '1'
#Role management
defineDict["THOST_FTDC_SoT_RoleManager"] = '2'
#Role function setup
defineDict["THOST_FTDC_SoT_RoleFunction"] = '3'
#Base parameter setup
defineDict["THOST_FTDC_SoT_BaseParam"] = '4'
#Set operator
defineDict["THOST_FTDC_SoT_SetUserID"] = '5'
#User role setup
defineDict["THOST_FTDC_SoT_SetUserRole"] = '6'
#User IP restriction
defineDict["THOST_FTDC_SoT_UserIpRestriction"] = '7'
#Organization structure management
defineDict["THOST_FTDC_SoT_DepartmentManager"] = '8'
#Copy organization structure to query categories
defineDict["THOST_FTDC_SoT_DepartmentCopy"] = '9'
#Trading code management
defineDict["THOST_FTDC_SoT_Tradingcode"] = 'A'
#Investor status maintenance
defineDict["THOST_FTDC_SoT_InvestorStatus"] = 'B'
#Investor authority management
defineDict["THOST_FTDC_SoT_InvestorAuthority"] = 'C'
#Property setup
defineDict["THOST_FTDC_SoT_PropertySet"] = 'D'
#Reset investor password
defineDict["THOST_FTDC_SoT_ReSetInvestorPasswd"] = 'E'
#Investor personalized-information maintenance
defineDict["THOST_FTDC_SoT_InvestorPersonalityInfo"] = 'F'
typedefDict["TThostFtdcSysOperTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCDataQueyTypeType is a reported-data query type
#(the "Quey" misspelling mirrors the upstream header and must be kept)
#//////////////////////////////////////////////////////////////////////
#Query data reported on the current trading day
defineDict["THOST_FTDC_CSRCQ_Current"] = '0'
#Query historically reported agent-broker data
defineDict["THOST_FTDC_CSRCQ_History"] = '1'
typedefDict["TThostFtdcCSRCDataQueyTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFreezeStatusType is a dormancy status type
#//////////////////////////////////////////////////////////////////////
#Active
defineDict["THOST_FTDC_FRS_Normal"] = '1'
#Dormant (frozen)
defineDict["THOST_FTDC_FRS_Freeze"] = '0'
typedefDict["TThostFtdcFreezeStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcStandardStatusType是一个规范状态类型
#//////////////////////////////////////////////////////////////////////
#已规范
defineDict["THOST_FTDC_STST_Standard"] = '0'
#未规范
defineDict["THOST_FTDC_STST_NonStandard"] = '1'
typedefDict["TThostFtdcStandardStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCFreezeStatusType是一个休眠状态类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCFreezeStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRightParamTypeType是一个配置类型类型
#//////////////////////////////////////////////////////////////////////
#休眠户
defineDict["THOST_FTDC_RPT_Freeze"] = '1'
#激活休眠户
defineDict["THOST_FTDC_RPT_FreezeActive"] = '2'
#开仓权限限制
defineDict["THOST_FTDC_RPT_OpenLimit"] = '3'
#解除开仓权限限制
defineDict["THOST_FTDC_RPT_RelieveOpenLimit"] = '4'
typedefDict["TThostFtdcRightParamTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRightTemplateIDType是一个模板代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRightTemplateIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRightTemplateNameType是一个模板名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRightTemplateNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDataStatusType是一个反洗钱审核表数据状态类型
#//////////////////////////////////////////////////////////////////////
#正常
defineDict["THOST_FTDC_AMLDS_Normal"] = '0'
#已删除
defineDict["THOST_FTDC_AMLDS_Deleted"] = '1'
typedefDict["TThostFtdcDataStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAMLCheckStatusType是一个审核状态类型
#//////////////////////////////////////////////////////////////////////
#未复核
defineDict["THOST_FTDC_AMLCHS_Init"] = '0'
#复核中
defineDict["THOST_FTDC_AMLCHS_Checking"] = '1'
#已复核
defineDict["THOST_FTDC_AMLCHS_Checked"] = '2'
#拒绝上报
defineDict["THOST_FTDC_AMLCHS_RefuseReport"] = '3'
typedefDict["TThostFtdcAMLCheckStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAmlDateTypeType是一个日期类型类型
#//////////////////////////////////////////////////////////////////////
#检查日期
defineDict["THOST_FTDC_AMLDT_DrawDay"] = '0'
#发生日期
defineDict["THOST_FTDC_AMLDT_TouchDay"] = '1'
typedefDict["TThostFtdcAmlDateTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAmlCheckLevelType是一个审核级别类型
#//////////////////////////////////////////////////////////////////////
#零级审核
defineDict["THOST_FTDC_AMLCL_CheckLevel0"] = '0'
#一级审核
defineDict["THOST_FTDC_AMLCL_CheckLevel1"] = '1'
#二级审核
defineDict["THOST_FTDC_AMLCL_CheckLevel2"] = '2'
#三级审核
defineDict["THOST_FTDC_AMLCL_CheckLevel3"] = '3'
typedefDict["TThostFtdcAmlCheckLevelType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAmlCheckFlowType是一个反洗钱数据抽取审核流程类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAmlCheckFlowType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDataTypeType是一个数据类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDataTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExportFileTypeType是一个导出文件类型类型
#//////////////////////////////////////////////////////////////////////
#CSV
defineDict["THOST_FTDC_EFT_CSV"] = '0'
#Excel
defineDict["THOST_FTDC_EFT_EXCEL"] = '1'
#DBF
defineDict["THOST_FTDC_EFT_DBF"] = '2'
typedefDict["TThostFtdcExportFileTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSettleManagerTypeType是一个结算配置类型类型
#//////////////////////////////////////////////////////////////////////
#结算前准备
defineDict["THOST_FTDC_SMT_Before"] = '1'
#结算
defineDict["THOST_FTDC_SMT_Settlement"] = '2'
#结算后核对
defineDict["THOST_FTDC_SMT_After"] = '3'
#结算后处理
defineDict["THOST_FTDC_SMT_Settlemented"] = '4'
typedefDict["TThostFtdcSettleManagerTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSettleManagerIDType是一个结算配置代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSettleManagerIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSettleManagerNameType是一个结算配置名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSettleManagerNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSettleManagerLevelType是一个结算配置等级类型
#//////////////////////////////////////////////////////////////////////
#必要
defineDict["THOST_FTDC_SML_Must"] = '1'
#警告
defineDict["THOST_FTDC_SML_Alarm"] = '2'
#提示
defineDict["THOST_FTDC_SML_Prompt"] = '3'
#不检查
defineDict["THOST_FTDC_SML_Ignore"] = '4'
typedefDict["TThostFtdcSettleManagerLevelType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSettleManagerGroupType是一个模块分组类型
#//////////////////////////////////////////////////////////////////////
#交易所核对
defineDict["THOST_FTDC_SMG_Exhcange"] = '1'
#内部核对
defineDict["THOST_FTDC_SMG_ASP"] = '2'
#上报数据核对
defineDict["THOST_FTDC_SMG_CSRC"] = '3'
typedefDict["TThostFtdcSettleManagerGroupType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCheckResultMemoType是一个核对结果说明类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCheckResultMemoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFunctionUrlType是一个功能链接类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcFunctionUrlType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAuthInfoType是一个客户端认证信息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAuthInfoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAuthCodeType是一个客户端认证码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAuthCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcLimitUseTypeType是一个保值额度使用类型类型
#//////////////////////////////////////////////////////////////////////
#可重复使用
defineDict["THOST_FTDC_LUT_Repeatable"] = '1'
#不可重复使用
defineDict["THOST_FTDC_LUT_Unrepeatable"] = '2'
typedefDict["TThostFtdcLimitUseTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDataResourceType是一个数据来源类型
#//////////////////////////////////////////////////////////////////////
#本系统
defineDict["THOST_FTDC_DAR_Settle"] = '1'
#交易所
defineDict["THOST_FTDC_DAR_Exchange"] = '2'
#报送数据
defineDict["THOST_FTDC_DAR_CSRC"] = '3'
typedefDict["TThostFtdcDataResourceType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMarginTypeType是一个保证金类型类型
#//////////////////////////////////////////////////////////////////////
#交易所保证金率
defineDict["THOST_FTDC_MGT_ExchMarginRate"] = '0'
#投资者保证金率
defineDict["THOST_FTDC_MGT_InstrMarginRate"] = '1'
#投资者交易保证金率
defineDict["THOST_FTDC_MGT_InstrMarginRateTrade"] = '2'
typedefDict["TThostFtdcMarginTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcActiveTypeType是一个生效类型类型
#//////////////////////////////////////////////////////////////////////
#仅当日生效
defineDict["THOST_FTDC_ACT_Intraday"] = '1'
#长期生效
defineDict["THOST_FTDC_ACT_Long"] = '2'
typedefDict["TThostFtdcActiveTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMarginRateTypeType是一个冲突保证金率类型类型
#//////////////////////////////////////////////////////////////////////
#交易所保证金率
defineDict["THOST_FTDC_MRT_Exchange"] = '1'
#投资者保证金率
defineDict["THOST_FTDC_MRT_Investor"] = '2'
#投资者交易保证金率
defineDict["THOST_FTDC_MRT_InvestorTrade"] = '3'
typedefDict["TThostFtdcMarginRateTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBackUpStatusType是一个备份数据状态类型
#//////////////////////////////////////////////////////////////////////
#未生成备份数据
defineDict["THOST_FTDC_BUS_UnBak"] = '0'
#备份数据生成中
defineDict["THOST_FTDC_BUS_BakUp"] = '1'
#已生成备份数据
defineDict["THOST_FTDC_BUS_BakUped"] = '2'
#备份数据失败
defineDict["THOST_FTDC_BUS_BakFail"] = '3'
typedefDict["TThostFtdcBackUpStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInitSettlementType是一个结算初始化状态类型
#//////////////////////////////////////////////////////////////////////
#结算初始化未开始
defineDict["THOST_FTDC_SIS_UnInitialize"] = '0'
#结算初始化中
defineDict["THOST_FTDC_SIS_Initialize"] = '1'
#结算初始化完成
defineDict["THOST_FTDC_SIS_Initialized"] = '2'
typedefDict["TThostFtdcInitSettlementType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcReportStatusType是一个报表数据生成状态类型
#//////////////////////////////////////////////////////////////////////
#未生成报表数据
defineDict["THOST_FTDC_SRS_NoCreate"] = '0'
#报表数据生成中
defineDict["THOST_FTDC_SRS_Create"] = '1'
#已生成报表数据
defineDict["THOST_FTDC_SRS_Created"] = '2'
#生成报表数据失败
defineDict["THOST_FTDC_SRS_CreateFail"] = '3'
typedefDict["TThostFtdcReportStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSaveStatusType是一个数据归档状态类型
#//////////////////////////////////////////////////////////////////////
#归档未完成
defineDict["THOST_FTDC_SSS_UnSaveData"] = '0'
#归档完成
defineDict["THOST_FTDC_SSS_SaveDatad"] = '1'
typedefDict["TThostFtdcSaveStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSettArchiveStatusType是一个结算确认数据归档状态类型
#//////////////////////////////////////////////////////////////////////
#未归档数据
defineDict["THOST_FTDC_SAS_UnArchived"] = '0'
#数据归档中
defineDict["THOST_FTDC_SAS_Archiving"] = '1'
#已归档数据
defineDict["THOST_FTDC_SAS_Archived"] = '2'
#归档数据失败
defineDict["THOST_FTDC_SAS_ArchiveFail"] = '3'
typedefDict["TThostFtdcSettArchiveStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCTPTypeType是一个CTP交易系统类型类型
#//////////////////////////////////////////////////////////////////////
#未知类型
defineDict["THOST_FTDC_CTPT_Unkown"] = '0'
#主中心
defineDict["THOST_FTDC_CTPT_MainCenter"] = '1'
#备中心
defineDict["THOST_FTDC_CTPT_BackUp"] = '2'
typedefDict["TThostFtdcCTPTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcToolIDType是一个工具代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcToolIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcToolNameType是一个工具名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcToolNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCloseDealTypeType是一个平仓处理类型类型
#//////////////////////////////////////////////////////////////////////
#正常
defineDict["THOST_FTDC_CDT_Normal"] = '0'
#投机平仓优先
defineDict["THOST_FTDC_CDT_SpecFirst"] = '1'
typedefDict["TThostFtdcCloseDealTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcMortgageFundUseRangeType是一个货币质押资金可用范围类型
#//////////////////////////////////////////////////////////////////////
#不能使用
defineDict["THOST_FTDC_MFUR_None"] = '0'
#用于保证金
defineDict["THOST_FTDC_MFUR_Margin"] = '1'
#用于手续费、盈亏、保证金
defineDict["THOST_FTDC_MFUR_All"] = '2'
typedefDict["TThostFtdcMortgageFundUseRangeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCurrencyUnitType是一个币种单位数量类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCurrencyUnitType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcExchangeRateType是一个汇率类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcExchangeRateType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcSpecProductTypeType是一个特殊产品类型类型
#//////////////////////////////////////////////////////////////////////
#郑商所套保产品
defineDict["THOST_FTDC_SPT_CzceHedge"] = '1'
#货币质押产品
defineDict["THOST_FTDC_SPT_IneForeignCurrency"] = '2'
#大连短线开平仓产品
defineDict["THOST_FTDC_SPT_DceOpenClose"] = '3'
typedefDict["TThostFtdcSpecProductTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundMortgageTypeType是一个货币质押类型类型
#//////////////////////////////////////////////////////////////////////
#质押
defineDict["THOST_FTDC_FMT_Mortgage"] = '1'
#解质
defineDict["THOST_FTDC_FMT_Redemption"] = '2'
typedefDict["TThostFtdcFundMortgageTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAccountSettlementParamIDType是一个投资者账户结算参数代码类型
#//////////////////////////////////////////////////////////////////////
#基础保证金
defineDict["THOST_FTDC_ASPI_BaseMargin"] = '1'
#最低权益标准
defineDict["THOST_FTDC_ASPI_LowestInterest"] = '2'
typedefDict["TThostFtdcAccountSettlementParamIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCurrencyNameType是一个币种名称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCurrencyNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCurrencySignType是一个币种符号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCurrencySignType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundMortDirectionType是一个货币质押方向类型
#//////////////////////////////////////////////////////////////////////
#货币质入
defineDict["THOST_FTDC_FMD_In"] = '1'
#货币质出
defineDict["THOST_FTDC_FMD_Out"] = '2'
typedefDict["TThostFtdcFundMortDirectionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBusinessClassType是一个换汇类别类型
#//////////////////////////////////////////////////////////////////////
#盈利
defineDict["THOST_FTDC_BT_Profit"] = '0'
#亏损
defineDict["THOST_FTDC_BT_Loss"] = '1'
#其他
defineDict["THOST_FTDC_BT_Other"] = 'Z'
typedefDict["TThostFtdcBusinessClassType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSwapSourceTypeType是一个换汇数据来源类型
#//////////////////////////////////////////////////////////////////////
#手工
defineDict["THOST_FTDC_SST_Manual"] = '0'
#自动生成
defineDict["THOST_FTDC_SST_Automatic"] = '1'
typedefDict["TThostFtdcSwapSourceTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCurrExDirectionType是一个换汇类型类型
#//////////////////////////////////////////////////////////////////////
#结汇
defineDict["THOST_FTDC_CED_Settlement"] = '0'
#售汇
defineDict["THOST_FTDC_CED_Sale"] = '1'
typedefDict["TThostFtdcCurrExDirectionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCurrencySwapStatusType是一个申请状态类型
#//////////////////////////////////////////////////////////////////////
#已录入
defineDict["THOST_FTDC_CSS_Entry"] = '1'
#已审核
defineDict["THOST_FTDC_CSS_Approve"] = '2'
#已拒绝
defineDict["THOST_FTDC_CSS_Refuse"] = '3'
#已撤销
defineDict["THOST_FTDC_CSS_Revoke"] = '4'
#已发送
defineDict["THOST_FTDC_CSS_Send"] = '5'
#换汇成功
defineDict["THOST_FTDC_CSS_Success"] = '6'
#换汇失败
defineDict["THOST_FTDC_CSS_Failure"] = '7'
typedefDict["TThostFtdcCurrencySwapStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCurrExchCertNoType是一个凭证号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCurrExchCertNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBatchSerialNoType是一个批次号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBatchSerialNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcReqFlagType是一个换汇发送标志类型
#//////////////////////////////////////////////////////////////////////
#未发送
defineDict["THOST_FTDC_REQF_NoSend"] = '0'
#发送成功
defineDict["THOST_FTDC_REQF_SendSuccess"] = '1'
#发送失败
defineDict["THOST_FTDC_REQF_SendFailed"] = '2'
#等待重发
defineDict["THOST_FTDC_REQF_WaitReSend"] = '3'
typedefDict["TThostFtdcReqFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcResFlagType是一个换汇返回成功标志类型
#//////////////////////////////////////////////////////////////////////
#成功
defineDict["THOST_FTDC_RESF_Success"] = '0'
#账户余额不足
defineDict["THOST_FTDC_RESF_InsuffiCient"] = '1'
#交易结果未知
defineDict["THOST_FTDC_RESF_UnKnown"] = '8'
typedefDict["TThostFtdcResFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPageControlType是一个换汇页面控制类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPageControlType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcRecordCountType是一个记录数类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcRecordCountType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcCurrencySwapMemoType是一个换汇需确认信息类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCurrencySwapMemoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExStatusType是一个修改状态类型
#//////////////////////////////////////////////////////////////////////
#修改前
defineDict["THOST_FTDC_EXS_Before"] = '0'
#修改后
defineDict["THOST_FTDC_EXS_After"] = '1'
typedefDict["TThostFtdcExStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcClientRegionType是一个开户客户地域类型
#//////////////////////////////////////////////////////////////////////
#国内客户
defineDict["THOST_FTDC_CR_Domestic"] = '1'
#港澳台客户
defineDict["THOST_FTDC_CR_GMT"] = '2'
#国外客户
defineDict["THOST_FTDC_CR_Foreign"] = '3'
typedefDict["TThostFtdcClientRegionType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcWorkPlaceType是一个工作单位类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcWorkPlaceType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBusinessPeriodType是一个经营期限类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBusinessPeriodType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcWebSiteType是一个网址类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcWebSiteType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUOAIdCardTypeType是一个统一开户证件类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUOAIdCardTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcClientModeType是一个开户模式类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcClientModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvestorFullNameType是一个投资者全称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInvestorFullNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUOABrokerIDType是一个境外中介机构ID类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUOABrokerIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUOAZipCodeType是一个邮政编码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUOAZipCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUOAEMailType是一个电子邮箱类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUOAEMailType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOldCityType是一个城市类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcOldCityType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCorporateIdentifiedCardNoType是一个法人代表证件号码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCorporateIdentifiedCardNoType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcHasBoardType是一个是否有董事会类型
#//////////////////////////////////////////////////////////////////////
#没有
defineDict["THOST_FTDC_HB_No"] = '0'
#有
defineDict["THOST_FTDC_HB_Yes"] = '1'
typedefDict["TThostFtdcHasBoardType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcStartModeType是一个启动模式类型
#//////////////////////////////////////////////////////////////////////
#正常
defineDict["THOST_FTDC_SM_Normal"] = '1'
#应急
defineDict["THOST_FTDC_SM_Emerge"] = '2'
#恢复
defineDict["THOST_FTDC_SM_Restore"] = '3'
typedefDict["TThostFtdcStartModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcTemplateTypeType是一个模型类型类型
#//////////////////////////////////////////////////////////////////////
#全量
defineDict["THOST_FTDC_TPT_Full"] = '1'
#增量
defineDict["THOST_FTDC_TPT_Increment"] = '2'
#备份
defineDict["THOST_FTDC_TPT_BackUp"] = '3'
typedefDict["TThostFtdcTemplateTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcLoginModeType是一个登录模式类型
#//////////////////////////////////////////////////////////////////////
#交易
defineDict["THOST_FTDC_LM_Trade"] = '0'
#转账
defineDict["THOST_FTDC_LM_Transfer"] = '1'
typedefDict["TThostFtdcLoginModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPromptTypeType是一个日历提示类型类型
#//////////////////////////////////////////////////////////////////////
#合约上下市
defineDict["THOST_FTDC_CPT_Instrument"] = '1'
#保证金分段生效
defineDict["THOST_FTDC_CPT_Margin"] = '2'
typedefDict["TThostFtdcPromptTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcLedgerManageIDType是一个分户管理资产编码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcLedgerManageIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInvestVarietyType是一个投资品种类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInvestVarietyType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBankAccountTypeType是一个账户类别类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBankAccountTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcLedgerManageBankType是一个开户银行类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcLedgerManageBankType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCffexDepartmentNameType是一个开户营业部类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCffexDepartmentNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCffexDepartmentCodeType是一个营业部代码类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCffexDepartmentCodeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcHasTrusteeType是一个是否有托管人类型
#//////////////////////////////////////////////////////////////////////
#有
defineDict["THOST_FTDC_HT_Yes"] = '1'
#没有
defineDict["THOST_FTDC_HT_No"] = '0'
typedefDict["TThostFtdcHasTrusteeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCMemo1Type是一个说明类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCMemo1Type"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAssetmgrCFullNameType是一个代理资产管理业务的期货公司全称类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAssetmgrCFullNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAssetmgrApprovalNOType是一个资产管理业务批文号类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAssetmgrApprovalNOType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAssetmgrMgrNameType是一个资产管理业务负责人姓名类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAssetmgrMgrNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAmTypeType是一个机构类型类型
#//////////////////////////////////////////////////////////////////////
#银行
defineDict["THOST_FTDC_AMT_Bank"] = '1'
#证券公司
defineDict["THOST_FTDC_AMT_Securities"] = '2'
#基金公司
defineDict["THOST_FTDC_AMT_Fund"] = '3'
#保险公司
defineDict["THOST_FTDC_AMT_Insurance"] = '4'
#信托公司
defineDict["THOST_FTDC_AMT_Trust"] = '5'
#其他
defineDict["THOST_FTDC_AMT_Other"] = '9'
typedefDict["TThostFtdcAmTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCAmTypeType是一个机构类型类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCAmTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCFundIOTypeType是一个出入金类型类型
#//////////////////////////////////////////////////////////////////////
#出入金
defineDict["THOST_FTDC_CFIOT_FundIO"] = '0'
#银期换汇
defineDict["THOST_FTDC_CFIOT_SwapCurrency"] = '1'
typedefDict["TThostFtdcCSRCFundIOTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCusAccountTypeType是一个结算账户类型类型
#//////////////////////////////////////////////////////////////////////
#期货结算账户
defineDict["THOST_FTDC_CAT_Futures"] = '1'
#纯期货资管业务下的资管结算账户
defineDict["THOST_FTDC_CAT_AssetmgrFuture"] = '2'
#综合类资管业务下的期货资管托管账户
defineDict["THOST_FTDC_CAT_AssetmgrTrustee"] = '3'
#综合类资管业务下的资金中转账户
defineDict["THOST_FTDC_CAT_AssetmgrTransfer"] = '4'
typedefDict["TThostFtdcCusAccountTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCNationalType是一个国籍类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCNationalType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCSRCSecAgentIDType是一个二级代理ID类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCSRCSecAgentIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcLanguageTypeType是一个通知语言类型类型
#//////////////////////////////////////////////////////////////////////
#中文
defineDict["THOST_FTDC_LT_Chinese"] = '1'
#英文
defineDict["THOST_FTDC_LT_English"] = '2'
typedefDict["TThostFtdcLanguageTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAmAccountType是一个投资账户类型
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcAmAccountType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAssetmgrClientTypeType是一个资产管理客户类型类型
#//////////////////////////////////////////////////////////////////////
#Individual asset management client
defineDict["THOST_FTDC_AMCT_Person"] = '1'
#Institutional asset management client
defineDict["THOST_FTDC_AMCT_Organ"] = '2'
#Special institutional asset management client
defineDict["THOST_FTDC_AMCT_SpecialOrgan"] = '4'
typedefDict["TThostFtdcAssetmgrClientTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcAssetmgrTypeType is an investment type
#//////////////////////////////////////////////////////////////////////
#Futures
defineDict["THOST_FTDC_ASST_Futures"] = '3'
#Comprehensive
defineDict["THOST_FTDC_ASST_SpecialOrgan"] = '4'
typedefDict["TThostFtdcAssetmgrTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcUOMType is a unit-of-measure type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcUOMType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSHFEInstLifePhaseType is an SHFE instrument life-cycle phase type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSHFEInstLifePhaseType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSHFEProductClassType is a product class type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSHFEProductClassType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcPriceDecimalType is a price decimal-places type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcPriceDecimalType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcInTheMoneyFlagType is an at-the-money option flag type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcInTheMoneyFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCheckInstrTypeType is an instrument comparison type
#//////////////////////////////////////////////////////////////////////
#Instrument does not exist at the exchange
defineDict["THOST_FTDC_CIT_HasExch"] = '0'
#Instrument does not exist in this system
defineDict["THOST_FTDC_CIT_HasATP"] = '1'
#Instrument comparison mismatch
defineDict["THOST_FTDC_CIT_HasDiff"] = '2'
typedefDict["TThostFtdcCheckInstrTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDeliveryTypeType is a delivery type
#//////////////////////////////////////////////////////////////////////
#Manual delivery
defineDict["THOST_FTDC_DT_HandDeliv"] = '1'
#Delivery at expiry
defineDict["THOST_FTDC_DT_PersonDeliv"] = '2'
typedefDict["TThostFtdcDeliveryTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBigMoneyType is a large-amount (money) type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcBigMoneyType"] = "float"
#//////////////////////////////////////////////////////////////////////
#TFtdcMaxMarginSideAlgorithmType is a large single-sided margin algorithm type
#//////////////////////////////////////////////////////////////////////
#Do not use the large single-sided margin algorithm
defineDict["THOST_FTDC_MMSA_NO"] = '0'
#Use the large single-sided margin algorithm
defineDict["THOST_FTDC_MMSA_YES"] = '1'
typedefDict["TThostFtdcMaxMarginSideAlgorithmType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDAClientTypeType is an asset management client type
#//////////////////////////////////////////////////////////////////////
#Natural person
defineDict["THOST_FTDC_CACT_Person"] = '0'
#Legal entity
defineDict["THOST_FTDC_CACT_Company"] = '1'
#Other
defineDict["THOST_FTDC_CACT_Other"] = '2'
typedefDict["TThostFtdcDAClientTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCombinInstrIDType is an arbitrage (combination) instrument ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCombinInstrIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCombinSettlePriceType is a per-leg settlement price type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcCombinSettlePriceType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDCEPriorityType is a priority type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcDCEPriorityType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcTradeGroupIDType is a trade group ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcTradeGroupIDType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcIsCheckPrepaType is a "check available funds on account opening" flag type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcIsCheckPrepaType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcUOAAssetmgrTypeType is an investment type
#//////////////////////////////////////////////////////////////////////
#Futures
defineDict["THOST_FTDC_UOAAT_Futures"] = '1'
#Comprehensive
defineDict["THOST_FTDC_UOAAT_SpecialOrgan"] = '2'
typedefDict["TThostFtdcUOAAssetmgrTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDirectionEnType is a buy/sell direction type
#//////////////////////////////////////////////////////////////////////
#Buy
defineDict["THOST_FTDC_DEN_Buy"] = '0'
#Sell
defineDict["THOST_FTDC_DEN_Sell"] = '1'
typedefDict["TThostFtdcDirectionEnType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOffsetFlagEnType is an open/close flag type
#//////////////////////////////////////////////////////////////////////
#Position Opening
defineDict["THOST_FTDC_OFEN_Open"] = '0'
#Position Close
defineDict["THOST_FTDC_OFEN_Close"] = '1'
#Forced Liquidation
defineDict["THOST_FTDC_OFEN_ForceClose"] = '2'
#Close Today
defineDict["THOST_FTDC_OFEN_CloseToday"] = '3'
#Close Prev.
defineDict["THOST_FTDC_OFEN_CloseYesterday"] = '4'
#Forced Reduction
defineDict["THOST_FTDC_OFEN_ForceOff"] = '5'
#Local Forced Liquidation
defineDict["THOST_FTDC_OFEN_LocalForceClose"] = '6'
typedefDict["TThostFtdcOffsetFlagEnType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcHedgeFlagEnType is a speculation/hedge flag type
#//////////////////////////////////////////////////////////////////////
#Speculation
defineDict["THOST_FTDC_HFEN_Speculation"] = '1'
#Arbitrage
defineDict["THOST_FTDC_HFEN_Arbitrage"] = '2'
#Hedge
defineDict["THOST_FTDC_HFEN_Hedge"] = '3'
typedefDict["TThostFtdcHedgeFlagEnType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundIOTypeEnType is a fund in/out type
#//////////////////////////////////////////////////////////////////////
#Deposit/Withdrawal
defineDict["THOST_FTDC_FIOTEN_FundIO"] = '1'
#Bank-Futures Transfer
defineDict["THOST_FTDC_FIOTEN_Transfer"] = '2'
#Bank-Futures FX Exchange
defineDict["THOST_FTDC_FIOTEN_SwapCurrency"] = '3'
typedefDict["TThostFtdcFundIOTypeEnType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundTypeEnType is a fund type
#//////////////////////////////////////////////////////////////////////
#Bank Deposit
defineDict["THOST_FTDC_FTEN_Deposite"] = '1'
#Payment/Fee
defineDict["THOST_FTDC_FTEN_ItemFund"] = '2'
#Brokerage Adj
defineDict["THOST_FTDC_FTEN_Company"] = '3'
#Internal Transfer
defineDict["THOST_FTDC_FTEN_InnerTransfer"] = '4'
typedefDict["TThostFtdcFundTypeEnType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundDirectionEnType is a fund direction type
#//////////////////////////////////////////////////////////////////////
#Deposit
defineDict["THOST_FTDC_FDEN_In"] = '1'
#Withdrawal
defineDict["THOST_FTDC_FDEN_Out"] = '2'
typedefDict["TThostFtdcFundDirectionEnType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcFundMortDirectionEnType is a currency pledge direction type
#//////////////////////////////////////////////////////////////////////
#Pledge
defineDict["THOST_FTDC_FMDEN_In"] = '1'
#Redemption
defineDict["THOST_FTDC_FMDEN_Out"] = '2'
typedefDict["TThostFtdcFundMortDirectionEnType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSwapBusinessTypeType is an FX swap business type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcSwapBusinessTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOptionsTypeType is an option type
#//////////////////////////////////////////////////////////////////////
#Call
defineDict["THOST_FTDC_CP_CallOptions"] = '1'
#Put
defineDict["THOST_FTDC_CP_PutOptions"] = '2'
typedefDict["TThostFtdcOptionsTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcStrikeModeType is a strike (exercise) mode type
#//////////////////////////////////////////////////////////////////////
#European
defineDict["THOST_FTDC_STM_Continental"] = '0'
#American
defineDict["THOST_FTDC_STM_American"] = '1'
#Bermudan
defineDict["THOST_FTDC_STM_Bermuda"] = '2'
typedefDict["TThostFtdcStrikeModeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcStrikeTypeType is a strike (exercise) type
#//////////////////////////////////////////////////////////////////////
#Self hedge
defineDict["THOST_FTDC_STT_Hedge"] = '0'
#Matched execution
defineDict["THOST_FTDC_STT_Match"] = '1'
typedefDict["TThostFtdcStrikeTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcApplyTypeType is a CFFEX option abandon-exercise application type
#//////////////////////////////////////////////////////////////////////
#Non-exercised volume
defineDict["THOST_FTDC_APPT_NotStrikeNum"] = '4'
typedefDict["TThostFtdcApplyTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcGiveUpDataSourceType is an abandon-exercise application data source type
#//////////////////////////////////////////////////////////////////////
#Generated by the system
defineDict["THOST_FTDC_GUDS_Gen"] = '0'
#Added manually
defineDict["THOST_FTDC_GUDS_Hand"] = '1'
typedefDict["TThostFtdcGiveUpDataSourceType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExecOrderSysIDType is an exec order system ID type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcExecOrderSysIDType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExecResultType is an exercise result type
#//////////////////////////////////////////////////////////////////////
#Not executed
defineDict["THOST_FTDC_OER_NoExec"] = 'n'
#Canceled
defineDict["THOST_FTDC_OER_Canceled"] = 'c'
#Executed successfully
defineDict["THOST_FTDC_OER_OK"] = '0'
#Insufficient option position
defineDict["THOST_FTDC_OER_NoPosition"] = '1'
#Insufficient funds
defineDict["THOST_FTDC_OER_NoDeposit"] = '2'
#Participant does not exist
defineDict["THOST_FTDC_OER_NoParticipant"] = '3'
#Client does not exist
defineDict["THOST_FTDC_OER_NoClient"] = '4'
#Instrument does not exist
defineDict["THOST_FTDC_OER_NoInstrument"] = '6'
#No exercise right
defineDict["THOST_FTDC_OER_NoRight"] = '7'
#Invalid volume
defineDict["THOST_FTDC_OER_InvalidVolume"] = '8'
#Not enough historical trades
defineDict["THOST_FTDC_OER_NoEnoughHistoryTrade"] = '9'
#Unknown
defineDict["THOST_FTDC_OER_Unknown"] = 'a'
typedefDict["TThostFtdcExecResultType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcStrikeSequenceType is a strike sequence number type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcStrikeSequenceType"] = "int"
#//////////////////////////////////////////////////////////////////////
#TFtdcStrikeTimeType is a strike time type
#//////////////////////////////////////////////////////////////////////
typedefDict["TThostFtdcStrikeTimeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCombinationTypeType is a combination type
#//////////////////////////////////////////////////////////////////////
#Futures combination
defineDict["THOST_FTDC_COMBT_Future"] = '0'
#Vertical spread BUL (bull)
defineDict["THOST_FTDC_COMBT_BUL"] = '1'
#Vertical spread BER (bear)
defineDict["THOST_FTDC_COMBT_BER"] = '2'
#Straddle
defineDict["THOST_FTDC_COMBT_STD"] = '3'
#Strangle
defineDict["THOST_FTDC_COMBT_STG"] = '4'
#Covered combination
defineDict["THOST_FTDC_COMBT_PRT"] = '5'
typedefDict["TThostFtdcCombinationTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcOptionRoyaltyPriceTypeType is an option premium price type
#//////////////////////////////////////////////////////////////////////
#Previous settlement price
defineDict["THOST_FTDC_ORPT_PreSettlementPrice"] = '1'
#Open price
defineDict["THOST_FTDC_ORPT_OpenPrice"] = '4'
typedefDict["TThostFtdcOptionRoyaltyPriceTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcBalanceAlgorithmType is a balance (equity) algorithm type
#//////////////////////////////////////////////////////////////////////
#Do not include option market-value profit/loss
defineDict["THOST_FTDC_BLAG_Default"] = '1'
#Include option market-value loss
defineDict["THOST_FTDC_BLAG_IncludeOptValLost"] = '2'
typedefDict["TThostFtdcBalanceAlgorithmType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcActionTypeType is an action type
#//////////////////////////////////////////////////////////////////////
#Exercise
defineDict["THOST_FTDC_ACTP_Exec"] = '1'
#Abandon
defineDict["THOST_FTDC_ACTP_Abandon"] = '2'
typedefDict["TThostFtdcActionTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcForQuoteStatusType is a quote-request status type
#//////////////////////////////////////////////////////////////////////
#Submitted
defineDict["THOST_FTDC_FQST_Submitted"] = 'a'
#Accepted
defineDict["THOST_FTDC_FQST_Accepted"] = 'b'
#Rejected
defineDict["THOST_FTDC_FQST_Rejected"] = 'c'
typedefDict["TThostFtdcForQuoteStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcQuotStatusType is a quote status type
#//////////////////////////////////////////////////////////////////////
#Unknown
defineDict["THOST_FTDC_QTST_Unknown"] = 'a'
#Accepted
defineDict["THOST_FTDC_QTST_Accepted"] = 'b'
#Canceled
defineDict["THOST_FTDC_QTST_Canceled"] = 'c'
typedefDict["TThostFtdcQuotStatusType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcValueMethodType is a value method type
#//////////////////////////////////////////////////////////////////////
#Absolute value
defineDict["THOST_FTDC_VM_Absolute"] = '0'
#Ratio
defineDict["THOST_FTDC_VM_Ratio"] = '1'
typedefDict["TThostFtdcValueMethodType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExecOrderPositionFlagType flags whether the futures position is kept after option exercise
#//////////////////////////////////////////////////////////////////////
#Keep the position
defineDict["THOST_FTDC_EOPF_Reserve"] = '0'
#Do not keep the position
defineDict["THOST_FTDC_EOPF_UnReserve"] = '1'
typedefDict["TThostFtdcExecOrderPositionFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcExecOrderCloseFlagType flags whether positions produced by option exercise are closed automatically
#//////////////////////////////////////////////////////////////////////
#Close automatically
defineDict["THOST_FTDC_EOCF_AutoClose"] = '0'
#Exempt from automatic closing
defineDict["THOST_FTDC_EOCF_NotToClose"] = '1'
typedefDict["TThostFtdcExecOrderCloseFlagType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcProductTypeType is a product type
#//////////////////////////////////////////////////////////////////////
#Futures
defineDict["THOST_FTDC_PTE_Futures"] = '1'
#Options
defineDict["THOST_FTDC_PTE_Options"] = '2'
typedefDict["TThostFtdcProductTypeType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCZCEUploadFileNameType is a CZCE settlement file name type
#//////////////////////////////////////////////////////////////////////
#^\d{8}_zz_\d{4}
defineDict["THOST_FTDC_CUFN_CUFN_O"] = 'O'
#^\d{8} trade table
defineDict["THOST_FTDC_CUFN_CUFN_T"] = 'T'
#^\d{8} single-leg position table (new)
defineDict["THOST_FTDC_CUFN_CUFN_P"] = 'P'
#^\d{8} non-close settlement table
defineDict["THOST_FTDC_CUFN_CUFN_N"] = 'N'
#^\d{8} close table
defineDict["THOST_FTDC_CUFN_CUFN_L"] = 'L'
#^\d{8} fund table
defineDict["THOST_FTDC_CUFN_CUFN_F"] = 'F'
#^\d{8} combination position table
defineDict["THOST_FTDC_CUFN_CUFN_C"] = 'C'
#^\d{8} margin parameter table
defineDict["THOST_FTDC_CUFN_CUFN_M"] = 'M'
typedefDict["TThostFtdcCZCEUploadFileNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcDCEUploadFileNameType is a DCE settlement file name type
#//////////////////////////////////////////////////////////////////////
#^\d{8}_dl_\d{3}
defineDict["THOST_FTDC_DUFN_DUFN_O"] = 'O'
#^\d{8}_ trade table
defineDict["THOST_FTDC_DUFN_DUFN_T"] = 'T'
#^\d{8}_ position table
defineDict["THOST_FTDC_DUFN_DUFN_P"] = 'P'
#^\d{8}_ fund settlement table
defineDict["THOST_FTDC_DUFN_DUFN_F"] = 'F'
#^\d{8}_ preferential combination position detail table
defineDict["THOST_FTDC_DUFN_DUFN_C"] = 'C'
#^\d{8}_ position detail table
defineDict["THOST_FTDC_DUFN_DUFN_D"] = 'D'
#^\d{8}_ margin parameter table
defineDict["THOST_FTDC_DUFN_DUFN_M"] = 'M'
#^\d{8}_ option exercise table
defineDict["THOST_FTDC_DUFN_DUFN_S"] = 'S'
typedefDict["TThostFtdcDCEUploadFileNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcSHFEUploadFileNameType is an SHFE settlement file name type
#//////////////////////////////////////////////////////////////////////
#^\d{4}_\d{8}_\d{8}_DailyFundChg
defineDict["THOST_FTDC_SUFN_SUFN_O"] = 'O'
#^\d{4}_\d{8}_\d{8}_Trade
defineDict["THOST_FTDC_SUFN_SUFN_T"] = 'T'
#^\d{4}_\d{8}_\d{8}_SettlementDetail
defineDict["THOST_FTDC_SUFN_SUFN_P"] = 'P'
#^\d{4}_\d{8}_\d{8}_Capital
defineDict["THOST_FTDC_SUFN_SUFN_F"] = 'F'
typedefDict["TThostFtdcSHFEUploadFileNameType"] = "string"
#//////////////////////////////////////////////////////////////////////
#TFtdcCFFEXUploadFileNameType is a CFFEX settlement file name type
#//////////////////////////////////////////////////////////////////////
#^\d{4}_SG\d{1}_\d{8}_\d{1}_Trade
defineDict["THOST_FTDC_CFUFN_SUFN_T"] = 'T'
#^\d{4}_SG\d{1}_\d{8}_\d{1}_SettlementDetail
defineDict["THOST_FTDC_CFUFN_SUFN_P"] = 'P'
#^\d{4}_SG\d{1}_\d{8}_\d{1}_Capital
defineDict["THOST_FTDC_CFUFN_SUFN_F"] = 'F'
#^\d{4}_SG\d{1}_\d{8}_\d{1}_OptionExec
defineDict["THOST_FTDC_CFUFN_SUFN_S"] = 'S'
typedefDict["TThostFtdcCFFEXUploadFileNameType"] = "string"
| mit |
powerjg/gem5-ci-test | src/arch/x86/isa/insts/simd128/integer/logical/por.py | 91 | 2658 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop POR_XMM_XMM {
mor xmml, xmml, xmmlm
mor xmmh, xmmh, xmmhm
};
def macroop POR_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mor xmml, xmml, ufp1
mor xmmh, xmmh, ufp2
};
def macroop POR_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mor xmml, xmml, ufp1
mor xmmh, xmmh, ufp2
};
'''
| bsd-3-clause |
Kongsea/tensorflow | tensorflow/contrib/slim/python/slim/data/prefetch_queue.py | 63 | 3559 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a simple prefetch_queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
def _which_queue(dynamic_pad):
  """Return the queue class to use: a padding-capable FIFO queue when
  `dynamic_pad` is true, a plain FIFO queue otherwise."""
  if dynamic_pad:
    return data_flow_ops.PaddingFIFOQueue
  return data_flow_ops.FIFOQueue
def prefetch_queue(tensors,
                   capacity=8,
                   num_threads=1,
                   dynamic_pad=False,
                   shared_name=None,
                   name=None):
  """Creates a queue to prefetch tensors from `tensors`.

  A queue runner for enqueuing tensors into the prefetch_queue is
  automatically added to the TF QueueRunners collection.

  Example:
  This is for example useful to pre-assemble input batches read with
  `tf.train.batch()` and enqueue the pre-assembled batches. Ops that dequeue
  from the pre-assembled queue will not pay the cost of assembling the batch.

  images, labels = tf.train.batch([image, label], batch_size=32, num_threads=4)
  batch_queue = prefetch_queue([images, labels])
  images, labels = batch_queue.dequeue()
  logits = Net(images)
  loss = Loss(logits, labels)

  Args:
    tensors: A list or dictionary of `Tensors` to enqueue in the buffer.
    capacity: An integer. The maximum number of elements in the queue.
    num_threads: An integer. Number of threads running the enqueue op.
    dynamic_pad: Boolean. Whether to allow variable dimensions in input shapes.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A queue from which you can dequeue tensors with the same type and shape
    as `tensors`.
  """
  if isinstance(tensors, dict):
    # Need to wrap the keys and values in list() since Python3 returns views.
    # We sort the keys so the order is consistent across runs.
    names = list(sorted(tensors.keys()))
    tensor_list = list([tensors[n] for n in names])
  else:
    names = None
    tensor_list = tensors
  with ops.name_scope(name, "prefetch_queue", tensor_list) as name:
    dtypes = [t.dtype for t in tensor_list]
    shapes = [t.get_shape() for t in tensor_list]
    # PaddingFIFOQueue when dynamic_pad is set, plain FIFOQueue otherwise.
    queue = _which_queue(dynamic_pad)(
        capacity=capacity,
        dtypes=dtypes,
        shapes=shapes,
        names=names,
        shared_name=shared_name)
    enqueue_op = queue.enqueue(tensors)
    # Register the enqueue threads so tf.train.start_queue_runners picks
    # them up automatically.
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(queue, [enqueue_op] * num_threads))
    # Export a fill-level summary so queue starvation is visible in
    # TensorBoard.
    summary.scalar("fraction_of_%d_full" % capacity,
                   math_ops.to_float(queue.size()) * (1. / capacity))
    return queue
| apache-2.0 |
D4wN/brickv | src/brickv/plugin_system/plugins/red/red_tab_settings_filesystem.py | 1 | 6081 | # -*- coding: utf-8 -*-
"""
RED Plugin
Copyright (C) 2014 Ishraq Ibne Ashraf <ishraq@tinkerforge.com>
Copyright (C) 2014-2015 Matthias Bolte <matthias@tinkerforge.com>
red_tab_settings_filesystem.py: RED settings file system tab implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
import json
import sys
import math
from PyQt4 import QtGui
from brickv.plugin_system.plugins.red.ui_red_tab_settings_filesystem import Ui_REDTabSettingsFileSystem
from brickv.plugin_system.plugins.red.api import *
from brickv.plugin_system.plugins.red.script_manager import report_script_result
from brickv.utils import get_main_window
class REDTabSettingsFileSystem(QtGui.QWidget, Ui_REDTabSettingsFileSystem):
    """Settings tab showing SD-card file system utilization and offering a
    button to expand the root file system to the full card (which reboots
    the RED Brick afterwards)."""

    def __init__(self):
        QtGui.QWidget.__init__(self)
        self.setupUi(self)
        self.session = None # Set from REDTabSettings
        self.script_manager = None # Set from REDTabSettings
        self.image_version = None # Set from REDTabSettings
        self.service_state = None # Set from REDTabSettings
        # Tracks whether this tab is currently visible; callbacks bail out
        # early when it is not.
        self.is_tab_on_focus = False
        # For OSX progress bar text fix
        self.label_fs_expand_info.hide()
        self.line.hide()
        self.label_pbar_fs_capacity_utilization.hide()
        # Signals/slots
        self.pbutton_fs_expand.clicked.connect(self.slot_fs_expand_clicked)
    def tab_on_focus(self):
        """Called when the tab becomes visible: query partition sizes."""
        self.is_tab_on_focus = True
        self.script_manager.execute_script('settings_fs_expand_check',
                                           self.cb_settings_fs_expand_check,
                                           ['/dev/mmcblk0'])
    def tab_off_focus(self):
        """Called when the tab loses visibility."""
        self.is_tab_on_focus = False
    def tab_destroy(self):
        """Called on tab teardown; nothing to clean up here."""
        pass
    # The callbacks
    def cb_settings_fs_expand_check(self, result):
        """Handle the partition-size script result and update the UI.

        Expects `result.stdout` to be JSON with p1_start/p1_size/card_size/
        ext3_size values (sector counts from the script)."""
        if not self.is_tab_on_focus:
            return
        if not report_script_result(result, 'Settings | File System',
                                    'Error getting partition information'):
            # Script failed: reset and disable the utilization display.
            self.label_fs_expand_info.hide()
            self.line.hide()
            self.label_pbar_fs_capacity_utilization.hide()
            self.pbar_fs_capacity_utilization.setMinimum(0)
            self.pbar_fs_capacity_utilization.setMaximum(100)
            self.pbar_fs_capacity_utilization.setValue(0)
            self.pbar_fs_capacity_utilization.setFormat('')
            self.pbar_fs_capacity_utilization.setEnabled(False)
            self.pbutton_fs_expand.setEnabled(False)
            return
        try:
            size_dict = json.loads(result.stdout)
            p1_start = float(size_dict['p1_start'])
            p1_size = float(size_dict['p1_size'])
            card_size = float(size_dict['card_size'])
            ext3_size = float(size_dict['ext3_size'])
        except:
            # Malformed output: fall back to values that show 100% usage
            # (which disables the expand button below).
            p1_start = 0
            p1_size = 100
            card_size = 100
            ext3_size = 100
        avialable_size = card_size - p1_start
        used_size = min(p1_size, ext3_size)
        percentage_utilization_v = min(int(math.ceil((used_size / avialable_size) * 100.0)), 100)
        # due to common file system overhead 100% will normally never be
        # reached just fake 100% in this case to avoid user confusion
        if percentage_utilization_v >= 95:
            percentage_utilization_v = 100
        percentage_utilization = unicode(percentage_utilization_v)  # Python 2 text type
        self.pbar_fs_capacity_utilization.setEnabled(True)
        self.pbar_fs_capacity_utilization.setMinimum(0)
        self.pbar_fs_capacity_utilization.setMaximum(100)
        self.pbar_fs_capacity_utilization.setValue(percentage_utilization_v)
        # Fully expanded: nothing left to do, hide the expand hint.
        if percentage_utilization_v == 100:
            self.pbutton_fs_expand.setEnabled(False)
            self.label_fs_expand_info.hide()
            self.line.hide()
        else:
            self.pbutton_fs_expand.setEnabled(True)
            self.label_fs_expand_info.show()
            self.line.show()
        pbar_fs_capacity_utilization_fmt = "Using {0}% of total capacity".format(percentage_utilization)
        # On OSX the progress bar cannot render text, so a separate label
        # is used instead (see the hide() calls in __init__).
        if sys.platform == 'darwin':
            self.label_pbar_fs_capacity_utilization.show()
            self.label_pbar_fs_capacity_utilization.setText(pbar_fs_capacity_utilization_fmt)
        else:
            self.pbar_fs_capacity_utilization.setFormat(pbar_fs_capacity_utilization_fmt)
    # The slots
    def slot_fs_expand_clicked(self):
        """Run the file system expansion script, then reboot the RED Brick."""
        def cb_settings_fs_expand(result):
            def cb_restart_reboot_shutdown(result):
                report_script_result(result, 'Settings | File System',
                                     'Error rebooting RED Brick')
                get_main_window().setEnabled(True)
            # NOTE(review): if the expand script fails we return here and the
            # main window is never re-enabled after setEnabled(False) below --
            # confirm whether report_script_result restores it.
            if not report_script_result(result, 'Settings | File System',
                                        'Error expanding file system'):
                return
            QtGui.QMessageBox.information(get_main_window(),
                                          'Settings | Services',
                                          'File system expansion will be complete after reboot, rebooting RED Brick now.')
            self.script_manager.execute_script('restart_reboot_shutdown',
                                               cb_restart_reboot_shutdown, ['1'])
        # Block the UI while the expansion is running.
        get_main_window().setEnabled(False)
        self.script_manager.execute_script('settings_fs_expand',
                                           cb_settings_fs_expand)
| gpl-2.0 |
miguelparaiso/OdooAccessible | addons/l10n_be_hr_payroll/l10n_be_hr_payroll.py | 379 | 3110 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class hr_contract_be(osv.osv):
    """Belgian payroll extension of hr.contract.

    Adds the monetary amounts consumed by the Belgian payslip rules:
    travel reimbursement, company car amounts, ONSS-exempt items, meal
    vouchers, group insurance and net supplements/retentions.  All fields
    use the 'Payroll' decimal precision.  Field label strings are kept
    verbatim (including trailing spaces) because Odoo translations are
    keyed on the exact source string.
    """
    _inherit = 'hr.contract'
    _columns = {
        'travel_reimbursement_amount': fields.float('Reimbursement of travel expenses', digits_compute=dp.get_precision('Payroll')),
        'car_company_amount': fields.float('Company car employer', digits_compute=dp.get_precision('Payroll')),
        'car_employee_deduction': fields.float('Company Car Deduction for Worker', digits_compute=dp.get_precision('Payroll')),
        'misc_onss_deduction': fields.float('Miscellaneous exempt ONSS ', digits_compute=dp.get_precision('Payroll')),
        'meal_voucher_amount': fields.float('Check Value Meal ', digits_compute=dp.get_precision('Payroll')),
        'meal_voucher_employee_deduction': fields.float('Check Value Meal - by worker ', digits_compute=dp.get_precision('Payroll')),
        'insurance_employee_deduction': fields.float('Insurance Group - by worker ', digits_compute=dp.get_precision('Payroll')),
        'misc_advantage_amount': fields.float('Benefits of various nature ', digits_compute=dp.get_precision('Payroll')),
        'additional_net_amount': fields.float('Net supplements', digits_compute=dp.get_precision('Payroll')),
        'retained_net_amount': fields.float('Net retained ', digits_compute=dp.get_precision('Payroll')),
    }
class hr_employee_be(osv.osv):
    """Belgian payroll extension of hr.employee.

    Adds the personal/fiscal attributes used to compute Belgian
    withholding tax: spouse income status, disability flags for spouse
    and children, non-resident status and the number of disabled
    children.
    """
    _inherit = 'hr.employee'
    _columns = {
        'spouse_fiscal_status': fields.selection([('without income','Without Income'),('with income','With Income')], 'Tax status for spouse'),
        'disabled_spouse_bool': fields.boolean('Disabled Spouse', help="if recipient spouse is declared disabled by law"),
        'disabled_children_bool': fields.boolean('Disabled Children', help="if recipient children is/are declared disabled by law"),
        'resident_bool': fields.boolean('Nonresident', help="if recipient lives in a foreign country"),
        'disabled_children_number': fields.integer('Number of disabled children'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tdtrask/ansible | test/units/modules/network/f5/test_bigip_policy.py | 23 | 5424 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigip_policy import Parameters
from library.bigip_policy import ModuleManager
from library.bigip_policy import SimpleManager
from library.bigip_policy import ComplexManager
from library.bigip_policy import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_policy import Parameters
from ansible.modules.network.f5.bigip_policy import ModuleManager
from ansible.modules.network.f5.bigip_policy import SimpleManager
from ansible.modules.network.f5.bigip_policy import ComplexManager
from ansible.modules.network.f5.bigip_policy import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Return the contents of fixture file `name`, parsed as JSON when
    possible, raw text otherwise.  Results are cached per path in the
    module-level `fixture_data` dict."""
    path = os.path.join(fixture_path, name)
    try:
        # Serve a previously loaded fixture from the cache.
        return fixture_data[path]
    except KeyError:
        pass
    with open(path) as f:
        contents = f.read()
    try:
        contents = json.loads(contents)
    except Exception:
        # Not JSON -- keep the raw file contents.
        pass
    fixture_data[path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Verify how Parameters normalizes module arguments, in particular
    the partition-qualification of the `strategy` value."""

    @staticmethod
    def _params(**module_args):
        # Small helper so each test reads as a single expression.
        return Parameters(params=module_args)

    def test_module_parameters_none_strategy(self):
        p = self._params(
            name='foo',
            description='asdf asdf asdf',
            password='password',
            server='localhost',
            user='admin'
        )
        self.assertEqual(p.name, 'foo')
        self.assertEqual(p.description, 'asdf asdf asdf')
        self.assertIsNone(p.strategy)

    def test_module_parameters_with_strategy_no_partition(self):
        # A bare strategy name gets qualified with the partition.
        p = self._params(
            name='foo',
            description='asdf asdf asdf',
            password='password',
            server='localhost',
            strategy='foo',
            user='admin',
            partition='Common'
        )
        self.assertEqual(p.name, 'foo')
        self.assertEqual(p.description, 'asdf asdf asdf')
        self.assertEqual(p.strategy, '/Common/foo')

    def test_module_parameters_with_strategy_partition(self):
        # An already qualified strategy is preserved as-is.
        p = self._params(
            name='foo',
            description='asdf asdf asdf',
            password='password',
            server='localhost',
            strategy='/Common/foo',
            user='admin',
            partition='Common'
        )
        self.assertEqual(p.name, 'foo')
        self.assertEqual(p.description, 'asdf asdf asdf')
        self.assertEqual(p.strategy, '/Common/foo')

    def test_module_parameters_with_strategy_different_partition(self):
        # A qualification different from `partition` wins over it.
        p = self._params(
            name='foo',
            description='asdf asdf asdf',
            password='password',
            server='localhost',
            strategy='/Foo/bar',
            user='admin',
            partition='Common'
        )
        self.assertEqual(p.name, 'foo')
        self.assertEqual(p.description, 'asdf asdf asdf')
        self.assertEqual(p.strategy, '/Foo/bar')

    def test_api_parameters(self):
        p = self._params(
            name='foo',
            description='asdf asdf asdf',
            strategy='/Common/asdf'
        )
        self.assertEqual(p.name, 'foo')
        self.assertEqual(p.description, 'asdf asdf asdf')
        self.assertEqual(p.strategy, '/Common/asdf')
class TestSimpleTrafficPolicyManager(unittest.TestCase):
    """Unit tests for the pre-12.x ("simple") traffic policy manager."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_policy(self, *args):
        set_module_args(dict(
            name="Policy-Foo",
            state='present',
            strategy='best',
            password='password',
            server='localhost',
            user='admin'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Stub out all device access on the type-specific manager.
        simple_manager = SimpleManager(module=module, params=module.params)
        simple_manager.exists = Mock(return_value=False)
        simple_manager.create_on_device = Mock(return_value=True)

        # Force the top-level manager down the "simple" (pre-12.x) code path.
        top_manager = ModuleManager(module=module)
        top_manager.version_is_less_than_12 = Mock(return_value=True)
        top_manager.get_manager = Mock(return_value=simple_manager)

        results = top_manager.exec_module()
        assert results['changed'] is True
| gpl-3.0 |
f-prettyland/angr | angr/concretization_strategies/controlled_data.py | 1 | 2017 | from itertools import groupby
from . import SimConcretizationStrategy
class SimConcretizationStrategyControlledData(SimConcretizationStrategy):
    """
    Concretization strategy that constrains the address to controlled data.

    Controlled data consists of all bytes in memory backed by symbolic
    variables, plus the fixed addresses given as arguments.
    """

    def __init__(self, limit, fixed_addrs, **kwargs):
        """
        :param limit: maximum number of solutions to ask the solver for.
        :param fixed_addrs: iterable of concrete addresses that are always
                            considered controlled.
        """
        super(SimConcretizationStrategyControlledData, self).__init__(**kwargs)
        self._limit = limit
        self._fixed_addrs = fixed_addrs

    def _concretize(self, memory, addr):
        """Return up to ``self._limit`` solutions for ``addr`` constrained to
        controlled memory, or None when no such solution exists."""
        # Symbolic variables backing memory, excluding register/memory
        # default fills ("reg_*" / "mem_*" names).
        symbolic_vars = [name for name in memory.state.memory.mem._name_mapping.keys()
                         if not name.startswith("reg_") and not name.startswith("mem_")]
        # Collect into a set: duplicate addresses would otherwise inflate the
        # interval lengths computed below and permit uncontrolled addresses.
        controlled = {a for s_var in symbolic_vars for a in memory.addrs_for_name(s_var)}
        controlled.update(self._fixed_addrs)
        if not controlled:
            # Nothing is controlled; report "no solutions" instead of crashing
            # on an empty address list.
            return None
        # Sort AFTER merging the fixed addresses so adjacency coalescing also
        # merges fixed addrs with symbolic ones.  (Note: the original used
        # Python-2-only ``xrange`` here.)
        controlled_addrs = sorted(controlled)

        # Coalesce runs of adjacent addresses into (base, length) intervals.
        intervals = []
        base, length = controlled_addrs[0], 1
        for prev, cur in zip(controlled_addrs, controlled_addrs[1:]):
            if cur == prev + 1:
                length += 1
            else:
                intervals.append((base, length))
                base, length = cur, 1
        intervals.append((base, length))

        # addr must fall inside at least one controlled interval.
        constraints = [memory.state.se.And(addr >= b, addr < b + size)
                       for b, size in intervals]
        ored_constraints = memory.state.se.Or(*constraints)

        # Try to get solutions for controlled memory.
        solutions = self._eval(memory, addr, self._limit,
                               extra_constraints=(ored_constraints,))
        return solutions if solutions else None
| bsd-2-clause |
xHeliotrope/injustice_dropper | env/lib/python3.4/site-packages/phonenumbers/data/region_SJ.py | 11 | 1827 | """Auto-generated file, do not edit by hand. SJ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Phone-number metadata for region "SJ" (Svalbard and Jan Mayen), which
# shares Norway's numbering plan under country calling code +47.
# Auto-generated data table -- regenerate rather than hand-editing patterns.
PHONE_METADATA_SJ = PhoneMetadata(id='SJ', country_code=47, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='0\\d{4}|[4789]\\d{7}', possible_number_pattern='\\d{5}(?:\\d{3})?'),
    fixed_line=PhoneNumberDesc(national_number_pattern='79\\d{6}', possible_number_pattern='\\d{8}', example_number='79123456'),
    mobile=PhoneNumberDesc(national_number_pattern='(?:4[015-8]|5[89]|9\\d)\\d{6}', possible_number_pattern='\\d{8}', example_number='41234567'),
    toll_free=PhoneNumberDesc(national_number_pattern='80[01]\\d{5}', possible_number_pattern='\\d{8}', example_number='80012345'),
    premium_rate=PhoneNumberDesc(national_number_pattern='82[09]\\d{5}', possible_number_pattern='\\d{8}', example_number='82012345'),
    shared_cost=PhoneNumberDesc(national_number_pattern='810(?:0[0-6]|[2-8]\\d)\\d{3}', possible_number_pattern='\\d{8}', example_number='81021234'),
    personal_number=PhoneNumberDesc(national_number_pattern='880\\d{5}', possible_number_pattern='\\d{8}', example_number='88012345'),
    voip=PhoneNumberDesc(national_number_pattern='85[0-5]\\d{5}', possible_number_pattern='\\d{8}', example_number='85012345'),
    pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    uan=PhoneNumberDesc(national_number_pattern='0\\d{4}|81(?:0(?:0[7-9]|1\\d)|5\\d{2})\\d{3}', possible_number_pattern='\\d{5}(?:\\d{3})?', example_number='01234'),
    voicemail=PhoneNumberDesc(national_number_pattern='81[23]\\d{5}', possible_number_pattern='\\d{8}', example_number='81212345'),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    leading_zero_possible=True)
| mit |
class Item(object):
    """A named game item with a numeric value, demonstrating Python's
    "dunder" (double-underscore) protocol methods.

    Items compare by ``value`` -- either against another Item-like object or
    against a plain number -- and expose dict-style item access over their
    instance attributes.
    """

    # Commonly used dunders
    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __repr__(self):
        """Representation of the class"""
        class_name = self.__class__.__name__
        return "<{}({}) '{}'>".format(class_name, self.value, self.name)

    def __str__(self):
        """String representation"""
        return "{}: {}".format(self.name, self.value)

    def __call__(self):
        """Displays all of the instance's attribute information"""
        # Fixed: iterate over *this* instance; the original iterated the
        # module-level ``item01`` regardless of which instance was called.
        for attr in sorted(dir(self)):
            print("{}: {}".format(attr, str(getattr(self, attr))))

    # Comparison dunders
    def _comparable_value(self, other):
        """Return the numeric value to compare against, or None when `other`
        is neither Item-like (has .value) nor a plain number."""
        if hasattr(other, 'value'):
            return other.value
        if isinstance(other, (int, float)):
            return other
        return None

    def __eq__(self, other):
        """compares self to other for equivalence
        >>> item01 = Item("sword", 150)
        >>> item02 = Item("axe", 100)
        >>> item01 == item02
        False
        >>> item01 == 150
        True
        """
        other_value = self._comparable_value(other)
        # Preserve the original's lenient contract: None for unsupported types.
        return None if other_value is None else self.value == other_value

    # Defining __eq__ would otherwise disable hashing on Python 3; keep the
    # default identity hash so instances stay usable in sets/dicts.
    __hash__ = object.__hash__

    def __ne__(self, other):
        """Compares self to other for non-equivalence
        >>> item01 = Item("sword", 150)
        >>> item02 = Item("axe", 100)
        >>> item01 != item02
        True
        >>> item01 != 150
        False
        """
        # Fixed: the original called self.__eq__(self, other), passing `self`
        # twice to a bound method and raising TypeError.
        result = self.__eq__(other)
        return None if result is None else not result

    def __lt__(self, other):
        """Compares self to other: self less than other
        >>> item01 = Item("sword", 150)
        >>> item02 = Item("axe", 100)
        >>> item01 < item02
        False
        >>> item01 < 150
        False
        """
        other_value = self._comparable_value(other)
        return None if other_value is None else self.value < other_value

    def __gt__(self, other):
        """Compares self to other: self greater than other
        >>> item01 = Item("sword", 150)
        >>> item02 = Item("axe", 100)
        >>> item01 > item02
        True
        >>> item01 > 150
        False
        """
        other_value = self._comparable_value(other)
        return None if other_value is None else self.value > other_value

    def __le__(self, other):
        """Compares self to other: self less than or equal to other
        >>> item01 = Item("sword", 150)
        >>> item02 = Item("axe", 100)
        >>> item01 <= item02
        False
        >>> item01 <= 150
        True
        """
        other_value = self._comparable_value(other)
        return None if other_value is None else self.value <= other_value

    def __ge__(self, other):
        """Compares self to other: self greater than or equal to other
        >>> item01 = Item("sword", 150)
        >>> item02 = Item("axe", 100)
        >>> item01 >= item02
        True
        >>> item01 >= 150
        True
        """
        other_value = self._comparable_value(other)
        return None if other_value is None else self.value >= other_value

    # Attribute access dunders
    def __getattribute__(self, name):
        """Retrieves an attribute called 'name' (default object behaviour).

        The original left this as a docstring-only stub, which made EVERY
        attribute lookup return None and broke the whole class.
        """
        return object.__getattribute__(self, name)

    def __getattr__(self, name):
        """Only called if name does not exist on self; return None instead of
        raising AttributeError (lenient lookup)."""
        return None

    def __setattr__(self, name, value):
        """Allows you to set the attribute named 'name' with 'value'"""
        object.__setattr__(self, name, value)

    def __delattr__(self, name):
        """This allows you to remove objects within the self instance"""
        object.__delattr__(self, name)

    def __getitem__(self, key):
        """grabs the attribute by key
        print(item01['name'])
        """
        return self.__dict__.get(key, None)

    def __setitem__(self, key, value):
        """sets an *existing* attribute key to value
        item01['name'] = 'Long Sword'
        """
        if key in self.__dict__:
            self.__dict__[key] = value

    def __delitem__(self, key):
        """delete the attribute by key"""
        if key in self.__dict__:
            del self.__dict__[key]

    # Iterable dunders
    def __iter__(self):
        """Iterates over (name, value) pairs of the instance data
        Usage:
            for item_key, item_value in item01:
                print(item_key, item_value)
        """
        # Fixed: dicts have no ``iteritem`` method; items() works on both
        # Python 2 and Python 3.
        for key, value in self.__dict__.items():
            yield key, value

    def __contains__(self, key):
        """Checks self for key
        Usage:
            print("name" in item01)
        """
        return key in self.__dict__
# Demo / smoke-test driver.  NOTE: this file uses Python 2 ``print``
# statement syntax and will not run under Python 3 as-is.
if __name__ == "__main__":
    item01 = Item("sword", 150)
    # item01()
    item02 = Item("axe", 100)
    print item01 != 150
    # NOTE(review): the result of this comparison is discarded -- it looks
    # like a ``print`` or an ``assert`` was intended here.
    item01["name"] == item01.name
    for key, value in item01:
        print key, value
    if "name" in item01:
        print item01['name']
| apache-2.0 |
Autoplectic/dit | dit/multivariate/coinformation.py | 1 | 4884 | """
The co-information aka the multivariate mututal information.
"""
from ..helpers import normalize_rvs
from ..shannon import conditional_entropy as H
from ..utils import powerset, unitful
__all__ = [
'coinformation',
]
@unitful
def coinformation(dist, rvs=None, crvs=None, rv_mode=None):
    """
    Calculates the coinformation.

    Parameters
    ----------
    dist : Distribution
        The distribution from which the coinformation is calculated.
    rvs : list, None
        The indexes of the random variable used to calculate the coinformation
        between. If None, then the coinformation is calculated over all random
        variables.
    crvs : list, None
        The indexes of the random variables to condition on. If None, then no
        variables are conditioned on.
    rv_mode : str, None
        Specifies how to interpret `rvs` and `crvs`. Valid options are:
        {'indices', 'names'}. If equal to 'indices', then the elements of
        `crvs` and `rvs` are interpreted as random variable indices. If equal
        to 'names', the elements are interpreted as random variable names.
        If `None`, then the value of `dist._rv_mode` is consulted, which
        defaults to 'indices'.

    Returns
    -------
    I : float
        The coinformation.

    Raises
    ------
    ditException
        Raised if `dist` is not a joint distribution or if `rvs` or `crvs`
        contain non-existent random variables.

    Examples
    --------
    Let's construct a 3-variable distribution for the XOR logic gate and name
    the random variables X, Y, and Z.

    >>> d = dit.example_dists.Xor()
    >>> d.set_rv_names(['X', 'Y', 'Z'])

    To calculate coinformations, recall that `rvs` specifies which groups of
    random variables are involved. For example, the 3-way mutual information
    I[X:Y:Z] is calculated as:

    >>> dit.multivariate.coinformation(d, ['X', 'Y', 'Z'])
    -1.0

    It is a quirk of strings that each element of a string is also an iterable.
    So an equivalent way to calculate the 3-way mutual information I[X:Y:Z] is:

    >>> dit.multivariate.coinformation(d, 'XYZ')
    -1.0

    The reason this works is that list('XYZ') == ['X', 'Y', 'Z']. If we want
    to use random variable indexes, we need to have explicit groupings:

    >>> dit.multivariate.coinformation(d, [[0], [1], [2]], rv_mode='indexes')
    -1.0

    To calculate the mutual information I[X, Y : Z], we use explicit groups:

    >>> dit.multivariate.coinformation(d, ['XY', 'Z'])

    Using indexes, this looks like:

    >>> dit.multivariate.coinformation(d, [[0, 1], [2]], rv_mode='indexes')

    The mutual information I[X:Z] is given by:

    >>> dit.multivariate.coinformation(d, 'XZ')
    0.0

    Equivalently,

    >>> dit.multivariate.coinformation(d, ['X', 'Z'])
    0.0

    Using indexes, this becomes:

    >>> dit.multivariate.coinformation(d, [[0], [2]])
    0.0

    Conditional mutual informations can be calculated by passing in the
    conditional random variables. The conditional entropy I[X:Y|Z] is:

    >>> dit.multivariate.coinformation(d, 'XY', 'Z')
    1.0

    Using indexes, this becomes:

    >>> rvs = [[0], [1]]
    >>> crvs = [[2]] # broken
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    1.0

    For the conditional random variables, groupings have no effect, so you
    can also obtain this as:

    >>> rvs = [[0], [1]]
    >>> crvs = [2]
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    1.0

    Finally, note that entropy can also be calculated. The entropy H[Z|XY]
    is obtained as:

    >>> rvs = [[2]]
    >>> crvs = [[0], [1]] # broken
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    0.0

    >>> crvs = [[0, 1]] # broken
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    0.0

    >>> crvs = [0, 1]
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    0.0

    >>> rvs = 'Z'
    >>> crvs = 'XY'
    >>> dit.multivariate.coinformation(d, rvs, crvs, rv_mode='indexes')
    0.0

    Note that [[0], [1]] says to condition on two groups. But conditioning
    is a flat operation and doesn't respect the groups, so it is equal to
    a single group of 2 random variables: [[0, 1]]. With random variable
    names 'XY' is acceptable because list('XY') = ['X', 'Y'], which
    specifies two singleton groups. By the previous argument, this will
    be treated the same as ['XY'].
    """
    rvs, crvs, rv_mode = normalize_rvs(dist, rvs, crvs, rv_mode)

    def entropy(rvs, dist=dist, crvs=crvs, rv_mode=rv_mode):
        """
        Helper function to aid in computing the entropy of subsets.
        """
        # Each subset's variables are flattened into one group before taking
        # the conditional entropy H(union(Xs) | crvs).
        return H(dist, set().union(*rvs), crvs, rv_mode=rv_mode)

    # Inclusion-exclusion over the powerset of `rvs`: subsets of odd size
    # contribute +H, even-size subsets contribute -H.
    I = sum((-1)**(len(Xs)+1) * entropy(Xs) for Xs in powerset(rvs))
    return I
| bsd-3-clause |
drufat/vispy | vispy/visuals/polygon.py | 20 | 3795 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Simple polygon visual based on MeshVisual and LineVisual
"""
from __future__ import division
import numpy as np
from .visual import CompoundVisual
from .mesh import MeshVisual
from .line import LineVisual
from ..color import Color
from ..geometry import PolygonData
from ..gloo import set_state
class PolygonVisual(CompoundVisual):
    """
    Displays a 2D polygon

    Parameters
    ----------
    pos : array
        Set of vertices defining the polygon.
    color : str | tuple | list of colors
        Fill color of the polygon.
    border_color : str | tuple | list of colors
        Border color of the polygon.
    border_width : int
        Border width in pixels.
    **kwargs : dict
        Keyword arguments to pass to `CompoundVisual`.
    """

    def __init__(self, pos=None, color='black',
                 border_color=None, border_width=1, **kwargs):
        self._mesh = MeshVisual()
        self._border = LineVisual()
        self._pos = pos
        self._color = Color(color)
        self._border_width = border_width
        self._border_color = Color(border_color)
        # _update() runs before freeze() so every attribute (including
        # self.data) exists by the time the instance is frozen.
        self._update()
        CompoundVisual.__init__(self, [self._mesh, self._border], **kwargs)
        self._mesh.set_gl_state(polygon_offset_fill=True,
                                polygon_offset=(1, 1), cull_face=False)
        self.freeze()

    def _update(self):
        """Rebuild the mesh and border from the current vertices/colors."""
        # Check for missing vertices BEFORE building PolygonData: the
        # original built PolygonData(np.array(None, ...)) first, which
        # raises as soon as the visual is constructed without `pos`
        # (the default).  Keep self.data defined so freeze() does not
        # reject the attribute later.
        if self._pos is None:
            self.data = None
            return
        self.data = PolygonData(vertices=np.array(self._pos, dtype=np.float32))
        if not self._color.is_blank:
            pts, tris = self.data.triangulate()
            set_state(polygon_offset_fill=False)
            self._mesh.set_data(vertices=pts, faces=tris.astype(np.uint32),
                                color=self._color.rgba)
        if not self._border_color.is_blank:
            # Close border if it is not already (compare first vertex to the
            # LAST one; the original compared it to the second vertex, which
            # appended a duplicate point to already-closed polygons).
            border_pos = self._pos
            if np.any(border_pos[0] != border_pos[-1]):
                border_pos = np.concatenate([border_pos, border_pos[:1]],
                                            axis=0)
            self._border.set_data(pos=border_pos,
                                  color=self._border_color.rgba,
                                  width=self._border_width,
                                  connect='strip')
            self._border.update()

    @property
    def pos(self):
        """ The vertex position of the polygon.
        """
        return self._pos

    @pos.setter
    def pos(self, pos):
        self._pos = pos
        self._update()

    @property
    def color(self):
        """ The color of the polygon.
        """
        return self._color

    @color.setter
    def color(self, color):
        self._color = Color(color, clip=True)
        self._update()

    @property
    def border_color(self):
        """ The border color of the polygon.
        """
        return self._border_color

    @border_color.setter
    def border_color(self, border_color):
        self._border_color = Color(border_color)
        self._update()

    @property
    def mesh(self):
        """The vispy.visuals.MeshVisual that is owned by the PolygonVisual.
        It is used to fill in the polygon
        """
        return self._mesh

    @mesh.setter
    def mesh(self, mesh):
        self._mesh = mesh
        self._update()

    @property
    def border(self):
        """The vispy.visuals.LineVisual that is owned by the PolygonVisual.
        It is used to draw the border of the polygon
        """
        return self._border

    @border.setter
    def border(self, border):
        self._border = border
        self._update()
| bsd-3-clause |
cryptobanana/ansible | lib/ansible/plugins/action/include_vars.py | 30 | 10335 | # (c) 2016, Allen Sanabria <asanabria@linuxdynasty.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os import path, walk
import re
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native, to_text
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action plugin backing ``include_vars``: loads variables from a single
    file or, recursively, from a directory of YAML/JSON files."""

    TRANSFERS_FILES = False

    # Extensions accepted when scanning a directory for variable files.
    VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
    # Options valid only in directory mode / file mode / both.
    VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions']
    VALID_FILE_ARGUMENTS = ['file', '_raw_params']
    VALID_ALL = ['name']

    def _set_dir_defaults(self):
        """Fill in defaults for the directory-mode options."""
        if not self.depth:
            self.depth = 0

        if self.files_matching:
            self.matcher = re.compile(r'{0}'.format(self.files_matching))
        else:
            self.matcher = None

        if not self.ignore_files:
            self.ignore_files = list()

        if isinstance(self.ignore_files, str):
            self.ignore_files = self.ignore_files.split()

        elif isinstance(self.ignore_files, dict):
            # NOTE(review): this return value is silently discarded by the
            # caller (run() ignores it), so the "must be a list" error never
            # surfaces -- raising AnsibleError would; confirm intent.
            return {
                'failed': True,
                'message': '{0} must be a list'.format(self.ignore_files)
            }

    def _set_args(self):
        """ Set instance variables based on the arguments that were passed """
        self.return_results_as_name = self._task.args.get('name', None)
        self.source_dir = self._task.args.get('dir', None)
        self.source_file = self._task.args.get('file', None)
        if not self.source_dir and not self.source_file:
            # Bare form: `include_vars: somefile.yml`
            self.source_file = self._task.args.get('_raw_params')

        self.depth = self._task.args.get('depth', None)
        self.files_matching = self._task.args.get('files_matching', None)
        self.ignore_files = self._task.args.get('ignore_files', None)
        self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)

        # convert/validate extensions list
        if isinstance(self.valid_extensions, string_types):
            # NOTE(review): list('yaml') yields ['y', 'a', 'm', 'l'] -- a
            # comma-split was probably intended here; confirm against the
            # documented `extensions` option format.
            self.valid_extensions = list(self.valid_extensions)
        if not isinstance(self.valid_extensions, list):
            raise AnsibleError('Invalid type for "extensions" option, it must be a list')

    def run(self, tmp=None, task_vars=None):
        """ Load yml files recursively from a directory.
        """
        if task_vars is None:
            task_vars = dict()

        self.show_content = True
        self.included_files = []

        # Validate arguments: directory-only and file-only options are
        # mutually exclusive.
        dirs = 0
        files = 0
        for arg in self._task.args:
            if arg in self.VALID_DIR_ARGUMENTS:
                dirs += 1
            elif arg in self.VALID_FILE_ARGUMENTS:
                files += 1
            elif arg in self.VALID_ALL:
                pass
            else:
                # NOTE(review): message says "debug" but this is include_vars;
                # looks copy-pasted from the debug action plugin.
                raise AnsibleError('{0} is not a valid option in debug'.format(arg))

        if dirs and files:
            # NOTE(review): "Your are" typo in this user-facing message.
            raise AnsibleError("Your are mixing file only and dir only arguments, these are incompatible")

        # set internal vars from args
        self._set_args()

        results = dict()
        if self.source_dir:
            self._set_dir_defaults()
            self._set_root_dir()
            if not path.exists(self.source_dir):
                failed = True
                err_msg = ('{0} directory does not exist'.format(self.source_dir))
            elif not path.isdir(self.source_dir):
                failed = True
                err_msg = ('{0} is not a directory'.format(self.source_dir))
            else:
                # Walk the tree (depth-limited); stop at the first failure.
                for root_dir, filenames in self._traverse_dir_depth():
                    failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
                    if failed:
                        break
                    results.update(updated_results)
        else:
            try:
                self.source_file = self._find_needle('vars', self.source_file)
                failed, err_msg, updated_results = (
                    self._load_files(self.source_file)
                )
                if not failed:
                    results.update(updated_results)
            except AnsibleError as e:
                failed = True
                err_msg = to_native(e)

        if self.return_results_as_name:
            # Nest everything under the requested variable name.
            scope = dict()
            scope[self.return_results_as_name] = results
            results = scope

        result = super(ActionModule, self).run(tmp, task_vars)

        if failed:
            result['failed'] = failed
            result['message'] = err_msg

        result['ansible_included_var_files'] = self.included_files
        result['ansible_facts'] = results
        result['_ansible_no_log'] = not self.show_content

        return result

    def _set_root_dir(self):
        """Resolve ``self.source_dir`` relative to the role (vars/) or to the
        directory of the current playbook/tasks file."""
        if self._task._role:
            if self.source_dir.split('/')[0] == 'vars':
                path_to_use = (
                    path.join(self._task._role._role_path, self.source_dir)
                )
                if path.exists(path_to_use):
                    self.source_dir = path_to_use
            else:
                path_to_use = (
                    path.join(
                        self._task._role._role_path, 'vars', self.source_dir
                    )
                )
                self.source_dir = path_to_use
        else:
            current_dir = (
                "/".join(self._task._ds._data_source.split('/')[:-1])
            )
            self.source_dir = path.join(current_dir, self.source_dir)

    def _traverse_dir_depth(self):
        """ Recursively iterate over a directory and sort the files in
            alphabetical order. Do not iterate pass the set depth.
            The default depth is unlimited.
        """
        current_depth = 0
        sorted_walk = list(walk(self.source_dir))
        # Sort by directory path for a deterministic load order.
        sorted_walk.sort(key=lambda x: x[0])
        for current_root, current_dir, current_files in sorted_walk:
            current_depth += 1
            if current_depth <= self.depth or self.depth == 0:
                current_files.sort()
                yield (current_root, current_files)
            else:
                break

    def _ignore_file(self, filename):
        """ Return True if a file matches the list of ignore_files.
        Args:
            filename (str): The filename that is being matched against.
        Returns:
            Boolean
        """
        for file_type in self.ignore_files:
            try:
                # Each ignore entry is treated as a regex anchored at the end.
                if re.search(r'{0}$'.format(file_type), filename):
                    return True
            except Exception:
                err_msg = 'Invalid regular expression: {0}'.format(file_type)
                raise AnsibleError(err_msg)
        return False

    def _is_valid_file_ext(self, source_file):
        """ Verify if source file has a valid extension
        Args:
            source_file (str): The full path of source file or source file.
        Returns:
            Bool
        """
        file_ext = path.splitext(source_file)
        return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)

    def _load_files(self, filename, validate_extensions=False):
        """ Loads a file and converts the output into a valid Python dict.
        Args:
            filename (str): The source file.
        Returns:
            Tuple (bool, str, dict)
        """
        results = dict()
        failed = False
        err_msg = ''
        if validate_extensions and not self._is_valid_file_ext(filename):
            failed = True
            err_msg = ('{0} does not have a valid extension: {1}' .format(filename, ', '.join(self.valid_extensions)))
        else:
            b_data, show_content = self._loader._get_file_contents(filename)
            data = to_text(b_data, errors='surrogate_or_strict')

            self.show_content = show_content
            data = self._loader.load(data, show_content)
            if not data:
                data = dict()
            if not isinstance(data, dict):
                failed = True
                err_msg = ('{0} must be stored as a dictionary/hash' .format(filename))
            else:
                self.included_files.append(filename)
                results.update(data)

        return failed, err_msg, results

    def _load_files_in_dir(self, root_dir, var_files):
        """ Load the found yml files and update/overwrite the dictionary.
        Args:
            root_dir (str): The base directory of the list of files that is being passed.
            var_files: (list): List of files to iterate over and load into a dictionary.
        Returns:
            Tuple (bool, str, dict)
        """
        results = dict()
        failed = False
        err_msg = ''
        for filename in var_files:
            # NOTE(review): `stop_iter` only skips the CURRENT file (the name
            # suggests it stops the whole loop); behaviour preserved as-is.
            stop_iter = False
            # Never include main.yml from a role, as that is the default included by the role
            if self._task._role:
                if filename == 'main.yml':
                    stop_iter = True
                    continue

            filepath = path.join(root_dir, filename)
            if self.files_matching:
                if not self.matcher.search(filename):
                    stop_iter = True

            if not stop_iter and not failed:
                if path.exists(filepath) and not self._ignore_file(filename):
                    failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
                    if not failed:
                        results.update(loaded_data)

        return failed, err_msg, results
| gpl-3.0 |
abhilashnta/edx-platform | lms/djangoapps/shoppingcart/processors/helpers.py | 169 | 1025 | """
Helper methods for credit card processing modules.
These methods should be shared among all processor implementations,
but should NOT be imported by modules outside this package.
"""
from django.conf import settings
from microsite_configuration import microsite
def get_processor_config():
    """
    Return a dictionary of configuration settings for the active credit card
    processor, preferring a microsite-specific override when one is defined.

    Returns:
        dict
    """
    # Settings for whichever credit card processor is currently active.
    processor_config = settings.CC_PROCESSOR.get(settings.CC_PROCESSOR_NAME, {})

    # If this microsite defines a config key, use the matching entry from the
    # 'microsites' sub-key of the processor configuration instead.
    override_key = microsite.get_value('cybersource_config_key')
    if override_key:
        processor_config = processor_config['microsites'][override_key]

    return processor_config
| agpl-3.0 |
schleichdi2/OPENNFR-6.0-CORE | opennfr-openembedded-core/meta/lib/oeqa/selftest/wic.py | 1 | 17312 | #!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2015, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# AUTHORS
# Ed Bartosh <ed.bartosh@linux.intel.com>
"""Test cases for wic."""
import os
from glob import glob
from shutil import rmtree
from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, runqemu
from oeqa.utils.decorators import testcase
class Wic(oeSelfTest):
"""Wic test class."""
resultdir = "/var/tmp/wic/build/"
alternate_resultdir = "/var/tmp/wic/build/alt/"
image_is_ready = False
wicenv_cache = {}
def setUpLocal(self):
"""This code is executed before each test method."""
self.write_config('IMAGE_FSTYPES += " hddimg"\n'
'MACHINE_FEATURES_append = " efi"\n'
'WKS_FILE = "wic-image-minimal"\n')
# Do this here instead of in setUpClass as the base setUp does some
# clean up which can result in the native tools built earlier in
# setUpClass being unavailable.
if not Wic.image_is_ready:
bitbake('syslinux syslinux-native parted-native gptfdisk-native '
'dosfstools-native mtools-native bmap-tools-native')
bitbake('core-image-minimal')
Wic.image_is_ready = True
rmtree(self.resultdir, ignore_errors=True)
@testcase(1552)
def test_version(self):
"""Test wic --version"""
self.assertEqual(0, runCmd('wic --version').status)
@testcase(1208)
def test_help(self):
"""Test wic --help and wic -h"""
self.assertEqual(0, runCmd('wic --help').status)
self.assertEqual(0, runCmd('wic -h').status)
@testcase(1209)
def test_createhelp(self):
"""Test wic create --help"""
self.assertEqual(0, runCmd('wic create --help').status)
@testcase(1210)
def test_listhelp(self):
"""Test wic list --help"""
self.assertEqual(0, runCmd('wic list --help').status)
@testcase(1553)
def test_help_create(self):
"""Test wic help create"""
self.assertEqual(0, runCmd('wic help create').status)
@testcase(1554)
def test_help_list(self):
"""Test wic help list"""
self.assertEqual(0, runCmd('wic help list').status)
@testcase(1215)
def test_help_overview(self):
"""Test wic help overview"""
self.assertEqual(0, runCmd('wic help overview').status)
@testcase(1216)
def test_help_plugins(self):
"""Test wic help plugins"""
self.assertEqual(0, runCmd('wic help plugins').status)
@testcase(1217)
def test_help_kickstart(self):
"""Test wic help kickstart"""
self.assertEqual(0, runCmd('wic help kickstart').status)
@testcase(1555)
def test_list_images(self):
"""Test wic list images"""
self.assertEqual(0, runCmd('wic list images').status)
@testcase(1556)
def test_list_source_plugins(self):
"""Test wic list source-plugins"""
self.assertEqual(0, runCmd('wic list source-plugins').status)
@testcase(1557)
def test_listed_images_help(self):
"""Test wic listed images help"""
output = runCmd('wic list images').output
imagelist = [line.split()[0] for line in output.splitlines()]
for image in imagelist:
self.assertEqual(0, runCmd('wic list %s help' % image).status)
@testcase(1213)
def test_unsupported_subcommand(self):
"""Test unsupported subcommand"""
self.assertEqual(1, runCmd('wic unsupported',
ignore_status=True).status)
@testcase(1214)
def test_no_command(self):
"""Test wic without command"""
self.assertEqual(1, runCmd('wic', ignore_status=True).status)
@testcase(1211)
def test_build_image_name(self):
"""Test wic create directdisk --image-name=core-image-minimal"""
cmd = "wic create directdisk --image-name=core-image-minimal"
self.assertEqual(0, runCmd(cmd).status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
@testcase(1157)
def test_gpt_image(self):
"""Test creation of core-image-minimal with gpt table and UUID boot"""
cmd = "wic create directdisk-gpt --image-name core-image-minimal"
self.assertEqual(0, runCmd(cmd).status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
@testcase(1346)
def test_iso_image(self):
"""Test creation of hybrid iso image with legacy and EFI boot"""
cmd = "wic create mkhybridiso --image-name core-image-minimal"
self.assertEqual(0, runCmd(cmd).status)
self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.direct")))
self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.iso")))
@testcase(1348)
def test_qemux86_directdisk(self):
"""Test creation of qemux-86-directdisk image"""
cmd = "wic create qemux86-directdisk -e core-image-minimal"
self.assertEqual(0, runCmd(cmd).status)
self.assertEqual(1, len(glob(self.resultdir + "qemux86-directdisk-*direct")))
@testcase(1350)
def test_mkefidisk(self):
"""Test creation of mkefidisk image"""
cmd = "wic create mkefidisk -e core-image-minimal"
self.assertEqual(0, runCmd(cmd).status)
self.assertEqual(1, len(glob(self.resultdir + "mkefidisk-*direct")))
@testcase(1385)
def test_directdisk_bootloader_config(self):
"""Test creation of directdisk-bootloader-config image"""
cmd = "wic create directdisk-bootloader-config -e core-image-minimal"
self.assertEqual(0, runCmd(cmd).status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-bootloader-config-*direct")))
@testcase(1560)
def test_systemd_bootdisk(self):
"""Test creation of systemd-bootdisk image"""
cmd = "wic create systemd-bootdisk -e core-image-minimal"
self.assertEqual(0, runCmd(cmd).status)
self.assertEqual(1, len(glob(self.resultdir + "systemd-bootdisk-*direct")))
@testcase(1561)
def test_sdimage_bootpart(self):
"""Test creation of sdimage-bootpart image"""
cmd = "wic create sdimage-bootpart -e core-image-minimal"
self.write_config('IMAGE_BOOT_FILES = "bzImage"\n')
self.assertEqual(0, runCmd(cmd).status)
self.assertEqual(1, len(glob(self.resultdir + "sdimage-bootpart-*direct")))
@testcase(1562)
def test_alternate_output_dir(self):
"""Test alternate output directory"""
self.assertEqual(0, runCmd("wic create directdisk "
"-e core-image-minimal "
"-o %s"
% self.alternate_resultdir).status)
self.assertEqual(1, len(glob(self.alternate_resultdir +
"build/directdisk-*.direct")))
self.assertEqual(0, runCmd("wic create mkefidisk -e "
"core-image-minimal "
"--outdir=%s"
% self.alternate_resultdir).status)
self.assertEqual(1, len(glob(self.alternate_resultdir +
"build/mkefidisk-*direct")))
@testcase(1212)
def test_build_artifacts(self):
"""Test wic create directdisk providing all artifacts."""
bbvars = dict((var.lower(), get_bb_var(var, 'core-image-minimal'))
for var in ('STAGING_DATADIR', 'DEPLOY_DIR_IMAGE',
'STAGING_DIR_NATIVE', 'IMAGE_ROOTFS'))
status = runCmd("wic create directdisk "
"-b %(staging_datadir)s "
"-k %(deploy_dir_image)s "
"-n %(staging_dir_native)s "
"-r %(image_rootfs)s" % bbvars).status
self.assertEqual(0, status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
@testcase(1264)
def test_compress_gzip(self):
"""Test compressing an image with gzip"""
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name core-image-minimal "
"-c gzip").status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct.gz")))
@testcase(1265)
def test_compress_bzip2(self):
"""Test compressing an image with bzip2"""
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name=core-image-minimal "
"-c bzip2").status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct.bz2")))
@testcase(1266)
def test_compress_xz(self):
"""Test compressing an image with xz"""
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name=core-image-minimal "
"--compress-with=xz").status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct.xz")))
@testcase(1267)
def test_wrong_compressor(self):
"""Test how wic breaks if wrong compressor is provided"""
self.assertEqual(2, runCmd("wic create directdisk "
"--image-name=core-image-minimal "
"-c wrong", ignore_status=True).status)
@testcase(1558)
def test_debug(self):
"""Test debug"""
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name=core-image-minimal "
"-D").status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name=core-image-minimal "
"--debug").status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
@testcase(1563)
def test_skip_build_check(self):
"""Test skip build check"""
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name=core-image-minimal "
"-s").status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name=core-image-minimal "
"--skip-build-check").status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
@testcase(1564)
def test_build_rootfs(self):
"""Test build rootfs"""
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name=core-image-minimal "
"-f").status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name=core-image-minimal "
"--build-rootfs").status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
@testcase(1268)
def test_rootfs_indirect_recipes(self):
"""Test usage of rootfs plugin with rootfs recipes"""
status = runCmd("wic create directdisk-multi-rootfs "
"--image-name=core-image-minimal "
"--rootfs rootfs1=core-image-minimal "
"--rootfs rootfs2=core-image-minimal").status
self.assertEqual(0, status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-multi-rootfs*.direct")))
@testcase(1269)
def test_rootfs_artifacts(self):
"""Test usage of rootfs plugin with rootfs paths"""
bbvars = dict((var.lower(), get_bb_var(var, 'core-image-minimal'))
for var in ('STAGING_DATADIR', 'DEPLOY_DIR_IMAGE',
'STAGING_DIR_NATIVE', 'IMAGE_ROOTFS'))
bbvars['wks'] = "directdisk-multi-rootfs"
status = runCmd("wic create %(wks)s "
"--bootimg-dir=%(staging_datadir)s "
"--kernel-dir=%(deploy_dir_image)s "
"--native-sysroot=%(staging_dir_native)s "
"--rootfs-dir rootfs1=%(image_rootfs)s "
"--rootfs-dir rootfs2=%(image_rootfs)s"
% bbvars).status
self.assertEqual(0, status)
self.assertEqual(1, len(glob(self.resultdir + "%(wks)s-*.direct" % bbvars)))
@testcase(1496)
def test_bmap(self):
"""Test generation of .bmap file"""
cmd = "wic create directdisk -e core-image-minimal -m"
status = runCmd(cmd).status
self.assertEqual(0, status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*direct")))
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*direct.bmap")))
cmd = "wic create directdisk -e core-image-minimal --bmap"
status = runCmd(cmd).status
self.assertEqual(0, status)
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*direct")))
self.assertEqual(1, len(glob(self.resultdir + "directdisk-*direct.bmap")))
def _get_image_env_path(self, image):
"""Generate and obtain the path to <image>.env"""
if image not in self.wicenv_cache:
self.assertEqual(0, bitbake('%s -c do_rootfs_wicenv' % image).status)
stdir = get_bb_var('STAGING_DIR_TARGET', image)
self.wicenv_cache[image] = os.path.join(stdir, 'imgdata')
return self.wicenv_cache[image]
@testcase(1347)
def test_image_env(self):
"""Test generation of <image>.env files."""
image = 'core-image-minimal'
imgdatadir = self._get_image_env_path(image)
basename = get_bb_var('IMAGE_BASENAME', image)
self.assertEqual(basename, image)
path = os.path.join(imgdatadir, basename) + '.env'
self.assertTrue(os.path.isfile(path))
wicvars = set(get_bb_var('WICVARS', image).split())
# filter out optional variables
wicvars = wicvars.difference(('HDDDIR', 'IMAGE_BOOT_FILES',
'INITRD', 'ISODIR'))
with open(path) as envfile:
content = dict(line.split("=", 1) for line in envfile)
# test if variables used by wic present in the .env file
for var in wicvars:
self.assertTrue(var in content, "%s is not in .env file" % var)
self.assertTrue(content[var])
@testcase(1559)
def test_image_vars_dir(self):
"""Test image vars directory selection"""
image = 'core-image-minimal'
imgenvdir = self._get_image_env_path(image)
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name=%s "
"-v %s"
% (image, imgenvdir)).status)
self.assertEqual(0, runCmd("wic create directdisk "
"--image-name=%s "
"--vars %s"
% (image, imgenvdir)).status)
@testcase(1351)
def test_wic_image_type(self):
"""Test building wic images by bitbake"""
self.assertEqual(0, bitbake('wic-image-minimal').status)
deploy_dir = get_bb_var('DEPLOY_DIR_IMAGE')
machine = get_bb_var('MACHINE')
prefix = os.path.join(deploy_dir, 'wic-image-minimal-%s.' % machine)
# check if we have result image and manifests symlinks
# pointing to existing files
for suffix in ('wic', 'manifest'):
path = prefix + suffix
self.assertTrue(os.path.islink(path))
self.assertTrue(os.path.isfile(os.path.realpath(path)))
    @testcase(1422)
    def test_qemu(self):
        """Test wic-image-minimal under qemu"""
        self.assertEqual(0, bitbake('wic-image-minimal').status)
        with runqemu('wic-image-minimal', ssh=False) as qemu:
            # List each mounted /dev/* device with its mount point.
            cmd = "mount |grep '^/dev/' | cut -f1,3 -d ' '"
            status, output = qemu.run_serial(cmd)
            # NOTE(review): status 1 is treated as success here — presumably
            # run_serial returns 1 on success; confirm against the runqemu
            # helper's contract.
            self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
            # Expected partition layout of the wic-image-minimal wks file.
            self.assertEqual(output, '/dev/root /\r\n/dev/vda3 /mnt')
| gpl-2.0 |
Carmezim/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn.py | 22 | 36083 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.05
def _get_feature_dict(features):
if isinstance(features, dict):
return features
return {"": features}
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
  """Emit scalar/histogram summaries for one layer's activations under `tag`."""
  # The fraction of zero activations is a useful saturation signal for
  # ReLU-style layers.
  summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
  summary.histogram("%s_activation" % tag, value)
def _dnn_model_fn(features, labels, mode, params, config=None):
  """Deep Neural Net model_fn.
  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `_Head` instance.
      * hidden_units: List of hidden units per layer.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training. If `None`, will use the Adagrad
          optimizer with a default learning rate of 0.05.
      * activation_fn: Activation function applied to each layer. If `None`,
          will use `tf.nn.relu`.
      * dropout: When not `None`, the probability we will drop out a given
          coordinate.
      * gradient_clip_norm: A float > 0. If provided, gradients are
          clipped to their global norm with this clipping ratio.
      * embedding_lr_multipliers: Optional. A dictionary from
          `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
          multiply with learning rate for the embedding variables.
      * input_layer_min_slice_size: Optional. The min slice size of input layer
          partitions. If not provided, will use the default of 64M.
    config: `RunConfig` object to configure the runtime settings.
  Returns:
    predictions: A dict of `Tensor` objects.
    loss: A scalar containing the loss of the step.
    train_op: The op for training.
  """
  # Unpack hyperparameters, applying documented defaults.
  head = params["head"]
  hidden_units = params["hidden_units"]
  feature_columns = params["feature_columns"]
  optimizer = params.get("optimizer") or "Adagrad"
  activation_fn = params.get("activation_fn")
  dropout = params.get("dropout")
  gradient_clip_norm = params.get("gradient_clip_norm")
  # 64 << 20 is the documented 64M default minimum slice size.
  input_layer_min_slice_size = (
      params.get("input_layer_min_slice_size") or 64 << 20)
  num_ps_replicas = config.num_ps_replicas if config else 0
  embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})
  # Normalize a bare feature Tensor into a dict keyed by "".
  features = _get_feature_dict(features)
  parent_scope = "dnn"
  # Partitioner spreads large variables across parameter servers (no-op
  # when num_ps_replicas is 0).
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas)
  with variable_scope.variable_scope(
      parent_scope,
      values=tuple(six.itervalues(features)),
      partitioner=partitioner):
    # The input layer gets its own partitioner with a larger minimum slice
    # size, since embedding tables can be very large.
    input_layer_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas,
            min_slice_size=input_layer_min_slice_size))
    with variable_scope.variable_scope(
        "input_from_feature_columns",
        values=tuple(six.itervalues(features)),
        partitioner=input_layer_partitioner) as input_layer_scope:
      net = layers.input_from_feature_columns(
          columns_to_tensors=features,
          feature_columns=feature_columns,
          weight_collections=[parent_scope],
          scope=input_layer_scope)
    # Stack the fully-connected hidden layers; dropout only during training.
    for layer_id, num_hidden_units in enumerate(hidden_units):
      with variable_scope.variable_scope(
          "hiddenlayer_%d" % layer_id,
          values=(net,)) as hidden_layer_scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=activation_fn,
            variables_collections=[parent_scope],
            scope=hidden_layer_scope)
        if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
          net = layers.dropout(net, keep_prob=(1.0 - dropout))
      _add_hidden_layer_summary(net, hidden_layer_scope.name)
    # Final linear projection to the head's logits dimension (no activation:
    # the head applies its own link function/loss).
    with variable_scope.variable_scope(
        "logits",
        values=(net,)) as logits_scope:
      logits = layers.fully_connected(
          net,
          head.logits_dimension,
          activation_fn=None,
          variables_collections=[parent_scope],
          scope=logits_scope)
    _add_hidden_layer_summary(logits, logits_scope.name)
    def _train_op_fn(loss):
      """Returns the op to optimize the loss."""
      return optimizers.optimize_loss(
          loss=loss,
          global_step=contrib_variables.get_global_step(),
          learning_rate=_LEARNING_RATE,
          optimizer=_get_optimizer(optimizer),
          # Scale gradients of embedding variables by their configured
          # learning-rate multipliers.
          gradient_multipliers=(
              dnn_linear_combined._extract_embedding_lr_multipliers(  # pylint: disable=protected-access
                  embedding_lr_multipliers, parent_scope,
                  input_layer_scope.name)),
          clip_gradients=gradient_clip_norm,
          name=parent_scope,
          # Empty summaries to prevent optimizers from logging training_loss.
          summaries=[])
    # The head assembles predictions/loss/train_op for the given mode.
    return head.create_model_fn_ops(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)
class DNNClassifier(estimator.Estimator):
  """A classifier for TensorFlow DNN models.
  Example:
  ```python
  sparse_feature_a = sparse_column_with_hash_bucket(...)
  sparse_feature_b = sparse_column_with_hash_bucket(...)
  sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
                                          ...)
  sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
                                          ...)
  estimator = DNNClassifier(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      hidden_units=[1024, 512, 256])
  # Or estimator using the ProximalAdagradOptimizer optimizer with
  # regularization.
  estimator = DNNClassifier(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      hidden_units=[1024, 512, 256],
      optimizer=tf.train.ProximalAdagradOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
      ))
  # Input builders
  def input_fn_train: # returns x, y (where y represents label's class index).
    pass
  estimator.fit(input_fn=input_fn_train)
  def input_fn_eval: # returns x, y (where y represents label's class index).
    pass
  estimator.evaluate(input_fn=input_fn_eval)
  def input_fn_predict: # returns x, None
  # predict_classes returns class indices.
  estimator.predict_classes(input_fn=input_fn_predict)
  ```
  If the user specifies `label_keys` in constructor, labels must be strings from
  the `label_keys` vocabulary. Example:
  ```python
  label_keys = ['label0', 'label1', 'label2']
  estimator = DNNClassifier(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      hidden_units=[1024, 512, 256],
      label_keys=label_keys)
  def input_fn_train: # returns x, y (where y is one of label_keys).
    pass
  estimator.fit(input_fn=input_fn_train)
  def input_fn_eval: # returns x, y (where y is one of label_keys).
    pass
  estimator.evaluate(input_fn=input_fn_eval)
  def input_fn_predict: # returns x, None
  # predict_classes returns one of label_keys.
  estimator.predict_classes(input_fn=input_fn_predict)
  ```
  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:
  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """
  def __init__(self,
               hidden_units,
               feature_columns,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               config=None,
               feature_engineering_fn=None,
               embedding_lr_multipliers=None,
               input_layer_min_slice_size=None,
               label_keys=None):
    """Initializes a DNNClassifier instance.
    Args:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        It must be greater than 1. Note: Class labels are integers representing
        the class index (i.e. values from 0 to n_classes-1). For arbitrary
        label values (e.g. string labels), convert to class indices first.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
        a `float` multiplier. Multiplier will be used to multiply with learning
        rate for the embedding variables.
      input_layer_min_slice_size: Optional. The min slice size of input layer
        partitions. If not provided, will use the default of 64M.
      label_keys: Optional list of strings with size `[n_classes]` defining the
        label vocabulary. Only supported for `n_classes` > 2.
    Returns:
      A `DNNClassifier` estimator.
    Raises:
      ValueError: If `n_classes` < 2.
    """
    # Normalize to an immutable tuple; also tolerates feature_columns=None.
    self._feature_columns = tuple(feature_columns or [])
    # All DNN hyperparameters are routed to _dnn_model_fn through `params`.
    super(DNNClassifier, self).__init__(
        model_fn=_dnn_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head":
                head_lib.multi_class_head(
                    n_classes,
                    weight_column_name=weight_column_name,
                    enable_centered_bias=enable_centered_bias,
                    label_keys=label_keys),
            "hidden_units": hidden_units,
            "feature_columns": self._feature_columns,
            "optimizer": optimizer,
            "activation_fn": activation_fn,
            "dropout": dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "embedding_lr_multipliers": embedding_lr_multipliers,
            "input_layer_min_slice_size": input_layer_min_slice_size,
        },
        feature_engineering_fn=feature_engineering_fn)
  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE,
      estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  @deprecated_arg_values(
      "2017-03-01",
      "Please switch to predict_classes, or set `outputs` argument.",
      outputs=None)
  def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
              as_iterable=True):
    """Returns predictions for given features.
    By default, returns predicted classes. But this default will be dropped
    soon. Users should either pass `outputs`, or call `predict_classes` method.
    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns classes.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).
    Returns:
      Numpy array of predicted classes with shape [batch_size] (or an iterable
      of predicted classes if as_iterable is True). Each predicted class is
      represented by its class index (i.e. integer from 0 to n_classes-1).
      If `outputs` is set, returns a dict of predictions.
    """
    # Deprecated default path: no `outputs` means "give me class indices".
    if not outputs:
      return self.predict_classes(
          x=x,
          input_fn=input_fn,
          batch_size=batch_size,
          as_iterable=as_iterable)
    return super(DNNClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=outputs,
        as_iterable=as_iterable)
  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE,
      estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_classes(self, x=None, input_fn=None, batch_size=None,
                      as_iterable=True):
    """Returns predicted classes for given features.
    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).
    Returns:
      Numpy array of predicted classes with shape [batch_size] (or an iterable
      of predicted classes if as_iterable is True). Each predicted class is
      represented by its class index (i.e. integer from 0 to n_classes-1).
    """
    key = prediction_key.PredictionKey.CLASSES
    preds = super(DNNClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    # Streaming mode yields one prediction dict per example; pick out the
    # class entry. Batch mode returns a dict of arrays, flattened to 1-D.
    if as_iterable:
      return (pred[key] for pred in preds)
    return preds[key].reshape(-1)
  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE,
      estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(self,
                    x=None,
                    input_fn=None,
                    batch_size=None,
                    as_iterable=True):
    """Returns predicted probabilities for given features.
    Args:
      x: features.
      input_fn: Input function. If set, x and y must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).
    Returns:
      Numpy array of predicted probabilities with shape [batch_size, n_classes]
      (or an iterable of predicted probabilities if as_iterable is True).
    """
    key = prediction_key.PredictionKey.PROBABILITIES
    preds = super(DNNClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return (pred[key] for pred in preds)
    return preds[key]
  @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""
    # Fallback input_fn: parse serialized tf.Examples with this model's
    # feature columns.
    def default_input_fn(unused_estimator, examples):
      return layers.parse_feature_columns_from_examples(examples,
                                                        self._feature_columns)
    return super(DNNClassifier, self).export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=(signature_fn or
                      export.classification_signature_fn_with_prob),
        prediction_key=prediction_key.PredictionKey.PROBABILITIES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
class DNNRegressor(estimator.Estimator):
"""A regressor for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNRegressor(
feature_columns=[sparse_feature_a, sparse_feature_b],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=[sparse_feature_a, sparse_feature_b],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
def input_fn_train: # returns x, y
pass
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, y
pass
estimator.evaluate(input_fn=input_fn_eval)
def input_fn_predict: # returns x, None
pass
estimator.predict_scores(input_fn=input_fn_predict)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
enable_centered_bias=False,
config=None,
feature_engineering_fn=None,
label_dimension=1,
embedding_lr_multipliers=None,
input_layer_min_slice_size=None):
"""Initializes a `DNNRegressor` instance.
Args:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
embedding_lr_multipliers: Optional. A dictionary from `EbeddingColumn` to
a `float` multiplier. Multiplier will be used to multiply with
learning rate for the embedding variables.
input_layer_min_slice_size: Optional. The min slice size of input layer
partitions. If not provided, will use the default of 64M.
Returns:
A `DNNRegressor` estimator.
"""
self._feature_columns = tuple(feature_columns or [])
super(DNNRegressor, self).__init__(
model_fn=_dnn_model_fn,
model_dir=model_dir,
config=config,
params={
"head":
head_lib.regression_head(
label_dimension=label_dimension,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias),
"hidden_units": hidden_units,
"feature_columns": self._feature_columns,
"optimizer": optimizer,
"activation_fn": activation_fn,
"dropout": dropout,
"gradient_clip_norm": gradient_clip_norm,
"embedding_lr_multipliers": embedding_lr_multipliers,
"input_layer_min_slice_size": input_layer_min_slice_size,
},
feature_engineering_fn=feature_engineering_fn)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None):
"""See evaluable.Evaluable."""
# TODO(zakaria): remove once deprecation is finished (b/31229024)
custom_metrics = {}
if metrics:
for key, metric in six.iteritems(metrics):
if (not isinstance(metric, metric_spec.MetricSpec) and
not isinstance(key, tuple)):
custom_metrics[(key, prediction_key.PredictionKey.SCORES)] = metric
else:
custom_metrics[key] = metric
return super(DNNRegressor, self).evaluate(
x=x,
y=y,
input_fn=input_fn,
feed_fn=feed_fn,
batch_size=batch_size,
steps=steps,
metrics=custom_metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_scores, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted scores. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_scores` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns scores.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_scores(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(DNNRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_scores(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted scores for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
"""
key = prediction_key.PredictionKey.SCORES
preds = super(DNNRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key]
  @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""
    # Fallback input_fn: parse serialized tf.Examples using this model's
    # feature columns (closes over self._feature_columns).
    def default_input_fn(unused_estimator, examples):
      return layers.parse_feature_columns_from_examples(examples,
                                                        self._feature_columns)
    # Regression export: always keyed by SCORES, with a regression signature
    # unless the caller supplies their own signature_fn.
    return super(DNNRegressor, self).export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=signature_fn or export.regression_signature_fn,
        prediction_key=prediction_key.PredictionKey.SCORES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
class DNNEstimator(estimator.Estimator):
  """An Estimator for TensorFlow DNN models with a user-specified `_Head`.

  Example:

  ```python
  sparse_feature_a = sparse_column_with_hash_bucket(...)
  sparse_feature_b = sparse_column_with_hash_bucket(...)
  sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
                                          ...)
  sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
                                          ...)
  To create a DNNEstimator for binary classification, where
  estimator = DNNEstimator(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      head=tf.contrib.learn.multi_class_head(n_classes=2),
      hidden_units=[1024, 512, 256])
  If your label is keyed with "y" in your labels dict, and weights are keyed
  with "w" in features dict, and you want to enable centered bias,
  head = tf.contrib.learn.multi_class_head(
      n_classes=2,
      label_name="y",
      weight_column_name="w",
      enable_centered_bias=True)
  estimator = DNNEstimator(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      head=head,
      hidden_units=[1024, 512, 256])
  # Input builders
  def input_fn_train: # returns x, y (where y represents label's class index).
    pass
  estimator.fit(input_fn=input_fn_train)
  def input_fn_eval: # returns x, y (where y represents label's class index).
    pass
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:
  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """
  def __init__(self,
               head,
               hidden_units,
               feature_columns,
               model_dir=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               config=None,
               feature_engineering_fn=None,
               embedding_lr_multipliers=None,
               input_layer_min_slice_size=None):
    """Initializes a `DNNEstimator` instance.

    Args:
      head: `Head` instance.
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and
        returns features and labels which will be fed
        into the model.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
        a `float` multiplier. Multiplier will be used to multiply with
        learning rate for the embedding variables.
      input_layer_min_slice_size: Optional. The min slice size of input layer
        partitions. If not provided, will use the default of 64M.

    Returns:
      A `DNNEstimator` estimator.
    """
    # All architecture/training hyperparameters are forwarded unchanged to
    # _dnn_model_fn through the `params` dict.
    super(DNNEstimator, self).__init__(
        model_fn=_dnn_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head": head,
            "hidden_units": hidden_units,
            "feature_columns": feature_columns,
            "optimizer": optimizer,
            "activation_fn": activation_fn,
            "dropout": dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "embedding_lr_multipliers": embedding_lr_multipliers,
            "input_layer_min_slice_size": input_layer_min_slice_size,
        },
        feature_engineering_fn=feature_engineering_fn)
| apache-2.0 |
Krossom/python-for-android | python-modules/twisted/twisted/python/zippath.py | 49 | 7421 | # -*- test-case-name: twisted.test.test_paths.ZipFilePathTestCase -*-
# Copyright (c) 2006-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module contains partial re-implementations of FilePath, pending some
specification of formal interfaces it is a duck-typing attempt to emulate them
for certain restricted uses.
See the constructor for ZipArchive for use.
"""
__metaclass__ = type
import os
import time
import errno
from twisted.python.zipstream import ChunkingZipFile
from twisted.python.filepath import FilePath, _PathHelper
# using FilePath here exclusively rather than os to make sure that we don't do
# anything OS-path-specific here.
ZIP_PATH_SEP = '/' # In zipfiles, "/" is universally used as the
# path separator, regardless of platform.
class ZipPath(_PathHelper):
    """
    I represent a file or directory contained within a zip file.
    """
    def __init__(self, archive, pathInArchive):
        """
        Don't construct me directly.  Use ZipArchive.child().
        @param archive: a ZipArchive instance.
        @param pathInArchive: a ZIP_PATH_SEP-separated string.
        """
        self.archive = archive
        self.pathInArchive = pathInArchive
        # self.path pretends to be os-specific because that's the way the
        # 'zipimport' module does it.
        self.path = os.path.join(archive.zipfile.filename,
                                 *(self.pathInArchive.split(ZIP_PATH_SEP)))
    def __cmp__(self, other):
        # NOTE: __cmp__/cmp are Python 2 only; this class predates Python 3.
        if not isinstance(other, ZipPath):
            return NotImplemented
        return cmp((self.archive, self.pathInArchive),
                   (other.archive, other.pathInArchive))
    def __repr__(self):
        # Render an OS-native-looking path for readability; 'string-escape'
        # is a Python 2-only codec.
        parts = [os.path.abspath(self.archive.path)]
        parts.extend(self.pathInArchive.split(ZIP_PATH_SEP))
        path = os.sep.join(parts)
        return "ZipPath('%s')" % (path.encode('string-escape'),)
    def parent(self):
        # A single path segment means our parent is the archive root itself.
        splitup = self.pathInArchive.split(ZIP_PATH_SEP)
        if len(splitup) == 1:
            return self.archive
        return ZipPath(self.archive, ZIP_PATH_SEP.join(splitup[:-1]))
    def child(self, path):
        """
        Return a new ZipPath representing a path in C{self.archive} which is
        a child of this path.
        @note: Requesting the C{".."} (or other special name) child will not
            cause L{InsecurePath} to be raised since these names do not have
            any special meaning inside a zip archive.  Be particularly
            careful with the C{path} attribute (if you absolutely must use
            it) as this means it may include special names with special
            meaning outside of the context of a zip archive.
        """
        return ZipPath(self.archive, ZIP_PATH_SEP.join([self.pathInArchive, path]))
    def sibling(self, path):
        # A sibling is simply a child of our parent.
        return self.parent().child(path)
    # preauthChild = child
    def exists(self):
        return self.isdir() or self.isfile()
    def isdir(self):
        # Directories exist only implicitly, as keys of the archive's childmap.
        return self.pathInArchive in self.archive.childmap
    def isfile(self):
        # Files are actual zip entries, tracked by the ZipFile's name index.
        return self.pathInArchive in self.archive.zipfile.NameToInfo
    def islink(self):
        # Zip archives have no notion of symbolic links.
        return False
    def listdir(self):
        if self.exists():
            if self.isdir():
                return self.archive.childmap[self.pathInArchive].keys()
            else:
                raise OSError(errno.ENOTDIR, "Leaf zip entry listed")
        else:
            raise OSError(errno.ENOENT, "Non-existent zip entry listed")
    def splitext(self):
        """
        Return a value similar to that returned by os.path.splitext.
        """
        # This happens to work out because of the fact that we use OS-specific
        # path separators in the constructor to construct our fake 'path'
        # attribute.
        return os.path.splitext(self.path)
    def basename(self):
        # Last ZIP_PATH_SEP-separated segment of the in-archive path.
        return self.pathInArchive.split(ZIP_PATH_SEP)[-1]
    def dirname(self):
        # XXX NOTE: This API isn't a very good idea on filepath, but it's even
        # less meaningful here.
        return self.parent().path
    def open(self):
        # Delegate to ChunkingZipFile.readfile, which returns a readable
        # file-like object for this entry.
        return self.archive.zipfile.readfile(self.pathInArchive)
    def restat(self):
        # No cached stat information to refresh for zip entries.
        pass
    def getAccessTime(self):
        """
        Retrieve this file's last access-time.  This is the same as the last access
        time for the archive.
        @return: a number of seconds since the epoch
        """
        return self.archive.getAccessTime()
    def getModificationTime(self):
        """
        Retrieve this file's last modification time.  This is the time of
        modification recorded in the zipfile.
        @return: a number of seconds since the epoch.
        """
        # ZipInfo.date_time is a 6-tuple; pad to the 9 fields mktime expects.
        return time.mktime(
            self.archive.zipfile.NameToInfo[self.pathInArchive].date_time
            + (0, 0, 0))
    def getStatusChangeTime(self):
        """
        Retrieve this file's last modification time.  This name is provided for
        compatibility, and returns the same value as getmtime.
        @return: a number of seconds since the epoch.
        """
        return self.getModificationTime()
class ZipArchive(ZipPath):
    """ I am a FilePath-like object which can wrap a zip archive as if it were a
    directory.
    """
    # The archive is its own root: ZipPath methods that consult self.archive
    # resolve back to this instance.
    archive = property(lambda self: self)
    def __init__(self, archivePathname):
        """Create a ZipArchive, treating the archive at archivePathname as a zip file.
        @param archivePathname: a str, naming a path in the filesystem.
        """
        self.zipfile = ChunkingZipFile(archivePathname)
        self.path = archivePathname
        self.pathInArchive = ''
        # zipfile is already wasting O(N) memory on cached ZipInfo instances,
        # so there's no sense in trying to do this lazily or intelligently
        self.childmap = {} # map parent: list of children
        for name in self.zipfile.namelist():
            name = name.split(ZIP_PATH_SEP)
            # Register every (parent, child) pair along the entry's path.
            # Subtle: at x == 0, name[-0] is name[0] and name[:-0] == name[:0]
            # is empty, so the first component is registered under the root
            # key ''; x == 1..len-1 cover the remaining levels.
            for x in range(len(name)):
                child = name[-x]
                parent = ZIP_PATH_SEP.join(name[:-x])
                if parent not in self.childmap:
                    self.childmap[parent] = {}
                self.childmap[parent][child] = 1
            # NOTE(review): this assignment appears vestigial -- `parent` is
            # not read again before being rebound on the next iteration.
            parent = ''
    def child(self, path):
        """
        Create a ZipPath pointing at a path within the archive.
        @param path: a str with no path separators in it, either '/' or the
        system path separator, if it's different.
        """
        return ZipPath(self, path)
    def exists(self):
        """
        Returns true if the underlying archive exists.
        """
        return FilePath(self.zipfile.filename).exists()
    def getAccessTime(self):
        """
        Return the archive file's last access time.
        """
        return FilePath(self.zipfile.filename).getAccessTime()
    def getModificationTime(self):
        """
        Return the archive file's modification time.
        """
        return FilePath(self.zipfile.filename).getModificationTime()
    def getStatusChangeTime(self):
        """
        Return the archive file's status change time.
        """
        return FilePath(self.zipfile.filename).getStatusChangeTime()
    def __repr__(self):
        return 'ZipArchive(%r)' % (os.path.abspath(self.path),)
__all__ = ['ZipArchive', 'ZipPath']
| apache-2.0 |
isrohutamahopetechnik/MissionPlanner | LogAnalyzer/tests/TestDualGyroDrift.py | 273 | 5396 | from LogAnalyzer import Test,TestResult
import DataflashLog
# import scipy
# import pylab #### TEMP!!! only for dev
# from scipy import signal
class TestDualGyroDrift(Test):
    '''Test for gyro drift between dual IMU data.

    Currently disabled (``self.enable = False``): no reliable drift metric has
    been settled on yet, so ``run()`` unconditionally reports GOOD.

    NOTE: a large block of commented-out exploratory code (segment-averaged
    IMU vs IMU2 gyro comparison with WARN/FAIL thresholds, plus matplotlib
    plotting and lowpass-filter experiments) previously lived in run(); it was
    removed as dead code and can be recovered from version-control history.
    '''

    def __init__(self):
        Test.__init__(self)
        self.name = "Gyro Drift"
        # Keep the test registered but skipped until a real implementation
        # lands.
        self.enable = False

    def run(self, logdata, verbose):
        '''Evaluate the log; currently a no-op that always passes.

        Args:
          logdata: parsed DataflashLog data (unused while disabled).
          verbose: verbosity flag (unused while disabled).
        '''
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD
| gpl-3.0 |
intgr/django | tests/sites_tests/tests.py | 40 | 13012 | from django.apps import apps
from django.apps.registry import Apps
from django.conf import settings
from django.contrib.sites import models
from django.contrib.sites.management import create_default_site
from django.contrib.sites.middleware import CurrentSiteMiddleware
from django.contrib.sites.models import Site, clear_site_cache
from django.contrib.sites.requests import RequestSite
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models.signals import post_migrate
from django.http import HttpRequest, HttpResponse
from django.test import TestCase, modify_settings, override_settings
from django.test.utils import captured_stdout
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class SitesFrameworkTests(TestCase):
    """Tests for Site/RequestSite resolution and the site cache."""
    # Several tests exercise both the 'default' and 'other' database aliases.
    multi_db = True
    def setUp(self):
        # Ensure a Site row exists with the pk configured in settings.SITE_ID.
        self.site = Site(
            id=settings.SITE_ID,
            domain="example.com",
            name="example.com",
        )
        self.site.save()
    def tearDown(self):
        # Reset the module-level SITE_CACHE between tests.
        Site.objects.clear_cache()
    def test_site_manager(self):
        # Make sure that get_current() does not return a deleted Site object.
        s = Site.objects.get_current()
        self.assertIsInstance(s, Site)
        s.delete()
        with self.assertRaises(ObjectDoesNotExist):
            Site.objects.get_current()
    def test_site_cache(self):
        # After updating a Site object (e.g. via the admin), we shouldn't return a
        # bogus value from the SITE_CACHE.
        site = Site.objects.get_current()
        self.assertEqual("example.com", site.name)
        s2 = Site.objects.get(id=settings.SITE_ID)
        s2.name = "Example site"
        s2.save()
        site = Site.objects.get_current()
        self.assertEqual("Example site", site.name)
    def test_delete_all_sites_clears_cache(self):
        # When all site objects are deleted the cache should also
        # be cleared and get_current() should raise a DoesNotExist.
        self.assertIsInstance(Site.objects.get_current(), Site)
        Site.objects.all().delete()
        with self.assertRaises(Site.DoesNotExist):
            Site.objects.get_current()
    @override_settings(ALLOWED_HOSTS=['example.com'])
    def test_get_current_site(self):
        # The correct Site object is returned
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        site = get_current_site(request)
        self.assertIsInstance(site, Site)
        self.assertEqual(site.id, settings.SITE_ID)
        # An exception is raised if the sites framework is installed
        # but there is no matching Site
        site.delete()
        with self.assertRaises(ObjectDoesNotExist):
            get_current_site(request)
        # A RequestSite is returned if the sites framework is not installed
        with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
            site = get_current_site(request)
            self.assertIsInstance(site, RequestSite)
            self.assertEqual(site.name, "example.com")
    @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com'])
    def test_get_current_site_no_site_id(self):
        # Without SITE_ID, get_current_site() falls back to host matching.
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        del settings.SITE_ID
        site = get_current_site(request)
        self.assertEqual(site.name, "example.com")
    @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com'])
    def test_get_current_site_host_with_trailing_dot(self):
        """
        The site is matched if the name in the request has a trailing dot.
        """
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'example.com.',
            'SERVER_PORT': '80',
        }
        site = get_current_site(request)
        self.assertEqual(site.name, 'example.com')
    @override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com', 'example.net'])
    def test_get_current_site_no_site_id_and_handle_port_fallback(self):
        request = HttpRequest()
        s1 = self.site
        s2 = Site.objects.create(domain='example.com:80', name='example.com:80')
        # Host header without port
        request.META = {'HTTP_HOST': 'example.com'}
        site = get_current_site(request)
        self.assertEqual(site, s1)
        # Host header with port - match, no fallback without port
        request.META = {'HTTP_HOST': 'example.com:80'}
        site = get_current_site(request)
        self.assertEqual(site, s2)
        # Host header with port - no match, fallback without port
        request.META = {'HTTP_HOST': 'example.com:81'}
        site = get_current_site(request)
        self.assertEqual(site, s1)
        # Host header with non-matching domain
        request.META = {'HTTP_HOST': 'example.net'}
        with self.assertRaises(ObjectDoesNotExist):
            get_current_site(request)
        # Ensure domain for RequestSite always matches host header
        with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
            request.META = {'HTTP_HOST': 'example.com'}
            site = get_current_site(request)
            self.assertEqual(site.name, 'example.com')
            request.META = {'HTTP_HOST': 'example.com:80'}
            site = get_current_site(request)
            self.assertEqual(site.name, 'example.com:80')
    def test_domain_name_with_whitespaces(self):
        # Regression for #17320
        # Domain names are not allowed contain whitespace characters
        site = Site(name="test name", domain="test test")
        with self.assertRaises(ValidationError):
            site.full_clean()
        site.domain = "test\ttest"
        with self.assertRaises(ValidationError):
            site.full_clean()
        site.domain = "test\ntest"
        with self.assertRaises(ValidationError):
            site.full_clean()
    @override_settings(ALLOWED_HOSTS=['example.com'])
    def test_clear_site_cache(self):
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example.com",
            "SERVER_PORT": "80",
        }
        self.assertEqual(models.SITE_CACHE, {})
        get_current_site(request)
        expected_cache = {self.site.id: self.site}
        self.assertEqual(models.SITE_CACHE, expected_cache)
        with self.settings(SITE_ID=''):
            get_current_site(request)
        # With SITE_ID unset the lookup is keyed by domain as well.
        expected_cache.update({self.site.domain: self.site})
        self.assertEqual(models.SITE_CACHE, expected_cache)
        clear_site_cache(Site, instance=self.site, using='default')
        self.assertEqual(models.SITE_CACHE, {})
    @override_settings(SITE_ID='', ALLOWED_HOSTS=['example2.com'])
    def test_clear_site_cache_domain(self):
        site = Site.objects.create(name='example2.com', domain='example2.com')
        request = HttpRequest()
        request.META = {
            "SERVER_NAME": "example2.com",
            "SERVER_PORT": "80",
        }
        get_current_site(request)  # prime the models.SITE_CACHE
        expected_cache = {site.domain: site}
        self.assertEqual(models.SITE_CACHE, expected_cache)
        # Site exists in 'default' database so using='other' shouldn't clear.
        clear_site_cache(Site, instance=site, using='other')
        self.assertEqual(models.SITE_CACHE, expected_cache)
        # using='default' should clear.
        clear_site_cache(Site, instance=site, using='default')
        self.assertEqual(models.SITE_CACHE, {})
    def test_unique_domain(self):
        site = Site(domain=self.site.domain)
        msg = 'Site with this Domain name already exists.'
        with self.assertRaisesMessage(ValidationError, msg):
            site.validate_unique()
    def test_site_natural_key(self):
        self.assertEqual(Site.objects.get_by_natural_key(self.site.domain), self.site)
        self.assertEqual(self.site.natural_key(), (self.site.domain,))
    @override_settings(ALLOWED_HOSTS=['example.com'])
    def test_requestsite_save_notimplemented_msg(self):
        # Test response msg for RequestSite.save NotImplementedError
        request = HttpRequest()
        request.META = {
            "HTTP_HOST": "example.com",
        }
        msg = 'RequestSite cannot be saved.'
        with self.assertRaisesMessage(NotImplementedError, msg):
            RequestSite(request).save()
    @override_settings(ALLOWED_HOSTS=['example.com'])
    def test_requestsite_delete_notimplemented_msg(self):
        # Test response msg for RequestSite.delete NotImplementedError
        request = HttpRequest()
        request.META = {
            "HTTP_HOST": "example.com",
        }
        msg = 'RequestSite cannot be deleted.'
        with self.assertRaisesMessage(NotImplementedError, msg):
            RequestSite(request).delete()
class JustOtherRouter:
    """Database router that permits migrations only on the 'other' alias."""
    def allow_migrate(self, db, app_label, **hints):
        # Any app may migrate, but only into the 'other' database.
        allowed_db = 'other'
        return db == allowed_db
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class CreateDefaultSiteTests(TestCase):
    """Tests for the create_default_site() post-migrate handler."""
    multi_db = True
    def setUp(self):
        self.app_config = apps.get_app_config('sites')
        # Delete the site created as part of the default migration process.
        Site.objects.all().delete()
    def test_basic(self):
        """
        #15346, #15573 - create_default_site() creates an example site only if
        none exist.
        """
        with captured_stdout() as stdout:
            create_default_site(self.app_config)
        self.assertEqual(Site.objects.count(), 1)
        self.assertIn("Creating example.com", stdout.getvalue())
        # A second invocation must be a silent no-op.
        with captured_stdout() as stdout:
            create_default_site(self.app_config)
        self.assertEqual(Site.objects.count(), 1)
        self.assertEqual("", stdout.getvalue())
    @override_settings(DATABASE_ROUTERS=[JustOtherRouter()])
    def test_multi_db_with_router(self):
        """
        #16353, #16828 - The default site creation should respect db routing.
        """
        create_default_site(self.app_config, using='default', verbosity=0)
        create_default_site(self.app_config, using='other', verbosity=0)
        self.assertFalse(Site.objects.using('default').exists())
        self.assertTrue(Site.objects.using('other').exists())
    def test_multi_db(self):
        # Without a router, both aliases get a default site.
        create_default_site(self.app_config, using='default', verbosity=0)
        create_default_site(self.app_config, using='other', verbosity=0)
        self.assertTrue(Site.objects.using('default').exists())
        self.assertTrue(Site.objects.using('other').exists())
    def test_save_another(self):
        """
        #17415 - Another site can be created right after the default one.
        On some backends the sequence needs to be reset after saving with an
        explicit ID. There shouldn't be a sequence collisions by saving another
        site. This test is only meaningful with databases that use sequences
        for automatic primary keys such as PostgreSQL and Oracle.
        """
        create_default_site(self.app_config, verbosity=0)
        Site(domain='example2.com', name='example2.com').save()
    def test_signal(self):
        """
        #23641 - Sending the ``post_migrate`` signal triggers creation of the
        default site.
        """
        post_migrate.send(sender=self.app_config, app_config=self.app_config, verbosity=0)
        self.assertTrue(Site.objects.exists())
    @override_settings(SITE_ID=35696)
    def test_custom_site_id(self):
        """
        #23945 - The configured ``SITE_ID`` should be respected.
        """
        create_default_site(self.app_config, verbosity=0)
        self.assertEqual(Site.objects.get().pk, 35696)
    @override_settings()  # Restore original ``SITE_ID`` afterwards.
    def test_no_site_id(self):
        """
        #24488 - The pk should default to 1 if no ``SITE_ID`` is configured.
        """
        del settings.SITE_ID
        create_default_site(self.app_config, verbosity=0)
        self.assertEqual(Site.objects.get().pk, 1)
    def test_unavailable_site_model(self):
        """
        #24075 - A Site shouldn't be created if the model isn't available.
        """
        # An empty app registry makes the Site model unavailable to the hook.
        apps = Apps()
        create_default_site(self.app_config, verbosity=0, apps=apps)
        self.assertFalse(Site.objects.exists())
class MiddlewareTest(TestCase):
    """Tests for CurrentSiteMiddleware attaching `request.site`."""
    def test_old_style_request(self):
        """The request has correct `site` attribute."""
        # Old-style (MIDDLEWARE_CLASSES) invocation via process_request().
        middleware = CurrentSiteMiddleware()
        request = HttpRequest()
        middleware.process_request(request)
        self.assertEqual(request.site.id, settings.SITE_ID)
    def test_request(self):
        # New-style middleware invocation: instance called with get_response.
        def get_response(request):
            return HttpResponse(str(request.site.id))
        response = CurrentSiteMiddleware(get_response)(HttpRequest())
        self.assertContains(response, settings.SITE_ID)
| bsd-3-clause |
40223139/203739test | static/Brython3.1.3-20150514-095342/Lib/contextlib.py | 737 | 8788 | """Utilities for with-statement contexts. See PEP 343."""
import sys
from collections import deque
from functools import wraps
__all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack"]
class ContextDecorator(object):
    "A base class or mixin that enables context managers to work as decorators."

    def _recreate_cm(self):
        """Return a recreated instance of self.

        Allows an otherwise one-shot context manager like
        _GeneratorContextManager to support use as
        a decorator via implicit recreation.

        This is a private interface just for _GeneratorContextManager.
        See issue #11647 for details.
        """
        # The base implementation is reusable, so no copy is needed.
        return self

    def __call__(self, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Enter a (possibly fresh) context manager around every call.
            with self._recreate_cm():
                return func(*args, **kwargs)
        return wrapper
class _GeneratorContextManager(ContextDecorator):
    """Helper for @contextmanager decorator.

    Wraps a single-yield generator: code before the yield runs in
    __enter__, code after it runs in __exit__.
    """
    def __init__(self, func, *args, **kwds):
        # Instantiate the generator now; keep func/args so _recreate_cm can
        # build a fresh one when used as a decorator.
        self.gen = func(*args, **kwds)
        self.func, self.args, self.kwds = func, args, kwds
    def _recreate_cm(self):
        # _GCM instances are one-shot context managers, so the
        # CM must be recreated each time a decorated function is
        # called
        return self.__class__(self.func, *self.args, **self.kwds)
    def __enter__(self):
        # Run the generator up to its yield; the yielded value is the
        # "as" target of the with statement.
        try:
            return next(self.gen)
        except StopIteration:
            raise RuntimeError("generator didn't yield")
    def __exit__(self, type, value, traceback):
        if type is None:
            # Normal exit: resume the generator; it must finish now.
            try:
                next(self.gen)
            except StopIteration:
                return
            else:
                raise RuntimeError("generator didn't stop")
        else:
            if value is None:
                # Need to force instantiation so we can reliably
                # tell if we get the same exception back
                value = type()
            try:
                self.gen.throw(type, value, traceback)
                raise RuntimeError("generator didn't stop after throw()")
            except StopIteration as exc:
                # Suppress the exception *unless* it's the same exception that
                # was passed to throw().  This prevents a StopIteration
                # raised inside the "with" statement from being suppressed
                return exc is not value
            except:
                # only re-raise if it's *not* the exception that was
                # passed to throw(), because __exit__() must not raise
                # an exception unless __exit__() itself failed.  But throw()
                # has to raise the exception to signal propagation, so this
                # fixes the impedance mismatch between the throw() protocol
                # and the __exit__() protocol.
                #
                if sys.exc_info()[1] is not value:
                    raise
def contextmanager(func):
    """@contextmanager decorator.

    Typical usage:

        @contextmanager
        def some_generator(<arguments>):
            <setup>
            try:
                yield <value>
            finally:
                <cleanup>

    This makes this:

        with some_generator(<arguments>) as <variable>:
            <body>

    equivalent to this:

        <setup>
        try:
            <variable> = <value>
            <body>
        finally:
            <cleanup>
    """
    @wraps(func)
    def make_context_manager(*args, **kwargs):
        # Defer generator creation to _GeneratorContextManager so each call
        # produces a fresh, one-shot context manager.
        return _GeneratorContextManager(func, *args, **kwargs)
    return make_context_manager
class closing(object):
    """Context to automatically close something at the end of a block.

    Code like this:

        with closing(<module>.open(<arguments>)) as f:
            <block>

    is equivalent to this:

        f = <module>.open(<arguments>)
        try:
            <block>
        finally:
            f.close()
    """

    def __init__(self, thing):
        self.thing = thing

    def __enter__(self):
        # Hand the wrapped object straight through as the "as" target.
        return self.thing

    def __exit__(self, exc_type, exc_value, tb):
        # Always close, whether or not the block raised.
        self.thing.close()
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(object):
    """Context manager for dynamic management of a stack of exit callbacks.

    For example:

        with ExitStack() as stack:
            files = [stack.enter_context(open(fname)) for fname in filenames]
            # All opened files will automatically be closed at the end of
            # the with statement, even if attempts to open files later
            # in the list raise an exception
    """
    def __init__(self):
        # LIFO stack of callables with __exit__-style signatures.
        self._exit_callbacks = deque()
    def pop_all(self):
        """Preserve the context stack by transferring it to a new instance"""
        new_stack = type(self)()
        new_stack._exit_callbacks = self._exit_callbacks
        # This instance is left empty, so its own unwind becomes a no-op.
        self._exit_callbacks = deque()
        return new_stack
    def _push_cm_exit(self, cm, cm_exit):
        """Helper to correctly register callbacks to __exit__ methods"""
        def _exit_wrapper(*exc_details):
            return cm_exit(cm, *exc_details)
        # Mimic a bound method so introspection can find the manager object.
        _exit_wrapper.__self__ = cm
        self.push(_exit_wrapper)
    def push(self, exit):
        """Registers a callback with the standard __exit__ method signature.

        Can suppress exceptions the same way __exit__ methods can.
        Also accepts any object with an __exit__ method (registering a call
        to the method instead of the object itself).
        """
        # We use an unbound method rather than a bound method to follow
        # the standard lookup behaviour for special methods
        _cb_type = type(exit)
        try:
            exit_method = _cb_type.__exit__
        except AttributeError:
            # Not a context manager, so assume its a callable
            self._exit_callbacks.append(exit)
        else:
            self._push_cm_exit(exit, exit_method)
        return exit # Allow use as a decorator
    def callback(self, callback, *args, **kwds):
        """Registers an arbitrary callback and arguments.

        Cannot suppress exceptions.
        """
        def _exit_wrapper(exc_type, exc, tb):
            # Exception details are deliberately ignored; plain cleanup only.
            callback(*args, **kwds)
        # We changed the signature, so using @wraps is not appropriate, but
        # setting __wrapped__ may still help with introspection
        _exit_wrapper.__wrapped__ = callback
        self.push(_exit_wrapper)
        return callback # Allow use as a decorator
    def enter_context(self, cm):
        """Enters the supplied context manager.

        If successful, also pushes its __exit__ method as a callback and
        returns the result of the __enter__ method.
        """
        # We look up the special methods on the type to match the with statement
        _cm_type = type(cm)
        _exit = _cm_type.__exit__
        result = _cm_type.__enter__(cm)
        self._push_cm_exit(cm, _exit)
        return result
    def close(self):
        """Immediately unwind the context stack"""
        self.__exit__(None, None, None)
    def __enter__(self):
        return self
    def __exit__(self, *exc_details):
        received_exc = exc_details[0] is not None
        # We manipulate the exception state so it behaves as though
        # we were actually nesting multiple with statements
        frame_exc = sys.exc_info()[1]
        def _fix_exception_context(new_exc, old_exc):
            # Walk the __context__ chain of new_exc and splice old_exc in at
            # the point where the chain would otherwise reach the frame's
            # already-active exception.
            while 1:
                exc_context = new_exc.__context__
                if exc_context in (None, frame_exc):
                    break
                new_exc = exc_context
            new_exc.__context__ = old_exc
        # Callbacks are invoked in LIFO order to match the behaviour of
        # nested context managers
        suppressed_exc = False
        pending_raise = False
        while self._exit_callbacks:
            cb = self._exit_callbacks.pop()
            try:
                if cb(*exc_details):
                    # Callback suppressed the currently active exception.
                    suppressed_exc = True
                    pending_raise = False
                    exc_details = (None, None, None)
            except:
                new_exc_details = sys.exc_info()
                # simulate the stack of exceptions by setting the context
                _fix_exception_context(new_exc_details[1], exc_details[1])
                pending_raise = True
                exc_details = new_exc_details
        if pending_raise:
            try:
                # bare "raise exc_details[1]" replaces our carefully
                # set-up context
                fixed_ctx = exc_details[1].__context__
                raise exc_details[1]
            except BaseException:
                exc_details[1].__context__ = fixed_ctx
                raise
        return received_exc and suppressed_exc
| gpl-3.0 |
alexmandujano/django | django/contrib/auth/context_processors.py | 122 | 1937 | # PermWrapper and PermLookupDict proxy the permissions system into objects that
# the template system can understand.
class PermLookupDict(object):
    """Read-only, mapping-like view of one app's permissions for a user.

    ``lookup[perm_name]`` answers whether the user has the permission
    ``"<app_label>.<perm_name>"``; truth-testing the object answers whether
    the user has any permissions in the app at all.
    """
    def __init__(self, user, app_label):
        self.user = user
        self.app_label = app_label
    def __repr__(self):
        # Rendered when a template prints the object directly.
        return str(self.user.get_all_permissions())
    def __getitem__(self, perm_name):
        full_name = "%s.%s" % (self.app_label, perm_name)
        return self.user.has_perm(full_name)
    def __iter__(self):
        # Without an explicit __iter__, 'item in perms.someapp' would fall
        # back to __getitem__-based iteration. See Django ticket #18979.
        raise TypeError("PermLookupDict is not iterable.")
    def __bool__(self):
        return self.user.has_module_perms(self.app_label)
    def __nonzero__(self):  # Python 2 compatibility
        return type(self).__bool__(self)
class PermWrapper(object):
    """Lazy permission proxy exposed to templates as ``perms``.

    Indexing by app label yields a :class:`PermLookupDict` for that app;
    ``"someapp.someperm" in perms`` and ``"someapp" in perms`` perform the
    corresponding permission checks.
    """
    def __init__(self, user):
        self.user = user
    def __getitem__(self, app_label):
        return PermLookupDict(self.user, app_label)
    def __iter__(self):
        # I am large, I contain multitudes.
        raise TypeError("PermWrapper is not iterable.")
    def __contains__(self, perm_name):
        """
        Lookup by "someapp" or "someapp.someperm" in perms.
        """
        if '.' in perm_name:
            app_label, codename = perm_name.split('.', 1)
            return self[app_label][codename]
        # Bare app label: true when the user has any permission in that app.
        return bool(self[perm_name])
def auth(request):
    """
    Returns context variables required by apps that use Django's authentication
    system.

    If there is no 'user' attribute in the request, uses AnonymousUser (from
    django.contrib.auth).
    """
    if not hasattr(request, 'user'):
        # Deferred import avoids pulling in the models at module import time.
        from django.contrib.auth.models import AnonymousUser
        user = AnonymousUser()
    else:
        user = request.user
    return {
        'user': user,
        'perms': PermWrapper(user),
    }
| bsd-3-clause |
Unidata/siphon | siphon/cdmr/ncstream.py | 1 | 13683 | # Copyright (c) 2014-2016 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Handle binary stream returns in NCStream format."""
from __future__ import print_function
from collections import OrderedDict
import itertools
import logging
import zlib
import numpy as np
from . import cdmrfeature_pb2 as cdmrf
from . import ncStream_pb2 as stream # noqa
# 4-byte magic markers identifying each NcStream / CDMRemoteFeature message
# type in the binary stream.
MAGIC_HEADER = b'\xad\xec\xce\xda'
MAGIC_DATA = b'\xab\xec\xce\xba'
MAGIC_DATA2 = b'\xab\xeb\xbe\xba'
MAGIC_VDATA = b'\xab\xef\xfe\xba'
MAGIC_VEND = b'\xed\xef\xfe\xda'
MAGIC_ERR = b'\xab\xad\xba\xda'
MAGIC_HEADERCOV = b'\xad\xed\xde\xda'
MAGIC_DATACOV = b'\xab\xed\xde\xba'
# Module-level logger; default to WARNING so debug chatter is opt-in.
logging.basicConfig(level=logging.WARNING)
log = logging.getLogger(__name__)
#
# NCStream handling
#
def read_ncstream_data(fobj):
    """Handle reading an NcStream v1 data block from a file-like object.

    Dispatches on the ``dataType`` of the decoded ``Data`` message:
    strings/opaque/vlen, primitive arrays, structures, and sequences each
    have their own wire layout.  Returns an ndarray for most types, or a
    ``(data, blocks)`` tuple for sequences.
    """
    data = read_proto_object(fobj, stream.Data)
    if data.dataType in (stream.STRING, stream.OPAQUE) or data.vdata:
        log.debug('Reading string/opaque/vlen')
        # Payload is a count followed by that many length-prefixed blocks.
        num_obj = read_var_int(fobj)
        log.debug('Num objects: %d', num_obj)
        blocks = [read_block(fobj) for _ in range(num_obj)]
        if data.dataType == stream.STRING:
            blocks = [b.decode('utf-8', errors='ignore') for b in blocks]
        # Again endian isn't coded properly
        dt = data_type_to_numpy(data.dataType).newbyteorder('>')
        if data.vdata:
            # Variable-length data: one sub-array per block.
            return np.array([np.frombuffer(b, dtype=dt) for b in blocks], dtype=object)
        else:
            return np.array(blocks, dtype=dt)
    elif data.dataType in _dtype_lookup:
        log.debug('Reading array data')
        bin_data = read_block(fobj)
        log.debug('Binary data: %s', bin_data)
        # Hard code to big endian for now since it's not encoded correctly
        dt = data_type_to_numpy(data.dataType).newbyteorder('>')
        # Handle decompressing the bytes
        if data.compress == stream.DEFLATE:
            bin_data = zlib.decompress(bin_data)
            assert len(bin_data) == data.uncompressedSize
        elif data.compress != stream.NONE:
            raise NotImplementedError('Compression type {0} not implemented!'.format(
                data.compress))
        # Turn bytes into an array
        return reshape_array(data, np.frombuffer(bin_data, dtype=dt))
    elif data.dataType == stream.STRUCTURE:
        sd = read_proto_object(fobj, stream.StructureData)
        # Make a datatype appropriate to the rows of struct
        endian = '>' if data.bigend else '<'
        # NOTE(review): the single void field is *named* with the endian
        # character here — looks odd; confirm downstream consumers expect it.
        dt = np.dtype([(endian, np.void, sd.rowLength)])
        # Turn bytes into an array
        return reshape_array(data, np.frombuffer(sd.data, dtype=dt))
    elif data.dataType == stream.SEQUENCE:
        log.debug('Reading sequence')
        blocks = []
        # Sequence rows arrive until the VEND magic marker is seen.
        magic = read_magic(fobj)
        while magic != MAGIC_VEND:
            if magic == MAGIC_VDATA:
                log.error('Bad magic for struct/seq data!')
            blocks.append(read_proto_object(fobj, stream.StructureData))
            magic = read_magic(fobj)
        return data, blocks
    else:
        raise NotImplementedError("Don't know how to handle data type: {0}".format(
            data.dataType))
def read_ncstream_data2(fobj):
    """Handle reading an NcStream v2 data block from a file-like object.

    Returns
    -------
    ndarray
        values decoded from the ``DataCol`` message
    """
    data = read_proto_object(fobj, stream.DataCol)
    return datacol_to_array(data)
def read_ncstream_err(fobj):
    """Handle reading an NcStream error from a file-like object and raise as error.

    Raises
    ------
    RuntimeError
        always, carrying the server-supplied error message
    """
    err = read_proto_object(fobj, stream.Error)
    raise RuntimeError(err.message)
# Dispatch table mapping NcStream magic bytes to the reader for that message.
ncstream_table = {MAGIC_HEADER: lambda f: read_proto_object(f, stream.Header),
                  MAGIC_DATA: read_ncstream_data,
                  MAGIC_DATA2: read_ncstream_data2,
                  MAGIC_ERR: read_ncstream_err}
def read_ncstream_messages(fobj):
    """Read a collection of NcStream messages from a file-like object."""
    return read_messages(fobj, ncstream_table)
#
# CDMRemoteFeature handling
#
# Dispatch table for the CDMRemoteFeature protocol's magic bytes.
cdmrf_table = {MAGIC_HEADERCOV: lambda f: read_proto_object(f, cdmrf.CoverageDataset),
               MAGIC_DATACOV: lambda f: read_proto_object(f, cdmrf.CoverageDataResponse),
               MAGIC_DATA2: read_ncstream_data2,  # For coordinates
               MAGIC_ERR: read_ncstream_err}
def read_cdmrf_messages(fobj):
    """Read a collection of CDMRemoteFeature messages from a file-like object."""
    return read_messages(fobj, cdmrf_table)
#
# General Utilities
#
def read_messages(fobj, magic_table):
    """Read messages from a file-like object until the stream is exhausted.

    ``magic_table`` maps 4-byte magic markers to handler callables; each
    handler receives ``fobj`` positioned just past its magic and its return
    value is collected.  Unknown magic values are logged and skipped.
    """
    results = []
    while True:
        magic = read_magic(fobj)
        if not magic:  # empty read -> end of stream
            break
        handler = magic_table.get(magic)
        if handler is None:
            hex_bytes = ' '.join('{0:02x}'.format(b) for b in bytearray(magic))
            log.error('Unknown magic: ' + str(hex_bytes))
        else:
            results.append(handler(fobj))
    return results
def read_proto_object(fobj, klass):
    """Read a block of data and parse using the given protobuf object.

    Parameters
    ----------
    fobj : file-like object
        The file to read from.
    klass : protobuf message class
        Instantiated and populated via ``ParseFromString``.

    Returns
    -------
    the populated protobuf message instance
    """
    log.debug('%s chunk', klass.__name__)
    obj = klass()
    obj.ParseFromString(read_block(fobj))
    log.debug('Header: %s', str(obj))
    return obj
def read_magic(fobj):
    """Read the next 4-byte magic marker.

    Parameters
    ----------
    fobj : file-like object
        The file to read from.

    Returns
    -------
    bytes
        up to four bytes read; empty at end of stream
    """
    return fobj.read(4)
def read_block(fobj):
    """Read one length-prefixed block of bytes.

    The payload length is encoded immediately before it as a
    variable-length integer.

    Parameters
    ----------
    fobj : file-like object
        The file to read from.

    Returns
    -------
    bytes
        block of bytes read
    """
    size = read_var_int(fobj)
    log.debug('Next block: %d bytes', size)
    return fobj.read(size)
def process_vlen(data_header, array):
    """Split a flat primitive array into variable-length sub-arrays.

    ``data_header.vlens`` gives the length of each sub-sequence; consecutive
    runs of *array* are sliced out in order and packed into an object array.

    Parameters
    ----------
    data_header : Header
        Must expose a ``vlens`` iterable of sub-sequence lengths.
    array : :class:`numpy.ndarray`

    Returns
    -------
    ndarray
        object array containing the consecutive sub-sequences
    """
    remaining = iter(array)
    pieces = []
    for length in data_header.vlens:
        chunk = np.fromiter(itertools.islice(remaining, length), dtype=array.dtype)
        pieces.append(chunk)
    return np.array(pieces, dtype=object)
def datacol_to_array(datacol):
    """Convert DataCol from NCStream v2 into an array with appropriate type.

    Depending on the data type specified, this extracts data from the appropriate
    members and packs into a :class:`numpy.ndarray`, recursing as necessary for
    compound data types.

    Parameters
    ----------
    datacol : DataCol

    Returns
    -------
    ndarray
        array containing extracted data
    """
    if datacol.dataType == stream.STRING:
        arr = np.array(datacol.stringdata, dtype=object)
    elif datacol.dataType == stream.OPAQUE:
        arr = np.array(datacol.opaquedata, dtype=object)
    elif datacol.dataType == stream.STRUCTURE:
        # Recurse into each member, preserving declared member order.
        members = OrderedDict((mem.name, datacol_to_array(mem))
                              for mem in datacol.structdata.memberData)
        log.debug('Struct members:\n%s', str(members))
        # str() around name necessary because protobuf gives unicode names, but dtype doesn't
        # support them on Python 2
        dt = np.dtype([(str(name), arr.dtype) for name, arr in members.items()])
        log.debug('Struct dtype: %s', str(dt))
        # Assemble the structured array field by field.
        arr = np.empty((datacol.nelems,), dtype=dt)
        for name, arr_data in members.items():
            arr[name] = arr_data
    else:
        # Make an appropriate datatype
        endian = '>' if datacol.bigend else '<'
        dt = data_type_to_numpy(datacol.dataType).newbyteorder(endian)
        # Turn bytes into an array
        arr = np.frombuffer(datacol.primdata, dtype=dt)
        if arr.size != datacol.nelems:
            log.warning('Array size %d does not agree with nelems %d',
                        arr.size, datacol.nelems)
    if datacol.isVlen:
        arr = process_vlen(datacol, arr)
        try:
            arr = reshape_array(datacol, arr)
        except ValueError:
            # In this case, the array collapsed, need different resize that
            # correctly sizes from elements
            shape = tuple(r.size for r in datacol.section.range) + (datacol.vlens[0],)
            arr = arr.reshape(*shape)
    else:
        arr = reshape_array(datacol, arr)
    return arr
def reshape_array(data_header, array):
    """Reshape *array* to the shape described by the header's section ranges.

    Parameters
    ----------
    data_header : Data
        Must expose ``section.range``, an iterable of objects whose ``size``
        attribute gives the extent of each dimension.
    array : :class:`numpy.ndarray`

    Returns
    -------
    ndarray
        *array* reshaped to the header's shape, or unchanged when the header
        describes a scalar (no ranges).
    """
    dims = [rng.size for rng in data_header.section.range]
    if not dims:
        return array
    return array.reshape(*dims)
# Maps ncstream wire data-type codes to numpy dtype strings. Compound types
# are handled separately by the readers:
# STRUCTURE = 8;
# SEQUENCE = 9;
_dtype_lookup = {stream.CHAR: 'S1', stream.BYTE: 'b', stream.SHORT: 'i2',
                 stream.INT: 'i4', stream.LONG: 'i8', stream.FLOAT: 'f4',
                 stream.DOUBLE: 'f8', stream.STRING: 'O',
                 stream.ENUM1: 'B', stream.ENUM2: 'u2', stream.ENUM4: 'u4',
                 stream.OPAQUE: 'O', stream.UBYTE: 'B', stream.USHORT: 'u2',
                 stream.UINT: 'u4', stream.ULONG: 'u8'}
def data_type_to_numpy(datatype, unsigned=False):
    """Convert an ncstream datatype to a numpy one."""
    code = _dtype_lookup[datatype]
    if datatype in (stream.STRING, stream.OPAQUE):
        # Object dtypes carry no byte order.
        return np.dtype(code)
    if unsigned:
        code = code.replace('i', 'u')
    # '=' pins native byte order; callers re-order explicitly when needed.
    return np.dtype('=' + code)
def struct_to_dtype(struct):
    """Convert a Structure specification to a numpy structured dtype.

    Builds one field per variable in the structure and recurses into any
    nested structures.
    """
    # str() around name necessary because protobuf gives unicode names, but dtype doesn't
    # support them on Python 2
    fields = [(str(var.name), data_type_to_numpy(var.dataType, var.unsigned))
              for var in struct.vars]
    for s in struct.structs:
        # Nested structures become nested structured dtypes.
        fields.append((str(s.name), struct_to_dtype(s)))
    log.debug('Structure fields: %s', fields)
    dt = np.dtype(fields)
    return dt
def unpack_variable(var):
    """Unpack an NCStream Variable into information we can use.

    Returns
    -------
    tuple of (data, dtype, type_name)
        ``data`` is None when the Variable message carries no inline data.
    """
    # If we actually get a structure instance, handle turning that into a variable
    if var.dataType == stream.STRUCTURE:
        return None, struct_to_dtype(var), 'Structure'
    elif var.dataType == stream.SEQUENCE:
        # NOTE(review): after this warning, execution falls through and the
        # sequence is treated like a primitive type — confirm intended.
        log.warning('Sequence support not implemented!')
    dt = data_type_to_numpy(var.dataType, var.unsigned)
    if var.dataType == stream.OPAQUE:
        type_name = 'opaque'
    elif var.dataType == stream.STRING:
        type_name = 'string'
    else:
        type_name = dt.name
    if var.data:
        log.debug('Storing variable data: %s %s', dt, var.data)
        if var.dataType == stream.STRING:
            data = var.data
        else:
            # Always sent big endian
            data = np.frombuffer(var.data, dtype=dt.newbyteorder('>'))
    else:
        data = None
    return data, dt, type_name
# Maps legacy (pre-TDS 5.0) attribute type codes to big-endian numpy dtypes.
_attr_converters = {stream.Attribute.BYTE: np.dtype('>b'),
                    stream.Attribute.SHORT: np.dtype('>i2'),
                    stream.Attribute.INT: np.dtype('>i4'),
                    stream.Attribute.LONG: np.dtype('>i8'),
                    stream.Attribute.FLOAT: np.dtype('>f4'),
                    stream.Attribute.DOUBLE: np.dtype('>f8')}
def unpack_attribute(att):
    """Unpack an embedded attribute into a python or numpy object.

    Returns
    -------
    tuple of (name, value)
        ``value`` is a scalar when the attribute holds a single element.
    """
    if att.unsigned:
        log.warning('Unsupported unsigned attribute!')
    # TDS 5.0 now has a dataType attribute that takes precedence
    if att.len == 0:  # Empty
        val = None
    elif att.dataType == stream.STRING:  # Then look for new datatype string
        val = att.sdata
    elif att.dataType:  # Then a non-zero new data type
        val = np.frombuffer(att.data,
                            dtype='>' + _dtype_lookup[att.dataType], count=att.len)
    elif att.type:  # Then non-zero old data type
        val = np.frombuffer(att.data,
                            dtype=_attr_converters[att.type], count=att.len)
    elif att.sdata:  # This leaves both 0, try old string
        val = att.sdata
    else:  # Assume new datatype is Char (0)
        val = np.array(att.data, dtype=_dtype_lookup[att.dataType])
    # Collapse single-element attributes to a scalar.
    if att.len == 1:
        val = val[0]
    return att.name, val
def read_var_int(file_obj):
    """Read a variable-length (protobuf-style varint) integer.

    Bytes are consumed until one with the high bit clear is found; the low
    seven bits of each byte are accumulated little-endian, i.e. the first
    byte read is the least significant group.

    Parameters
    ----------
    file_obj : file-like object
        The file to read from.

    Returns
    -------
    int
        the variable-length value read
    """
    result = 0
    shift = 0
    while True:
        byte = ord(file_obj.read(1))
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:  # high bit clear -> final byte
            return result
        shift += 7
| bsd-3-clause |
rthornton/booktracker | booktracker/urls.py | 28 | 1025 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routes for the project (Django 1.x `patterns` syntax). The static()
# routes appended at the end serve uploaded media in development only.
urlpatterns = patterns('',
    url(r'^$', # noqa
        TemplateView.as_view(template_name='pages/home.html'),
        name="home"),
    url(r'^about/$',
        TemplateView.as_view(template_name='pages/about.html'),
        name="about"),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Uncomment the next line to enable avatars
    url(r'^avatar/', include('avatar.urls')),
    # Your stuff: custom urls go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| bsd-3-clause |
joxeankoret/nightmare | lib/interfaces/vstruct/defs/macho/__init__.py | 18 | 2876 | '''
Structure definitions for the OSX MachO binary format.
'''
import struct
import vstruct
from vstruct.defs.macho.fat import *
from vstruct.defs.macho.const import *
from vstruct.defs.macho.stabs import *
from vstruct.defs.macho.loader import *
class mach_o(vstruct.VStruct):
    '''
    Parser for a single-architecture Mach-O binary (Python 2 code).

    vsParse() populates mach_header and one vstruct field per load command;
    the raw file bytes are retained for offset-based slicing by the
    accessor methods.
    '''
    def __init__(self):
        vstruct.VStruct.__init__(self)
        # Full file bytes, kept so segment/symbol data can be sliced later.
        self._raw_bytes = ''
        # Lazy cache for getSymbols().
        self._symbols = None
        self.mach_header = mach_header()
        self.load_commands = vstruct.VStruct()
    def getSymbols(self):
        '''
        Parse the LC_SYMTAB load commands' nlist entries.

        NOTE(review): this looks unfinished — the loop parses each nlist and
        resolves its string, but never appends anything to self._symbols and
        there is no return after the loop, so the first call returns None
        while later calls return the cached empty list. Confirm before use.
        '''
        if self._symbols != None:
            return self._symbols
        self._symbols = []
        for fname,vs in self.load_commands:
            if vs.cmd != LC_SYMTAB:
                continue
            # Slice out this command's string table.
            strbytes = self._raw_bytes[vs.stroff:vs.stroff+vs.strsize]
            #print repr(strbytes)
            strtab = strbytes.split('\x00')
            #print strtab
            offset = vs.symoff
            print 'NSYMS:',vs.nsyms
            for i in xrange(vs.nsyms):
                n = nlist() # FIXME 64!
                offset = n.vsParse(self._raw_bytes, offset)
                #symstr = strtab[n.n_strx]
                # FIXME this is slow!
                # n_strx is a byte offset into the string table, not an index.
                symstr = strbytes[n.n_strx:].split('\x00', 1)[0]
                #print n.tree()
                #print symstr
    def getLibDeps(self):
        '''
        Return a list of the library files this Mach-O is dependant on
        '''
        ret = []
        for fname, vs in self.load_commands:
            if vs.cmd != LC_LOAD_DYLIB:
                continue
            ret.append(vs.namedata)
        return ret
    def getSegments(self):
        '''
        Return a list of (segname, rva, perms, bytes) tuples for the memory
        segments defined by the loader commands
        '''
        ret = []
        for fname, vs in self.load_commands:
            if vs.cmd != LC_SEGMENT:
                print hex(vs.cmd),hex(vs.cmdsize) # 2, 5, b, e
                continue
            # Slice the segment bytes from raw bytes
            fbytes = self._raw_bytes[ vs.fileoff: vs.fileoff + vs.filesize ]
            # Pad out to virtual size
            fbytes = fbytes.ljust(vs.vmsize, '\x00')
            ret.append((vs.segname, vs.vmaddr, vs.initprot, fbytes))
        return ret
    def vsParse(self, bytes, offset=0):
        '''
        Parse the header and all load commands starting at *offset*.
        '''
        self._raw_bytes = bytes[offset:]
        offset = self.mach_header.vsParse(bytes, offset=offset)
        #print bytes[offset:].encode('hex')
        for i in xrange(self.mach_header.ncmds):
            # should we use endian from header?
            cmdtype, cmdlen = struct.unpack('<II', bytes[offset:offset+8])
            cmdclass = getCommandClass(cmdtype)
            cmdobj = cmdclass()
            cmdobj.vsParse(bytes, offset=offset)
            self.load_commands.vsAddField('cmd%d' % i, cmdobj)
            offset += cmdobj.cmdsize
| gpl-2.0 |
snnn/tensorflow | tensorflow/python/training/moving_averages.py | 4 | 22211 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Maintain moving averages of parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.training import slot_creator
from tensorflow.python.util.tf_export import tf_export
# TODO(touts): switch to variables.Variable.
def assign_moving_average(variable, value, decay, zero_debias=True, name=None):
  """Compute the moving average of a variable.

  The moving average of 'variable' updated with 'value' is:
    variable * decay + value * (1 - decay)

  The returned Operation sets 'variable' to the newly computed moving average,
  by performing this subtraction:
     variable -= (1 - decay) * (variable - value)

  Since variables that are initialized to a `0` value will be `0` biased,
  `zero_debias` optionally enables scaling by the mathematically correct
  debiasing factor of
    1 - decay ** num_updates
  See `ADAM: A Method for Stochastic Optimization` Section 3 for more details
  (https://arxiv.org/abs/1412.6980).

  The names of the debias shadow variables, by default, include both the scope
  they were created in and the scope of the variables they debias. They are also
  given a uniquifying-suffix.

  E.g.:

  ```
    with tf.variable_scope('scope1'):
      with tf.variable_scope('scope2'):
        var = tf.get_variable('foo')
        update_1 = tf.assign_moving_average(var, 0.0, 1.0)
        update_2 = tf.assign_moving_average(var, 0.0, 0.9)

    # var.name: 'scope1/scope2/foo'
    # shadow var names: 'scope1/scope2/scope1/scope2/foo/biased'
    #                   'scope1/scope2/scope1/scope2/foo/biased_1'
  ```

  Args:
    variable: A Variable.
    value: A tensor with the same shape as 'variable'.
    decay: A float Tensor or float value.  The moving average decay.
    zero_debias: A python bool. If true, assume the variable is 0-initialized
      and unbias it, as in https://arxiv.org/abs/1412.6980. See docstring in
      `_zero_debias` for more details.
    name: Optional name of the returned operation.

  Returns:
    A tensor which if evaluated will compute and return the new moving average.
  """
  def update_fn(v, value, decay=decay):
    # Work with (1 - decay) so the update is an assign_sub of a scaled delta.
    decay = ops.convert_to_tensor(1.0 - decay, name="decay")
    if decay.dtype != v.dtype.base_dtype:
      decay = math_ops.cast(decay, v.dtype.base_dtype)
    if zero_debias:
      update_delta = _zero_debias(v, value, decay)
    else:
      update_delta = (v - value) * decay
    # `scope` is closed over from the name_scope entered below.
    return state_ops.assign_sub(v, update_delta, name=scope)
  with ops.name_scope(name, "AssignMovingAvg",
                      [variable, value, decay]) as scope:
    tower_context = distribution_strategy_context.get_tower_context()
    if tower_context:
      # In a tower context, we update variable using the mean of value across
      # towers.
      def merge_fn(strategy, v, value):
        value = strategy.reduce(
            variable_scope.VariableAggregation.MEAN, value, v)
        return strategy.update(v, update_fn, value)
      return tower_context.merge_call(merge_fn, variable, value)
    else:
      strategy = distribution_strategy_context.get_cross_tower_context()
      return strategy.update(variable, update_fn, value)
def weighted_moving_average(value,
                            decay,
                            weight,
                            truediv=True,
                            collections=None,
                            name=None):
  """Compute the weighted moving average of `value`.

  Conceptually, the weighted moving average is:
    `moving_average(value * weight) / moving_average(weight)`,
  where a moving average updates by the rule
    `new_value = decay * old_value + (1 - decay) * update`
  Internally, this Op keeps moving average variables of both `value * weight`
  and `weight`.

  Args:
    value: A numeric `Tensor`.
    decay: A float `Tensor` or float value.  The moving average decay.
    weight:  `Tensor` that keeps the current value of a weight.
      Shape should be able to multiply `value`.
    truediv:  Boolean, if `True`, dividing by `moving_average(weight)` is
      floating point division.  If `False`, use division implied by dtypes.
    collections: List of graph collections keys to add the internal variables
      `value * weight` and `weight` to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    name: Optional name of the returned operation.
      Defaults to "WeightedMovingAvg".

  Returns:
    An Operation that updates and returns the weighted moving average.
  """
  # Unlike assign_moving_average, the weighted moving average doesn't modify
  # user-visible variables. It is the ratio of two internal variables, which are
  # moving averages of the updates.  Thus, the signature of this function is
  # quite different than assign_moving_average.
  if collections is None:
    collections = [ops.GraphKeys.GLOBAL_VARIABLES]
  with variable_scope.variable_scope(name, "WeightedMovingAvg",
                                     [value, weight, decay]) as scope:
    # Internal accumulator for moving_average(value * weight).
    value_x_weight_var = variable_scope.get_variable(
        "value_x_weight",
        shape=value.get_shape(),
        dtype=value.dtype,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=collections)
    # Internal accumulator for moving_average(weight).
    weight_var = variable_scope.get_variable(
        "weight",
        shape=weight.get_shape(),
        dtype=weight.dtype,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=collections)
    numerator = assign_moving_average(
        value_x_weight_var, value * weight, decay, zero_debias=False)
    denominator = assign_moving_average(
        weight_var, weight, decay, zero_debias=False)
    if truediv:
      return math_ops.truediv(numerator, denominator, name=scope.name)
    else:
      return math_ops.div(numerator, denominator, name=scope.name)
def _zero_debias(unbiased_var, value, decay):
  """Compute the delta required for a debiased Variable.

  All exponential moving averages initialized with Tensors are initialized to 0,
  and therefore are biased to 0. Variables initialized to 0 and used as EMAs are
  similarly biased. This function creates the debias updated amount according to
  a scale factor, as in https://arxiv.org/abs/1412.6980.

  To demonstrate the bias the results from 0-initialization, take an EMA that
  was initialized to `0` with decay `b`. After `t` timesteps of seeing the
  constant `c`, the variable have the following value:

  ```
    EMA = 0*b^(t) + c*(1 - b)*b^(t-1) + c*(1 - b)*b^(t-2) + ...
        = c*(1 - b^t)
  ```

  To have the true value `c`, we would divide by the scale factor `1 - b^t`.

  In order to perform debiasing, we use two shadow variables. One keeps track of
  the biased estimate, and the other keeps track of the number of updates that
  have occurred.

  Args:
    unbiased_var: A Variable representing the current value of the unbiased EMA.
    value: A Tensor representing the most recent value.
    decay: A Tensor representing `1-decay` for the EMA.

  Returns:
    The amount that the unbiased variable should be updated. Computing this
    tensor will also update the shadow variables appropriately.
  """
  with variable_scope.variable_scope(
      unbiased_var.op.name, values=[unbiased_var, value, decay]) as scope:
    with ops.colocate_with(unbiased_var):
      with ops.init_scope():
        biased_initializer = init_ops.zeros_initializer(
            dtype=unbiased_var.dtype)(unbiased_var.get_shape())
        local_step_initializer = init_ops.zeros_initializer()
      def _maybe_get_unique(name):
        """Get name for a unique variable, if not `reuse=True`."""
        if variable_scope.get_variable_scope().reuse:
          return name
        vs_vars = [x.op.name for x in
                   variable_scope.get_variable_scope().global_variables()]
        full_name = variable_scope.get_variable_scope().name + "/" + name
        if full_name not in vs_vars: return name
        # Append _1, _2, ... until the name is unique in this scope.
        idx = 1
        while full_name + ("_%d" % idx) in vs_vars:
          idx += 1
        return name + ("_%d" % idx)
      # Shadow variable holding the biased (0-initialized) estimate.
      biased_var = variable_scope.get_variable(
          _maybe_get_unique("biased"), initializer=biased_initializer,
          trainable=False)
      # Shadow variable counting how many updates have been applied.
      local_step = variable_scope.get_variable(
          _maybe_get_unique("local_step"),
          shape=[],
          dtype=unbiased_var.dtype,
          initializer=local_step_initializer,
          trainable=False)
      # Get an update ops for both shadow variables.
      update_biased = state_ops.assign_sub(biased_var,
                                           (biased_var - value) * decay,
                                           name=scope.name)
      update_local_step = local_step.assign_add(1)
      # Compute the value of the delta to update the unbiased EMA. Make sure to
      # use the new values of the biased variable and the local step.
      with ops.control_dependencies([update_biased, update_local_step]):
        # This function gets `1 - decay`, so use `1.0 - decay` in the exponent.
        unbiased_ema_delta = (unbiased_var - biased_var.read_value() /
                              (1 - math_ops.pow(
                                  1.0 - decay, local_step.read_value())))
      return unbiased_ema_delta
@tf_export("train.ExponentialMovingAverage")
class ExponentialMovingAverage(object):
"""Maintains moving averages of variables by employing an exponential decay.
When training a model, it is often beneficial to maintain moving averages of
the trained parameters. Evaluations that use averaged parameters sometimes
produce significantly better results than the final trained values.
The `apply()` method adds shadow copies of trained variables and add ops that
maintain a moving average of the trained variables in their shadow copies.
It is used when building the training model. The ops that maintain moving
averages are typically run after each training step.
The `average()` and `average_name()` methods give access to the shadow
variables and their names. They are useful when building an evaluation
model, or when restoring a model from a checkpoint file. They help use the
moving averages in place of the last trained values for evaluations.
The moving averages are computed using exponential decay. You specify the
decay value when creating the `ExponentialMovingAverage` object. The shadow
variables are initialized with the same initial values as the trained
variables. When you run the ops to maintain the moving averages, each
shadow variable is updated with the formula:
`shadow_variable -= (1 - decay) * (shadow_variable - variable)`
This is mathematically equivalent to the classic formula below, but the use
of an `assign_sub` op (the `"-="` in the formula) allows concurrent lockless
updates to the variables:
`shadow_variable = decay * shadow_variable + (1 - decay) * variable`
Reasonable values for `decay` are close to 1.0, typically in the
multiple-nines range: 0.999, 0.9999, etc.
Example usage when creating a training model:
```python
# Create variables.
var0 = tf.Variable(...)
var1 = tf.Variable(...)
# ... use the variables to build a training model...
...
# Create an op that applies the optimizer. This is what we usually
# would use as a training op.
opt_op = opt.minimize(my_loss, [var0, var1])
# Create an ExponentialMovingAverage object
ema = tf.train.ExponentialMovingAverage(decay=0.9999)
with tf.control_dependencies([opt_op]):
# Create the shadow variables, and add ops to maintain moving averages
# of var0 and var1. This also creates an op that will update the moving
# averages after each training step. This is what we will use in place
# of the usual training op.
training_op = ema.apply([var0, var1])
...train the model by running training_op...
```
There are two ways to use the moving averages for evaluations:
* Build a model that uses the shadow variables instead of the variables.
For this, use the `average()` method which returns the shadow variable
for a given variable.
* Build a model normally but load the checkpoint files to evaluate by using
the shadow variable names. For this use the `average_name()` method. See
the `tf.train.Saver` for more
information on restoring saved variables.
Example of restoring the shadow variable values:
```python
# Create a Saver that loads variables from their saved shadow values.
shadow_var0_name = ema.average_name(var0)
shadow_var1_name = ema.average_name(var1)
saver = tf.train.Saver({shadow_var0_name: var0, shadow_var1_name: var1})
saver.restore(...checkpoint filename...)
# var0 and var1 now hold the moving average values
```
"""
def __init__(self, decay, num_updates=None, zero_debias=False,
name="ExponentialMovingAverage"):
"""Creates a new ExponentialMovingAverage object.
The `apply()` method has to be called to create shadow variables and add
ops to maintain moving averages.
The optional `num_updates` parameter allows one to tweak the decay rate
dynamically. It is typical to pass the count of training steps, usually
kept in a variable that is incremented at each step, in which case the
decay rate is lower at the start of training. This makes moving averages
move faster. If passed, the actual decay rate used is:
`min(decay, (1 + num_updates) / (10 + num_updates))`
Args:
decay: Float. The decay to use.
num_updates: Optional count of number of updates applied to variables.
zero_debias: If `True`, zero debias moving-averages that are initialized
with tensors.
name: String. Optional prefix name to use for the name of ops added in
`apply()`.
"""
self._decay = decay
self._num_updates = num_updates
self._zero_debias = zero_debias
self._name = name
self._averages = {}
  @property
  def name(self):
    """The name of this ExponentialMovingAverage object.

    Used as the name-scope prefix for the ops and shadow variables that
    `apply()` creates.
    """
    return self._name
  def apply(self, var_list=None):
    """Maintains moving averages of variables.
    `var_list` must be a list of `Variable` or `Tensor` objects. This method
    creates shadow variables for all elements of `var_list`. Shadow variables
    for `Variable` objects are initialized to the variable's initial value.
    They will be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
    For `Tensor` objects, the shadow variables are initialized to 0 and zero
    debiased (see docstring in `assign_moving_average` for more details).
    shadow variables are created with `trainable=False` and added to the
    `GraphKeys.ALL_VARIABLES` collection. They will be returned by calls to
    `tf.global_variables()`.
    Returns an op that updates all shadow variables from the current value of
    their associated variables.
    Note that `apply()` can be called multiple times. When eager execution is
    enabled each call to apply will update the variables once, so this needs to
    be called in a loop.
    Args:
      var_list: A list of Variable or Tensor objects. The variables
        and Tensors must be of types bfloat16, float16, float32, or float64.
    Returns:
      An Operation that updates the moving averages.
    Raises:
      TypeError: If the arguments are not an allowed type.
    """
    # TODO(touts): op_scope
    if var_list is None:
      var_list = variables.trainable_variables()
    zero_debias_true = set()  # set of vars to set `zero_debias=True`
    # First pass: validate dtypes and create any missing shadow slots.
    for var in var_list:
      if var.dtype.base_dtype not in [
          dtypes.bfloat16, dtypes.float16, dtypes.float32, dtypes.float64
      ]:
        raise TypeError("The variables must be half, float, or double: %s" %
                        var.name)
      if var not in self._averages:
        # For variables: to lower communication bandwidth across devices we keep
        # the moving averages on the same device as the variables. For other
        # tensors, we rely on the existing device allocation mechanism.
        # init_scope lifts slot creation out of any enclosing function/graph
        # context so the shadow variable is created in the outermost graph.
        with ops.init_scope():
          if isinstance(var, variables.Variable):
            avg = slot_creator.create_slot(var,
                                           var.initialized_value(),
                                           self.name,
                                           colocate_with_primary=True)
            # NOTE(mrry): We only add `tf.Variable` objects to the
            # `MOVING_AVERAGE_VARIABLES` collection.
            ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
          else:
            # Tensors get a zero-initialized slot; colocation is only safe for
            # variable-producing op types.
            avg = slot_creator.create_zeros_slot(
                var,
                self.name,
                colocate_with_primary=(var.op.type in ["Variable",
                                                       "VariableV2",
                                                       "VarHandleOp"]))
            if self._zero_debias:
              zero_debias_true.add(avg)
        self._averages[var] = avg
    # Second pass: build the update ops under a common name scope.
    with ops.name_scope(self.name) as scope:
      decay = ops.convert_to_tensor(self._decay, name="decay")
      if self._num_updates is not None:
        # Lower the effective decay early in training:
        # min(decay, (1 + num_updates) / (10 + num_updates)).
        num_updates = math_ops.cast(self._num_updates,
                                    dtypes.float32,
                                    name="num_updates")
        decay = math_ops.minimum(decay,
                                 (1.0 + num_updates) / (10.0 + num_updates))
      updates = []
      for var in var_list:
        zero_debias = self._averages[var] in zero_debias_true
        updates.append(assign_moving_average(
            self._averages[var], var, decay, zero_debias=zero_debias))
      # A single grouped op that runs every shadow-variable update.
      return control_flow_ops.group(*updates, name=scope)
def average(self, var):
"""Returns the `Variable` holding the average of `var`.
Args:
var: A `Variable` object.
Returns:
A `Variable` object or `None` if the moving average of `var`
is not maintained.
"""
return self._averages.get(var, None)
def average_name(self, var):
"""Returns the name of the `Variable` holding the average for `var`.
The typical scenario for `ExponentialMovingAverage` is to compute moving
averages of variables during training, and restore the variables from the
computed moving averages during evaluations.
To restore variables, you have to know the name of the shadow variables.
That name and the original variable can then be passed to a `Saver()` object
to restore the variable from the moving average value with:
`saver = tf.train.Saver({ema.average_name(var): var})`
`average_name()` can be called whether or not `apply()` has been called.
Args:
var: A `Variable` object.
Returns:
A string: The name of the variable that will be used or was used
by the `ExponentialMovingAverage class` to hold the moving average of
`var`.
"""
if var in self._averages:
return self._averages[var].op.name
return ops.get_default_graph().unique_name(
var.op.name + "/" + self.name, mark_as_used=False)
def variables_to_restore(self, moving_avg_variables=None):
"""Returns a map of names to `Variables` to restore.
If a variable has a moving average, use the moving average variable name as
the restore name; otherwise, use the variable name.
For example,
```python
variables_to_restore = ema.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
```
Below is an example of such mapping:
```
conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
global_step: global_step
```
Args:
moving_avg_variables: a list of variables that require to use of the
moving variable name to be restored. If None, it will default to
variables.moving_average_variables() + variables.trainable_variables()
Returns:
A map from restore_names to variables. The restore_name can be the
moving_average version of the variable name if it exist, or the original
variable name.
"""
name_map = {}
if moving_avg_variables is None:
# Include trainable variables and variables which have been explicitly
# added to the moving_average_variables collection.
moving_avg_variables = variables.trainable_variables()
moving_avg_variables += variables.moving_average_variables()
# Remove duplicates
moving_avg_variables = set(moving_avg_variables)
# Collect all the variables with moving average,
for v in moving_avg_variables:
name_map[self.average_name(v)] = v
# Make sure we restore variables without moving averages as well.
moving_avg_variable_names = set([v.name for v in moving_avg_variables])
for v in list(set(variables.global_variables())):
if v.name not in moving_avg_variable_names and v.op.name not in name_map:
name_map[v.op.name] = v
return name_map
| apache-2.0 |
papouso/odoo | addons/l10n_th/__init__.py | 893 | 1045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
n3wtron/log4all-client-python | pylog4all/l4a_search.py | 2 | 1866 | import logging
from optparse import OptionParser
import sys
from dateutil import parser as dtparser
from pylog4all.search import search_log
from pylog4all.client import Log4allClient
from pylog4all import config
from pylog4all.util import add_common_parser_options
__author__ = 'Igor Maculan <n3wtron@gmail.com>'
def main():
    """Command-line entry point for the log4all *search* tool.

    Parses CLI options, resolves the log4all server address (from --server or
    the user configuration file), then searches logs between --since and --to.
    Exits with status -2 when a mandatory argument is missing.
    """
    # Bug fix: the usage line previously said "add" — a copy/paste leftover
    # from the l4a_add tool; this module performs a search.
    usage = "usage: %prog search [options] query_text"
    parser = OptionParser(usage=usage)
    add_common_parser_options(parser)
    # Search-specific options.
    parser.add_option("-s", "--since", dest="since", action="store", help="search since")
    parser.add_option("-t", "--to", dest="to", action="store", help="search to")
    parser.add_option("--full", dest="full_log", action="store_true", help="log with stacktrace")
    parser.add_option("-n", "--num", dest="result_per_page", action="store",
                      help="number of results", default="10")
    (options, args) = parser.parse_args(sys.argv[1:])
    if options.verbose:
        logging.basicConfig(level=logging.DEBUG)
    if options.user_setup:
        # Interactive (re)creation of the user configuration file.
        config.user_setup()
    else:
        user_conf = config.get_user_conf()
        if options.server is None:
            server = user_conf['server']
        else:
            server = options.server
        if server.strip() == '':
            print('server is mandatory')
            parser.print_help()
            # sys.exit instead of the site-provided exit() builtin, which is
            # not guaranteed to exist in every interpreter setup.
            sys.exit(-2)
        cl = Log4allClient(server)
        if options.since is None or options.to is None:
            print('--since and --to are mandatory in search')
            parser.print_help()
            sys.exit(-2)
        # args[0] is the sub-command name; args[1], when present, is the query.
        if len(args) == 2:
            query = args[1]
        else:
            query = ''
        search_log(cl, dtparser.parse(options.since), dtparser.parse(options.to), query, options.full_log,
                   int(options.result_per_page))


if __name__ == '__main__':
    main()
mz314/django-chartjs | setup.py | 4 | 1516 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os.path import abspath, dirname, join
from setuptools import find_packages, setup
def read_relative_file(filename):
    """Return the text content of *filename*, resolved relative to the
    directory containing this module."""
    path = join(dirname(abspath(__file__)), filename)
    with open(path) as stream:
        return stream.read()
if __name__ == '__main__':  # ``import setup`` doesn't trigger setup().
    setup(
        name='django-chartjs',
        # Version is single-sourced from the VERSION file next to this script.
        version=read_relative_file('VERSION').strip(),
        description="Django Chart.js and Hightchart ajax views",
        # PyPI long description comes straight from the README.
        long_description=read_relative_file('README.rst'),
        classifiers=['Development Status :: 4 - Beta',
                     'Environment :: Web Environment',
                     'Framework :: Django',
                     'Intended Audience :: Developers',
                     'License :: OSI Approved :: BSD License',
                     'Programming Language :: Python',
                     'Programming Language :: Python :: 2.7',
                     'Programming Language :: Python :: 3'],
        keywords='django chart chartjs highchart ajax class based views',
        author='Rémy Hubscher',
        author_email='hubscher.remy@gmail.com',
        url='https://github.com/novapost/django-chartjs',
        license='BSD Licence',
        packages=find_packages(),
        include_package_data=True,
        zip_safe=False,
        # ``six`` is the only declared dependency; Django itself is expected
        # to be provided by the host project.
        install_requires=['six']  # depends on Django
    )
| bsd-3-clause |
mudithkr/zamboni | lib/geoip/__init__.py | 19 | 2748 | import logging
import requests
from django_statsd.clients import statsd
from mkt import regions
log = logging.getLogger('z.geoip')
def is_public(ip):
    """Return True if *ip* (a dotted-quad IPv4 string) is publicly routable.

    Treats loopback (127.0.0.0/8) and the RFC 1918 private ranges
    10.0.0.0/8, 172.16.0.0/12 and 192.168.0.0/16 as non-public.

    Args:
        ip: IPv4 address as a string, e.g. ``'8.8.8.8'``.
    """
    # A list comprehension instead of bare map(): map() is lazy on Python 3,
    # so subscripting its result would fail there.
    parts = [int(part) for part in ip.split('.')]
    # Loopback: the previous check only matched the literal '127.0.0.1',
    # but the entire 127.0.0.0/8 block is non-routable.
    if parts[0] == 127:
        return False
    # 10.0.0.0/8
    elif parts[0] == 10:
        return False
    # 192.168.0.0/16
    elif parts[0] == 192 and parts[1] == 168:
        return False
    # 172.16.0.0/12
    elif parts[0] == 172 and 16 <= parts[1] <= 31:
        return False
    return True
class GeoIP(object):
    """Call to geodude server to resolve an IP to Geo Info block.

    Configuration is read from Django-style settings:
      GEOIP_URL             base URL of the geodude service ('' disables lookups)
      GEOIP_DEFAULT_TIMEOUT request timeout in seconds (default 0.2)
      GEOIP_DEFAULT_VAL     fallback country slug (default: restofworld)
    """

    def __init__(self, settings):
        self.timeout = float(getattr(settings, 'GEOIP_DEFAULT_TIMEOUT', .2))
        self.url = getattr(settings, 'GEOIP_URL', '')
        self.default_val = getattr(settings, 'GEOIP_DEFAULT_VAL',
                                   regions.RESTOFWORLD.slug).lower()

    def lookup(self, address):
        """Resolve an IP address to a block of geo information.

        If a given address is unresolvable or the geoip server is not defined,
        return the default as defined by the settings, or "restofworld".
        """
        public_ip = is_public(address)
        if self.url and public_ip:
            with statsd.timer('z.geoip'):
                res = None
                try:
                    res = requests.post('{0}/country.json'.format(self.url),
                                        timeout=self.timeout,
                                        data={'ip': address})
                except requests.Timeout:
                    statsd.incr('z.geoip.timeout')
                    log.error(('Geodude timed out looking up: {0}'
                               .format(address)))
                except requests.RequestException as e:
                    statsd.incr('z.geoip.error')
                    log.error('Geodude connection error: {0}'.format(str(e)))
                if res is not None and res.status_code == 200:
                    statsd.incr('z.geoip.success')
                    country_code = res.json().get(
                        'country_code', self.default_val).lower()
                    log.info(('Geodude lookup for {0} returned {1}'
                              .format(address, country_code)))
                    return country_code
                if res is not None:
                    # Bug fix: this log line used to run unconditionally, so a
                    # timed-out / failed request (res is None) raised
                    # AttributeError here instead of falling through to the
                    # default country code below.
                    log.info('Geodude lookup returned non-200 response: {0}'
                             .format(res.status_code))
        else:
            if public_ip:
                log.info('Geodude lookup skipped for public IP: {0}'
                         .format(address))
            else:
                log.info('Geodude lookup skipped for private IP: {0}'
                         .format(address))
        return self.default_val
| bsd-3-clause |
pferreir/indico-backup | indico/MaKaC/plugins/EPayment/worldPay/webinterface/urlHandlers.py | 2 | 1795 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from MaKaC.webinterface.urlHandlers import URLHandler as MainURLHandler
from MaKaC.plugins.EPayment import worldPay
class EPURLHandler(MainURLHandler):
    """Base URL handler for the WorldPay e-payment plugin.

    Subclasses override ``_requestTag`` to identify the specific action; the
    tag and the plugin module id are appended to every generated URL.
    """
    _requestTag = ''
    @classmethod
    def getURL(cls, target=None):
        # Tag the URL with the WorldPay module id and the handler-specific
        # request tag so the dispatcher routes the request back to the plugin.
        return super(EPURLHandler, cls).getURL(target, EPaymentName=worldPay.MODULE_ID, requestTag=cls._requestTag)
# URL for WorldPay configuration
class UHConfModifEPayment(EPURLHandler):
    """URL for the e-payment module page in the event management area."""
    _endpoint = 'event_mgmt.confModifEpayment-modifModule'
class UHConfModifEPaymentWorldPay( UHConfModifEPayment ):
    """URL for viewing the WorldPay plugin configuration."""
    _requestTag = "modifWorldPay"
class UHConfModifEPaymentWorldPayDataModif( UHConfModifEPayment ):
    """URL for the WorldPay configuration edit form."""
    _requestTag = "modifWorldPayData"
class UHConfModifEPaymentWorldPayPerformDataModif( UHConfModifEPayment ):
    """URL that submits (performs) the WorldPay configuration changes."""
    _requestTag = "modifWorldPayPerformDataModif"
# URL for WorldPay callback
class UHPay(MainURLHandler):
    """Base URL for WorldPay payment callbacks (outside the mgmt area)."""
    _endpoint = 'misc.payment'
class UHPayConfirmWorldPay( UHPay ):
    """Callback URL hit by WorldPay when a payment is confirmed."""
    _requestTag = "confirm"
class UHPayCancelWorldPay( UHPay ):
    """Callback URL hit by WorldPay when a payment is cancelled."""
    _requestTag = "cancel"
| gpl-3.0 |
kiszk/spark | python/pyspark/ml/feature.py | 3 | 155266 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version > '3':
basestring = str
from pyspark import since, keyword_only, SparkContext
from pyspark.rdd import ignore_unicode_prefix
from pyspark.ml.linalg import _convert_to_vector
from pyspark.ml.param.shared import *
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaTransformer, _jvm
from pyspark.ml.common import inherit_doc
# Public API of pyspark.ml.feature: the names exported by
# ``from pyspark.ml.feature import *``. Keep alphabetical and in sync with
# the classes defined in this module.
__all__ = ['Binarizer',
           'BucketedRandomProjectionLSH', 'BucketedRandomProjectionLSHModel',
           'Bucketizer',
           'ChiSqSelector', 'ChiSqSelectorModel',
           'CountVectorizer', 'CountVectorizerModel',
           'DCT',
           'ElementwiseProduct',
           'FeatureHasher',
           'HashingTF',
           'IDF', 'IDFModel',
           'Imputer', 'ImputerModel',
           'IndexToString',
           'Interaction',
           'MaxAbsScaler', 'MaxAbsScalerModel',
           'MinHashLSH', 'MinHashLSHModel',
           'MinMaxScaler', 'MinMaxScalerModel',
           'NGram',
           'Normalizer',
           'OneHotEncoder', 'OneHotEncoderModel',
           'PCA', 'PCAModel',
           'PolynomialExpansion',
           'QuantileDiscretizer',
           'RobustScaler', 'RobustScalerModel',
           'RegexTokenizer',
           'RFormula', 'RFormulaModel',
           'SQLTransformer',
           'StandardScaler', 'StandardScalerModel',
           'StopWordsRemover',
           'StringIndexer', 'StringIndexerModel',
           'Tokenizer',
           'VectorAssembler',
           'VectorIndexer', 'VectorIndexerModel',
           'VectorSizeHint',
           'VectorSlicer',
           'Word2Vec', 'Word2VecModel']
@inherit_doc
class Binarizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    Binarize a column of continuous features given a threshold.
    >>> df = spark.createDataFrame([(0.5,)], ["values"])
    >>> binarizer = Binarizer(threshold=1.0, inputCol="values", outputCol="features")
    >>> binarizer.transform(df).head().features
    0.0
    >>> binarizer.setParams(outputCol="freqs").transform(df).head().freqs
    0.0
    >>> params = {binarizer.threshold: -0.5, binarizer.outputCol: "vector"}
    >>> binarizer.transform(df, params).head().vector
    1.0
    >>> binarizerPath = temp_path + "/binarizer"
    >>> binarizer.save(binarizerPath)
    >>> loadedBinarizer = Binarizer.load(binarizerPath)
    >>> loadedBinarizer.getThreshold() == binarizer.getThreshold()
    True
    .. versionadded:: 1.4.0
    """
    # Values > threshold map to 1.0; values <= threshold map to 0.0.
    threshold = Param(Params._dummy(), "threshold",
                      "Param for threshold used to binarize continuous features. " +
                      "The features greater than the threshold will be binarized to 1.0. " +
                      "The features equal to or less than the threshold will be binarized to 0.0",
                      typeConverter=TypeConverters.toFloat)
    @keyword_only
    def __init__(self, threshold=0.0, inputCol=None, outputCol=None):
        """
        __init__(self, threshold=0.0, inputCol=None, outputCol=None)
        """
        super(Binarizer, self).__init__()
        # Instantiate the JVM-side transformer this Python wrapper delegates to.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Binarizer", self.uid)
        self._setDefault(threshold=0.0)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, threshold=0.0, inputCol=None, outputCol=None):
        """
        setParams(self, threshold=0.0, inputCol=None, outputCol=None)
        Sets params for this Binarizer.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.4.0")
    def setThreshold(self, value):
        """
        Sets the value of :py:attr:`threshold`.
        """
        return self._set(threshold=value)
    @since("1.4.0")
    def getThreshold(self):
        """
        Gets the value of threshold or its default value.
        """
        return self.getOrDefault(self.threshold)
class LSHParams(Params):
    """
    Mixin for Locality Sensitive Hashing (LSH) algorithm parameters.
    """
    # Shared by all LSH estimators/models. More hash tables lower the false
    # negative rate at the cost of slower transforms.
    numHashTables = Param(Params._dummy(), "numHashTables", "number of hash tables, where " +
                          "increasing number of hash tables lowers the false negative rate, " +
                          "and decreasing it improves the running performance.",
                          typeConverter=TypeConverters.toInt)
    def __init__(self):
        super(LSHParams, self).__init__()
    def setNumHashTables(self, value):
        """
        Sets the value of :py:attr:`numHashTables`.
        """
        return self._set(numHashTables=value)
    def getNumHashTables(self):
        """
        Gets the value of numHashTables or its default value.
        """
        return self.getOrDefault(self.numHashTables)
class LSHModel(JavaModel):
    """
    Mixin for Locality Sensitive Hashing (LSH) models.
    """
    def approxNearestNeighbors(self, dataset, key, numNearestNeighbors, distCol="distCol"):
        """
        Given a large dataset and an item, approximately find at most k items which have the
        closest distance to the item. If the :py:attr:`outputCol` is missing, the method will
        transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows
        caching of the transformed data when necessary.
        .. note:: This method is experimental and will likely change behavior in the next release.
        :param dataset: The dataset to search for nearest neighbors of the key.
        :param key: Feature vector representing the item to search for.
        :param numNearestNeighbors: The maximum number of nearest neighbors.
        :param distCol: Output column for storing the distance between each result row and the key.
                        Use "distCol" as default value if it's not specified.
        :return: A dataset containing at most k items closest to the key. A column "distCol" is
                 added to show the distance between each row and the key.
        """
        # Delegate straight to the JVM-side implementation via Py4J.
        return self._call_java("approxNearestNeighbors", dataset, key, numNearestNeighbors,
                               distCol)
    def approxSimilarityJoin(self, datasetA, datasetB, threshold, distCol="distCol"):
        """
        Join two datasets to approximately find all pairs of rows whose distance are smaller than
        the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data;
        if the :py:attr:`outputCol` exists, it will use that. This allows caching of the
        transformed data when necessary.
        :param datasetA: One of the datasets to join.
        :param datasetB: Another dataset to join.
        :param threshold: The threshold for the distance of row pairs.
        :param distCol: Output column for storing the distance between each pair of rows. Use
                        "distCol" as default value if it's not specified.
        :return: A joined dataset containing pairs of rows. The original rows are in columns
                 "datasetA" and "datasetB", and a column "distCol" is added to show the distance
                 between each pair.
        """
        # Coerce numeric inputs (e.g. int) to float before crossing into the
        # JVM, which expects a double.
        threshold = TypeConverters.toFloat(threshold)
        return self._call_java("approxSimilarityJoin", datasetA, datasetB, threshold, distCol)
@inherit_doc
class BucketedRandomProjectionLSH(JavaEstimator, LSHParams, HasInputCol, HasOutputCol, HasSeed,
                                  JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental
    LSH class for Euclidean distance metrics.
    The input is dense or sparse vectors, each of which represents a point in the Euclidean
    distance space. The output will be vectors of configurable dimension. Hash values in the same
    dimension are calculated by the same hash function.
    .. seealso:: `Stable Distributions
        <https://en.wikipedia.org/wiki/Locality-sensitive_hashing#Stable_distributions>`_
    .. seealso:: `Hashing for Similarity Search: A Survey <https://arxiv.org/abs/1408.2927>`_
    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.sql.functions import col
    >>> data = [(0, Vectors.dense([-1.0, -1.0 ]),),
    ...         (1, Vectors.dense([-1.0, 1.0 ]),),
    ...         (2, Vectors.dense([1.0, -1.0 ]),),
    ...         (3, Vectors.dense([1.0, 1.0]),)]
    >>> df = spark.createDataFrame(data, ["id", "features"])
    >>> brp = BucketedRandomProjectionLSH(inputCol="features", outputCol="hashes",
    ...                                   seed=12345, bucketLength=1.0)
    >>> model = brp.fit(df)
    >>> model.transform(df).head()
    Row(id=0, features=DenseVector([-1.0, -1.0]), hashes=[DenseVector([-1.0])])
    >>> data2 = [(4, Vectors.dense([2.0, 2.0 ]),),
    ...          (5, Vectors.dense([2.0, 3.0 ]),),
    ...          (6, Vectors.dense([3.0, 2.0 ]),),
    ...          (7, Vectors.dense([3.0, 3.0]),)]
    >>> df2 = spark.createDataFrame(data2, ["id", "features"])
    >>> model.approxNearestNeighbors(df2, Vectors.dense([1.0, 2.0]), 1).collect()
    [Row(id=4, features=DenseVector([2.0, 2.0]), hashes=[DenseVector([1.0])], distCol=1.0)]
    >>> model.approxSimilarityJoin(df, df2, 3.0, distCol="EuclideanDistance").select(
    ...     col("datasetA.id").alias("idA"),
    ...     col("datasetB.id").alias("idB"),
    ...     col("EuclideanDistance")).show()
    +---+---+-----------------+
    |idA|idB|EuclideanDistance|
    +---+---+-----------------+
    |  3|  6| 2.23606797749979|
    +---+---+-----------------+
    ...
    >>> model.approxSimilarityJoin(df, df2, 3, distCol="EuclideanDistance").select(
    ...     col("datasetA.id").alias("idA"),
    ...     col("datasetB.id").alias("idB"),
    ...     col("EuclideanDistance")).show()
    +---+---+-----------------+
    |idA|idB|EuclideanDistance|
    +---+---+-----------------+
    |  3|  6| 2.23606797749979|
    +---+---+-----------------+
    ...
    >>> brpPath = temp_path + "/brp"
    >>> brp.save(brpPath)
    >>> brp2 = BucketedRandomProjectionLSH.load(brpPath)
    >>> brp2.getBucketLength() == brp.getBucketLength()
    True
    >>> modelPath = temp_path + "/brp-model"
    >>> model.save(modelPath)
    >>> model2 = BucketedRandomProjectionLSHModel.load(modelPath)
    >>> model.transform(df).head().hashes == model2.transform(df).head().hashes
    True
    .. versionadded:: 2.2.0
    """
    # A larger bucket length groups more distant points into the same bucket
    # (fewer false negatives, more false positives).
    bucketLength = Param(Params._dummy(), "bucketLength", "the length of each hash bucket, " +
                         "a larger bucket lowers the false negative rate.",
                         typeConverter=TypeConverters.toFloat)
    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1,
                 bucketLength=None):
        """
        __init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1, \
                 bucketLength=None)
        """
        super(BucketedRandomProjectionLSH, self).__init__()
        # Instantiate the JVM-side estimator this Python wrapper delegates to.
        self._java_obj = \
            self._new_java_obj("org.apache.spark.ml.feature.BucketedRandomProjectionLSH", self.uid)
        self._setDefault(numHashTables=1)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("2.2.0")
    def setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1,
                  bucketLength=None):
        """
        setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1, \
                  bucketLength=None)
        Sets params for this BucketedRandomProjectionLSH.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.2.0")
    def setBucketLength(self, value):
        """
        Sets the value of :py:attr:`bucketLength`.
        """
        return self._set(bucketLength=value)
    @since("2.2.0")
    def getBucketLength(self):
        """
        Gets the value of bucketLength or its default value.
        """
        return self.getOrDefault(self.bucketLength)
    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted Java model.
        return BucketedRandomProjectionLSHModel(java_model)
class BucketedRandomProjectionLSHModel(LSHModel, JavaMLReadable, JavaMLWritable):
    r"""
    .. note:: Experimental
    Model fitted by :py:class:`BucketedRandomProjectionLSH`, where multiple random vectors are
    stored. The vectors are normalized to be unit vectors and each vector is used in a hash
    function: :math:`h_i(x) = floor(r_i \cdot x / bucketLength)` where :math:`r_i` is the
    i-th random unit vector. The number of buckets will be `(max L2 norm of input vectors) /
    bucketLength`.
    .. versionadded:: 2.2.0
    """
    # All behavior lives on the JVM side; the Python API (transform,
    # approxNearestNeighbors, approxSimilarityJoin) is inherited from LSHModel.
@inherit_doc
class Bucketizer(JavaTransformer, HasInputCol, HasOutputCol, HasHandleInvalid,
                 JavaMLReadable, JavaMLWritable):
    """
    Maps a column of continuous features to a column of feature buckets.
    >>> values = [(0.1,), (0.4,), (1.2,), (1.5,), (float("nan"),), (float("nan"),)]
    >>> df = spark.createDataFrame(values, ["values"])
    >>> bucketizer = Bucketizer(splits=[-float("inf"), 0.5, 1.4, float("inf")],
    ...     inputCol="values", outputCol="buckets")
    >>> bucketed = bucketizer.setHandleInvalid("keep").transform(df).collect()
    >>> len(bucketed)
    6
    >>> bucketed[0].buckets
    0.0
    >>> bucketed[1].buckets
    0.0
    >>> bucketed[2].buckets
    1.0
    >>> bucketed[3].buckets
    2.0
    >>> bucketizer.setParams(outputCol="b").transform(df).head().b
    0.0
    >>> bucketizerPath = temp_path + "/bucketizer"
    >>> bucketizer.save(bucketizerPath)
    >>> loadedBucketizer = Bucketizer.load(bucketizerPath)
    >>> loadedBucketizer.getSplits() == bucketizer.getSplits()
    True
    >>> bucketed = bucketizer.setHandleInvalid("skip").transform(df).collect()
    >>> len(bucketed)
    4
    .. versionadded:: 1.4.0
    """
    splits = \
        Param(Params._dummy(), "splits",
              "Split points for mapping continuous features into buckets. With n+1 splits, " +
              "there are n buckets. A bucket defined by splits x,y holds values in the " +
              "range [x,y) except the last bucket, which also includes y. The splits " +
              "should be of length >= 3 and strictly increasing. Values at -inf, inf must be " +
              "explicitly provided to cover all Double values; otherwise, values outside the " +
              "splits specified will be treated as errors.",
              typeConverter=TypeConverters.toListFloat)
    # Overrides the shared HasHandleInvalid doc to describe bucketing-specific
    # behavior for NaN rows ('skip' / 'error' / 'keep').
    handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries "
                          "containing NaN values. Values outside the splits will always be treated "
                          "as errors. Options are 'skip' (filter out rows with invalid values), " +
                          "'error' (throw an error), or 'keep' (keep invalid values in a special " +
                          "additional bucket).",
                          typeConverter=TypeConverters.toString)
    @keyword_only
    def __init__(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error"):
        """
        __init__(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error")
        """
        super(Bucketizer, self).__init__()
        # Instantiate the JVM-side transformer this Python wrapper delegates to.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Bucketizer", self.uid)
        self._setDefault(handleInvalid="error")
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error"):
        """
        setParams(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error")
        Sets params for this Bucketizer.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.4.0")
    def setSplits(self, value):
        """
        Sets the value of :py:attr:`splits`.
        """
        return self._set(splits=value)
    @since("1.4.0")
    def getSplits(self):
        """
        Gets the value of splits or its default value.
        """
        return self.getOrDefault(self.splits)
class _CountVectorizerParams(JavaParams, HasInputCol, HasOutputCol):
    """
    Params for :py:attr:`CountVectorizer` and :py:attr:`CountVectorizerModel`.
    """
    # Per-document term-frequency floor; applied at transform time only.
    minTF = Param(
        Params._dummy(), "minTF", "Filter to ignore rare words in" +
        " a document. For each document, terms with frequency/count less than the given" +
        " threshold are ignored. If this is an integer >= 1, then this specifies a count (of" +
        " times the term must appear in the document); if this is a double in [0,1), then this " +
        "specifies a fraction (out of the document's token count). Note that the parameter is " +
        "only used in transform of CountVectorizerModel and does not affect fitting. Default 1.0",
        typeConverter=TypeConverters.toFloat)
    # Document-frequency floor; applied at fitting time to prune rare terms.
    minDF = Param(
        Params._dummy(), "minDF", "Specifies the minimum number of" +
        " different documents a term must appear in to be included in the vocabulary." +
        " If this is an integer >= 1, this specifies the number of documents the term must" +
        " appear in; if this is a double in [0,1), then this specifies the fraction of documents." +
        " Default 1.0", typeConverter=TypeConverters.toFloat)
    # Document-frequency ceiling; applied at fitting time to prune very
    # common terms (e.g. stop words).
    maxDF = Param(
        Params._dummy(), "maxDF", "Specifies the maximum number of" +
        " different documents a term could appear in to be included in the vocabulary." +
        " A term that appears more than the threshold will be ignored. If this is an" +
        " integer >= 1, this specifies the maximum number of documents the term could appear in;" +
        " if this is a double in [0,1), then this specifies the maximum" +
        " fraction of documents the term could appear in." +
        " Default (2^63) - 1", typeConverter=TypeConverters.toFloat)
    vocabSize = Param(
        Params._dummy(), "vocabSize", "max size of the vocabulary. Default 1 << 18.",
        typeConverter=TypeConverters.toInt)
    binary = Param(
        Params._dummy(), "binary", "Binary toggle to control the output vector values." +
        " If True, all nonzero counts (after minTF filter applied) are set to 1. This is useful" +
        " for discrete probabilistic models that model binary events rather than integer counts." +
        " Default False", typeConverter=TypeConverters.toBoolean)
    def __init__(self, *args):
        super(_CountVectorizerParams, self).__init__(*args)
        self._setDefault(minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18, binary=False)
    @since("1.6.0")
    def getMinTF(self):
        """
        Gets the value of minTF or its default value.
        """
        return self.getOrDefault(self.minTF)
    @since("1.6.0")
    def getMinDF(self):
        """
        Gets the value of minDF or its default value.
        """
        return self.getOrDefault(self.minDF)
    @since("2.4.0")
    def getMaxDF(self):
        """
        Gets the value of maxDF or its default value.
        """
        return self.getOrDefault(self.maxDF)
    @since("1.6.0")
    def getVocabSize(self):
        """
        Gets the value of vocabSize or its default value.
        """
        return self.getOrDefault(self.vocabSize)
    @since("2.0.0")
    def getBinary(self):
        """
        Gets the value of binary or its default value.
        """
        return self.getOrDefault(self.binary)
@inherit_doc
class CountVectorizer(JavaEstimator, _CountVectorizerParams, JavaMLReadable, JavaMLWritable):
    """
    Extracts a vocabulary from document collections and generates a :py:class:`CountVectorizerModel`.
    >>> df = spark.createDataFrame(
    ...    [(0, ["a", "b", "c"]), (1, ["a", "b", "b", "c", "a"])],
    ...    ["label", "raw"])
    >>> cv = CountVectorizer(inputCol="raw", outputCol="vectors")
    >>> model = cv.fit(df)
    >>> model.transform(df).show(truncate=False)
    +-----+---------------+-------------------------+
    |label|raw            |vectors                  |
    +-----+---------------+-------------------------+
    |0    |[a, b, c]      |(3,[0,1,2],[1.0,1.0,1.0])|
    |1    |[a, b, b, c, a]|(3,[0,1,2],[2.0,2.0,1.0])|
    +-----+---------------+-------------------------+
    ...
    >>> sorted(model.vocabulary) == ['a', 'b', 'c']
    True
    >>> countVectorizerPath = temp_path + "/count-vectorizer"
    >>> cv.save(countVectorizerPath)
    >>> loadedCv = CountVectorizer.load(countVectorizerPath)
    >>> loadedCv.getMinDF() == cv.getMinDF()
    True
    >>> loadedCv.getMinTF() == cv.getMinTF()
    True
    >>> loadedCv.getVocabSize() == cv.getVocabSize()
    True
    >>> modelPath = temp_path + "/count-vectorizer-model"
    >>> model.save(modelPath)
    >>> loadedModel = CountVectorizerModel.load(modelPath)
    >>> loadedModel.vocabulary == model.vocabulary
    True
    >>> fromVocabModel = CountVectorizerModel.from_vocabulary(["a", "b", "c"],
    ...     inputCol="raw", outputCol="vectors")
    >>> fromVocabModel.transform(df).show(truncate=False)
    +-----+---------------+-------------------------+
    |label|raw            |vectors                  |
    +-----+---------------+-------------------------+
    |0    |[a, b, c]      |(3,[0,1,2],[1.0,1.0,1.0])|
    |1    |[a, b, b, c, a]|(3,[0,1,2],[2.0,2.0,1.0])|
    +-----+---------------+-------------------------+
    ...
    .. versionadded:: 1.6.0
    """
    @keyword_only
    def __init__(self, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18, binary=False,
                 inputCol=None, outputCol=None):
        """
        __init__(self, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18, binary=False,\
                 inputCol=None,outputCol=None)
        """
        super(CountVectorizer, self).__init__()
        # Instantiate the backing Scala estimator through the Py4J gateway;
        # param defaults come from _CountVectorizerParams.__init__.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.CountVectorizer",
                                            self.uid)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.6.0")
    def setParams(self, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18, binary=False,
                  inputCol=None, outputCol=None):
        """
        setParams(self, minTF=1.0, minDF=1.0, maxDF=2 ** 63 - 1, vocabSize=1 << 18, binary=False,\
                  inputCol=None, outputCol=None)
        Set the params for the CountVectorizer
        """
        # Only explicitly passed keywords are set; unspecified params keep
        # their current/default values.
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.6.0")
    def setMinTF(self, value):
        """
        Sets the value of :py:attr:`minTF`.
        """
        return self._set(minTF=value)
    @since("1.6.0")
    def setMinDF(self, value):
        """
        Sets the value of :py:attr:`minDF`.
        """
        return self._set(minDF=value)
    @since("2.4.0")
    def setMaxDF(self, value):
        """
        Sets the value of :py:attr:`maxDF`.
        """
        return self._set(maxDF=value)
    @since("1.6.0")
    def setVocabSize(self, value):
        """
        Sets the value of :py:attr:`vocabSize`.
        """
        return self._set(vocabSize=value)
    @since("2.0.0")
    def setBinary(self, value):
        """
        Sets the value of :py:attr:`binary`.
        """
        return self._set(binary=value)
    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return CountVectorizerModel(java_model)
@inherit_doc
class CountVectorizerModel(JavaModel, _CountVectorizerParams, JavaMLReadable, JavaMLWritable):
    """
    Model fitted by :py:class:`CountVectorizer`.
    .. versionadded:: 1.6.0
    """
    @classmethod
    @since("2.4.0")
    def from_vocabulary(cls, vocabulary, inputCol, outputCol=None, minTF=None, binary=None):
        """
        Construct the model directly from a vocabulary list of strings,
        requires an active SparkContext.
        """
        sc = SparkContext._active_spark_context
        # Copy the Python vocabulary into a JVM String[] so it can be passed
        # to the Scala CountVectorizerModel constructor.
        java_class = sc._gateway.jvm.java.lang.String
        jvocab = CountVectorizerModel._new_java_array(vocabulary, java_class)
        model = CountVectorizerModel._create_from_java_class(
            "org.apache.spark.ml.feature.CountVectorizerModel", jvocab)
        model.setInputCol(inputCol)
        # Optional params are only set when given, so JVM-side defaults apply
        # otherwise.
        if outputCol is not None:
            model.setOutputCol(outputCol)
        if minTF is not None:
            model.setMinTF(minTF)
        if binary is not None:
            model.setBinary(binary)
        # Record vocabSize on the Python side so getVocabSize() reflects the
        # supplied vocabulary.
        model._set(vocabSize=len(vocabulary))
        return model
    @property
    @since("1.6.0")
    def vocabulary(self):
        """
        An array of terms in the vocabulary.
        """
        return self._call_java("vocabulary")
    @since("2.4.0")
    def setMinTF(self, value):
        """
        Sets the value of :py:attr:`minTF`.
        """
        return self._set(minTF=value)
    @since("2.4.0")
    def setBinary(self, value):
        """
        Sets the value of :py:attr:`binary`.
        """
        return self._set(binary=value)
@inherit_doc
class DCT(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    A feature transformer that takes the 1D discrete cosine transform
    of a real vector. No zero padding is performed on the input vector.
    It returns a real vector of the same length representing the DCT.
    The return vector is scaled such that the transform matrix is
    unitary (aka scaled DCT-II).
    .. seealso:: `More information on Wikipedia
        <https://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II>`_.
    >>> from pyspark.ml.linalg import Vectors
    >>> df1 = spark.createDataFrame([(Vectors.dense([5.0, 8.0, 6.0]),)], ["vec"])
    >>> dct = DCT(inverse=False, inputCol="vec", outputCol="resultVec")
    >>> df2 = dct.transform(df1)
    >>> df2.head().resultVec
    DenseVector([10.969..., -0.707..., -2.041...])
    >>> df3 = DCT(inverse=True, inputCol="resultVec", outputCol="origVec").transform(df2)
    >>> df3.head().origVec
    DenseVector([5.0, 8.0, 6.0])
    >>> dctPath = temp_path + "/dct"
    >>> dct.save(dctPath)
    >>> loadedDtc = DCT.load(dctPath)
    >>> loadedDtc.getInverse()
    False
    .. versionadded:: 1.6.0
    """
    # When True, the inverse DCT (scaled DCT-III) is applied instead.
    inverse = Param(Params._dummy(), "inverse", "Set transformer to perform inverse DCT, " +
                    "default False.", typeConverter=TypeConverters.toBoolean)
    @keyword_only
    def __init__(self, inverse=False, inputCol=None, outputCol=None):
        """
        __init__(self, inverse=False, inputCol=None, outputCol=None)
        """
        super(DCT, self).__init__()
        # Instantiate the backing Scala transformer through the Py4J gateway.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.DCT", self.uid)
        self._setDefault(inverse=False)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.6.0")
    def setParams(self, inverse=False, inputCol=None, outputCol=None):
        """
        setParams(self, inverse=False, inputCol=None, outputCol=None)
        Sets params for this DCT.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.6.0")
    def setInverse(self, value):
        """
        Sets the value of :py:attr:`inverse`.
        """
        return self._set(inverse=value)
    @since("1.6.0")
    def getInverse(self):
        """
        Gets the value of inverse or its default value.
        """
        return self.getOrDefault(self.inverse)
@inherit_doc
class ElementwiseProduct(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable,
                         JavaMLWritable):
    """
    Outputs the Hadamard product (i.e., the element-wise product) of each input vector
    with a provided "weight" vector. In other words, it scales each column of the dataset
    by a scalar multiplier.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([(Vectors.dense([2.0, 1.0, 3.0]),)], ["values"])
    >>> ep = ElementwiseProduct(scalingVec=Vectors.dense([1.0, 2.0, 3.0]),
    ...     inputCol="values", outputCol="eprod")
    >>> ep.transform(df).head().eprod
    DenseVector([2.0, 2.0, 9.0])
    >>> ep.setParams(scalingVec=Vectors.dense([2.0, 3.0, 5.0])).transform(df).head().eprod
    DenseVector([4.0, 3.0, 15.0])
    >>> elementwiseProductPath = temp_path + "/elementwise-product"
    >>> ep.save(elementwiseProductPath)
    >>> loadedEp = ElementwiseProduct.load(elementwiseProductPath)
    >>> loadedEp.getScalingVec() == ep.getScalingVec()
    True
    .. versionadded:: 1.5.0
    """
    # The per-dimension multipliers; must match the input vector length
    # (enforced on the JVM side).
    scalingVec = Param(Params._dummy(), "scalingVec", "Vector for hadamard product.",
                       typeConverter=TypeConverters.toVector)
    @keyword_only
    def __init__(self, scalingVec=None, inputCol=None, outputCol=None):
        """
        __init__(self, scalingVec=None, inputCol=None, outputCol=None)
        """
        super(ElementwiseProduct, self).__init__()
        # Instantiate the backing Scala transformer through the Py4J gateway.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ElementwiseProduct",
                                            self.uid)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.5.0")
    def setParams(self, scalingVec=None, inputCol=None, outputCol=None):
        """
        setParams(self, scalingVec=None, inputCol=None, outputCol=None)
        Sets params for this ElementwiseProduct.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.0.0")
    def setScalingVec(self, value):
        """
        Sets the value of :py:attr:`scalingVec`.
        """
        return self._set(scalingVec=value)
    @since("2.0.0")
    def getScalingVec(self):
        """
        Gets the value of scalingVec or its default value.
        """
        return self.getOrDefault(self.scalingVec)
@inherit_doc
class FeatureHasher(JavaTransformer, HasInputCols, HasOutputCol, HasNumFeatures, JavaMLReadable,
                    JavaMLWritable):
    """
    .. note:: Experimental
    Feature hashing projects a set of categorical or numerical features into a feature vector of
    specified dimension (typically substantially smaller than that of the original feature
    space). This is done using the hashing trick (https://en.wikipedia.org/wiki/Feature_hashing)
    to map features to indices in the feature vector.
    The FeatureHasher transformer operates on multiple columns. Each column may contain either
    numeric or categorical features. Behavior and handling of column data types is as follows:
    * Numeric columns:
        For numeric features, the hash value of the column name is used to map the
        feature value to its index in the feature vector. By default, numeric features
        are not treated as categorical (even when they are integers). To treat them
        as categorical, specify the relevant columns in `categoricalCols`.
    * String columns:
        For categorical features, the hash value of the string "column_name=value"
        is used to map to the vector index, with an indicator value of `1.0`.
        Thus, categorical features are "one-hot" encoded
        (similarly to using :py:class:`OneHotEncoder` with `dropLast=false`).
    * Boolean columns:
        Boolean values are treated in the same way as string columns. That is,
        boolean features are represented as "column_name=true" or "column_name=false",
        with an indicator value of `1.0`.
    Null (missing) values are ignored (implicitly zero in the resulting feature vector).
    Since a simple modulo is used to transform the hash function to a vector index,
    it is advisable to use a power of two as the `numFeatures` parameter;
    otherwise the features will not be mapped evenly to the vector indices.
    >>> data = [(2.0, True, "1", "foo"), (3.0, False, "2", "bar")]
    >>> cols = ["real", "bool", "stringNum", "string"]
    >>> df = spark.createDataFrame(data, cols)
    >>> hasher = FeatureHasher(inputCols=cols, outputCol="features")
    >>> hasher.transform(df).head().features
    SparseVector(262144, {174475: 2.0, 247670: 1.0, 257907: 1.0, 262126: 1.0})
    >>> hasher.setCategoricalCols(["real"]).transform(df).head().features
    SparseVector(262144, {171257: 1.0, 247670: 1.0, 257907: 1.0, 262126: 1.0})
    >>> hasherPath = temp_path + "/hasher"
    >>> hasher.save(hasherPath)
    >>> loadedHasher = FeatureHasher.load(hasherPath)
    >>> loadedHasher.getNumFeatures() == hasher.getNumFeatures()
    True
    >>> loadedHasher.transform(df).head().features == hasher.transform(df).head().features
    True
    .. versionadded:: 2.3.0
    """
    categoricalCols = Param(Params._dummy(), "categoricalCols",
                            "numeric columns to treat as categorical",
                            typeConverter=TypeConverters.toListString)
    @keyword_only
    def __init__(self, numFeatures=1 << 18, inputCols=None, outputCol=None, categoricalCols=None):
        """
        __init__(self, numFeatures=1 << 18, inputCols=None, outputCol=None, categoricalCols=None)
        """
        super(FeatureHasher, self).__init__()
        # Instantiate the backing Scala transformer through the Py4J gateway.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.FeatureHasher", self.uid)
        self._setDefault(numFeatures=1 << 18)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("2.3.0")
    def setParams(self, numFeatures=1 << 18, inputCols=None, outputCol=None, categoricalCols=None):
        """
        setParams(self, numFeatures=1 << 18, inputCols=None, outputCol=None, categoricalCols=None)
        Sets params for this FeatureHasher.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.3.0")
    def setCategoricalCols(self, value):
        """
        Sets the value of :py:attr:`categoricalCols`.
        """
        return self._set(categoricalCols=value)
    @since("2.3.0")
    def getCategoricalCols(self):
        """
        Gets the value of categoricalCols or its default value.
        """
        return self.getOrDefault(self.categoricalCols)
@inherit_doc
class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures, JavaMLReadable,
                JavaMLWritable):
    """
    Maps a sequence of terms to their term frequencies using the hashing trick.
    Currently we use Austin Appleby's MurmurHash 3 algorithm (MurmurHash3_x86_32)
    to calculate the hash code value for the term object.
    Since a simple modulo is used to transform the hash function to a column index,
    it is advisable to use a power of two as the numFeatures parameter;
    otherwise the features will not be mapped evenly to the columns.
    >>> df = spark.createDataFrame([(["a", "b", "c"],)], ["words"])
    >>> hashingTF = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
    >>> hashingTF.transform(df).head().features
    SparseVector(10, {5: 1.0, 7: 1.0, 8: 1.0})
    >>> hashingTF.setParams(outputCol="freqs").transform(df).head().freqs
    SparseVector(10, {5: 1.0, 7: 1.0, 8: 1.0})
    >>> params = {hashingTF.numFeatures: 5, hashingTF.outputCol: "vector"}
    >>> hashingTF.transform(df, params).head().vector
    SparseVector(5, {0: 1.0, 2: 1.0, 3: 1.0})
    >>> hashingTFPath = temp_path + "/hashing-tf"
    >>> hashingTF.save(hashingTFPath)
    >>> loadedHashingTF = HashingTF.load(hashingTFPath)
    >>> loadedHashingTF.getNumFeatures() == hashingTF.getNumFeatures()
    True
    >>> hashingTF.indexOf("b")
    5
    .. versionadded:: 1.3.0
    """
    binary = Param(Params._dummy(), "binary", "If True, all non zero counts are set to 1. " +
                   "This is useful for discrete probabilistic models that model binary events "
                   "rather than integer counts. Default False.",
                   typeConverter=TypeConverters.toBoolean)
    @keyword_only
    def __init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
        """
        __init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
        """
        super(HashingTF, self).__init__()
        # Instantiate the backing Scala transformer through the Py4J gateway.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.HashingTF", self.uid)
        self._setDefault(numFeatures=1 << 18, binary=False)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.3.0")
    def setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
        """
        setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
        Sets params for this HashingTF.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.0.0")
    def setBinary(self, value):
        """
        Sets the value of :py:attr:`binary`.
        """
        return self._set(binary=value)
    @since("2.0.0")
    def getBinary(self):
        """
        Gets the value of binary or its default value.
        """
        return self.getOrDefault(self.binary)
    @since("3.0.0")
    def indexOf(self, term):
        """
        Returns the index of the input term.
        """
        # Sync Python-side params (e.g. numFeatures) to the JVM object first,
        # so the returned index reflects the current configuration.
        self._transfer_params_to_java()
        return self._java_obj.indexOf(term)
@inherit_doc
class IDF(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    Compute the Inverse Document Frequency (IDF) given a collection of documents.
    >>> from pyspark.ml.linalg import DenseVector
    >>> df = spark.createDataFrame([(DenseVector([1.0, 2.0]),),
    ...     (DenseVector([0.0, 1.0]),), (DenseVector([3.0, 0.2]),)], ["tf"])
    >>> idf = IDF(minDocFreq=3, inputCol="tf", outputCol="idf")
    >>> model = idf.fit(df)
    >>> model.idf
    DenseVector([0.0, 0.0])
    >>> model.docFreq
    [0, 3]
    >>> model.numDocs == df.count()
    True
    >>> model.transform(df).head().idf
    DenseVector([0.0, 0.0])
    >>> idf.setParams(outputCol="freqs").fit(df).transform(df).collect()[1].freqs
    DenseVector([0.0, 0.0])
    >>> params = {idf.minDocFreq: 1, idf.outputCol: "vector"}
    >>> idf.fit(df, params).transform(df).head().vector
    DenseVector([0.2877, 0.0])
    >>> idfPath = temp_path + "/idf"
    >>> idf.save(idfPath)
    >>> loadedIdf = IDF.load(idfPath)
    >>> loadedIdf.getMinDocFreq() == idf.getMinDocFreq()
    True
    >>> modelPath = temp_path + "/idf-model"
    >>> model.save(modelPath)
    >>> loadedModel = IDFModel.load(modelPath)
    >>> loadedModel.transform(df).head().idf == model.transform(df).head().idf
    True
    .. versionadded:: 1.4.0
    """
    # Terms appearing in fewer than minDocFreq documents get an IDF of 0.
    minDocFreq = Param(Params._dummy(), "minDocFreq",
                       "minimum number of documents in which a term should appear for filtering",
                       typeConverter=TypeConverters.toInt)
    @keyword_only
    def __init__(self, minDocFreq=0, inputCol=None, outputCol=None):
        """
        __init__(self, minDocFreq=0, inputCol=None, outputCol=None)
        """
        super(IDF, self).__init__()
        # Instantiate the backing Scala estimator through the Py4J gateway.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IDF", self.uid)
        self._setDefault(minDocFreq=0)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, minDocFreq=0, inputCol=None, outputCol=None):
        """
        setParams(self, minDocFreq=0, inputCol=None, outputCol=None)
        Sets params for this IDF.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.4.0")
    def setMinDocFreq(self, value):
        """
        Sets the value of :py:attr:`minDocFreq`.
        """
        return self._set(minDocFreq=value)
    @since("1.4.0")
    def getMinDocFreq(self):
        """
        Gets the value of minDocFreq or its default value.
        """
        return self.getOrDefault(self.minDocFreq)
    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return IDFModel(java_model)
class IDFModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    Model fitted by :py:class:`IDF`.
    .. versionadded:: 1.4.0
    """
    # All accessors delegate to the fitted JVM model via the Py4J gateway.
    @property
    @since("2.0.0")
    def idf(self):
        """
        Returns the IDF vector.
        """
        return self._call_java("idf")
    @property
    @since("3.0.0")
    def docFreq(self):
        """
        Returns the document frequency.
        """
        return self._call_java("docFreq")
    @property
    @since("3.0.0")
    def numDocs(self):
        """
        Returns number of documents evaluated to compute idf
        """
        return self._call_java("numDocs")
@inherit_doc
class Imputer(JavaEstimator, HasInputCols, JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental
    Imputation estimator for completing missing values, either using the mean or the median
    of the columns in which the missing values are located. The input columns should be of
    DoubleType or FloatType. Currently Imputer does not support categorical features and
    possibly creates incorrect values for a categorical feature.
    Note that the mean/median value is computed after filtering out missing values.
    All Null values in the input columns are treated as missing, and so are also imputed. For
    computing median, :py:meth:`pyspark.sql.DataFrame.approxQuantile` is used with a
    relative error of `0.001`.
    >>> df = spark.createDataFrame([(1.0, float("nan")), (2.0, float("nan")), (float("nan"), 3.0),
    ...     (4.0, 4.0), (5.0, 5.0)], ["a", "b"])
    >>> imputer = Imputer(inputCols=["a", "b"], outputCols=["out_a", "out_b"])
    >>> model = imputer.fit(df)
    >>> model.surrogateDF.show()
    +---+---+
    |  a|  b|
    +---+---+
    |3.0|4.0|
    +---+---+
    ...
    >>> model.transform(df).show()
    +---+---+-----+-----+
    |  a|  b|out_a|out_b|
    +---+---+-----+-----+
    |1.0|NaN|  1.0|  4.0|
    |2.0|NaN|  2.0|  4.0|
    |NaN|3.0|  3.0|  3.0|
    ...
    >>> imputer.setStrategy("median").setMissingValue(1.0).fit(df).transform(df).show()
    +---+---+-----+-----+
    |  a|  b|out_a|out_b|
    +---+---+-----+-----+
    |1.0|NaN|  4.0|  NaN|
    ...
    >>> imputerPath = temp_path + "/imputer"
    >>> imputer.save(imputerPath)
    >>> loadedImputer = Imputer.load(imputerPath)
    >>> loadedImputer.getStrategy() == imputer.getStrategy()
    True
    >>> loadedImputer.getMissingValue()
    1.0
    >>> modelPath = temp_path + "/imputer-model"
    >>> model.save(modelPath)
    >>> loadedModel = ImputerModel.load(modelPath)
    >>> loadedModel.transform(df).head().out_a == model.transform(df).head().out_a
    True
    .. versionadded:: 2.2.0
    """
    outputCols = Param(Params._dummy(), "outputCols",
                       "output column names.", typeConverter=TypeConverters.toListString)
    # Accepted values are "mean" and "median"; validated on the JVM side.
    strategy = Param(Params._dummy(), "strategy",
                     "strategy for imputation. If mean, then replace missing values using the mean "
                     "value of the feature. If median, then replace missing values using the "
                     "median value of the feature.",
                     typeConverter=TypeConverters.toString)
    missingValue = Param(Params._dummy(), "missingValue",
                         "The placeholder for the missing values. All occurrences of missingValue "
                         "will be imputed.", typeConverter=TypeConverters.toFloat)
    @keyword_only
    def __init__(self, strategy="mean", missingValue=float("nan"), inputCols=None,
                 outputCols=None):
        """
        __init__(self, strategy="mean", missingValue=float("nan"), inputCols=None, \
                 outputCols=None):
        """
        super(Imputer, self).__init__()
        # Instantiate the backing Scala estimator through the Py4J gateway.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Imputer", self.uid)
        self._setDefault(strategy="mean", missingValue=float("nan"))
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("2.2.0")
    def setParams(self, strategy="mean", missingValue=float("nan"), inputCols=None,
                  outputCols=None):
        """
        setParams(self, strategy="mean", missingValue=float("nan"), inputCols=None, \
                  outputCols=None)
        Sets params for this Imputer.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.2.0")
    def setOutputCols(self, value):
        """
        Sets the value of :py:attr:`outputCols`.
        """
        return self._set(outputCols=value)
    @since("2.2.0")
    def getOutputCols(self):
        """
        Gets the value of :py:attr:`outputCols` or its default value.
        """
        return self.getOrDefault(self.outputCols)
    @since("2.2.0")
    def setStrategy(self, value):
        """
        Sets the value of :py:attr:`strategy`.
        """
        return self._set(strategy=value)
    @since("2.2.0")
    def getStrategy(self):
        """
        Gets the value of :py:attr:`strategy` or its default value.
        """
        return self.getOrDefault(self.strategy)
    @since("2.2.0")
    def setMissingValue(self, value):
        """
        Sets the value of :py:attr:`missingValue`.
        """
        return self._set(missingValue=value)
    @since("2.2.0")
    def getMissingValue(self):
        """
        Gets the value of :py:attr:`missingValue` or its default value.
        """
        return self.getOrDefault(self.missingValue)
    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return ImputerModel(java_model)
class ImputerModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental
    Model fitted by :py:class:`Imputer`.
    .. versionadded:: 2.2.0
    """
    @property
    @since("2.2.0")
    def surrogateDF(self):
        """
        Returns a DataFrame containing inputCols and their corresponding surrogates,
        which are used to replace the missing values in the input DataFrame.
        """
        # Delegates to the fitted JVM model via the Py4J gateway.
        return self._call_java("surrogateDF")
@inherit_doc
class Interaction(JavaTransformer, HasInputCols, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    Implements the feature interaction transform. This transformer takes in Double and Vector type
    columns and outputs a flattened vector of their feature interactions. To handle interaction,
    we first one-hot encode any nominal features. Then, a vector of the feature cross-products is
    produced.
    For example, given the input feature values `Double(2)` and `Vector(3, 4)`, the output would be
    `Vector(6, 8)` if all input features were numeric. If the first feature was instead nominal
    with four categories, the output would then be `Vector(0, 0, 0, 0, 3, 4, 0, 0)`.
    >>> df = spark.createDataFrame([(0.0, 1.0), (2.0, 3.0)], ["a", "b"])
    >>> interaction = Interaction(inputCols=["a", "b"], outputCol="ab")
    >>> interaction.transform(df).show()
    +---+---+-----+
    |  a|  b|   ab|
    +---+---+-----+
    |0.0|1.0|[0.0]|
    |2.0|3.0|[6.0]|
    +---+---+-----+
    ...
    >>> interactionPath = temp_path + "/interaction"
    >>> interaction.save(interactionPath)
    >>> loadedInteraction = Interaction.load(interactionPath)
    >>> loadedInteraction.transform(df).head().ab == interaction.transform(df).head().ab
    True
    .. versionadded:: 3.0.0
    """
    @keyword_only
    def __init__(self, inputCols=None, outputCol=None):
        """
        __init__(self, inputCols=None, outputCol=None)
        """
        super(Interaction, self).__init__()
        # Instantiate the backing Scala transformer through the Py4J gateway.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Interaction", self.uid)
        # No params carry defaults; this call is a no-op kept for symmetry
        # with the other transformers in this module.
        self._setDefault()
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("3.0.0")
    def setParams(self, inputCols=None, outputCol=None):
        """
        setParams(self, inputCols=None, outputCol=None)
        Sets params for this Interaction.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
@inherit_doc
class MaxAbsScaler(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    Rescale each feature individually to range [-1, 1] by dividing through the largest maximum
    absolute value in each feature. It does not shift/center the data, and thus does not destroy
    any sparsity.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([(Vectors.dense([1.0]),), (Vectors.dense([2.0]),)], ["a"])
    >>> maScaler = MaxAbsScaler(inputCol="a", outputCol="scaled")
    >>> model = maScaler.fit(df)
    >>> model.transform(df).show()
    +-----+------+
    |    a|scaled|
    +-----+------+
    |[1.0]| [0.5]|
    |[2.0]| [1.0]|
    +-----+------+
    ...
    >>> scalerPath = temp_path + "/max-abs-scaler"
    >>> maScaler.save(scalerPath)
    >>> loadedMAScaler = MaxAbsScaler.load(scalerPath)
    >>> loadedMAScaler.getInputCol() == maScaler.getInputCol()
    True
    >>> loadedMAScaler.getOutputCol() == maScaler.getOutputCol()
    True
    >>> modelPath = temp_path + "/max-abs-scaler-model"
    >>> model.save(modelPath)
    >>> loadedModel = MaxAbsScalerModel.load(modelPath)
    >>> loadedModel.maxAbs == model.maxAbs
    True
    .. versionadded:: 2.0.0
    """
    @keyword_only
    def __init__(self, inputCol=None, outputCol=None):
        """
        __init__(self, inputCol=None, outputCol=None)
        """
        super(MaxAbsScaler, self).__init__()
        # Instantiate the backing Scala estimator through the Py4J gateway.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MaxAbsScaler", self.uid)
        # No params carry defaults; this call is a no-op kept for symmetry
        # with the other estimators in this module.
        self._setDefault()
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("2.0.0")
    def setParams(self, inputCol=None, outputCol=None):
        """
        setParams(self, inputCol=None, outputCol=None)
        Sets params for this MaxAbsScaler.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return MaxAbsScalerModel(java_model)
class MaxAbsScalerModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    Model fitted by :py:class:`MaxAbsScaler`.
    .. versionadded:: 2.0.0
    """
    @property
    @since("2.0.0")
    def maxAbs(self):
        """
        Max Abs vector.
        """
        # Delegates to the fitted JVM model via the Py4J gateway.
        return self._call_java("maxAbs")
@inherit_doc
class MinHashLSH(JavaEstimator, LSHParams, HasInputCol, HasOutputCol, HasSeed,
                 JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental
    LSH class for Jaccard distance.
    The input can be dense or sparse vectors, but it is more efficient if it is sparse.
    For example, `Vectors.sparse(10, [(2, 1.0), (3, 1.0), (5, 1.0)])` means there are 10 elements
    in the space. This set contains elements 2, 3, and 5. Also, any input vector must have at
    least 1 non-zero index, and all non-zero values are treated as binary "1" values.
    .. seealso:: `Wikipedia on MinHash <https://en.wikipedia.org/wiki/MinHash>`_
    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.sql.functions import col
    >>> data = [(0, Vectors.sparse(6, [0, 1, 2], [1.0, 1.0, 1.0]),),
    ...         (1, Vectors.sparse(6, [2, 3, 4], [1.0, 1.0, 1.0]),),
    ...         (2, Vectors.sparse(6, [0, 2, 4], [1.0, 1.0, 1.0]),)]
    >>> df = spark.createDataFrame(data, ["id", "features"])
    >>> mh = MinHashLSH(inputCol="features", outputCol="hashes", seed=12345)
    >>> model = mh.fit(df)
    >>> model.transform(df).head()
    Row(id=0, features=SparseVector(6, {0: 1.0, 1: 1.0, 2: 1.0}), hashes=[DenseVector([6179668...
    >>> data2 = [(3, Vectors.sparse(6, [1, 3, 5], [1.0, 1.0, 1.0]),),
    ...          (4, Vectors.sparse(6, [2, 3, 5], [1.0, 1.0, 1.0]),),
    ...          (5, Vectors.sparse(6, [1, 2, 4], [1.0, 1.0, 1.0]),)]
    >>> df2 = spark.createDataFrame(data2, ["id", "features"])
    >>> key = Vectors.sparse(6, [1, 2], [1.0, 1.0])
    >>> model.approxNearestNeighbors(df2, key, 1).collect()
    [Row(id=5, features=SparseVector(6, {1: 1.0, 2: 1.0, 4: 1.0}), hashes=[DenseVector([6179668...
    >>> model.approxSimilarityJoin(df, df2, 0.6, distCol="JaccardDistance").select(
    ...     col("datasetA.id").alias("idA"),
    ...     col("datasetB.id").alias("idB"),
    ...     col("JaccardDistance")).show()
    +---+---+---------------+
    |idA|idB|JaccardDistance|
    +---+---+---------------+
    |  0|  5|            0.5|
    |  1|  4|            0.5|
    +---+---+---------------+
    ...
    >>> mhPath = temp_path + "/mh"
    >>> mh.save(mhPath)
    >>> mh2 = MinHashLSH.load(mhPath)
    >>> mh2.getOutputCol() == mh.getOutputCol()
    True
    >>> modelPath = temp_path + "/mh-model"
    >>> model.save(modelPath)
    >>> model2 = MinHashLSHModel.load(modelPath)
    >>> model.transform(df).head().hashes == model2.transform(df).head().hashes
    True
    .. versionadded:: 2.2.0
    """
    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1):
        """
        __init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1)
        """
        super(MinHashLSH, self).__init__()
        # Instantiate the backing Scala estimator through the Py4J gateway.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinHashLSH", self.uid)
        self._setDefault(numHashTables=1)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("2.2.0")
    def setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1):
        """
        setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1)
        Sets params for this MinHashLSH.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return MinHashLSHModel(java_model)
class MinHashLSHModel(LSHModel, JavaMLReadable, JavaMLWritable):
    r"""
    .. note:: Experimental
    Model produced by :py:class:`MinHashLSH`, where multiple hash functions are stored. Each
    hash function is picked from the following family of hash functions, where :math:`a_i` and
    :math:`b_i` are randomly chosen integers less than prime:
    :math:`h_i(x) = ((x \cdot a_i + b_i) \mod prime)` This hash family is approximately min-wise
    independent according to the reference.
    .. seealso:: Tom Bohman, Colin Cooper, and Alan Frieze. "Min-wise independent linear
        permutations." Electronic Journal of Combinatorics 7 (2000): R26.
    .. versionadded:: 2.2.0
    """
@inherit_doc
class MinMaxScaler(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    Rescale each feature individually to a common range [min, max] linearly using column summary
    statistics, which is also known as min-max normalization or Rescaling. The rescaled value for
    feature E is calculated as,
    Rescaled(e_i) = (e_i - E_min) / (E_max - E_min) * (max - min) + min
    For the case E_max == E_min, Rescaled(e_i) = 0.5 * (max + min)
    .. note:: Since zero values will probably be transformed to non-zero values, output of the
        transformer will be DenseVector even for sparse input.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"])
    >>> mmScaler = MinMaxScaler(inputCol="a", outputCol="scaled")
    >>> model = mmScaler.fit(df)
    >>> model.originalMin
    DenseVector([0.0])
    >>> model.originalMax
    DenseVector([2.0])
    >>> model.transform(df).show()
    +-----+------+
    |    a|scaled|
    +-----+------+
    |[0.0]| [0.0]|
    |[2.0]| [1.0]|
    +-----+------+
    ...
    >>> minMaxScalerPath = temp_path + "/min-max-scaler"
    >>> mmScaler.save(minMaxScalerPath)
    >>> loadedMMScaler = MinMaxScaler.load(minMaxScalerPath)
    >>> loadedMMScaler.getMin() == mmScaler.getMin()
    True
    >>> loadedMMScaler.getMax() == mmScaler.getMax()
    True
    >>> modelPath = temp_path + "/min-max-scaler-model"
    >>> model.save(modelPath)
    >>> loadedModel = MinMaxScalerModel.load(modelPath)
    >>> loadedModel.originalMin == model.originalMin
    True
    >>> loadedModel.originalMax == model.originalMax
    True
    .. versionadded:: 1.6.0
    """
    # NOTE: these Param names intentionally shadow the builtins `min`/`max`;
    # they are part of the public API and must match the Scala param names.
    min = Param(Params._dummy(), "min", "Lower bound of the output feature range",
                typeConverter=TypeConverters.toFloat)
    max = Param(Params._dummy(), "max", "Upper bound of the output feature range",
                typeConverter=TypeConverters.toFloat)
    @keyword_only
    def __init__(self, min=0.0, max=1.0, inputCol=None, outputCol=None):
        """
        __init__(self, min=0.0, max=1.0, inputCol=None, outputCol=None)
        """
        super(MinMaxScaler, self).__init__()
        # Instantiate the backing Scala estimator through the Py4J gateway.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinMaxScaler", self.uid)
        self._setDefault(min=0.0, max=1.0)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.6.0")
    def setParams(self, min=0.0, max=1.0, inputCol=None, outputCol=None):
        """
        setParams(self, min=0.0, max=1.0, inputCol=None, outputCol=None)
        Sets params for this MinMaxScaler.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.6.0")
    def setMin(self, value):
        """
        Sets the value of :py:attr:`min`.
        """
        return self._set(min=value)
    @since("1.6.0")
    def getMin(self):
        """
        Gets the value of min or its default value.
        """
        return self.getOrDefault(self.min)
    @since("1.6.0")
    def setMax(self, value):
        """
        Sets the value of :py:attr:`max`.
        """
        return self._set(max=value)
    @since("1.6.0")
    def getMax(self):
        """
        Gets the value of max or its default value.
        """
        return self.getOrDefault(self.max)
    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return MinMaxScalerModel(java_model)
class MinMaxScalerModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    Model produced by fitting a :py:class:`MinMaxScaler`.
    .. versionadded:: 1.6.0
    """
    @property
    @since("2.0.0")
    def originalMin(self):
        """
        Per-column minimum observed in the training data.
        """
        # Value is computed and stored on the JVM side during fit().
        fitted_min = self._call_java("originalMin")
        return fitted_min
    @property
    @since("2.0.0")
    def originalMax(self):
        """
        Per-column maximum observed in the training data.
        """
        fitted_max = self._call_java("originalMax")
        return fitted_max
@inherit_doc
@ignore_unicode_prefix
class NGram(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    A feature transformer that converts the input array of strings into an array of n-grams. Null
    values in the input array are ignored.
    It returns an array of n-grams where each n-gram is represented by a space-separated string of
    words.
    When the input is empty, an empty array is returned.
    When the input array length is less than n (number of elements per n-gram), no n-grams are
    returned.
    >>> df = spark.createDataFrame([Row(inputTokens=["a", "b", "c", "d", "e"])])
    >>> ngram = NGram(n=2, inputCol="inputTokens", outputCol="nGrams")
    >>> ngram.transform(df).head()
    Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b', u'b c', u'c d', u'd e'])
    >>> # Change n-gram length
    >>> ngram.setParams(n=4).transform(df).head()
    Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b c d', u'b c d e'])
    >>> # Temporarily modify output column.
    >>> ngram.transform(df, {ngram.outputCol: "output"}).head()
    Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], output=[u'a b c d', u'b c d e'])
    >>> ngram.transform(df).head()
    Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b c d', u'b c d e'])
    >>> # Must use keyword arguments to specify params.
    >>> ngram.setParams("text")
    Traceback (most recent call last):
        ...
    TypeError: Method setParams forces keyword arguments.
    >>> ngramPath = temp_path + "/ngram"
    >>> ngram.save(ngramPath)
    >>> loadedNGram = NGram.load(ngramPath)
    >>> loadedNGram.getN() == ngram.getN()
    True
    .. versionadded:: 1.5.0
    """
    # Mirrors the "n" param of the JVM-side org.apache.spark.ml.feature.NGram.
    n = Param(Params._dummy(), "n", "number of elements per n-gram (>=1)",
              typeConverter=TypeConverters.toInt)
    @keyword_only
    def __init__(self, n=2, inputCol=None, outputCol=None):
        """
        __init__(self, n=2, inputCol=None, outputCol=None)
        """
        super(NGram, self).__init__()
        # The actual transformation logic lives in the companion JVM object.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.NGram", self.uid)
        self._setDefault(n=2)
        # @keyword_only captured the caller's keyword arguments in _input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.5.0")
    def setParams(self, n=2, inputCol=None, outputCol=None):
        """
        setParams(self, n=2, inputCol=None, outputCol=None)
        Sets params for this NGram.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.5.0")
    def setN(self, value):
        """
        Sets the value of :py:attr:`n`.
        """
        return self._set(n=value)
    @since("1.5.0")
    def getN(self):
        """
        Gets the value of n or its default value.
        """
        return self.getOrDefault(self.n)
@inherit_doc
class Normalizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    Normalize a vector to have unit norm using the given p-norm.
    >>> from pyspark.ml.linalg import Vectors
    >>> svec = Vectors.sparse(4, {1: 4.0, 3: 3.0})
    >>> df = spark.createDataFrame([(Vectors.dense([3.0, -4.0]), svec)], ["dense", "sparse"])
    >>> normalizer = Normalizer(p=2.0, inputCol="dense", outputCol="features")
    >>> normalizer.transform(df).head().features
    DenseVector([0.6, -0.8])
    >>> normalizer.setParams(inputCol="sparse", outputCol="freqs").transform(df).head().freqs
    SparseVector(4, {1: 0.8, 3: 0.6})
    >>> params = {normalizer.p: 1.0, normalizer.inputCol: "dense", normalizer.outputCol: "vector"}
    >>> normalizer.transform(df, params).head().vector
    DenseVector([0.4286, -0.5714])
    >>> normalizerPath = temp_path + "/normalizer"
    >>> normalizer.save(normalizerPath)
    >>> loadedNormalizer = Normalizer.load(normalizerPath)
    >>> loadedNormalizer.getP() == normalizer.getP()
    True
    .. versionadded:: 1.4.0
    """
    # Mirrors the "p" param of the JVM-side org.apache.spark.ml.feature.Normalizer.
    p = Param(Params._dummy(), "p", "the p norm value.",
              typeConverter=TypeConverters.toFloat)
    @keyword_only
    def __init__(self, p=2.0, inputCol=None, outputCol=None):
        """
        __init__(self, p=2.0, inputCol=None, outputCol=None)
        """
        super(Normalizer, self).__init__()
        # The actual normalization is performed by the companion JVM object.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Normalizer", self.uid)
        self._setDefault(p=2.0)
        # @keyword_only captured the caller's keyword arguments in _input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, p=2.0, inputCol=None, outputCol=None):
        """
        setParams(self, p=2.0, inputCol=None, outputCol=None)
        Sets params for this Normalizer.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.4.0")
    def setP(self, value):
        """
        Sets the value of :py:attr:`p`.
        """
        return self._set(p=value)
    @since("1.4.0")
    def getP(self):
        """
        Gets the value of p or its default value.
        """
        return self.getOrDefault(self.p)
@inherit_doc
class OneHotEncoder(JavaEstimator, HasInputCols, HasOutputCols, HasHandleInvalid,
                    JavaMLReadable, JavaMLWritable):
    """
    A one-hot encoder that maps a column of category indices to a column of binary vectors, with
    at most a single one-value per row that indicates the input category index.
    For example with 5 categories, an input value of 2.0 would map to an output vector of
    `[0.0, 0.0, 1.0, 0.0]`.
    The last category is not included by default (configurable via :py:attr:`dropLast`),
    because it makes the vector entries sum up to one, and hence linearly dependent.
    So an input value of 4.0 maps to `[0.0, 0.0, 0.0, 0.0]`.
    .. note:: This is different from scikit-learn's OneHotEncoder, which keeps all categories.
        The output vectors are sparse.
    When :py:attr:`handleInvalid` is configured to 'keep', an extra "category" indicating invalid
    values is added as last category. So when :py:attr:`dropLast` is true, invalid values are
    encoded as all-zeros vector.
    .. note:: When encoding multi-column by using :py:attr:`inputCols` and
        :py:attr:`outputCols` params, input/output cols come in pairs, specified by the order in
        the arrays, and each pair is treated independently.
    .. seealso:: :py:class:`StringIndexer` for converting categorical values into category indices
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([(0.0,), (1.0,), (2.0,)], ["input"])
    >>> ohe = OneHotEncoder(inputCols=["input"], outputCols=["output"])
    >>> model = ohe.fit(df)
    >>> model.transform(df).head().output
    SparseVector(2, {0: 1.0})
    >>> ohePath = temp_path + "/ohe"
    >>> ohe.save(ohePath)
    >>> loadedOHE = OneHotEncoder.load(ohePath)
    >>> loadedOHE.getInputCols() == ohe.getInputCols()
    True
    >>> modelPath = temp_path + "/ohe-model"
    >>> model.save(modelPath)
    >>> loadedModel = OneHotEncoderModel.load(modelPath)
    >>> loadedModel.categorySizes == model.categorySizes
    True
    .. versionadded:: 2.3.0
    """
    # handleInvalid here shadows the mixin's param so the doc string can describe
    # the encoder-specific behavior (extra category vs. error).
    handleInvalid = Param(Params._dummy(), "handleInvalid", "How to handle invalid data during " +
                          "transform(). Options are 'keep' (invalid data presented as an extra " +
                          "categorical feature) or error (throw an error). Note that this Param " +
                          "is only used during transform; during fitting, invalid data will " +
                          "result in an error.",
                          typeConverter=TypeConverters.toString)
    dropLast = Param(Params._dummy(), "dropLast", "whether to drop the last category",
                     typeConverter=TypeConverters.toBoolean)
    @keyword_only
    def __init__(self, inputCols=None, outputCols=None, handleInvalid="error", dropLast=True):
        """
        __init__(self, inputCols=None, outputCols=None, handleInvalid="error", dropLast=True)
        """
        super(OneHotEncoder, self).__init__()
        # Fitting is delegated to the companion JVM estimator.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.feature.OneHotEncoder", self.uid)
        self._setDefault(handleInvalid="error", dropLast=True)
        # @keyword_only captured the caller's keyword arguments in _input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("2.3.0")
    def setParams(self, inputCols=None, outputCols=None, handleInvalid="error", dropLast=True):
        """
        setParams(self, inputCols=None, outputCols=None, handleInvalid="error", dropLast=True)
        Sets params for this OneHotEncoder.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.3.0")
    def setDropLast(self, value):
        """
        Sets the value of :py:attr:`dropLast`.
        """
        return self._set(dropLast=value)
    @since("2.3.0")
    def getDropLast(self):
        """
        Gets the value of dropLast or its default value.
        """
        return self.getOrDefault(self.dropLast)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return OneHotEncoderModel(java_model)
class OneHotEncoderModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    Model produced by fitting a :py:class:`OneHotEncoder`.
    .. versionadded:: 2.3.0
    """
    @property
    @since("2.3.0")
    def categorySizes(self):
        """
        Original number of categories for each encoded feature,
        one entry per input column, in input order.
        """
        # Computed and stored on the JVM side during fit().
        sizes = self._call_java("categorySizes")
        return sizes
@inherit_doc
class PolynomialExpansion(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable,
                          JavaMLWritable):
    """
    Perform feature expansion in a polynomial space. As said in `wikipedia of Polynomial Expansion
    <http://en.wikipedia.org/wiki/Polynomial_expansion>`_, "In mathematics, an
    expansion of a product of sums expresses it as a sum of products by using the fact that
    multiplication distributes over addition". Take a 2-variable feature vector as an example:
    `(x, y)`, if we want to expand it with degree 2, then we get `(x, x * x, y, x * y, y * y)`.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([(Vectors.dense([0.5, 2.0]),)], ["dense"])
    >>> px = PolynomialExpansion(degree=2, inputCol="dense", outputCol="expanded")
    >>> px.transform(df).head().expanded
    DenseVector([0.5, 0.25, 2.0, 1.0, 4.0])
    >>> px.setParams(outputCol="test").transform(df).head().test
    DenseVector([0.5, 0.25, 2.0, 1.0, 4.0])
    >>> polyExpansionPath = temp_path + "/poly-expansion"
    >>> px.save(polyExpansionPath)
    >>> loadedPx = PolynomialExpansion.load(polyExpansionPath)
    >>> loadedPx.getDegree() == px.getDegree()
    True
    .. versionadded:: 1.4.0
    """
    # Mirrors the "degree" param of the JVM-side PolynomialExpansion.
    degree = Param(Params._dummy(), "degree", "the polynomial degree to expand (>= 1)",
                   typeConverter=TypeConverters.toInt)
    @keyword_only
    def __init__(self, degree=2, inputCol=None, outputCol=None):
        """
        __init__(self, degree=2, inputCol=None, outputCol=None)
        """
        super(PolynomialExpansion, self).__init__()
        # The actual expansion is performed by the companion JVM object.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.feature.PolynomialExpansion", self.uid)
        self._setDefault(degree=2)
        # @keyword_only captured the caller's keyword arguments in _input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, degree=2, inputCol=None, outputCol=None):
        """
        setParams(self, degree=2, inputCol=None, outputCol=None)
        Sets params for this PolynomialExpansion.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.4.0")
    def setDegree(self, value):
        """
        Sets the value of :py:attr:`degree`.
        """
        return self._set(degree=value)
    @since("1.4.0")
    def getDegree(self):
        """
        Gets the value of degree or its default value.
        """
        return self.getOrDefault(self.degree)
@inherit_doc
class QuantileDiscretizer(JavaEstimator, HasInputCol, HasOutputCol, HasHandleInvalid,
                          JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental
    `QuantileDiscretizer` takes a column with continuous features and outputs a column with binned
    categorical features. The number of bins can be set using the :py:attr:`numBuckets` parameter.
    It is possible that the number of buckets used will be less than this value, for example, if
    there are too few distinct values of the input to create enough distinct quantiles.
    NaN handling: Note also that
    QuantileDiscretizer will raise an error when it finds NaN values in the dataset, but the user
    can also choose to either keep or remove NaN values within the dataset by setting
    :py:attr:`handleInvalid` parameter. If the user chooses to keep NaN values, they will be
    handled specially and placed into their own bucket, for example, if 4 buckets are used, then
    non-NaN data will be put into buckets[0-3], but NaNs will be counted in a special bucket[4].
    Algorithm: The bin ranges are chosen using an approximate algorithm (see the documentation for
    :py:meth:`~.DataFrameStatFunctions.approxQuantile` for a detailed description).
    The precision of the approximation can be controlled with the
    :py:attr:`relativeError` parameter.
    The lower and upper bin bounds will be `-Infinity` and `+Infinity`, covering all real values.
    >>> values = [(0.1,), (0.4,), (1.2,), (1.5,), (float("nan"),), (float("nan"),)]
    >>> df = spark.createDataFrame(values, ["values"])
    >>> qds = QuantileDiscretizer(numBuckets=2,
    ...     inputCol="values", outputCol="buckets", relativeError=0.01, handleInvalid="error")
    >>> qds.getRelativeError()
    0.01
    >>> bucketizer = qds.fit(df)
    >>> qds.setHandleInvalid("keep").fit(df).transform(df).count()
    6
    >>> qds.setHandleInvalid("skip").fit(df).transform(df).count()
    4
    >>> splits = bucketizer.getSplits()
    >>> splits[0]
    -inf
    >>> print("%2.1f" % round(splits[1], 1))
    0.4
    >>> bucketed = bucketizer.transform(df).head()
    >>> bucketed.buckets
    0.0
    >>> quantileDiscretizerPath = temp_path + "/quantile-discretizer"
    >>> qds.save(quantileDiscretizerPath)
    >>> loadedQds = QuantileDiscretizer.load(quantileDiscretizerPath)
    >>> loadedQds.getNumBuckets() == qds.getNumBuckets()
    True
    .. versionadded:: 2.0.0
    """
    # Params mirror the JVM-side org.apache.spark.ml.feature.QuantileDiscretizer.
    numBuckets = Param(Params._dummy(), "numBuckets",
                       "Maximum number of buckets (quantiles, or " +
                       "categories) into which data points are grouped. Must be >= 2.",
                       typeConverter=TypeConverters.toInt)
    relativeError = Param(Params._dummy(), "relativeError", "The relative target precision for " +
                          "the approximate quantile algorithm used to generate buckets. " +
                          "Must be in the range [0, 1].",
                          typeConverter=TypeConverters.toFloat)
    # handleInvalid shadows the mixin's param to document the skip/error/keep options.
    handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " +
                          "Options are skip (filter out rows with invalid values), " +
                          "error (throw an error), or keep (keep invalid values in a special " +
                          "additional bucket).",
                          typeConverter=TypeConverters.toString)
    @keyword_only
    def __init__(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001,
                 handleInvalid="error"):
        """
        __init__(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, \
                 handleInvalid="error")
        """
        super(QuantileDiscretizer, self).__init__()
        # Quantile computation is delegated to the companion JVM estimator.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.QuantileDiscretizer",
                                            self.uid)
        self._setDefault(numBuckets=2, relativeError=0.001, handleInvalid="error")
        # @keyword_only captured the caller's keyword arguments in _input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("2.0.0")
    def setParams(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001,
                  handleInvalid="error"):
        """
        setParams(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, \
                  handleInvalid="error")
        Sets params for this QuantileDiscretizer.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.0.0")
    def setNumBuckets(self, value):
        """
        Sets the value of :py:attr:`numBuckets`.
        """
        return self._set(numBuckets=value)
    @since("2.0.0")
    def getNumBuckets(self):
        """
        Gets the value of numBuckets or its default value.
        """
        return self.getOrDefault(self.numBuckets)
    @since("2.0.0")
    def setRelativeError(self, value):
        """
        Sets the value of :py:attr:`relativeError`.
        """
        return self._set(relativeError=value)
    @since("2.0.0")
    def getRelativeError(self):
        """
        Gets the value of relativeError or its default value.
        """
        return self.getOrDefault(self.relativeError)
    def _create_model(self, java_model):
        """
        Private method to convert the java_model to a Python model.
        """
        # Unlike most estimators, fit() yields a Bucketizer built on the Python
        # side from the splits the JVM computed, not a wrapper of java_model.
        return Bucketizer(splits=list(java_model.getSplits()),
                          inputCol=self.getInputCol(),
                          outputCol=self.getOutputCol(),
                          handleInvalid=self.getHandleInvalid())
@inherit_doc
class RobustScaler(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    RobustScaler removes the median and scales the data according to the quantile range.
    The quantile range is by default IQR (Interquartile Range, quantile range between the
    1st quartile = 25th quantile and the 3rd quartile = 75th quantile) but can be configured.
    Centering and scaling happen independently on each feature by computing the relevant
    statistics on the samples in the training set. Median and quantile range are then
    stored to be used on later data using the transform method.
    >>> from pyspark.ml.linalg import Vectors
    >>> data = [(0, Vectors.dense([0.0, 0.0]),),
    ...         (1, Vectors.dense([1.0, -1.0]),),
    ...         (2, Vectors.dense([2.0, -2.0]),),
    ...         (3, Vectors.dense([3.0, -3.0]),),
    ...         (4, Vectors.dense([4.0, -4.0]),),]
    >>> df = spark.createDataFrame(data, ["id", "features"])
    >>> scaler = RobustScaler(inputCol="features", outputCol="scaled")
    >>> model = scaler.fit(df)
    >>> model.median
    DenseVector([2.0, -2.0])
    >>> model.range
    DenseVector([2.0, 2.0])
    >>> model.transform(df).collect()[1].scaled
    DenseVector([0.5, -0.5])
    >>> scalerPath = temp_path + "/robust-scaler"
    >>> scaler.save(scalerPath)
    >>> loadedScaler = RobustScaler.load(scalerPath)
    >>> loadedScaler.getWithCentering() == scaler.getWithCentering()
    True
    >>> loadedScaler.getWithScaling() == scaler.getWithScaling()
    True
    >>> modelPath = temp_path + "/robust-scaler-model"
    >>> model.save(modelPath)
    >>> loadedModel = RobustScalerModel.load(modelPath)
    >>> loadedModel.median == model.median
    True
    >>> loadedModel.range == model.range
    True
    .. versionadded:: 3.0.0
    """
    # Params mirror the JVM-side org.apache.spark.ml.feature.RobustScaler.
    lower = Param(Params._dummy(), "lower", "Lower quantile to calculate quantile range",
                  typeConverter=TypeConverters.toFloat)
    upper = Param(Params._dummy(), "upper", "Upper quantile to calculate quantile range",
                  typeConverter=TypeConverters.toFloat)
    withCentering = Param(Params._dummy(), "withCentering", "Whether to center data with median",
                          typeConverter=TypeConverters.toBoolean)
    withScaling = Param(Params._dummy(), "withScaling", "Whether to scale the data to "
                        "quantile range", typeConverter=TypeConverters.toBoolean)
    @keyword_only
    def __init__(self, lower=0.25, upper=0.75, withCentering=False, withScaling=True,
                 inputCol=None, outputCol=None):
        """
        __init__(self, lower=0.25, upper=0.75, withCentering=False, withScaling=True, \
                 inputCol=None, outputCol=None)
        """
        super(RobustScaler, self).__init__()
        # Fitting is delegated to the companion JVM estimator.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RobustScaler", self.uid)
        self._setDefault(lower=0.25, upper=0.75, withCentering=False, withScaling=True)
        # @keyword_only captured the caller's keyword arguments in _input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("3.0.0")
    def setParams(self, lower=0.25, upper=0.75, withCentering=False, withScaling=True,
                  inputCol=None, outputCol=None):
        """
        setParams(self, lower=0.25, upper=0.75, withCentering=False, withScaling=True, \
                  inputCol=None, outputCol=None)
        Sets params for this RobustScaler.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("3.0.0")
    def setLower(self, value):
        """
        Sets the value of :py:attr:`lower`.
        """
        return self._set(lower=value)
    @since("3.0.0")
    def getLower(self):
        """
        Gets the value of lower or its default value.
        """
        return self.getOrDefault(self.lower)
    @since("3.0.0")
    def setUpper(self, value):
        """
        Sets the value of :py:attr:`upper`.
        """
        return self._set(upper=value)
    @since("3.0.0")
    def getUpper(self):
        """
        Gets the value of upper or its default value.
        """
        return self.getOrDefault(self.upper)
    @since("3.0.0")
    def setWithCentering(self, value):
        """
        Sets the value of :py:attr:`withCentering`.
        """
        return self._set(withCentering=value)
    @since("3.0.0")
    def getWithCentering(self):
        """
        Gets the value of withCentering or its default value.
        """
        return self.getOrDefault(self.withCentering)
    @since("3.0.0")
    def setWithScaling(self, value):
        """
        Sets the value of :py:attr:`withScaling`.
        """
        return self._set(withScaling=value)
    @since("3.0.0")
    def getWithScaling(self):
        """
        Gets the value of withScaling or its default value.
        """
        return self.getOrDefault(self.withScaling)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return RobustScalerModel(java_model)
class RobustScalerModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    Model produced by fitting a :py:class:`RobustScaler`.
    .. versionadded:: 3.0.0
    """
    @property
    @since("3.0.0")
    def median(self):
        """
        Per-feature median computed during fitting.
        """
        # Value is computed and stored on the JVM side during fit().
        fitted_median = self._call_java("median")
        return fitted_median
    @property
    @since("3.0.0")
    def range(self):
        """
        Per-feature quantile range computed during fitting.
        """
        fitted_range = self._call_java("range")
        return fitted_range
@inherit_doc
@ignore_unicode_prefix
class RegexTokenizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    A regex based tokenizer that extracts tokens either by using the
    provided regex pattern (in Java dialect) to split the text
    (default) or repeatedly matching the regex (if gaps is false).
    Optional parameters also allow filtering tokens using a minimal
    length.
    It returns an array of strings that can be empty.
    >>> df = spark.createDataFrame([("A B  c",)], ["text"])
    >>> reTokenizer = RegexTokenizer(inputCol="text", outputCol="words")
    >>> reTokenizer.transform(df).head()
    Row(text=u'A B  c', words=[u'a', u'b', u'c'])
    >>> # Change a parameter.
    >>> reTokenizer.setParams(outputCol="tokens").transform(df).head()
    Row(text=u'A B  c', tokens=[u'a', u'b', u'c'])
    >>> # Temporarily modify a parameter.
    >>> reTokenizer.transform(df, {reTokenizer.outputCol: "words"}).head()
    Row(text=u'A B  c', words=[u'a', u'b', u'c'])
    >>> reTokenizer.transform(df).head()
    Row(text=u'A B  c', tokens=[u'a', u'b', u'c'])
    >>> # Must use keyword arguments to specify params.
    >>> reTokenizer.setParams("text")
    Traceback (most recent call last):
        ...
    TypeError: Method setParams forces keyword arguments.
    >>> regexTokenizerPath = temp_path + "/regex-tokenizer"
    >>> reTokenizer.save(regexTokenizerPath)
    >>> loadedReTokenizer = RegexTokenizer.load(regexTokenizerPath)
    >>> loadedReTokenizer.getMinTokenLength() == reTokenizer.getMinTokenLength()
    True
    >>> loadedReTokenizer.getGaps() == reTokenizer.getGaps()
    True
    .. versionadded:: 1.4.0
    """
    # Params mirror the JVM-side org.apache.spark.ml.feature.RegexTokenizer.
    minTokenLength = Param(Params._dummy(), "minTokenLength", "minimum token length (>= 0)",
                           typeConverter=TypeConverters.toInt)
    # FIX: added typeConverter=TypeConverters.toBoolean for consistency with the
    # other boolean Params in this module (e.g. toLowercase, dropLast); without
    # it, setGaps() would store the raw value without coercing it to bool.
    gaps = Param(Params._dummy(), "gaps", "whether regex splits on gaps (True) or matches tokens " +
                 "(False)", typeConverter=TypeConverters.toBoolean)
    pattern = Param(Params._dummy(), "pattern", "regex pattern (Java dialect) used for tokenizing",
                    typeConverter=TypeConverters.toString)
    toLowercase = Param(Params._dummy(), "toLowercase", "whether to convert all characters to " +
                        "lowercase before tokenizing", typeConverter=TypeConverters.toBoolean)
    @keyword_only
    def __init__(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None,
                 outputCol=None, toLowercase=True):
        """
        __init__(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, \
                 outputCol=None, toLowercase=True)
        """
        super(RegexTokenizer, self).__init__()
        # Tokenization is performed by the companion JVM object.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RegexTokenizer", self.uid)
        self._setDefault(minTokenLength=1, gaps=True, pattern="\\s+", toLowercase=True)
        # @keyword_only captured the caller's keyword arguments in _input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None,
                  outputCol=None, toLowercase=True):
        """
        setParams(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, \
                  outputCol=None, toLowercase=True)
        Sets params for this RegexTokenizer.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.4.0")
    def setMinTokenLength(self, value):
        """
        Sets the value of :py:attr:`minTokenLength`.
        """
        return self._set(minTokenLength=value)
    @since("1.4.0")
    def getMinTokenLength(self):
        """
        Gets the value of minTokenLength or its default value.
        """
        return self.getOrDefault(self.minTokenLength)
    @since("1.4.0")
    def setGaps(self, value):
        """
        Sets the value of :py:attr:`gaps`.
        """
        return self._set(gaps=value)
    @since("1.4.0")
    def getGaps(self):
        """
        Gets the value of gaps or its default value.
        """
        return self.getOrDefault(self.gaps)
    @since("1.4.0")
    def setPattern(self, value):
        """
        Sets the value of :py:attr:`pattern`.
        """
        return self._set(pattern=value)
    @since("1.4.0")
    def getPattern(self):
        """
        Gets the value of pattern or its default value.
        """
        return self.getOrDefault(self.pattern)
    @since("2.0.0")
    def setToLowercase(self, value):
        """
        Sets the value of :py:attr:`toLowercase`.
        """
        return self._set(toLowercase=value)
    @since("2.0.0")
    def getToLowercase(self):
        """
        Gets the value of toLowercase or its default value.
        """
        return self.getOrDefault(self.toLowercase)
@inherit_doc
class SQLTransformer(JavaTransformer, JavaMLReadable, JavaMLWritable):
    """
    Implements the transforms which are defined by SQL statement.
    Currently we only support SQL syntax like 'SELECT ... FROM __THIS__'
    where '__THIS__' represents the underlying table of the input dataset.
    >>> df = spark.createDataFrame([(0, 1.0, 3.0), (2, 2.0, 5.0)], ["id", "v1", "v2"])
    >>> sqlTrans = SQLTransformer(
    ...     statement="SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__")
    >>> sqlTrans.transform(df).head()
    Row(id=0, v1=1.0, v2=3.0, v3=4.0, v4=3.0)
    >>> sqlTransformerPath = temp_path + "/sql-transformer"
    >>> sqlTrans.save(sqlTransformerPath)
    >>> loadedSqlTrans = SQLTransformer.load(sqlTransformerPath)
    >>> loadedSqlTrans.getStatement() == sqlTrans.getStatement()
    True
    .. versionadded:: 1.6.0
    """
    # Mirrors the "statement" param of the JVM-side SQLTransformer.
    statement = Param(Params._dummy(), "statement", "SQL statement",
                      typeConverter=TypeConverters.toString)
    @keyword_only
    def __init__(self, statement=None):
        """
        __init__(self, statement=None)
        """
        super(SQLTransformer, self).__init__()
        # SQL execution is delegated to the companion JVM object.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.SQLTransformer", self.uid)
        # NOTE: no _setDefault here -- "statement" has no default value.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.6.0")
    def setParams(self, statement=None):
        """
        setParams(self, statement=None)
        Sets params for this SQLTransformer.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.6.0")
    def setStatement(self, value):
        """
        Sets the value of :py:attr:`statement`.
        """
        return self._set(statement=value)
    @since("1.6.0")
    def getStatement(self):
        """
        Gets the value of statement or its default value.
        """
        return self.getOrDefault(self.statement)
@inherit_doc
class StandardScaler(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    Standardizes features by removing the mean and scaling to unit variance using column summary
    statistics on the samples in the training set.
    The "unit std" is computed using the `corrected sample standard deviation \
    <https://en.wikipedia.org/wiki/Standard_deviation#Corrected_sample_standard_deviation>`_,
    which is computed as the square root of the unbiased sample variance.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"])
    >>> standardScaler = StandardScaler(inputCol="a", outputCol="scaled")
    >>> model = standardScaler.fit(df)
    >>> model.mean
    DenseVector([1.0])
    >>> model.std
    DenseVector([1.4142])
    >>> model.transform(df).collect()[1].scaled
    DenseVector([1.4142])
    >>> standardScalerPath = temp_path + "/standard-scaler"
    >>> standardScaler.save(standardScalerPath)
    >>> loadedStandardScaler = StandardScaler.load(standardScalerPath)
    >>> loadedStandardScaler.getWithMean() == standardScaler.getWithMean()
    True
    >>> loadedStandardScaler.getWithStd() == standardScaler.getWithStd()
    True
    >>> modelPath = temp_path + "/standard-scaler-model"
    >>> model.save(modelPath)
    >>> loadedModel = StandardScalerModel.load(modelPath)
    >>> loadedModel.std == model.std
    True
    >>> loadedModel.mean == model.mean
    True
    .. versionadded:: 1.4.0
    """
    # Params mirror the JVM-side org.apache.spark.ml.feature.StandardScaler.
    withMean = Param(Params._dummy(), "withMean", "Center data with mean",
                     typeConverter=TypeConverters.toBoolean)
    withStd = Param(Params._dummy(), "withStd", "Scale to unit standard deviation",
                    typeConverter=TypeConverters.toBoolean)
    @keyword_only
    def __init__(self, withMean=False, withStd=True, inputCol=None, outputCol=None):
        """
        __init__(self, withMean=False, withStd=True, inputCol=None, outputCol=None)
        """
        super(StandardScaler, self).__init__()
        # Fitting is delegated to the companion JVM estimator.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StandardScaler", self.uid)
        self._setDefault(withMean=False, withStd=True)
        # @keyword_only captured the caller's keyword arguments in _input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, withMean=False, withStd=True, inputCol=None, outputCol=None):
        """
        setParams(self, withMean=False, withStd=True, inputCol=None, outputCol=None)
        Sets params for this StandardScaler.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("1.4.0")
    def setWithMean(self, value):
        """
        Sets the value of :py:attr:`withMean`.
        """
        return self._set(withMean=value)
    @since("1.4.0")
    def getWithMean(self):
        """
        Gets the value of withMean or its default value.
        """
        return self.getOrDefault(self.withMean)
    @since("1.4.0")
    def setWithStd(self, value):
        """
        Sets the value of :py:attr:`withStd`.
        """
        return self._set(withStd=value)
    @since("1.4.0")
    def getWithStd(self):
        """
        Gets the value of withStd or its default value.
        """
        return self.getOrDefault(self.withStd)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return StandardScalerModel(java_model)
class StandardScalerModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    Model produced by fitting a :py:class:`StandardScaler`.
    .. versionadded:: 1.4.0
    """
    @property
    @since("2.0.0")
    def std(self):
        """
        Per-feature standard deviation computed during fitting.
        """
        # Value is computed and stored on the JVM side during fit().
        fitted_std = self._call_java("std")
        return fitted_std
    @property
    @since("2.0.0")
    def mean(self):
        """
        Per-feature mean computed during fitting.
        """
        fitted_mean = self._call_java("mean")
        return fitted_mean
class _StringIndexerParams(JavaParams, HasHandleInvalid, HasInputCol, HasOutputCol,
                           HasInputCols, HasOutputCols):
    """
    Params for :py:attr:`StringIndexer` and :py:attr:`StringIndexerModel`.
    """
    # Shared param mixin: both the estimator and its model expose these params.
    stringOrderType = Param(Params._dummy(), "stringOrderType",
                            "How to order labels of string column. The first label after " +
                            "ordering is assigned an index of 0. Supported options: " +
                            "frequencyDesc, frequencyAsc, alphabetDesc, alphabetAsc. " +
                            "Default is frequencyDesc. In case of equal frequency when " +
                            "under frequencyDesc/Asc, the strings are further sorted " +
                            "alphabetically",
                            typeConverter=TypeConverters.toString)
    # handleInvalid shadows the mixin's param to document the skip/error/keep options.
    handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid data (unseen " +
                          "or NULL values) in features and label column of string type. " +
                          "Options are 'skip' (filter out rows with invalid data), " +
                          "error (throw an error), or 'keep' (put invalid data " +
                          "in a special additional bucket, at index numLabels).",
                          typeConverter=TypeConverters.toString)
    def __init__(self, *args):
        super(_StringIndexerParams, self).__init__(*args)
        # Defaults match the JVM-side StringIndexer.
        self._setDefault(handleInvalid="error", stringOrderType="frequencyDesc")
    @since("2.3.0")
    def getStringOrderType(self):
        """
        Gets the value of :py:attr:`stringOrderType` or its default value 'frequencyDesc'.
        """
        return self.getOrDefault(self.stringOrderType)
@inherit_doc
class StringIndexer(JavaEstimator, _StringIndexerParams, JavaMLReadable, JavaMLWritable):
    """
    A label indexer that maps a string column of labels to an ML column of label indices.
    If the input column is numeric, we cast it to string and index the string values.
    The indices are in [0, numLabels). By default, this is ordered by label frequencies
    so the most frequent label gets index 0. The ordering behavior is controlled by
    setting :py:attr:`stringOrderType`. Its default value is 'frequencyDesc'.

    >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="error",
    ...     stringOrderType="frequencyDesc")
    >>> model = stringIndexer.fit(stringIndDf)
    >>> td = model.transform(stringIndDf)
    >>> sorted(set([(i[0], i[1]) for i in td.select(td.id, td.indexed).collect()]),
    ...     key=lambda x: x[0])
    [(0, 0.0), (1, 2.0), (2, 1.0), (3, 0.0), (4, 0.0), (5, 1.0)]
    >>> inverter = IndexToString(inputCol="indexed", outputCol="label2", labels=model.labels)
    >>> itd = inverter.transform(td)
    >>> sorted(set([(i[0], str(i[1])) for i in itd.select(itd.id, itd.label2).collect()]),
    ...     key=lambda x: x[0])
    [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'a'), (4, 'a'), (5, 'c')]
    >>> stringIndexerPath = temp_path + "/string-indexer"
    >>> stringIndexer.save(stringIndexerPath)
    >>> loadedIndexer = StringIndexer.load(stringIndexerPath)
    >>> loadedIndexer.getHandleInvalid() == stringIndexer.getHandleInvalid()
    True
    >>> modelPath = temp_path + "/string-indexer-model"
    >>> model.save(modelPath)
    >>> loadedModel = StringIndexerModel.load(modelPath)
    >>> loadedModel.labels == model.labels
    True
    >>> indexToStringPath = temp_path + "/index-to-string"
    >>> inverter.save(indexToStringPath)
    >>> loadedInverter = IndexToString.load(indexToStringPath)
    >>> loadedInverter.getLabels() == inverter.getLabels()
    True
    >>> stringIndexer.getStringOrderType()
    'frequencyDesc'
    >>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="error",
    ...     stringOrderType="alphabetDesc")
    >>> model = stringIndexer.fit(stringIndDf)
    >>> td = model.transform(stringIndDf)
    >>> sorted(set([(i[0], i[1]) for i in td.select(td.id, td.indexed).collect()]),
    ...     key=lambda x: x[0])
    [(0, 2.0), (1, 1.0), (2, 0.0), (3, 2.0), (4, 2.0), (5, 0.0)]
    >>> fromlabelsModel = StringIndexerModel.from_labels(["a", "b", "c"],
    ...     inputCol="label", outputCol="indexed", handleInvalid="error")
    >>> result = fromlabelsModel.transform(stringIndDf)
    >>> sorted(set([(i[0], i[1]) for i in result.select(result.id, result.indexed).collect()]),
    ...     key=lambda x: x[0])
    [(0, 0.0), (1, 1.0), (2, 2.0), (3, 0.0), (4, 0.0), (5, 2.0)]
    >>> testData = sc.parallelize([Row(id=0, label1="a", label2="e"),
    ...                            Row(id=1, label1="b", label2="f"),
    ...                            Row(id=2, label1="c", label2="e"),
    ...                            Row(id=3, label1="a", label2="f"),
    ...                            Row(id=4, label1="a", label2="f"),
    ...                            Row(id=5, label1="c", label2="f")], 3)
    >>> multiRowDf = spark.createDataFrame(testData)
    >>> inputs = ["label1", "label2"]
    >>> outputs = ["index1", "index2"]
    >>> stringIndexer = StringIndexer(inputCols=inputs, outputCols=outputs)
    >>> model = stringIndexer.fit(multiRowDf)
    >>> result = model.transform(multiRowDf)
    >>> sorted(set([(i[0], i[1], i[2]) for i in result.select(result.id, result.index1,
    ...     result.index2).collect()]), key=lambda x: x[0])
    [(0, 0.0, 1.0), (1, 2.0, 0.0), (2, 1.0, 1.0), (3, 0.0, 0.0), (4, 0.0, 0.0), (5, 1.0, 0.0)]
    >>> fromlabelsModel = StringIndexerModel.from_arrays_of_labels([["a", "b", "c"], ["e", "f"]],
    ...     inputCols=inputs, outputCols=outputs)
    >>> result = fromlabelsModel.transform(multiRowDf)
    >>> sorted(set([(i[0], i[1], i[2]) for i in result.select(result.id, result.index1,
    ...     result.index2).collect()]), key=lambda x: x[0])
    [(0, 0.0, 0.0), (1, 1.0, 1.0), (2, 2.0, 0.0), (3, 0.0, 1.0), (4, 0.0, 1.0), (5, 2.0, 1.0)]

    .. versionadded:: 1.4.0
    """

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, inputCols=None, outputCols=None,
                 handleInvalid="error", stringOrderType="frequencyDesc"):
        """
        __init__(self, inputCol=None, outputCol=None, inputCols=None, outputCols=None, \
                 handleInvalid="error", stringOrderType="frequencyDesc")
        """
        super(StringIndexer, self).__init__()
        # Instantiate the JVM-side estimator that backs this wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.feature.StringIndexer", self.uid)
        self.setParams(**self._input_kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, inputCol=None, outputCol=None, inputCols=None, outputCols=None,
                  handleInvalid="error", stringOrderType="frequencyDesc"):
        """
        setParams(self, inputCol=None, outputCol=None, inputCols=None, outputCols=None, \
                  handleInvalid="error", stringOrderType="frequencyDesc")
        Sets params for this StringIndexer.
        """
        # @keyword_only captured the caller's kwargs in _input_kwargs.
        return self._set(**self._input_kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python companion class.
        return StringIndexerModel(java_model)

    @since("2.3.0")
    def setStringOrderType(self, value):
        """
        Sets the value of :py:attr:`stringOrderType`.
        """
        return self._set(stringOrderType=value)
class StringIndexerModel(JavaModel, _StringIndexerParams, JavaMLReadable, JavaMLWritable):
    """
    Model fitted by :py:class:`StringIndexer`.

    .. versionadded:: 1.4.0
    """

    @classmethod
    @since("2.4.0")
    def from_labels(cls, labels, inputCol, outputCol=None, handleInvalid=None):
        """
        Construct the model directly from an array of label strings,
        requires an active SparkContext.
        """
        sc = SparkContext._active_spark_context
        # Copy the Python labels into a java String[] before handing them
        # to the JVM-side constructor.
        jlabels = StringIndexerModel._new_java_array(
            labels, sc._gateway.jvm.java.lang.String)
        model = StringIndexerModel._create_from_java_class(
            "org.apache.spark.ml.feature.StringIndexerModel", jlabels)
        model.setInputCol(inputCol)
        # Optional params are applied only when explicitly supplied.
        if outputCol is not None:
            model.setOutputCol(outputCol)
        if handleInvalid is not None:
            model.setHandleInvalid(handleInvalid)
        return model

    @classmethod
    @since("3.0.0")
    def from_arrays_of_labels(cls, arrayOfLabels, inputCols, outputCols=None,
                              handleInvalid=None):
        """
        Construct the model directly from an array of array of label strings,
        requires an active SparkContext.
        """
        sc = SparkContext._active_spark_context
        # Copy the nested Python labels into a java String[][].
        jlabels = StringIndexerModel._new_java_array(
            arrayOfLabels, sc._gateway.jvm.java.lang.String)
        model = StringIndexerModel._create_from_java_class(
            "org.apache.spark.ml.feature.StringIndexerModel", jlabels)
        model.setInputCols(inputCols)
        # Optional params are applied only when explicitly supplied.
        if outputCols is not None:
            model.setOutputCols(outputCols)
        if handleInvalid is not None:
            model.setHandleInvalid(handleInvalid)
        return model

    @property
    @since("1.5.0")
    def labels(self):
        """
        Ordered list of labels, corresponding to indices to be assigned.
        """
        return self._call_java("labels")

    @since("2.4.0")
    def setHandleInvalid(self, value):
        """
        Sets the value of :py:attr:`handleInvalid`.
        """
        return self._set(handleInvalid=value)
@inherit_doc
class IndexToString(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    A :py:class:`Transformer` that maps a column of indices back to a new column of
    corresponding string values.
    The index-string mapping is either from the ML attributes of the input column,
    or from user-supplied labels (which take precedence over ML attributes).
    See :class:`StringIndexer` for converting strings into indices.

    .. versionadded:: 1.6.0
    """

    # NOTE: the Param doc string is surfaced at runtime via explainParams().
    labels = Param(
        Params._dummy(), "labels",
        "Optional array of labels specifying index-string mapping."
        " If not provided or if empty, then metadata from inputCol is used instead.",
        typeConverter=TypeConverters.toListString)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, labels=None):
        """
        __init__(self, inputCol=None, outputCol=None, labels=None)
        """
        super(IndexToString, self).__init__()
        # Instantiate the JVM-side transformer that backs this wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.feature.IndexToString", self.uid)
        self.setParams(**self._input_kwargs)

    @keyword_only
    @since("1.6.0")
    def setParams(self, inputCol=None, outputCol=None, labels=None):
        """
        setParams(self, inputCol=None, outputCol=None, labels=None)
        Sets params for this IndexToString.
        """
        # @keyword_only captured the caller's kwargs in _input_kwargs.
        return self._set(**self._input_kwargs)

    @since("1.6.0")
    def setLabels(self, value):
        """
        Sets the value of :py:attr:`labels`.
        """
        return self._set(labels=value)

    @since("1.6.0")
    def getLabels(self):
        """
        Gets the value of :py:attr:`labels` or its default value.
        """
        return self.getOrDefault(self.labels)
class StopWordsRemover(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    A feature transformer that filters out stop words from input.

    .. note:: null values from input array are preserved unless adding null to stopWords explicitly.

    >>> df = spark.createDataFrame([(["a", "b", "c"],)], ["text"])
    >>> remover = StopWordsRemover(inputCol="text", outputCol="words", stopWords=["b"])
    >>> remover.transform(df).head().words == ['a', 'c']
    True
    >>> stopWordsRemoverPath = temp_path + "/stopwords-remover"
    >>> remover.save(stopWordsRemoverPath)
    >>> loadedRemover = StopWordsRemover.load(stopWordsRemoverPath)
    >>> loadedRemover.getStopWords() == remover.getStopWords()
    True
    >>> loadedRemover.getCaseSensitive() == remover.getCaseSensitive()
    True

    .. versionadded:: 1.6.0
    """

    stopWords = Param(Params._dummy(), "stopWords", "The words to be filtered out",
                      typeConverter=TypeConverters.toListString)
    caseSensitive = Param(Params._dummy(), "caseSensitive", "whether to do a case sensitive " +
                          "comparison over the stop words", typeConverter=TypeConverters.toBoolean)
    locale = Param(Params._dummy(), "locale", "locale of the input. ignored when case sensitive " +
                   "is true", typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False,
                 locale=None):
        """
        __init__(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False, \
                 locale=None)
        """
        super(StopWordsRemover, self).__init__()
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StopWordsRemover",
                                            self.uid)
        # Defaults require an active JVM: the stop-word list is loaded from the
        # Scala side, and the default locale is whatever the JVM object reports.
        self._setDefault(stopWords=StopWordsRemover.loadDefaultStopWords("english"),
                         caseSensitive=False, locale=self._java_obj.getLocale())
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.6.0")
    def setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False,
                  locale=None):
        """
        setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False, \
                  locale=None)
        Sets params for this StopWordsRemover.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("1.6.0")
    def setStopWords(self, value):
        """
        Sets the value of :py:attr:`stopWords`.
        """
        return self._set(stopWords=value)

    @since("1.6.0")
    def getStopWords(self):
        """
        Gets the value of :py:attr:`stopWords` or its default value.
        """
        return self.getOrDefault(self.stopWords)

    @since("1.6.0")
    def setCaseSensitive(self, value):
        """
        Sets the value of :py:attr:`caseSensitive`.
        """
        return self._set(caseSensitive=value)

    @since("1.6.0")
    def getCaseSensitive(self):
        """
        Gets the value of :py:attr:`caseSensitive` or its default value.
        """
        return self.getOrDefault(self.caseSensitive)

    @since("2.4.0")
    def setLocale(self, value):
        """
        Sets the value of :py:attr:`locale`.
        """
        return self._set(locale=value)

    @since("2.4.0")
    def getLocale(self):
        """
        Gets the value of :py:attr:`locale`.
        """
        return self.getOrDefault(self.locale)

    @staticmethod
    @since("2.0.0")
    def loadDefaultStopWords(language):
        """
        Loads the default stop words for the given language.
        Supported languages: danish, dutch, english, finnish, french, german, hungarian,
        italian, norwegian, portuguese, russian, spanish, swedish, turkish
        """
        # Delegates to the Scala-side companion object; requires an active JVM.
        stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover
        return list(stopWordsObj.loadDefaultStopWords(language))
@inherit_doc
@ignore_unicode_prefix
class Tokenizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    A tokenizer that converts the input string to lowercase and then
    splits it by white spaces.

    >>> df = spark.createDataFrame([("a b c",)], ["text"])
    >>> tokenizer = Tokenizer(inputCol="text", outputCol="words")
    >>> tokenizer.transform(df).head()
    Row(text=u'a b c', words=[u'a', u'b', u'c'])
    >>> # Change a parameter.
    >>> tokenizer.setParams(outputCol="tokens").transform(df).head()
    Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
    >>> # Temporarily modify a parameter.
    >>> tokenizer.transform(df, {tokenizer.outputCol: "words"}).head()
    Row(text=u'a b c', words=[u'a', u'b', u'c'])
    >>> tokenizer.transform(df).head()
    Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
    >>> # Must use keyword arguments to specify params.
    >>> tokenizer.setParams("text")
    Traceback (most recent call last):
        ...
    TypeError: Method setParams forces keyword arguments.
    >>> tokenizerPath = temp_path + "/tokenizer"
    >>> tokenizer.save(tokenizerPath)
    >>> loadedTokenizer = Tokenizer.load(tokenizerPath)
    >>> loadedTokenizer.transform(df).head().tokens == tokenizer.transform(df).head().tokens
    True

    .. versionadded:: 1.3.0
    """

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None):
        """
        __init__(self, inputCol=None, outputCol=None)
        """
        super(Tokenizer, self).__init__()
        # Instantiate the JVM-side transformer that backs this wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.feature.Tokenizer", self.uid)
        self.setParams(**self._input_kwargs)

    @keyword_only
    @since("1.3.0")
    def setParams(self, inputCol=None, outputCol=None):
        """
        setParams(self, inputCol=None, outputCol=None)
        Sets params for this Tokenizer.
        """
        # @keyword_only captured the caller's kwargs in _input_kwargs.
        return self._set(**self._input_kwargs)
@inherit_doc
class VectorAssembler(JavaTransformer, HasInputCols, HasOutputCol, HasHandleInvalid, JavaMLReadable,
                      JavaMLWritable):
    """
    A feature transformer that merges multiple columns into a vector column.

    >>> df = spark.createDataFrame([(1, 0, 3)], ["a", "b", "c"])
    >>> vecAssembler = VectorAssembler(inputCols=["a", "b", "c"], outputCol="features")
    >>> vecAssembler.transform(df).head().features
    DenseVector([1.0, 0.0, 3.0])
    >>> vecAssembler.setParams(outputCol="freqs").transform(df).head().freqs
    DenseVector([1.0, 0.0, 3.0])
    >>> params = {vecAssembler.inputCols: ["b", "a"], vecAssembler.outputCol: "vector"}
    >>> vecAssembler.transform(df, params).head().vector
    DenseVector([0.0, 1.0])
    >>> vectorAssemblerPath = temp_path + "/vector-assembler"
    >>> vecAssembler.save(vectorAssemblerPath)
    >>> loadedAssembler = VectorAssembler.load(vectorAssemblerPath)
    >>> loadedAssembler.transform(df).head().freqs == vecAssembler.transform(df).head().freqs
    True
    >>> dfWithNullsAndNaNs = spark.createDataFrame(
    ...    [(1.0, 2.0, None), (3.0, float("nan"), 4.0), (5.0, 6.0, 7.0)], ["a", "b", "c"])
    >>> vecAssembler2 = VectorAssembler(inputCols=["a", "b", "c"], outputCol="features",
    ...    handleInvalid="keep")
    >>> vecAssembler2.transform(dfWithNullsAndNaNs).show()
    +---+---+----+-------------+
    |  a|  b|   c|     features|
    +---+---+----+-------------+
    |1.0|2.0|null|[1.0,2.0,NaN]|
    |3.0|NaN| 4.0|[3.0,NaN,4.0]|
    |5.0|6.0| 7.0|[5.0,6.0,7.0]|
    +---+---+----+-------------+
    ...
    >>> vecAssembler2.setParams(handleInvalid="skip").transform(dfWithNullsAndNaNs).show()
    +---+---+---+-------------+
    |  a|  b|  c|     features|
    +---+---+---+-------------+
    |5.0|6.0|7.0|[5.0,6.0,7.0]|
    +---+---+---+-------------+
    ...

    .. versionadded:: 1.4.0
    """

    # NOTE: the Param doc string is surfaced at runtime via explainParams().
    handleInvalid = Param(
        Params._dummy(), "handleInvalid",
        "How to handle invalid data (NULL and NaN values). Options are 'skip' (filter "
        "out rows with invalid data), 'error' (throw an error), or 'keep' (return "
        "relevant number of NaN in the output). Column lengths are taken from the "
        "size of ML Attribute Group, which can be set using `VectorSizeHint` in a "
        "pipeline before `VectorAssembler`. Column lengths can also be inferred from "
        "first rows of the data since it is safe to do so but only in case of 'error' "
        "or 'skip').",
        typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, inputCols=None, outputCol=None, handleInvalid="error"):
        """
        __init__(self, inputCols=None, outputCol=None, handleInvalid="error")
        """
        super(VectorAssembler, self).__init__()
        # Instantiate the JVM-side transformer that backs this wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.feature.VectorAssembler", self.uid)
        self._setDefault(handleInvalid="error")
        self.setParams(**self._input_kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, inputCols=None, outputCol=None, handleInvalid="error"):
        """
        setParams(self, inputCols=None, outputCol=None, handleInvalid="error")
        Sets params for this VectorAssembler.
        """
        # @keyword_only captured the caller's kwargs in _input_kwargs.
        return self._set(**self._input_kwargs)
@inherit_doc
class VectorIndexer(JavaEstimator, HasInputCol, HasOutputCol, HasHandleInvalid, JavaMLReadable,
                    JavaMLWritable):
    """
    Class for indexing categorical feature columns in a dataset of `Vector`.

    This has 2 usage modes:
      - Automatically identify categorical features (default behavior)
         - This helps process a dataset of unknown vectors into a dataset with some continuous
           features and some categorical features. The choice between continuous and categorical
           is based upon a maxCategories parameter.
         - Set maxCategories to the maximum number of categorical any categorical feature should
           have.
         - E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}.
           If maxCategories = 2, then feature 0 will be declared categorical and use indices {0, 1},
           and feature 1 will be declared continuous.
      - Index all features, if all features are categorical
         - If maxCategories is set to be very large, then this will build an index of unique
           values for all features.
         - Warning: This can cause problems if features are continuous since this will collect ALL
           unique values to the driver.
         - E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}.
           If maxCategories >= 3, then both features will be declared categorical.

    This returns a model which can transform categorical features to use 0-based indices.

    Index stability:
      - This is not guaranteed to choose the same category index across multiple runs.
      - If a categorical feature includes value 0, then this is guaranteed to map value 0 to
        index 0. This maintains vector sparsity.
      - More stability may be added in the future.

    TODO: Future extensions: The following functionality is planned for the future:
      - Preserve metadata in transform; if a feature's metadata is already present,
        do not recompute.
      - Specify certain features to not index, either via a parameter or via existing metadata.
      - Add warning if a categorical feature has only 1 category.

    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([(Vectors.dense([-1.0, 0.0]),),
    ...     (Vectors.dense([0.0, 1.0]),), (Vectors.dense([0.0, 2.0]),)], ["a"])
    >>> indexer = VectorIndexer(maxCategories=2, inputCol="a", outputCol="indexed")
    >>> model = indexer.fit(df)
    >>> model.transform(df).head().indexed
    DenseVector([1.0, 0.0])
    >>> model.numFeatures
    2
    >>> model.categoryMaps
    {0: {0.0: 0, -1.0: 1}}
    >>> indexer.setParams(outputCol="test").fit(df).transform(df).collect()[1].test
    DenseVector([0.0, 1.0])
    >>> params = {indexer.maxCategories: 3, indexer.outputCol: "vector"}
    >>> model2 = indexer.fit(df, params)
    >>> model2.transform(df).head().vector
    DenseVector([1.0, 0.0])
    >>> vectorIndexerPath = temp_path + "/vector-indexer"
    >>> indexer.save(vectorIndexerPath)
    >>> loadedIndexer = VectorIndexer.load(vectorIndexerPath)
    >>> loadedIndexer.getMaxCategories() == indexer.getMaxCategories()
    True
    >>> modelPath = temp_path + "/vector-indexer-model"
    >>> model.save(modelPath)
    >>> loadedModel = VectorIndexerModel.load(modelPath)
    >>> loadedModel.numFeatures == model.numFeatures
    True
    >>> loadedModel.categoryMaps == model.categoryMaps
    True
    >>> dfWithInvalid = spark.createDataFrame([(Vectors.dense([3.0, 1.0]),)], ["a"])
    >>> indexer.getHandleInvalid()
    'error'
    >>> model3 = indexer.setHandleInvalid("skip").fit(df)
    >>> model3.transform(dfWithInvalid).count()
    0
    >>> model4 = indexer.setParams(handleInvalid="keep", outputCol="indexed").fit(df)
    >>> model4.transform(dfWithInvalid).head().indexed
    DenseVector([2.0, 1.0])

    .. versionadded:: 1.4.0
    """

    # NOTE: the Param doc strings are surfaced at runtime via explainParams().
    maxCategories = Param(
        Params._dummy(), "maxCategories",
        "Threshold for the number of values a categorical feature can take (>= 2). "
        "If a feature is found to have > maxCategories values, then it is declared "
        "continuous.",
        typeConverter=TypeConverters.toInt)

    handleInvalid = Param(
        Params._dummy(), "handleInvalid",
        "How to handle invalid data (unseen labels or NULL values). Options are "
        "'skip' (filter out rows with invalid data), 'error' (throw an error), or "
        "'keep' (put invalid data in a special additional bucket, at index of the "
        "number of categories of the feature).",
        typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error"):
        """
        __init__(self, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error")
        """
        super(VectorIndexer, self).__init__()
        # Instantiate the JVM-side estimator that backs this wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.feature.VectorIndexer", self.uid)
        self._setDefault(maxCategories=20, handleInvalid="error")
        self.setParams(**self._input_kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error"):
        """
        setParams(self, maxCategories=20, inputCol=None, outputCol=None, handleInvalid="error")
        Sets params for this VectorIndexer.
        """
        # @keyword_only captured the caller's kwargs in _input_kwargs.
        return self._set(**self._input_kwargs)

    @since("1.4.0")
    def setMaxCategories(self, value):
        """
        Sets the value of :py:attr:`maxCategories`.
        """
        return self._set(maxCategories=value)

    @since("1.4.0")
    def getMaxCategories(self):
        """
        Gets the value of maxCategories or its default value.
        """
        return self.getOrDefault(self.maxCategories)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python companion class.
        return VectorIndexerModel(java_model)
class VectorIndexerModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    Model fitted by :py:class:`VectorIndexer`.

    Transform categorical features to use 0-based indices instead of their original values.
      - Categorical features are mapped to indices.
      - Continuous features (columns) are left unchanged.

    This also appends metadata to the output column, marking features as Numeric (continuous),
    Nominal (categorical), or Binary (either continuous or categorical).
    Non-ML metadata is not carried over from the input to the output column.

    This maintains vector sparsity.

    .. versionadded:: 1.4.0
    """

    @property
    @since("1.4.0")
    def numFeatures(self):
        """
        Number of features, i.e., length of Vectors which this transforms.
        """
        # Fetched from the fitted JVM model.
        return self._call_java("numFeatures")

    @property
    @since("1.4.0")
    def categoryMaps(self):
        """
        Feature value index.  Keys are categorical feature indices (column indices).
        Values are maps from original features values to 0-based category indices.
        If a feature is not in this map, it is treated as continuous.
        """
        # "javaCategoryMaps" converts the Scala maps into Java ones py4j can marshal.
        return self._call_java("javaCategoryMaps")
@inherit_doc
class VectorSlicer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    This class takes a feature vector and outputs a new feature vector with a subarray
    of the original features.

    The subset of features can be specified with either indices (`setIndices()`)
    or names (`setNames()`).  At least one feature must be selected. Duplicate features
    are not allowed, so there can be no overlap between selected indices and names.

    The output vector will order features with the selected indices first (in the order given),
    followed by the selected names (in the order given).

    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (Vectors.dense([-2.0, 2.3, 0.0, 0.0, 1.0]),),
    ...     (Vectors.dense([0.0, 0.0, 0.0, 0.0, 0.0]),),
    ...     (Vectors.dense([0.6, -1.1, -3.0, 4.5, 3.3]),)], ["features"])
    >>> vs = VectorSlicer(inputCol="features", outputCol="sliced", indices=[1, 4])
    >>> vs.transform(df).head().sliced
    DenseVector([2.3, 1.0])
    >>> vectorSlicerPath = temp_path + "/vector-slicer"
    >>> vs.save(vectorSlicerPath)
    >>> loadedVs = VectorSlicer.load(vectorSlicerPath)
    >>> loadedVs.getIndices() == vs.getIndices()
    True
    >>> loadedVs.getNames() == vs.getNames()
    True

    .. versionadded:: 1.6.0
    """

    indices = Param(Params._dummy(), "indices", "An array of indices to select features from " +
                    "a vector column. There can be no overlap with names.",
                    typeConverter=TypeConverters.toListInt)
    names = Param(Params._dummy(), "names", "An array of feature names to select features from " +
                  "a vector column. These names must be specified by ML " +
                  "org.apache.spark.ml.attribute.Attribute. There can be no overlap with " +
                  "indices.", typeConverter=TypeConverters.toListString)

    @keyword_only
    def __init__(self, inputCol=None, outputCol=None, indices=None, names=None):
        """
        __init__(self, inputCol=None, outputCol=None, indices=None, names=None)
        """
        super(VectorSlicer, self).__init__()
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorSlicer", self.uid)
        # Both selectors default to empty; at least one must be set before transform.
        self._setDefault(indices=[], names=[])
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.6.0")
    def setParams(self, inputCol=None, outputCol=None, indices=None, names=None):
        """
        setParams(self, inputCol=None, outputCol=None, indices=None, names=None)
        Sets params for this VectorSlicer.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("1.6.0")
    def setIndices(self, value):
        """
        Sets the value of :py:attr:`indices`.
        """
        return self._set(indices=value)

    @since("1.6.0")
    def getIndices(self):
        """
        Gets the value of indices or its default value.
        """
        return self.getOrDefault(self.indices)

    @since("1.6.0")
    def setNames(self, value):
        """
        Sets the value of :py:attr:`names`.
        """
        return self._set(names=value)

    @since("1.6.0")
    def getNames(self):
        """
        Gets the value of names or its default value.
        """
        return self.getOrDefault(self.names)
@inherit_doc
@ignore_unicode_prefix
class Word2Vec(JavaEstimator, HasStepSize, HasMaxIter, HasSeed, HasInputCol, HasOutputCol,
               JavaMLReadable, JavaMLWritable):
    """
    Word2Vec trains a model of `Map(String, Vector)`, i.e. transforms a word into a code for further
    natural language processing or machine learning process.

    >>> sent = ("a b " * 100 + "a c " * 10).split(" ")
    >>> doc = spark.createDataFrame([(sent,), (sent,)], ["sentence"])
    >>> word2Vec = Word2Vec(vectorSize=5, seed=42, inputCol="sentence", outputCol="model")
    >>> model = word2Vec.fit(doc)
    >>> model.getVectors().show()
    +----+--------------------+
    |word|              vector|
    +----+--------------------+
    |   a|[0.09511678665876...|
    |   b|[-1.2028766870498...|
    |   c|[0.30153277516365...|
    +----+--------------------+
    ...
    >>> model.findSynonymsArray("a", 2)
    [(u'b', 0.015859870240092278), (u'c', -0.5680795907974243)]
    >>> from pyspark.sql.functions import format_number as fmt
    >>> model.findSynonyms("a", 2).select("word", fmt("similarity", 5).alias("similarity")).show()
    +----+----------+
    |word|similarity|
    +----+----------+
    |   b|   0.01586|
    |   c|  -0.56808|
    +----+----------+
    ...
    >>> model.transform(doc).head().model
    DenseVector([-0.4833, 0.1855, -0.273, -0.0509, -0.4769])
    >>> word2vecPath = temp_path + "/word2vec"
    >>> word2Vec.save(word2vecPath)
    >>> loadedWord2Vec = Word2Vec.load(word2vecPath)
    >>> loadedWord2Vec.getVectorSize() == word2Vec.getVectorSize()
    True
    >>> loadedWord2Vec.getNumPartitions() == word2Vec.getNumPartitions()
    True
    >>> loadedWord2Vec.getMinCount() == word2Vec.getMinCount()
    True
    >>> modelPath = temp_path + "/word2vec-model"
    >>> model.save(modelPath)
    >>> loadedModel = Word2VecModel.load(modelPath)
    >>> loadedModel.getVectors().first().word == model.getVectors().first().word
    True
    >>> loadedModel.getVectors().first().vector == model.getVectors().first().vector
    True

    .. versionadded:: 1.4.0
    """

    vectorSize = Param(Params._dummy(), "vectorSize",
                       "the dimension of codes after transforming from words",
                       typeConverter=TypeConverters.toInt)
    numPartitions = Param(Params._dummy(), "numPartitions",
                          "number of partitions for sentences of words",
                          typeConverter=TypeConverters.toInt)
    minCount = Param(Params._dummy(), "minCount",
                     "the minimum number of times a token must appear to be included in the " +
                     "word2vec model's vocabulary", typeConverter=TypeConverters.toInt)
    windowSize = Param(Params._dummy(), "windowSize",
                       "the window size (context words from [-window, window]). Default value is 5",
                       typeConverter=TypeConverters.toInt)
    maxSentenceLength = Param(Params._dummy(), "maxSentenceLength",
                              "Maximum length (in words) of each sentence in the input data. " +
                              "Any sentence longer than this threshold will " +
                              "be divided into chunks up to the size.",
                              typeConverter=TypeConverters.toInt)

    @keyword_only
    def __init__(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
                 seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000):
        """
        __init__(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, \
                 seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000)
        """
        super(Word2Vec, self).__init__()
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Word2Vec", self.uid)
        self._setDefault(vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
                         windowSize=5, maxSentenceLength=1000)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
                  seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000):
        """
        setParams(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, \
                  seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000)
        Sets params for this Word2Vec.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("1.4.0")
    def setVectorSize(self, value):
        """
        Sets the value of :py:attr:`vectorSize`.
        """
        return self._set(vectorSize=value)

    @since("1.4.0")
    def getVectorSize(self):
        """
        Gets the value of vectorSize or its default value.
        """
        return self.getOrDefault(self.vectorSize)

    @since("1.4.0")
    def setNumPartitions(self, value):
        """
        Sets the value of :py:attr:`numPartitions`.
        """
        return self._set(numPartitions=value)

    @since("1.4.0")
    def getNumPartitions(self):
        """
        Gets the value of numPartitions or its default value.
        """
        return self.getOrDefault(self.numPartitions)

    @since("1.4.0")
    def setMinCount(self, value):
        """
        Sets the value of :py:attr:`minCount`.
        """
        return self._set(minCount=value)

    @since("1.4.0")
    def getMinCount(self):
        """
        Gets the value of minCount or its default value.
        """
        return self.getOrDefault(self.minCount)

    @since("2.0.0")
    def setWindowSize(self, value):
        """
        Sets the value of :py:attr:`windowSize`.
        """
        return self._set(windowSize=value)

    @since("2.0.0")
    def getWindowSize(self):
        """
        Gets the value of windowSize or its default value.
        """
        return self.getOrDefault(self.windowSize)

    @since("2.0.0")
    def setMaxSentenceLength(self, value):
        """
        Sets the value of :py:attr:`maxSentenceLength`.
        """
        return self._set(maxSentenceLength=value)

    @since("2.0.0")
    def getMaxSentenceLength(self):
        """
        Gets the value of maxSentenceLength or its default value.
        """
        return self.getOrDefault(self.maxSentenceLength)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python companion class.
        return Word2VecModel(java_model)
class Word2VecModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    Model fitted by :py:class:`Word2Vec`.

    .. versionadded:: 1.4.0
    """

    @since("1.5.0")
    def getVectors(self):
        """
        Returns the vector representation of the words as a dataframe
        with two fields, word and vector.
        """
        return self._call_java("getVectors")

    @since("1.5.0")
    def findSynonyms(self, word, num):
        """
        Find "num" number of words closest in similarity to "word".
        word can be a string or vector representation.
        Returns a dataframe with two fields word and similarity (which
        gives the cosine similarity).
        """
        # Non-string queries are interpreted as points in the embedding space.
        query = word if isinstance(word, basestring) else _convert_to_vector(word)
        return self._call_java("findSynonyms", query, num)

    @since("2.3.0")
    def findSynonymsArray(self, word, num):
        """
        Find "num" number of words closest in similarity to "word".
        word can be a string or vector representation.
        Returns an array with two fields word and similarity (which
        gives the cosine similarity).
        """
        # Non-string queries are interpreted as points in the embedding space.
        query = word if isinstance(word, basestring) else _convert_to_vector(word)
        # The JVM side returns Scala Tuple2 objects; unpack each into a
        # plain (word, similarity) Python tuple.
        pairs = self._java_obj.findSynonymsArray(query, num)
        return [(pair._1(), pair._2()) for pair in pairs]
@inherit_doc
class PCA(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
    """
    PCA trains a model to project vectors to a lower dimensional space of the
    top :py:attr:`k` principal components.

    >>> from pyspark.ml.linalg import Vectors
    >>> data = [(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),),
    ...     (Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),),
    ...     (Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),)]
    >>> df = spark.createDataFrame(data,["features"])
    >>> pca = PCA(k=2, inputCol="features", outputCol="pca_features")
    >>> model = pca.fit(df)
    >>> model.transform(df).collect()[0].pca_features
    DenseVector([1.648..., -4.013...])
    >>> model.explainedVariance
    DenseVector([0.794..., 0.205...])
    >>> pcaPath = temp_path + "/pca"
    >>> pca.save(pcaPath)
    >>> loadedPca = PCA.load(pcaPath)
    >>> loadedPca.getK() == pca.getK()
    True
    >>> modelPath = temp_path + "/pca-model"
    >>> model.save(modelPath)
    >>> loadedModel = PCAModel.load(modelPath)
    >>> loadedModel.pc == model.pc
    True
    >>> loadedModel.explainedVariance == model.explainedVariance
    True

    .. versionadded:: 1.5.0
    """

    # NOTE: the Param doc string is surfaced at runtime via explainParams().
    k = Param(Params._dummy(), "k", "the number of principal components",
              typeConverter=TypeConverters.toInt)

    @keyword_only
    def __init__(self, k=None, inputCol=None, outputCol=None):
        """
        __init__(self, k=None, inputCol=None, outputCol=None)
        """
        super(PCA, self).__init__()
        # Instantiate the JVM-side estimator that backs this wrapper.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.PCA", self.uid)
        self.setParams(**self._input_kwargs)

    @keyword_only
    @since("1.5.0")
    def setParams(self, k=None, inputCol=None, outputCol=None):
        """
        setParams(self, k=None, inputCol=None, outputCol=None)
        Set params for this PCA.
        """
        # @keyword_only captured the caller's kwargs in _input_kwargs.
        return self._set(**self._input_kwargs)

    @since("1.5.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)

    @since("1.5.0")
    def getK(self):
        """
        Gets the value of k or its default value.
        """
        return self.getOrDefault(self.k)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python companion class.
        return PCAModel(java_model)
class PCAModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    Model fitted by :py:class:`PCA`. Transforms vectors to a lower dimensional space.

    .. versionadded:: 1.5.0
    """

    @property
    @since("2.0.0")
    def pc(self):
        """
        Returns a principal components Matrix.
        Each column is one principal component.
        """
        components = self._call_java("pc")
        return components

    @property
    @since("2.0.0")
    def explainedVariance(self):
        """
        Returns a vector of proportions of variance
        explained by each principal component.
        """
        variance = self._call_java("explainedVariance")
        return variance
@inherit_doc
class RFormula(JavaEstimator, HasFeaturesCol, HasLabelCol, HasHandleInvalid,
               JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental

    Implements the transforms required for fitting a dataset against an
    R model formula. Currently we support a limited subset of the R
    operators, including '~', '.', ':', '+', '-', '*', and '^'.
    Also see the `R formula docs
    <http://stat.ethz.ch/R-manual/R-patched/library/stats/html/formula.html>`_.

    >>> df = spark.createDataFrame([
    ...     (1.0, 1.0, "a"),
    ...     (0.0, 2.0, "b"),
    ...     (0.0, 0.0, "a")
    ... ], ["y", "x", "s"])
    >>> rf = RFormula(formula="y ~ x + s")
    >>> model = rf.fit(df)
    >>> model.transform(df).show()
    +---+---+---+---------+-----+
    |  y|  x|  s| features|label|
    +---+---+---+---------+-----+
    |1.0|1.0|  a|[1.0,1.0]|  1.0|
    |0.0|2.0|  b|[2.0,0.0]|  0.0|
    |0.0|0.0|  a|[0.0,1.0]|  0.0|
    +---+---+---+---------+-----+
    ...
    >>> rf.fit(df, {rf.formula: "y ~ . - s"}).transform(df).show()
    +---+---+---+--------+-----+
    |  y|  x|  s|features|label|
    +---+---+---+--------+-----+
    |1.0|1.0|  a|   [1.0]|  1.0|
    |0.0|2.0|  b|   [2.0]|  0.0|
    |0.0|0.0|  a|   [0.0]|  0.0|
    +---+---+---+--------+-----+
    ...
    >>> rFormulaPath = temp_path + "/rFormula"
    >>> rf.save(rFormulaPath)
    >>> loadedRF = RFormula.load(rFormulaPath)
    >>> loadedRF.getFormula() == rf.getFormula()
    True
    >>> loadedRF.getFeaturesCol() == rf.getFeaturesCol()
    True
    >>> loadedRF.getLabelCol() == rf.getLabelCol()
    True
    >>> loadedRF.getHandleInvalid() == rf.getHandleInvalid()
    True
    >>> str(loadedRF)
    'RFormula(y ~ x + s) (uid=...)'
    >>> modelPath = temp_path + "/rFormulaModel"
    >>> model.save(modelPath)
    >>> loadedModel = RFormulaModel.load(modelPath)
    >>> loadedModel.uid == model.uid
    True
    >>> loadedModel.transform(df).show()
    +---+---+---+---------+-----+
    |  y|  x|  s| features|label|
    +---+---+---+---------+-----+
    |1.0|1.0|  a|[1.0,1.0]|  1.0|
    |0.0|2.0|  b|[2.0,0.0]|  0.0|
    |0.0|0.0|  a|[0.0,1.0]|  0.0|
    +---+---+---+---------+-----+
    ...
    >>> str(loadedModel)
    'RFormulaModel(ResolvedRFormula(label=y, terms=[x,s], hasIntercept=true)) (uid=...)'

    .. versionadded:: 1.5.0
    """

    # The R-style formula string, e.g. "y ~ x + s".
    formula = Param(Params._dummy(), "formula", "R model formula",
                    typeConverter=TypeConverters.toString)

    forceIndexLabel = Param(Params._dummy(), "forceIndexLabel",
                            "Force to index label whether it is numeric or string",
                            typeConverter=TypeConverters.toBoolean)

    stringIndexerOrderType = Param(Params._dummy(), "stringIndexerOrderType",
                                   "How to order categories of a string feature column used by " +
                                   "StringIndexer. The last category after ordering is dropped " +
                                   "when encoding strings. Supported options: frequencyDesc, " +
                                   "frequencyAsc, alphabetDesc, alphabetAsc. The default value " +
                                   "is frequencyDesc. When the ordering is set to alphabetDesc, " +
                                   "RFormula drops the same category as R when encoding strings.",
                                   typeConverter=TypeConverters.toString)

    # Shadows HasHandleInvalid.handleInvalid with RFormula-specific doc text.
    handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " +
                          "Options are 'skip' (filter out rows with invalid values), " +
                          "'error' (throw an error), or 'keep' (put invalid data in a special " +
                          "additional bucket, at index numLabels).",
                          typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, formula=None, featuresCol="features", labelCol="label",
                 forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
                 handleInvalid="error"):
        """
        __init__(self, formula=None, featuresCol="features", labelCol="label", \
                 forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", \
                 handleInvalid="error")
        """
        super(RFormula, self).__init__()
        # Instantiate the JVM-side estimator that this wrapper delegates to.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RFormula", self.uid)
        self._setDefault(forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
                         handleInvalid="error")
        # _input_kwargs contains only the kwargs explicitly supplied by the
        # caller (populated by @keyword_only).
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.5.0")
    def setParams(self, formula=None, featuresCol="features", labelCol="label",
                  forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
                  handleInvalid="error"):
        """
        setParams(self, formula=None, featuresCol="features", labelCol="label", \
                  forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", \
                  handleInvalid="error")
        Sets params for RFormula.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("1.5.0")
    def setFormula(self, value):
        """
        Sets the value of :py:attr:`formula`.
        """
        return self._set(formula=value)

    @since("1.5.0")
    def getFormula(self):
        """
        Gets the value of :py:attr:`formula`.
        """
        return self.getOrDefault(self.formula)

    @since("2.1.0")
    def setForceIndexLabel(self, value):
        """
        Sets the value of :py:attr:`forceIndexLabel`.
        """
        return self._set(forceIndexLabel=value)

    @since("2.1.0")
    def getForceIndexLabel(self):
        """
        Gets the value of :py:attr:`forceIndexLabel`.
        """
        return self.getOrDefault(self.forceIndexLabel)

    @since("2.3.0")
    def setStringIndexerOrderType(self, value):
        """
        Sets the value of :py:attr:`stringIndexerOrderType`.
        """
        return self._set(stringIndexerOrderType=value)

    @since("2.3.0")
    def getStringIndexerOrderType(self):
        """
        Gets the value of :py:attr:`stringIndexerOrderType` or its default value 'frequencyDesc'.
        """
        return self.getOrDefault(self.stringIndexerOrderType)

    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return RFormulaModel(java_model)

    def __str__(self):
        # The doctest above relies on this exact format.
        formulaStr = self.getFormula() if self.isDefined(self.formula) else ""
        return "RFormula(%s) (uid=%s)" % (formulaStr, self.uid)
class RFormulaModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental

    Model fitted by :py:class:`RFormula`. Fitting is required to determine the
    factor levels of formula terms.

    .. versionadded:: 1.5.0
    """

    def __str__(self):
        resolved = self._call_java("resolvedFormula")
        return "RFormulaModel(%s) (uid=%s)" % (resolved, self.uid)
@inherit_doc
class ChiSqSelector(JavaEstimator, HasFeaturesCol, HasOutputCol, HasLabelCol, JavaMLReadable,
                    JavaMLWritable):
    """
    .. note:: Experimental

    Chi-Squared feature selection, which selects categorical features to use for predicting a
    categorical label.
    The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`,
    `fdr`, `fwe`.

    * `numTopFeatures` chooses a fixed number of top features according to a chi-squared test.

    * `percentile` is similar but chooses a fraction of all features
      instead of a fixed number.

    * `fpr` chooses all features whose p-values are below a threshold,
      thus controlling the false positive rate of selection.

    * `fdr` uses the `Benjamini-Hochberg procedure <https://en.wikipedia.org/wiki/
      False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_
      to choose all features whose false discovery rate is below a threshold.

    * `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by
      1/numFeatures, thus controlling the family-wise error rate of selection.

    By default, the selection method is `numTopFeatures`, with the default number of top features
    set to 50.

    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame(
    ...    [(Vectors.dense([0.0, 0.0, 18.0, 1.0]), 1.0),
    ...     (Vectors.dense([0.0, 1.0, 12.0, 0.0]), 0.0),
    ...     (Vectors.dense([1.0, 0.0, 15.0, 0.1]), 0.0)],
    ...    ["features", "label"])
    >>> selector = ChiSqSelector(numTopFeatures=1, outputCol="selectedFeatures")
    >>> model = selector.fit(df)
    >>> model.transform(df).head().selectedFeatures
    DenseVector([18.0])
    >>> model.selectedFeatures
    [2]
    >>> chiSqSelectorPath = temp_path + "/chi-sq-selector"
    >>> selector.save(chiSqSelectorPath)
    >>> loadedSelector = ChiSqSelector.load(chiSqSelectorPath)
    >>> loadedSelector.getNumTopFeatures() == selector.getNumTopFeatures()
    True
    >>> modelPath = temp_path + "/chi-sq-selector-model"
    >>> model.save(modelPath)
    >>> loadedModel = ChiSqSelectorModel.load(modelPath)
    >>> loadedModel.selectedFeatures == model.selectedFeatures
    True

    .. versionadded:: 2.0.0
    """

    # Selection strategy; see the class docstring for the meaning of each option.
    selectorType = Param(Params._dummy(), "selectorType",
                         "The selector type of the ChisqSelector. " +
                         "Supported options: numTopFeatures (default), percentile, fpr, fdr, fwe.",
                         typeConverter=TypeConverters.toInt if False else TypeConverters.toString)

    numTopFeatures = \
        Param(Params._dummy(), "numTopFeatures",
              "Number of features that selector will select, ordered by ascending p-value. " +
              "If the number of features is < numTopFeatures, then this will select " +
              "all features.", typeConverter=TypeConverters.toInt)

    percentile = Param(Params._dummy(), "percentile", "Percentile of features that selector " +
                       "will select, ordered by ascending p-value.",
                       typeConverter=TypeConverters.toFloat)

    fpr = Param(Params._dummy(), "fpr", "The highest p-value for features to be kept.",
                typeConverter=TypeConverters.toFloat)

    fdr = Param(Params._dummy(), "fdr", "The upper bound of the expected false discovery rate.",
                typeConverter=TypeConverters.toFloat)

    fwe = Param(Params._dummy(), "fwe", "The upper bound of the expected family-wise error rate.",
                typeConverter=TypeConverters.toFloat)

    @keyword_only
    def __init__(self, numTopFeatures=50, featuresCol="features", outputCol=None,
                 labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
                 fdr=0.05, fwe=0.05):
        """
        __init__(self, numTopFeatures=50, featuresCol="features", outputCol=None, \
                 labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \
                 fdr=0.05, fwe=0.05)
        """
        super(ChiSqSelector, self).__init__()
        # Instantiate the JVM-side estimator that this wrapper delegates to.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ChiSqSelector", self.uid)
        self._setDefault(numTopFeatures=50, selectorType="numTopFeatures", percentile=0.1,
                         fpr=0.05, fdr=0.05, fwe=0.05)
        # Only explicitly supplied kwargs are forwarded (see @keyword_only).
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.0.0")
    def setParams(self, numTopFeatures=50, featuresCol="features", outputCol=None,
                  labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
                  fdr=0.05, fwe=0.05):
        """
        setParams(self, numTopFeatures=50, featuresCol="features", outputCol=None, \
                  labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \
                  fdr=0.05, fwe=0.05)
        Sets params for this ChiSqSelector.
        """
        # FIX: the default for ``labelCol`` previously read "labels", which was
        # inconsistent with ``__init__`` (and the documented default "label").
        # The signature default is never applied anyway — @keyword_only stores
        # only explicitly passed kwargs in _input_kwargs — so correcting the
        # typo cannot change runtime behavior.
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.1.0")
    def setSelectorType(self, value):
        """
        Sets the value of :py:attr:`selectorType`.
        """
        return self._set(selectorType=value)

    @since("2.1.0")
    def getSelectorType(self):
        """
        Gets the value of selectorType or its default value.
        """
        return self.getOrDefault(self.selectorType)

    @since("2.0.0")
    def setNumTopFeatures(self, value):
        """
        Sets the value of :py:attr:`numTopFeatures`.
        Only applicable when selectorType = "numTopFeatures".
        """
        return self._set(numTopFeatures=value)

    @since("2.0.0")
    def getNumTopFeatures(self):
        """
        Gets the value of numTopFeatures or its default value.
        """
        return self.getOrDefault(self.numTopFeatures)

    @since("2.1.0")
    def setPercentile(self, value):
        """
        Sets the value of :py:attr:`percentile`.
        Only applicable when selectorType = "percentile".
        """
        return self._set(percentile=value)

    @since("2.1.0")
    def getPercentile(self):
        """
        Gets the value of percentile or its default value.
        """
        return self.getOrDefault(self.percentile)

    @since("2.1.0")
    def setFpr(self, value):
        """
        Sets the value of :py:attr:`fpr`.
        Only applicable when selectorType = "fpr".
        """
        return self._set(fpr=value)

    @since("2.1.0")
    def getFpr(self):
        """
        Gets the value of fpr or its default value.
        """
        return self.getOrDefault(self.fpr)

    @since("2.2.0")
    def setFdr(self, value):
        """
        Sets the value of :py:attr:`fdr`.
        Only applicable when selectorType = "fdr".
        """
        return self._set(fdr=value)

    @since("2.2.0")
    def getFdr(self):
        """
        Gets the value of fdr or its default value.
        """
        return self.getOrDefault(self.fdr)

    @since("2.2.0")
    def setFwe(self, value):
        """
        Sets the value of :py:attr:`fwe`.
        Only applicable when selectorType = "fwe".
        """
        return self._set(fwe=value)

    @since("2.2.0")
    def getFwe(self):
        """
        Gets the value of fwe or its default value.
        """
        return self.getOrDefault(self.fwe)

    def _create_model(self, java_model):
        # Called by JavaEstimator.fit() to wrap the fitted JVM model.
        return ChiSqSelectorModel(java_model)
class ChiSqSelectorModel(JavaModel, JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental

    Model fitted by :py:class:`ChiSqSelector`.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def selectedFeatures(self):
        """
        List of indices to select (filter).
        """
        indices = self._call_java("selectedFeatures")
        return indices
@inherit_doc
class VectorSizeHint(JavaTransformer, HasInputCol, HasHandleInvalid, JavaMLReadable,
                     JavaMLWritable):
    """
    .. note:: Experimental

    A feature transformer that adds size information to the metadata of a vector column.
    VectorAssembler needs size information for its input columns and cannot be used on streaming
    dataframes without this metadata.

    .. note:: VectorSizeHint modifies `inputCol` to include size metadata and does not have an
        outputCol.

    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml import Pipeline, PipelineModel
    >>> data = [(Vectors.dense([1., 2., 3.]), 4.)]
    >>> df = spark.createDataFrame(data, ["vector", "float"])
    >>>
    >>> sizeHint = VectorSizeHint(inputCol="vector", size=3, handleInvalid="skip")
    >>> vecAssembler = VectorAssembler(inputCols=["vector", "float"], outputCol="assembled")
    >>> pipeline = Pipeline(stages=[sizeHint, vecAssembler])
    >>>
    >>> pipelineModel = pipeline.fit(df)
    >>> pipelineModel.transform(df).head().assembled
    DenseVector([1.0, 2.0, 3.0, 4.0])
    >>> vectorSizeHintPath = temp_path + "/vector-size-hint-pipeline"
    >>> pipelineModel.save(vectorSizeHintPath)
    >>> loadedPipeline = PipelineModel.load(vectorSizeHintPath)
    >>> loaded = loadedPipeline.transform(df).head().assembled
    >>> expected = pipelineModel.transform(df).head().assembled
    >>> loaded == expected
    True

    .. versionadded:: 2.3.0
    """

    # Expected length of the vectors in ``inputCol``.
    size = Param(Params._dummy(), "size", "Size of vectors in column.",
                 typeConverter=TypeConverters.toInt)

    # Shadows HasHandleInvalid.handleInvalid with size-hint-specific doc text.
    # CONSISTENCY FIX: pass the converter via the ``typeConverter`` keyword like
    # every other Param in this module (it was previously passed positionally,
    # which binds to the same parameter but is easy to misread).
    handleInvalid = Param(Params._dummy(), "handleInvalid",
                          "How to handle invalid vectors in inputCol. Invalid vectors include "
                          "nulls and vectors with the wrong size. The options are `skip` (filter "
                          "out rows with invalid vectors), `error` (throw an error) and "
                          "`optimistic` (do not check the vector size, and keep all rows). "
                          "`error` by default.",
                          typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, inputCol=None, size=None, handleInvalid="error"):
        """
        __init__(self, inputCol=None, size=None, handleInvalid="error")
        """
        super(VectorSizeHint, self).__init__()
        # Instantiate the JVM-side transformer that this wrapper delegates to.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorSizeHint", self.uid)
        self._setDefault(handleInvalid="error")
        # Forward only explicitly supplied kwargs, matching the convention used
        # by the other feature transformers in this module.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.3.0")
    def setParams(self, inputCol=None, size=None, handleInvalid="error"):
        """
        setParams(self, inputCol=None, size=None, handleInvalid="error")
        Sets params for this VectorSizeHint.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    @since("2.3.0")
    def getSize(self):
        """ Gets size param, the size of vectors in `inputCol`."""
        return self.getOrDefault(self.size)

    @since("2.3.0")
    def setSize(self, value):
        """ Sets size param, the size of vectors in `inputCol`."""
        return self._set(size=value)
if __name__ == "__main__":
import doctest
import tempfile
import pyspark.ml.feature
from pyspark.sql import Row, SparkSession
globs = globals().copy()
features = pyspark.ml.feature.__dict__.copy()
globs.update(features)
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.feature tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
testData = sc.parallelize([Row(id=0, label="a"), Row(id=1, label="b"),
Row(id=2, label="c"), Row(id=3, label="a"),
Row(id=4, label="a"), Row(id=5, label="c")], 2)
globs['stringIndDf'] = spark.createDataFrame(testData)
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| apache-2.0 |
mjgrav2001/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
# Names exported by ``from sklearn.feature_extraction.text import *``.
__all__ = ['CountVectorizer',
           'ENGLISH_STOP_WORDS',
           'TfidfTransformer',
           'TfidfVectorizer',
           'strip_accents_ascii',
           'strip_accents_unicode',
           'strip_tags']
def strip_accents_unicode(s):
    """Strip accents from a unicode string, keeping the base characters.

    Warning: the python-level loop and join operations make this
    implementation 20 times slower than the strip_accents_ascii basic
    normalization.

    See also
    --------
    strip_accents_ascii
        Remove accentuated char for any unicode symbol that has a direct
        ASCII equivalent.
    """
    # NFKD splits each accented character into its base character followed by
    # one or more combining marks; dropping the combining marks strips accents.
    decomposed = unicodedata.normalize('NFKD', s)
    return ''.join([char for char in decomposed
                    if not unicodedata.combining(char)])
def strip_accents_ascii(s):
    """Strip accents by decomposing and dropping all non-ASCII characters.

    Warning: this solution is only suited for languages that have a direct
    transliteration to ASCII symbols.

    See also
    --------
    strip_accents_unicode
        Remove accentuated char for any unicode symbol.
    """
    decomposed = unicodedata.normalize('NFKD', s)
    # Round-tripping through an ASCII encode with 'ignore' discards the
    # combining marks (and any character with no ASCII equivalent).
    return decomposed.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
    """Basic regexp based HTML / XML tag stripper function

    For serious HTML/XML preprocessing you should rather use an external
    library such as lxml or BeautifulSoup.
    """
    # Replace each <...> tag with a single space so adjacent words do not
    # get glued together.
    return re.sub(r"<([^>]+)>", " ", s, flags=re.UNICODE)
def _check_stop_list(stop):
    """Resolve the ``stop_words`` parameter into an actual stop-word set."""
    if stop == "english":
        return ENGLISH_STOP_WORDS
    if isinstance(stop, six.string_types):
        raise ValueError("not a built-in stop list: %s" % stop)
    if stop is None:
        return None
    # Anything else is assumed to be an iterable of stop words.
    return frozenset(stop)
class VectorizerMixin(object):
    """Provides common code for text vectorizers (tokenization logic)."""

    # Matches runs of two or more whitespace characters; used to collapse
    # whitespace before character n-gram extraction.
    _white_spaces = re.compile(r"\s\s+")

    def decode(self, doc):
        """Decode the input into a string of unicode symbols

        The decoding strategy depends on the vectorizer parameters:
        ``input`` selects whether ``doc`` is a path, a file-like object or
        the content itself; ``encoding``/``decode_error`` control how bytes
        are turned into unicode.
        """
        if self.input == 'filename':
            with open(doc, 'rb') as fh:
                doc = fh.read()

        elif self.input == 'file':
            doc = fh.read()

        if isinstance(doc, bytes):
            doc = doc.decode(self.encoding, self.decode_error)

        # np.nan commonly leaks in from pandas columns; reject it explicitly
        # instead of failing later with a confusing tokenization error.
        if doc is np.nan:
            raise ValueError("np.nan is an invalid document, expected byte or "
                             "unicode string.")

        return doc

    def _word_ngrams(self, tokens, stop_words=None):
        """Turn tokens into a sequence of n-grams after stop words filtering"""
        # handle stop words
        if stop_words is not None:
            tokens = [w for w in tokens if w not in stop_words]

        # handle token n-grams
        min_n, max_n = self.ngram_range
        if max_n != 1:
            # Emit every contiguous run of n tokens (joined by spaces) for each
            # n in the configured range; when max_n == 1 the filtered unigram
            # list is returned as-is.
            original_tokens = tokens
            tokens = []
            n_original_tokens = len(original_tokens)
            for n in xrange(min_n,
                            min(max_n + 1, n_original_tokens + 1)):
                for i in xrange(n_original_tokens - n + 1):
                    tokens.append(" ".join(original_tokens[i: i + n]))

        return tokens

    def _char_ngrams(self, text_document):
        """Tokenize text_document into a sequence of character n-grams"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)

        text_len = len(text_document)
        ngrams = []
        min_n, max_n = self.ngram_range
        # Sliding window over the whole document, spaces included.
        for n in xrange(min_n, min(max_n + 1, text_len + 1)):
            for i in xrange(text_len - n + 1):
                ngrams.append(text_document[i: i + n])
        return ngrams

    def _char_wb_ngrams(self, text_document):
        """Whitespace sensitive char-n-gram tokenization.

        Tokenize text_document into a sequence of character n-grams
        excluding any whitespace (operating only inside word boundaries)"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)

        min_n, max_n = self.ngram_range
        ngrams = []
        for w in text_document.split():
            # Pad each word with spaces so n-grams can mark word boundaries.
            w = ' ' + w + ' '
            w_len = len(w)
            for n in xrange(min_n, max_n + 1):
                offset = 0
                ngrams.append(w[offset:offset + n])
                while offset + n < w_len:
                    offset += 1
                    ngrams.append(w[offset:offset + n])
                if offset == 0:   # count a short word (w_len < n) only once
                    break
        return ngrams

    def build_preprocessor(self):
        """Return a function to preprocess the text before tokenization"""
        if self.preprocessor is not None:
            return self.preprocessor

        # unfortunately python functools package does not have an efficient
        # `compose` function that would have allowed us to chain a dynamic
        # number of functions. However the cost of a lambda call is a few
        # hundreds of nanoseconds which is negligible when compared to the
        # cost of tokenizing a string of 1000 chars for instance.
        noop = lambda x: x

        # accent stripping
        if not self.strip_accents:
            strip_accents = noop
        elif callable(self.strip_accents):
            strip_accents = self.strip_accents
        elif self.strip_accents == 'ascii':
            strip_accents = strip_accents_ascii
        elif self.strip_accents == 'unicode':
            strip_accents = strip_accents_unicode
        else:
            raise ValueError('Invalid value for "strip_accents": %s' %
                             self.strip_accents)

        if self.lowercase:
            return lambda x: strip_accents(x.lower())
        else:
            return strip_accents

    def build_tokenizer(self):
        """Return a function that splits a string into a sequence of tokens"""
        if self.tokenizer is not None:
            return self.tokenizer
        # Compile once here so the per-document calls only pay for findall.
        token_pattern = re.compile(self.token_pattern)
        return lambda doc: token_pattern.findall(doc)

    def get_stop_words(self):
        """Build or fetch the effective stop words list"""
        return _check_stop_list(self.stop_words)

    def build_analyzer(self):
        """Return a callable that handles preprocessing and tokenization"""
        if callable(self.analyzer):
            return self.analyzer

        preprocess = self.build_preprocessor()

        if self.analyzer == 'char':
            return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))

        elif self.analyzer == 'char_wb':
            return lambda doc: self._char_wb_ngrams(
                preprocess(self.decode(doc)))

        elif self.analyzer == 'word':
            stop_words = self.get_stop_words()
            tokenize = self.build_tokenizer()

            return lambda doc: self._word_ngrams(
                tokenize(preprocess(self.decode(doc))), stop_words)

        else:
            raise ValueError('%s is not a valid tokenization scheme/analyzer' %
                             self.analyzer)

    def _validate_vocabulary(self):
        # Normalizes a user-supplied vocabulary (mapping or iterable of terms)
        # into ``self.vocabulary_`` and records whether it was user-fixed.
        vocabulary = self.vocabulary
        if vocabulary is not None:
            if not isinstance(vocabulary, Mapping):
                vocab = {}
                for i, t in enumerate(vocabulary):
                    if vocab.setdefault(t, i) != i:
                        msg = "Duplicate term in vocabulary: %r" % t
                        raise ValueError(msg)
                vocabulary = vocab
            else:
                # A mapping must assign exactly the indices 0..len-1.
                indices = set(six.itervalues(vocabulary))
                if len(indices) != len(vocabulary):
                    raise ValueError("Vocabulary contains repeated indices.")
                for i in xrange(len(vocabulary)):
                    if i not in indices:
                        msg = ("Vocabulary of size %d doesn't contain index "
                               "%d." % (len(vocabulary), i))
                        raise ValueError(msg)
            if not vocabulary:
                raise ValueError("empty vocabulary passed to fit")
            self.fixed_vocabulary_ = True
            self.vocabulary_ = dict(vocabulary)
        else:
            self.fixed_vocabulary_ = False

    def _check_vocabulary(self):
        """Check if vocabulary is empty or missing (not fit-ed)"""
        msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg),

        if len(self.vocabulary_) == 0:
            raise ValueError("Vocabulary is empty")

    @property
    @deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
                "removed in 0.18. Please use `fixed_vocabulary_` instead.")
    def fixed_vocabulary(self):
        # Backwards-compatible alias kept only for the deprecation cycle.
        return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
    def fit(self, X, y=None):
        """Does nothing: this transformer is stateless."""
        # Constructing the hasher validates the n_features/dtype/non_negative
        # parameters without storing any fitted state.
        self._get_hasher().fit(X, y=y)
        return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
    def _get_hasher(self):
        # Build a FeatureHasher configured from the constructor parameters;
        # hashing token strings straight to column indices is what makes
        # this vectorizer stateless (no vocabulary is stored).
        return FeatureHasher(n_features=self.n_features,
                             input_type='string', dtype=self.dtype,
                             non_negative=self.non_negative)
def _document_frequency(X):
    """Count the number of non-zero values for each feature in sparse X."""
    if sp.isspmatrix_csr(X):
        # CSR layout: each stored entry's column index contributes one
        # document to that feature's count.
        return bincount(X.indices, minlength=X.shape[1])
    # Any other layout: in CSC form the successive differences of the
    # column pointer equal the per-column non-zero counts.
    return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
    """Convert a collection of text documents to a matrix of token counts
    This implementation produces a sparse representation of the counts using
    scipy.sparse.coo_matrix.
    If you do not provide an a-priori dictionary and you do not use an analyzer
    that does some kind of feature selection then the number of features will
    be equal to the vocabulary size found by analyzing the data.
    Read more in the :ref:`User Guide <text_feature_extraction>`.
    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.
        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be the sequence strings or
        bytes items are expected to be analyzed directly.
    encoding : string, 'utf-8' by default.
        If bytes or files are given to analyze, this encoding is used to
        decode.
    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.
    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        an direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.
    analyzer : string, {'word', 'char', 'char_wb'} or callable
        Whether the feature should be made of word or character n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries.
        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.
        Only applies if ``analyzer == 'word'``.
    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.
    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.
    ngram_range : tuple (min_n, max_n)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.
    stop_words : string {'english'}, list, or None (default)
        If 'english', a built-in stop word list for English is used.
        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.
        If None, no stop words will be used. max_df can be set to a value
        in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra corpus document frequency of terms.
    lowercase : boolean, True by default
        Convert all characters to lowercase before tokenizing.
    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).
    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.
    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold. This value is also
        called cut-off in the literature.
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.
    max_features : int or None, default=None
        If not None, build a vocabulary that only consider the top
        max_features ordered by term frequency across the corpus.
        This parameter is ignored if vocabulary is not None.
    vocabulary : Mapping or iterable, optional
        Either a Mapping (e.g., a dict) where keys are terms and values are
        indices in the feature matrix, or an iterable over terms. If not
        given, a vocabulary is determined from the input documents. Indices
        in the mapping should not be repeated and should not have any gap
        between 0 and the largest index.
    binary : boolean, default=False
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.
    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().
    Attributes
    ----------
    vocabulary_ : dict
        A mapping of terms to feature indices.
    stop_words_ : set
        Terms that were ignored because they either:
          - occurred in too many documents (`max_df`)
          - occurred in too few documents (`min_df`)
          - were cut off by feature selection (`max_features`).
        This is only available if no vocabulary was given.
    See also
    --------
    HashingVectorizer, TfidfVectorizer
    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model size
    when pickling. This attribute is provided only for introspection and can
    be safely removed using delattr or set to None before pickling.
    """

    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None,
                 lowercase=True, preprocessor=None, tokenizer=None,
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), analyzer='word',
                 max_df=1.0, min_df=1, max_features=None,
                 vocabulary=None, binary=False, dtype=np.int64):
        self.input = input
        self.encoding = encoding
        self.decode_error = decode_error
        self.strip_accents = strip_accents
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.token_pattern = token_pattern
        self.stop_words = stop_words
        self.max_df = max_df
        self.min_df = min_df
        # NOTE: validating in __init__ deviates from the usual scikit-learn
        # convention of validating in fit; kept here for backward
        # compatibility with existing behavior.
        if max_df < 0 or min_df < 0:
            raise ValueError("negative value for max_df or min_df")
        self.max_features = max_features
        if max_features is not None:
            if (not isinstance(max_features, numbers.Integral) or
                    max_features <= 0):
                raise ValueError(
                    "max_features=%r, neither a positive integer nor None"
                    % max_features)
        self.ngram_range = ngram_range
        self.vocabulary = vocabulary
        self.binary = binary
        self.dtype = dtype

    def _sort_features(self, X, vocabulary):
        """Sort features by name
        Returns a reordered matrix and modifies the vocabulary in place
        """
        sorted_features = sorted(six.iteritems(vocabulary))
        # map_index[new column] = old column, so X[:, map_index] applies the
        # same permutation to the matrix that we apply to the vocabulary.
        map_index = np.empty(len(sorted_features), dtype=np.int32)
        for new_val, (term, old_val) in enumerate(sorted_features):
            map_index[new_val] = old_val
            vocabulary[term] = new_val
        return X[:, map_index]

    def _limit_features(self, X, vocabulary, high=None, low=None,
                        limit=None):
        """Remove too rare or too common features.
        Prune features that are non zero in more samples than high or less
        documents than low, modifying the vocabulary, and restricting it to
        at most the limit most frequent.
        This does not prune samples with zero features.
        """
        if high is None and low is None and limit is None:
            return X, set()
        # Calculate a mask based on document frequencies
        dfs = _document_frequency(X)
        tfs = np.asarray(X.sum(axis=0)).ravel()
        mask = np.ones(len(dfs), dtype=bool)
        if high is not None:
            mask &= dfs <= high
        if low is not None:
            mask &= dfs >= low
        if limit is not None and mask.sum() > limit:
            # Keep only the `limit` terms with the highest total counts
            # among those that survived the df thresholds.
            mask_inds = (-tfs[mask]).argsort()[:limit]
            new_mask = np.zeros(len(dfs), dtype=bool)
            new_mask[np.where(mask)[0][mask_inds]] = True
            mask = new_mask
        new_indices = np.cumsum(mask) - 1  # maps old indices to new
        removed_terms = set()
        for term, old_index in list(six.iteritems(vocabulary)):
            if mask[old_index]:
                vocabulary[term] = new_indices[old_index]
            else:
                del vocabulary[term]
                removed_terms.add(term)
        kept_indices = np.where(mask)[0]
        if len(kept_indices) == 0:
            raise ValueError("After pruning, no terms remain. Try a lower"
                             " min_df or a higher max_df.")
        return X[:, kept_indices], removed_terms

    def _count_vocab(self, raw_documents, fixed_vocab):
        """Create sparse feature matrix, and vocabulary where fixed_vocab=False
        """
        if fixed_vocab:
            vocabulary = self.vocabulary_
        else:
            # Add a new value when a new vocabulary item is seen: a
            # defaultdict whose factory is its own __len__ assigns
            # consecutive indices 0, 1, 2, ... on first access.
            vocabulary = defaultdict()
            vocabulary.default_factory = vocabulary.__len__
        analyze = self.build_analyzer()
        j_indices = _make_int_array()
        indptr = _make_int_array()
        indptr.append(0)
        for doc in raw_documents:
            for feature in analyze(doc):
                try:
                    j_indices.append(vocabulary[feature])
                except KeyError:
                    # Ignore out-of-vocabulary items for fixed_vocab=True
                    # (the defaultdict path never raises KeyError).
                    continue
            indptr.append(len(j_indices))
        if not fixed_vocab:
            # disable defaultdict behaviour
            vocabulary = dict(vocabulary)
            if not vocabulary:
                raise ValueError("empty vocabulary; perhaps the documents only"
                                 " contain stop words")
        j_indices = frombuffer_empty(j_indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        values = np.ones(len(j_indices))
        X = sp.csr_matrix((values, j_indices, indptr),
                          shape=(len(indptr) - 1, len(vocabulary)),
                          dtype=self.dtype)
        # Collapse repeated (row, column) entries into single counts.
        X.sum_duplicates()
        return vocabulary, X

    def fit(self, raw_documents, y=None):
        """Learn a vocabulary dictionary of all tokens in the raw documents.
        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.
        Returns
        -------
        self
        """
        self.fit_transform(raw_documents)
        return self

    def fit_transform(self, raw_documents, y=None):
        """Learn the vocabulary dictionary and return term-document matrix.
        This is equivalent to fit followed by transform, but more efficiently
        implemented.
        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.
        Returns
        -------
        X : array, [n_samples, n_features]
            Document-term matrix.
        """
        # We intentionally don't call the transform method to make
        # fit_transform overridable without unwanted side effects in
        # TfidfVectorizer.
        self._validate_vocabulary()
        max_df = self.max_df
        min_df = self.min_df
        max_features = self.max_features
        vocabulary, X = self._count_vocab(raw_documents,
                                          self.fixed_vocabulary_)
        if self.binary:
            X.data.fill(1)
        if not self.fixed_vocabulary_:
            X = self._sort_features(X, vocabulary)
            n_doc = X.shape[0]
            # Float thresholds are proportions of the corpus; integers are
            # absolute document counts.
            max_doc_count = (max_df
                             if isinstance(max_df, numbers.Integral)
                             else max_df * n_doc)
            min_doc_count = (min_df
                             if isinstance(min_df, numbers.Integral)
                             else min_df * n_doc)
            if max_doc_count < min_doc_count:
                raise ValueError(
                    "max_df corresponds to < documents than min_df")
            X, self.stop_words_ = self._limit_features(X, vocabulary,
                                                       max_doc_count,
                                                       min_doc_count,
                                                       max_features)
            self.vocabulary_ = vocabulary
        return X

    def transform(self, raw_documents):
        """Transform documents to document-term matrix.
        Extract token counts out of raw text documents using the vocabulary
        fitted with fit or the one provided to the constructor.
        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.
        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Document-term matrix.
        """
        if not hasattr(self, 'vocabulary_'):
            self._validate_vocabulary()
        self._check_vocabulary()
        # use the same matrix-building strategy as fit_transform
        _, X = self._count_vocab(raw_documents, fixed_vocab=True)
        if self.binary:
            X.data.fill(1)
        return X

    def inverse_transform(self, X):
        """Return terms per document with nonzero entries in X.
        Parameters
        ----------
        X : {array, sparse matrix}, shape = [n_samples, n_features]
        Returns
        -------
        X_inv : list of arrays, len = n_samples
            List of arrays of terms.
        """
        self._check_vocabulary()
        if sp.issparse(X):
            # We need CSR format for fast row manipulations.
            X = X.tocsr()
        else:
            # We need to convert X to a matrix, so that the indexing
            # returns 2D objects
            X = np.asmatrix(X)
        n_samples = X.shape[0]
        terms = np.array(list(self.vocabulary_.keys()))
        indices = np.array(list(self.vocabulary_.values()))
        # inverse_vocabulary[i] is the term mapped to feature index i.
        inverse_vocabulary = terms[np.argsort(indices)]
        return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
                for i in range(n_samples)]

    def get_feature_names(self):
        """Array mapping from feature integer indices to feature name"""
        self._check_vocabulary()
        return [t for t, i in sorted(six.iteritems(self.vocabulary_),
                                     key=itemgetter(1))]
def _make_int_array():
    """Construct an array.array of a type suitable for scipy.sparse indices."""
    # str() keeps the typecode a native str on both Python 2 and Python 3.
    typecode = str("i")
    return array.array(typecode)
class TfidfTransformer(BaseEstimator, TransformerMixin):
    """Transform a count matrix to a normalized tf or tf-idf representation
    Tf means term-frequency while tf-idf means term-frequency times inverse
    document-frequency. This is a common term weighting scheme in information
    retrieval, that has also found good use in document classification.
    The goal of using tf-idf instead of the raw frequencies of occurrence of a
    token in a given document is to scale down the impact of tokens that occur
    very frequently in a given corpus and that are hence empirically less
    informative than features that occur in a small fraction of the training
    corpus.
    The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
    instead of tf * idf. The effect of this is that terms with zero idf, i.e.
    that occur in all documents of a training set, will not be entirely
    ignored. The formulas used to compute tf and idf depend on parameter
    settings that correspond to the SMART notation used in IR, as follows:
    Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
    Idf is "t" when use_idf is given, "n" (none) otherwise.
    Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
    Read more in the :ref:`User Guide <text_feature_extraction>`.
    Parameters
    ----------
    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.
    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.
    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.
    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
    References
    ----------
    .. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
                   Information Retrieval. Addison Wesley, pp. 68-74.`
    .. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze  (2008).
                   Introduction to Information Retrieval. Cambridge University
                   Press, pp. 118-120.`
    """

    def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False):
        self.norm = norm
        self.use_idf = use_idf
        self.smooth_idf = smooth_idf
        self.sublinear_tf = sublinear_tf

    def fit(self, X, y=None):
        """Learn the idf vector (global term weights)
        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts
        """
        if not sp.issparse(X):
            X = sp.csc_matrix(X)
        if self.use_idf:
            n_samples, n_features = X.shape
            df = _document_frequency(X)
            # perform idf smoothing if required
            df += int(self.smooth_idf)
            n_samples += int(self.smooth_idf)
            # log+1 instead of log makes sure terms with zero idf don't get
            # suppressed entirely.
            idf = np.log(float(n_samples) / df) + 1.0
            # Store idf as a diagonal matrix so that X * _idf_diag scales
            # every column of X by its idf weight.
            self._idf_diag = sp.spdiags(idf,
                                        diags=0, m=n_features, n=n_features)
        return self

    def transform(self, X, copy=True):
        """Transform a count matrix to a tf or tf-idf representation
        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts
        copy : boolean, default True
            Whether to copy X and operate on the copy or perform in-place
            operations.
        Returns
        -------
        vectors : sparse matrix, [n_samples, n_features]
        """
        # FIX: ``np.float`` is a deprecated alias of the builtin ``float``
        # removed in NumPy 1.24; ``np.floating`` performs the equivalent
        # "is a float family dtype" check.
        if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.floating):
            # preserve float family dtype
            X = sp.csr_matrix(X, copy=copy)
        else:
            # convert counts or binary occurrences to floats
            X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
        n_samples, n_features = X.shape
        if self.sublinear_tf:
            # tf -> 1 + log(tf), computed in place on the non-zero entries.
            np.log(X.data, X.data)
            X.data += 1
        if self.use_idf:
            check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
            expected_n_features = self._idf_diag.shape[0]
            if n_features != expected_n_features:
                raise ValueError("Input has n_features=%d while the model"
                                 " has been trained with n_features=%d" % (
                                     n_features, expected_n_features))
            # *= doesn't work
            X = X * self._idf_diag
        if self.norm:
            X = normalize(X, norm=self.norm, copy=False)
        return X

    @property
    def idf_(self):
        # Each column of the diagonal matrix holds a single idf weight, so
        # summing over rows recovers the flat idf vector.
        if hasattr(self, "_idf_diag"):
            return np.ravel(self._idf_diag.sum(axis=0))
        else:
            return None
class TfidfVectorizer(CountVectorizer):
    """Convert a collection of raw documents to a matrix of TF-IDF features.
    Equivalent to CountVectorizer followed by TfidfTransformer.
    Read more in the :ref:`User Guide <text_feature_extraction>`.
    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.
        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be the sequence strings or
        bytes items are expected to be analyzed directly.
    encoding : string, 'utf-8' by default.
        If bytes or files are given to analyze, this encoding is used to
        decode.
    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.
    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        an direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.
    analyzer : string, {'word', 'char'} or callable
        Whether the feature should be made of word or character n-grams.
        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.
    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.
    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.
    ngram_range : tuple (min_n, max_n)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.
    stop_words : string {'english'}, list, or None (default)
        If a string, it is passed to _check_stop_list and the appropriate stop
        list is returned. 'english' is currently the only supported string
        value.
        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.
        If None, no stop words will be used. max_df can be set to a value
        in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra corpus document frequency of terms.
    lowercase : boolean, default True
        Convert all characters to lowercase before tokenizing.
    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).
    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.
    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold. This value is also
        called cut-off in the literature.
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.
    max_features : int or None, default=None
        If not None, build a vocabulary that only consider the top
        max_features ordered by term frequency across the corpus.
        This parameter is ignored if vocabulary is not None.
    vocabulary : Mapping or iterable, optional
        Either a Mapping (e.g., a dict) where keys are terms and values are
        indices in the feature matrix, or an iterable over terms. If not
        given, a vocabulary is determined from the input documents.
    binary : boolean, default=False
        If True, all non-zero term counts are set to 1. This does not mean
        outputs will have only 0/1 values, only that the tf term in tf-idf
        is binary. (Set idf and normalization to False to get 0/1 outputs.)
    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().
    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.
    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.
    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.
    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
    Attributes
    ----------
    idf_ : array, shape = [n_features], or None
        The learned idf vector (global term weights)
        when ``use_idf`` is set to True, None otherwise.
    stop_words_ : set
        Terms that were ignored because they either:
          - occurred in too many documents (`max_df`)
          - occurred in too few documents (`min_df`)
          - were cut off by feature selection (`max_features`).
        This is only available if no vocabulary was given.
    See also
    --------
    CountVectorizer
        Tokenize the documents and count the occurrences of token and return
        them as a sparse matrix
    TfidfTransformer
        Apply Term Frequency Inverse Document Frequency normalization to a
        sparse matrix of occurrence counts.
    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model size
    when pickling. This attribute is provided only for introspection and can
    be safely removed using delattr or set to None before pickling.
    """

    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None, lowercase=True,
                 preprocessor=None, tokenizer=None, analyzer='word',
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), max_df=1.0, min_df=1,
                 max_features=None, vocabulary=None, binary=False,
                 dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False):
        # Vectorization parameters are handled by the CountVectorizer base;
        # the TF-IDF weighting parameters live on an owned TfidfTransformer.
        super(TfidfVectorizer, self).__init__(
            input=input, encoding=encoding, decode_error=decode_error,
            strip_accents=strip_accents, lowercase=lowercase,
            preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
            stop_words=stop_words, token_pattern=token_pattern,
            ngram_range=ngram_range, max_df=max_df, min_df=min_df,
            max_features=max_features, vocabulary=vocabulary, binary=binary,
            dtype=dtype)
        self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
                                       smooth_idf=smooth_idf,
                                       sublinear_tf=sublinear_tf)

    # Broadcast the TF-IDF parameters to the underlying transformer instance
    # for easy grid search and repr
    @property
    def norm(self):
        return self._tfidf.norm

    @norm.setter
    def norm(self, value):
        self._tfidf.norm = value

    @property
    def use_idf(self):
        return self._tfidf.use_idf

    @use_idf.setter
    def use_idf(self, value):
        self._tfidf.use_idf = value

    @property
    def smooth_idf(self):
        return self._tfidf.smooth_idf

    @smooth_idf.setter
    def smooth_idf(self, value):
        self._tfidf.smooth_idf = value

    @property
    def sublinear_tf(self):
        return self._tfidf.sublinear_tf

    @sublinear_tf.setter
    def sublinear_tf(self, value):
        self._tfidf.sublinear_tf = value

    @property
    def idf_(self):
        return self._tfidf.idf_

    def fit(self, raw_documents, y=None):
        """Learn vocabulary and idf from training set.
        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects
        Returns
        -------
        self : TfidfVectorizer
        """
        # Count first (building the vocabulary), then learn idf weights from
        # the resulting count matrix.
        X = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(X)
        return self

    def fit_transform(self, raw_documents, y=None):
        """Learn vocabulary and idf, return term-document matrix.
        This is equivalent to fit followed by transform, but more efficiently
        implemented.
        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects
        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Tf-idf-weighted document-term matrix.
        """
        X = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(X)
        # X is already a transformed view of raw_documents so
        # we set copy to False
        return self._tfidf.transform(X, copy=False)

    def transform(self, raw_documents, copy=True):
        """Transform documents to document-term matrix.
        Uses the vocabulary and document frequencies (df) learned by fit (or
        fit_transform).
        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects
        copy : boolean, default True
            Whether to copy X and operate on the copy or perform in-place
            operations.
        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Tf-idf-weighted document-term matrix.
        """
        check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
        X = super(TfidfVectorizer, self).transform(raw_documents)
        # NOTE(review): the `copy` parameter is accepted but not forwarded;
        # copy=False below is safe because X is freshly built by the super
        # call, but the signature suggests `copy` should be honored — confirm
        # intent against upstream.
        return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
SunguckLee/MariaDB | storage/tokudb/mysql-test/tokudb/t/fast_update_blobs.py | 73 | 1798 | #!/usr/bin/env python
import sys
import random
import string
def main():
    # Emit (on stdout) a mysqltest script that creates tables with every
    # combination of three blob-ish column types, exercises TokuDB "fast
    # update" statements on them, and diffs the results against InnoDB.
    print "# generated by tokudb_fast_update_blobs.py"
    print "source include/have_tokudb.inc;"
    print "source include/have_innodb.inc;"
    print "set default_storage_engine='tokudb';"
    print "disable_warnings;"
    print "drop table if exists t;"
    print "enable_warnings;"
    nrows = 10
    blob_types = [ 'tinyblob', 'blob', 'mediumblob', 'longblob', 'text' ]
    # One generated test per (type a, type b, type c, NULL-ability) combo:
    # 5 * 5 * 5 * 2 = 250 table scenarios.
    for a in blob_types:
        for b in blob_types:
            for c in blob_types:
                for n in [ 'null', 'not null' ]:
                    test_blobs([ a, b, c ] , n, nrows)
    return 0
def test_blobs(cols, n, nrows):
    # Print one test scenario: build a TokuDB table `tt` with the given blob
    # column types and NULL-ability `n`, clone it as InnoDB table `ti`, run
    # identical random updates against both, then diff them.
    print "create table tt (id bigint unsigned primary key,"
    # print "    f0 int %s," % (n)
    for i in range(len(cols)):
        # Last column declaration must omit the trailing comma.
        if i < len(cols)-1:
            print "    b%d %s %s," % (i, cols[i], n)
        else:
            print "    b%d %s %s" % (i, cols[i], n)
    print ") engine=tokudb;"
    for id in range(1,nrows):
        # Nullable columns start as NULL; NOT NULL columns start as ''.
        if n == 'null':
            print "insert into tt (id) values (%d);" % (id)
        else:
            print "insert into tt values (%d,'','','');" % (id)
    print "create table ti like tt;"
    print "alter table ti engine=innodb;"
    print "insert into ti select * from tt;"
    for id in range(1,nrows):
        for i in range(3):
            # Same random value applied to both engines so they must agree.
            long_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(random.randint(1,32)))
            print "update noar tt set b%d='%s' where id=%d;" % (i, long_str, id)
            print "update noar ti set b%d='%s' where id=%d;" % (i, long_str, id)
    print "let $diff_tables = test.tt, test.ti;"
    print "source include/diff_tables.inc;"
    print "drop table tt, ti;"
# Run the generator and propagate its exit status to the shell.
sys.exit(main())
| gpl-2.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.py | 487 | 5540 | from __future__ import absolute_import, division
import time
import os
# Python 2/3 compatibility: Python 3 has no separate `unicode` type,
# so alias it to `str` when the name is missing.
try:
    unicode
except NameError:
    unicode = str
from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
class SQLiteLockFile(LockBase):
    "Demonstrate SQL-based locking."

    # Path of the shared on-disk SQLite database holding the `locks` table;
    # lazily created by the first instance and removed at interpreter exit.
    testdb = None

    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = SQLiteLockFile('somefile')
        >>> lock = SQLiteLockFile('somefile', threaded=False)
        """
        LockBase.__init__(self, path, threaded, timeout)
        # SQLite text columns want unicode; normalize both identifiers.
        self.lock_file = unicode(self.lock_file)
        self.unique_name = unicode(self.unique_name)

        if SQLiteLockFile.testdb is None:
            import tempfile
            _fd, testdb = tempfile.mkstemp()
            os.close(_fd)
            os.unlink(testdb)
            del _fd, tempfile
            SQLiteLockFile.testdb = testdb

        import sqlite3
        self.connection = sqlite3.connect(SQLiteLockFile.testdb)

        c = self.connection.cursor()
        try:
            c.execute("create table locks"
                      "("
                      "   lock_file varchar(32),"
                      "   unique_name varchar(32)"
                      ")")
        except sqlite3.OperationalError:
            # Table already exists: another instance created it first.
            pass
        else:
            self.connection.commit()
            import atexit
            atexit.register(os.unlink, SQLiteLockFile.testdb)

    def acquire(self, timeout=None):
        """Acquire the lock, waiting up to `timeout` seconds.

        timeout semantics: None -> use the instance default; <= 0 ->
        non-blocking (raise AlreadyLocked immediately if held by someone
        else); > 0 -> raise LockTimeout after that many seconds.
        """
        # BUG FIX: the original used the `x and a or b` idiom
        # (`timeout = timeout is not None and timeout or self.timeout`),
        # which routed the falsy value timeout=0 — meant to be
        # non-blocking per the logic below — to the instance default.
        if timeout is None:
            timeout = self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        # Poll interval: a tenth of the timeout, or 0.1s when unbounded.
        if timeout is None:
            wait = 0.1
        elif timeout <= 0:
            wait = 0
        else:
            wait = timeout / 10

        cursor = self.connection.cursor()

        while True:
            if not self.is_locked():
                # Not locked.  Try to lock it.
                cursor.execute("insert into locks"
                               "  (lock_file, unique_name)"
                               "  values"
                               "  (?, ?)",
                               (self.lock_file, self.unique_name))
                self.connection.commit()

                # Check to see if we are the only lock holder.
                cursor.execute("select * from locks"
                               "  where unique_name = ?",
                               (self.unique_name,))
                rows = cursor.fetchall()
                if len(rows) > 1:
                    # Nope.  Someone else got there.  Remove our lock.
                    cursor.execute("delete from locks"
                                   "  where unique_name = ?",
                                   (self.unique_name,))
                    self.connection.commit()
                else:
                    # Yup.  We're done, so go home.
                    return
            else:
                # Check to see if we are the only lock holder.
                cursor.execute("select * from locks"
                               "  where unique_name = ?",
                               (self.unique_name,))
                rows = cursor.fetchall()
                if len(rows) == 1:
                    # We're the locker, so go home.
                    return

            # Maybe we should wait a bit longer.
            if timeout is not None and time.time() > end_time:
                if timeout > 0:
                    # No more waiting.
                    raise LockTimeout("Timeout waiting to acquire"
                                      " lock for %s" %
                                      self.path)
                else:
                    # Someone else has the lock and we are impatient..
                    raise AlreadyLocked("%s is already locked" % self.path)

            # Well, okay.  We'll give it a bit longer.
            time.sleep(wait)

    def release(self):
        """Release the lock; raise if not held, or held by someone else."""
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        if not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me (by %s)" %
                            (self.unique_name, self._who_is_locking()))
        cursor = self.connection.cursor()
        cursor.execute("delete from locks"
                       "  where unique_name = ?",
                       (self.unique_name,))
        self.connection.commit()

    def _who_is_locking(self):
        # Return the unique_name of whichever process/thread holds the lock.
        cursor = self.connection.cursor()
        cursor.execute("select unique_name from locks"
                       "  where lock_file = ?",
                       (self.lock_file,))
        return cursor.fetchone()[0]

    def is_locked(self):
        """True if any holder has a row for this lock file."""
        cursor = self.connection.cursor()
        cursor.execute("select * from locks"
                       "  where lock_file = ?",
                       (self.lock_file,))
        rows = cursor.fetchall()
        return not not rows

    def i_am_locking(self):
        """True if this instance's unique_name holds the lock."""
        cursor = self.connection.cursor()
        cursor.execute("select * from locks"
                       "  where lock_file = ?"
                       "    and unique_name = ?",
                       (self.lock_file, self.unique_name))
        return not not cursor.fetchall()

    def break_lock(self):
        """Forcibly remove every holder's row for this lock file."""
        cursor = self.connection.cursor()
        cursor.execute("delete from locks"
                       "  where lock_file = ?",
                       (self.lock_file,))
        self.connection.commit()
| mit |
simonpatrick/bite-project | deps/gdata-python-client/samples/webmastertools/SitemapsFeedSummary.py | 128 | 1878 | #!/usr/bin/python
#
# Copyright (C) 2008 Yu-Jie Lin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gdata.webmastertools.service
import gdata.service
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import getpass
# Collect Google Account credentials and the target site interactively.
username = ''
password = ''
site_uri = ''
username = raw_input('Please enter your username: ')
password = getpass.getpass()
site_uri = raw_input('Please enter your site url: ')
# Authenticate against Google Webmaster Tools via ClientLogin; `source`
# identifies this sample application to the service.
client = gdata.webmastertools.service.GWebmasterToolsService(
    email=username,
    password=password, source='PythonWebmasterToolsSample-1')
print 'Logging in'
client.ProgrammaticLogin()
print 'Retrieving Sitemaps feed'
feed = client.GetSitemapsFeed(site_uri)
# Format the feed
# Print a short header with the sitemap count and last-update timestamp.
print
print 'You have %d sitemap(s), last updated at %s' % (
    len(feed.entry), feed.updated.text)
print
print '='*80
def safeElementText(element):
    """Return element.text when the attribute exists, else an empty string."""
    return getattr(element, 'text', '')
# Format each site
# Print a summary row per sitemap entry; slices keep columns at fixed width.
for entry in feed.entry:
    print entry.title.text.replace('http://', '')[:80]
    print "  Last Updated  : %29s Status: %10s" % (
        entry.updated.text[:29], entry.sitemap_status.text[:10])
    # Optional elements may be missing; safeElementText falls back to ''.
    print "  Last Downloaded: %29s URL Count: %10s" % (
        safeElementText(entry.sitemap_last_downloaded)[:29],
        safeElementText(entry.sitemap_url_count)[:10])
    print
| apache-2.0 |
npiganeau/odoo | addons/hr_expense/report/hr_expense_report.py | 12 | 5596 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_expense_report(osv.osv):
    """Read-only expense statistics model backed by a PostgreSQL view.

    ``_auto = False`` prevents OpenERP from creating a table; the data
    comes from the ``hr_expense_report`` view created in :meth:`init`.
    """
    _name = "hr.expense.report"
    _description = "Expenses Statistics"
    _auto = False
    _rec_name = 'date'
    # All columns are readonly because rows come from an aggregated SQL view.
    _columns = {
        'date': fields.date('Date ', readonly=True),
        'create_date': fields.datetime('Creation Date', readonly=True),
        'product_id':fields.many2one('product.product', 'Product', readonly=True),
        'journal_id': fields.many2one('account.journal', 'Force Journal', readonly=True),
        'product_qty':fields.float('Qty', readonly=True),
        'employee_id': fields.many2one('hr.employee', "Employee's Name", readonly=True),
        'date_confirm': fields.date('Confirmation Date', readonly=True),
        'date_valid': fields.date('Validation Date', readonly=True),
        'department_id':fields.many2one('hr.department','Department', readonly=True),
        'company_id':fields.many2one('res.company', 'Company', readonly=True),
        'user_id':fields.many2one('res.users', 'Validation User', readonly=True),
        'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
        'price_total':fields.float('Total Price', readonly=True, digits_compute=dp.get_precision('Account')),
        'delay_valid':fields.float('Delay to Valid', readonly=True),
        'delay_confirm':fields.float('Delay to Confirm', readonly=True),
        'analytic_account': fields.many2one('account.analytic.account','Analytic account',readonly=True),
        'price_average':fields.float('Average Price', readonly=True, digits_compute=dp.get_precision('Account')),
        'nbr':fields.integer('# of Lines', readonly=True),
        'no_of_products':fields.integer('# of Products', readonly=True),
        'no_of_account':fields.integer('# of Accounts', readonly=True),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('confirm', 'Waiting confirmation'),
            ('accepted', 'Accepted'),
            ('done', 'Done'),
            ('cancelled', 'Cancelled')],
            'Status', readonly=True),
    }
    _order = 'date desc'
    def init(self, cr):
        """(Re)create the aggregated view joining expense lines to their
        expense sheet and unit of measure; delays are expressed in days."""
        tools.drop_view_if_exists(cr, 'hr_expense_report')
        # The CASE in price_average guards against division by zero when a
        # line has zero quantity or a zero UoM factor.
        cr.execute("""
            create or replace view hr_expense_report as (
                 select
                 min(l.id) as id,
                 s.date as date,
                 s.create_date as create_date,
                 s.employee_id,
                 s.journal_id,
                 s.currency_id,
                 s.date_confirm as date_confirm,
                 s.date_valid as date_valid,
                 s.user_valid as user_id,
                 s.department_id,
                 avg(extract('epoch' from age(s.date_valid,s.date)))/(3600*24) as delay_valid,
                 avg(extract('epoch' from age(s.date_valid,s.date_confirm)))/(3600*24) as delay_confirm,
                 l.product_id as product_id,
                 l.analytic_account as analytic_account,
                 sum(l.unit_quantity * u.factor) as product_qty,
                 s.company_id as company_id,
                 sum(l.unit_quantity*l.unit_amount) as price_total,
                 (sum(l.unit_quantity*l.unit_amount)/sum(case when l.unit_quantity=0 or u.factor=0 then 1 else l.unit_quantity * u.factor end))::decimal(16,2) as price_average,
                 count(*) as nbr,
                 (select unit_quantity from hr_expense_line where id=l.id and product_id is not null) as no_of_products,
                 (select analytic_account from hr_expense_line where id=l.id and analytic_account is not null) as no_of_account,
                 s.state
                 from hr_expense_line l
                 left join hr_expense_expense s on (s.id=l.expense_id)
                 left join product_uom u on (u.id=l.uom_id)
                 group by
                 s.date,
                 s.create_date,
                 s.date_confirm,
                 s.date_valid,
                 l.product_id,
                 l.analytic_account,
                 s.currency_id,
                 s.user_valid,
                 s.department_id,
                 l.uom_id,
                 l.id,
                 s.state,
                 s.journal_id,
                 s.company_id,
                 s.employee_id
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ampax/edx-platform-backup | lms/djangoapps/dashboard/sysadmin.py | 9 | 29023 | """
This module creates a sysadmin dashboard for managing and viewing
courses.
"""
import csv
import json
import logging
import os
import subprocess
import time
import StringIO
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.db import IntegrityError
from django.http import HttpResponse, Http404
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.generic.base import TemplateView
from django.views.decorators.http import condition
from django_future.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
import mongoengine
from path import path
from courseware.courses import get_course_by_id
import dashboard.git_import as git_import
from dashboard.git_import import GitImportError
from student.roles import CourseStaffRole, CourseInstructorRole
from dashboard.models import CourseImportLog
from external_auth.models import ExternalAuthMap
from external_auth.views import generate_password
from student.models import CourseEnrollment, UserProfile, Registration
import track.views
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml import XMLModuleStore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class SysadminDashboardView(TemplateView):
    """Base class for sysadmin dashboard views with common methods"""
    template_name = 'sysadmin_dashboard.html'
    def __init__(self, **kwargs):
        """
        Initialize base sysadmin dashboard class with modulestore,
        modulestore_type and return msg
        """
        self.def_ms = modulestore()
        # XML modulestore means no MongoDB backing; subclasses branch on this.
        self.is_using_mongo = True
        if isinstance(self.def_ms, XMLModuleStore):
            self.is_using_mongo = False
        self.msg = u''
        # NOTE(review): initialized as a list here but subclasses assign
        # dicts to it before rendering -- the initial value is a placeholder.
        self.datatable = []
        super(SysadminDashboardView, self).__init__(**kwargs)
    # Require login, disable caching, and set the CSRF cookie on every
    # dispatch of any dashboard subclass.
    @method_decorator(ensure_csrf_cookie)
    @method_decorator(login_required)
    @method_decorator(cache_control(no_cache=True, no_store=True,
                                    must_revalidate=True))
    @method_decorator(condition(etag_func=None))
    def dispatch(self, *args, **kwargs):
        return super(SysadminDashboardView, self).dispatch(*args, **kwargs)
    def get_courses(self):
        """ Get an iterable list of courses."""
        return self.def_ms.get_courses()
    def return_csv(self, filename, header, data):
        """
        Convenient function for handling the http response of a csv.
        data should be iterable and is used to stream object over http
        """
        csv_file = StringIO.StringIO()
        writer = csv.writer(csv_file, dialect='excel', quotechar='"',
                            quoting=csv.QUOTE_ALL)
        writer.writerow(header)
        # Setup streaming of the data
        def read_and_flush():
            """Read and clear buffer for optimization"""
            csv_file.seek(0)
            csv_data = csv_file.read()
            csv_file.seek(0)
            csv_file.truncate()
            return csv_data
        def csv_data():
            """Generator for handling potentially large CSVs"""
            # Rows are written to the shared StringIO buffer and flushed
            # one at a time so the whole CSV is never held in memory.
            for row in data:
                writer.writerow(row)
                csv_data = read_and_flush()
                yield csv_data
        # `mimetype` is the pre-Django-1.7 spelling of content_type.
        response = HttpResponse(csv_data(), mimetype='text/csv')
        response['Content-Disposition'] = 'attachment; filename={0}'.format(
            filename)
        return response
class Users(SysadminDashboardView):
    """
    The status view provides Web based user management, a listing of
    courses loaded, and user statistics
    """
    def fix_external_auth_map_passwords(self):
        """
        This corrects any passwords that have drifted from eamap to
        internal django auth. Needs to be removed when fixed in external_auth
        """
        msg = ''
        for eamap in ExternalAuthMap.objects.all():
            euser = eamap.user
            epass = eamap.internal_password
            if euser is None:
                continue
            try:
                testuser = authenticate(username=euser.username, password=epass)
            except (TypeError, PermissionDenied, AttributeError), err:
                # Translators: This message means that the user could not be authenticated (that is, we could
                # not log them in for some reason - maybe they don't have permission, or their password was wrong)
                msg += _('Failed in authenticating {username}, error {error}\n').format(
                    username=euser,
                    error=err
                )
                continue
            if testuser is None:
                # authenticate() returned None: the stored django password
                # has drifted from eamap, so reset it to the eamap value.
                # Translators: This message means that the user could not be authenticated (that is, we could
                # not log them in for some reason - maybe they don't have permission, or their password was wrong)
                msg += _('Failed in authenticating {username}\n').format(username=euser)
                # Translators: this means that the password has been corrected (sometimes the database needs to be resynchronized)
                # Translate this as meaning "the password was fixed" or "the password was corrected".
                msg += _('fixed password')
                euser.set_password(epass)
                euser.save()
                continue
        if not msg:
            # Translators: this means everything happened successfully, yay!
            msg = _('All ok!')
        return msg
    def create_user(self, uname, name, password=None):
        """ Creates a user (both SSL and regular)"""
        if not uname:
            return _('Must provide username')
        if not name:
            return _('Must provide full name')
        email_domain = getattr(settings, 'SSL_AUTH_EMAIL_DOMAIN', 'MIT.EDU')
        msg = u''
        if settings.FEATURES['AUTH_USE_CERTIFICATES']:
            # SSL/certificate path: derive the email from the username and
            # generate a random internal password.
            if '@' not in uname:
                email = '{0}@{1}'.format(uname, email_domain)
            else:
                email = uname
            if not email.endswith('@{0}'.format(email_domain)):
                # Translators: Domain is an email domain, such as "@gmail.com"
                msg += _('Email address must end in {domain}').format(domain="@{0}".format(email_domain))
                return msg
            mit_domain = 'ssl:MIT'
            if ExternalAuthMap.objects.filter(external_id=email,
                                              external_domain=mit_domain):
                msg += _('Failed - email {email_addr} already exists as {external_id}').format(
                    email_addr=email,
                    external_id="external_id"
                )
                return msg
            new_password = generate_password()
        else:
            # Regular path: the username must itself be an email address and
            # the caller must supply a password.
            if not password:
                return _('Password must be supplied if not using certificates')
            email = uname
            if '@' not in email:
                msg += _('email address required (not username)')
                return msg
            new_password = password
        user = User(username=uname, email=email, is_active=True)
        user.set_password(new_password)
        try:
            user.save()
        except IntegrityError:
            msg += _('Oops, failed to create user {user}, {error}').format(
                user=user,
                error="IntegrityError"
            )
            return msg
        reg = Registration()
        reg.register(user)
        profile = UserProfile(user=user)
        profile.name = name
        profile.save()
        if settings.FEATURES['AUTH_USE_CERTIFICATES']:
            credential_string = getattr(settings, 'SSL_AUTH_DN_FORMAT_STRING',
                                        '/C=US/ST=Massachusetts/O=Massachusetts Institute of Technology/OU=Client CA v1/CN={0}/emailAddress={1}')
            credentials = credential_string.format(name, email)
            eamap = ExternalAuthMap(
                external_id=email,
                external_email=email,
                external_domain=mit_domain,
                external_name=name,
                internal_password=new_password,
                external_credentials=json.dumps(credentials),
            )
            eamap.user = user
            eamap.dtsignup = timezone.now()
            eamap.save()
        msg += _('User {user} created successfully!').format(user=user)
        return msg
    def delete_user(self, uname):
        """Deletes a user from django auth"""
        if not uname:
            return _('Must provide username')
        # Accept either an email address or a username as the identifier.
        if '@' in uname:
            try:
                user = User.objects.get(email=uname)
            except User.DoesNotExist, err:
                msg = _('Cannot find user with email address {email_addr}').format(email_addr=uname)
                return msg
        else:
            try:
                user = User.objects.get(username=uname)
            except User.DoesNotExist, err:
                msg = _('Cannot find user with username {username} - {error}').format(
                    username=uname,
                    error=str(err)
                )
                return msg
        user.delete()
        return _('Deleted user {username}').format(username=uname)
    def make_common_context(self):
        """Returns the datatable used for this view"""
        self.datatable = {}
        self.datatable = dict(header=[_('Statistic'), _('Value')],
                              title=_('Site statistics'))
        self.datatable['data'] = [[_('Total number of users'),
                                   User.objects.all().count()]]
        # Append an HTML list of every course known to the modulestore.
        self.msg += u'<h2>{0}</h2>'.format(
            _('Courses loaded in the modulestore')
        )
        self.msg += u'<ol>'
        for course in self.get_courses():
            self.msg += u'<li>{0} ({1})</li>'.format(
                escape(course.id.to_deprecated_string()), course.location.to_deprecated_string())
        self.msg += u'</ol>'
    def get(self, request):
        """Render the user-management tab; restricted to global staff."""
        if not request.user.is_staff:
            raise Http404
        self.make_common_context()
        context = {
            'datatable': self.datatable,
            'msg': self.msg,
            'djangopid': os.getpid(),
            'modeflag': {'users': 'active-section'},
            'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),
        }
        return render_to_response(self.template_name, context)
    def post(self, request):
        """Handle various actions available on page"""
        if not request.user.is_staff:
            raise Http404
        self.make_common_context()
        action = request.POST.get('action', '')
        track.views.server_track(request, action, {}, page='user_sysdashboard')
        if action == 'download_users':
            # Stream all users as CSV rather than rendering the page.
            header = [_('username'), _('email'), ]
            data = ([u.username, u.email] for u in
                    (User.objects.all().iterator()))
            return self.return_csv('users_{0}.csv'.format(
                request.META['SERVER_NAME']), header, data)
        elif action == 'repair_eamap':
            self.msg = u'<h4>{0}</h4><pre>{1}</pre>{2}'.format(
                _('Repair Results'),
                self.fix_external_auth_map_passwords(),
                self.msg)
            self.datatable = {}
        elif action == 'create_user':
            uname = request.POST.get('student_uname', '').strip()
            name = request.POST.get('student_fullname', '').strip()
            password = request.POST.get('student_password', '').strip()
            self.msg = u'<h4>{0}</h4><p>{1}</p><hr />{2}'.format(
                _('Create User Results'),
                self.create_user(uname, name, password), self.msg)
        elif action == 'del_user':
            uname = request.POST.get('student_uname', '').strip()
            self.msg = u'<h4>{0}</h4><p>{1}</p><hr />{2}'.format(
                _('Delete User Results'), self.delete_user(uname), self.msg)
        context = {
            'datatable': self.datatable,
            'msg': self.msg,
            'djangopid': os.getpid(),
            'modeflag': {'users': 'active-section'},
            'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),
        }
        return render_to_response(self.template_name, context)
class Courses(SysadminDashboardView):
    """
    This manages adding/updating courses from git, deleting courses, and
    provides course listing information.
    """
    def git_info_for_course(self, cdir):
        """This pulls out some git info like the last commit"""
        cmd = ''
        gdir = settings.DATA_DIR / cdir
        # info is [commit, date, author]; empty strings when no repo found.
        info = ['', '', '']
        # Try the data dir, then try to find it in the git import dir
        if not gdir.exists():
            gdir = path(git_import.GIT_REPO_DIR) / cdir
            if not gdir.exists():
                return info
        cmd = ['git', 'log', '-1',
               '--format=format:{ "commit": "%H", "author": "%an %ae", "date": "%ad"}', ]
        try:
            output_json = json.loads(subprocess.check_output(cmd, cwd=gdir))
            info = [output_json['commit'],
                    output_json['date'],
                    output_json['author'], ]
        except (ValueError, subprocess.CalledProcessError):
            # Missing/invalid repo or unparsable output: return blanks.
            pass
        return info
    def get_course_from_git(self, gitloc, branch):
        """This downloads and runs the checks for importing a course in git"""
        if not (gitloc.endswith('.git') or gitloc.startswith('http:') or
                gitloc.startswith('https:') or gitloc.startswith('git:')):
            return _("The git repo location should end with '.git', "
                     "and be a valid url")
        if self.is_using_mongo:
            return self.import_mongo_course(gitloc, branch)
        return self.import_xml_course(gitloc, branch)
    def import_mongo_course(self, gitloc, branch):
        """
        Imports course using management command and captures logging output
        at debug level for display in template
        """
        msg = u''
        log.debug('Adding course using git repo {0}'.format(gitloc))
        # Grab logging output for debugging imports
        output = StringIO.StringIO()
        import_log_handler = logging.StreamHandler(output)
        import_log_handler.setLevel(logging.DEBUG)
        logger_names = ['xmodule.modulestore.xml_importer',
                        'dashboard.git_import',
                        'xmodule.modulestore.xml',
                        'xmodule.seq_module', ]
        loggers = []
        for logger_name in logger_names:
            logger = logging.getLogger(logger_name)
            logger.setLevel(logging.DEBUG)
            logger.addHandler(import_log_handler)
            loggers.append(logger)
        error_msg = ''
        try:
            git_import.add_repo(gitloc, None, branch)
        except GitImportError as ex:
            error_msg = str(ex)
        ret = output.getvalue()
        # Remove handler hijacks
        for logger in loggers:
            logger.setLevel(logging.NOTSET)
            logger.removeHandler(import_log_handler)
        if error_msg:
            msg_header = error_msg
            color = 'red'
        else:
            msg_header = _('Added Course')
            color = 'blue'
        msg = u"<h4 style='color:{0}'>{1}</h4>".format(color, msg_header)
        msg += "<pre>{0}</pre>".format(escape(ret))
        return msg
    def import_xml_course(self, gitloc, branch):
        """Imports a git course into the XMLModuleStore"""
        msg = u''
        if not getattr(settings, 'GIT_IMPORT_WITH_XMLMODULESTORE', False):
            # Translators: "GIT_IMPORT_WITH_XMLMODULESTORE" is a variable name.
            # "XMLModuleStore" and "MongoDB" are database systems. You should not
            # translate these names.
            return _('Refusing to import. GIT_IMPORT_WITH_XMLMODULESTORE is '
                     'not turned on, and it is generally not safe to import '
                     'into an XMLModuleStore with multithreaded. We '
                     'recommend you enable the MongoDB based module store '
                     'instead, unless this is a development environment.')
        # Directory name is the final path segment minus the '.git' suffix.
        cdir = (gitloc.rsplit('/', 1)[1])[:-4]
        gdir = settings.DATA_DIR / cdir
        if os.path.exists(gdir):
            msg += _("The course {0} already exists in the data directory! "
                     "(reloading anyway)").format(cdir)
            cmd = ['git', 'pull', ]
            cwd = gdir
        else:
            cmd = ['git', 'clone', gitloc, ]
            cwd = settings.DATA_DIR
        cwd = os.path.abspath(cwd)
        try:
            cmd_output = escape(
                subprocess.check_output(cmd, stderr=subprocess.STDOUT, cwd=cwd)
            )
        except subprocess.CalledProcessError as ex:
            log.exception('Git pull or clone output was: %r', ex.output)
            # Translators: unable to download the course content from
            # the source git repository. Clone occurs if this is brand
            # new, and pull is when it is being updated from the
            # source.
            return _('Unable to clone or pull repository. Please check '
                     'your url. Output was: {0!r}'.format(ex.output))
        msg += u'<pre>{0}</pre>'.format(cmd_output)
        if not os.path.exists(gdir):
            msg += _('Failed to clone repository to {directory_name}').format(directory_name=gdir)
            return msg
        # Change branch if specified
        if branch:
            try:
                git_import.switch_branch(branch, gdir)
            except GitImportError as ex:
                return str(ex)
            # Translators: This is a git repository branch, which is a
            # specific version of a courses content
            msg += u'<p>{0}</p>'.format(
                _('Successfully switched to branch: '
                  '{branch_name}'.format(branch_name=branch)))
        # Load the course into the XML modulestore and report any errors
        # the loader recorded for this directory.
        self.def_ms.try_load_course(os.path.abspath(gdir))
        errlog = self.def_ms.errored_courses.get(cdir, '')
        if errlog:
            msg += u'<hr width="50%"><pre>{0}</pre>'.format(escape(errlog))
        else:
            course = self.def_ms.courses[os.path.abspath(gdir)]
            msg += _('Loaded course {course_name}<br/>Errors:').format(
                course_name="{} {}".format(cdir, course.display_name)
            )
            errors = self.def_ms.get_course_errors(course.id)
            if not errors:
                msg += u'None'
            else:
                msg += u'<ul>'
                for (summary, err) in errors:
                    msg += u'<li><pre>{0}: {1}</pre></li>'.format(escape(summary),
                                                                  escape(err))
                msg += u'</ul>'
        return msg
    def make_datatable(self):
        """Creates course information datatable"""
        data = []
        for course in self.get_courses():
            gdir = course.id.course
            data.append([course.display_name, course.id.to_deprecated_string()]
                        + self.git_info_for_course(gdir))
        return dict(header=[_('Course Name'),
                            _('Directory/ID'),
                            # Translators: "Git Commit" is a computer command; see http://gitref.org/basic/#commit
                            _('Git Commit'),
                            _('Last Change'),
                            _('Last Editor')],
                    title=_('Information about all courses'),
                    data=data)
    def get(self, request):
        """Displays forms and course information"""
        if not request.user.is_staff:
            raise Http404
        context = {
            'datatable': self.make_datatable(),
            'msg': self.msg,
            'djangopid': os.getpid(),
            'modeflag': {'courses': 'active-section'},
            'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),
        }
        return render_to_response(self.template_name, context)
    def post(self, request):
        """Handle all actions from courses view"""
        if not request.user.is_staff:
            raise Http404
        action = request.POST.get('action', '')
        track.views.server_track(request, action, {},
                                 page='courses_sysdashboard')
        courses = {course.id: course for course in self.get_courses()}
        if action == 'add_course':
            # Strip whitespace and semicolons to avoid shell-ish input in
            # the repo location/branch fields.
            gitloc = request.POST.get('repo_location', '').strip().replace(' ', '').replace(';', '')
            branch = request.POST.get('repo_branch', '').strip().replace(' ', '').replace(';', '')
            self.msg += self.get_course_from_git(gitloc, branch)
        elif action == 'del_course':
            course_id = request.POST.get('course_id', '').strip()
            course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
            course_found = False
            if course_key in courses:
                course_found = True
                course = courses[course_key]
            else:
                try:
                    course = get_course_by_id(course_key)
                    course_found = True
                except Exception, err: # pylint: disable=broad-except
                    self.msg += _(
                        'Error - cannot get course with ID {0}<br/><pre>{1}</pre>'
                    ).format(
                        course_key,
                        escape(str(err))
                    )
            is_xml_course = (modulestore().get_modulestore_type(course_key) == ModuleStoreEnum.Type.xml)
            if course_found and is_xml_course:
                cdir = course.data_dir
                self.def_ms.courses.pop(cdir)
                # now move the directory (don't actually delete it)
                new_dir = "{course_dir}_deleted_{timestamp}".format(
                    course_dir=cdir,
                    timestamp=int(time.time())
                )
                os.rename(settings.DATA_DIR / cdir, settings.DATA_DIR / new_dir)
                self.msg += (u"<font color='red'>Deleted "
                             u"{0} = {1} ({2})</font>".format(
                                 cdir, course.id, course.display_name))
            elif course_found and not is_xml_course:
                # delete course that is stored with mongodb backend
                self.def_ms.delete_course(course.id, request.user.id)
                # don't delete user permission groups, though
                self.msg += \
                    u"<font color='red'>{0} {1} = {2} ({3})</font>".format(
                        _('Deleted'), course.location.to_deprecated_string(), course.id.to_deprecated_string(), course.display_name)
        context = {
            'datatable': self.make_datatable(),
            'msg': self.msg,
            'djangopid': os.getpid(),
            'modeflag': {'courses': 'active-section'},
            'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),
        }
        return render_to_response(self.template_name, context)
class Staffing(SysadminDashboardView):
    """
    The status view provides a view of staffing and enrollment in
    courses that include an option to download the data as a csv.
    """
    def get(self, request):
        """Displays course Enrollment and staffing course statistics"""
        if not request.user.is_staff:
            raise Http404
        data = []
        # One row per course: name, id, enrollment count, staff count, and a
        # comma-joined list of instructor usernames.
        for course in self.get_courses(): # pylint: disable=unused-variable
            datum = [course.display_name, course.id]
            datum += [CourseEnrollment.objects.filter(
                course_id=course.id).count()]
            datum += [CourseStaffRole(course.id).users_with_role().count()]
            datum += [','.join([x.username for x in CourseInstructorRole(
                course.id).users_with_role()])]
            data.append(datum)
        datatable = dict(header=[_('Course Name'), _('course_id'),
                                 _('# enrolled'), _('# staff'),
                                 _('instructors')],
                         title=_('Enrollment information for all courses'),
                         data=data)
        context = {
            'datatable': datatable,
            'msg': self.msg,
            'djangopid': os.getpid(),
            'modeflag': {'staffing': 'active-section'},
            'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),
        }
        return render_to_response(self.template_name, context)
    def post(self, request):
        """Handle all actions from staffing and enrollment view"""
        action = request.POST.get('action', '')
        track.views.server_track(request, action, {},
                                 page='staffing_sysdashboard')
        if action == 'get_staff_csv':
            # Export one CSV row per (course, role, user) combination.
            data = []
            roles = [CourseInstructorRole, CourseStaffRole, ]
            for course in self.get_courses(): # pylint: disable=unused-variable
                for role in roles:
                    for user in role(course.id).users_with_role():
                        datum = [course.id, role, user.username, user.email,
                                 user.profile.name]
                        data.append(datum)
            header = [_('course_id'),
                      _('role'), _('username'),
                      _('email'), _('full_name'), ]
            return self.return_csv('staff_{0}.csv'.format(
                request.META['SERVER_NAME']), header, data)
        # Any other action just re-renders the GET view.
        return self.get(request)
class GitLogs(TemplateView):
    """
    This provides a view into the import of courses from git repositories.
    It is convenient for allowing course teams to see what may be wrong with
    their xml
    """
    template_name = 'sysadmin_dashboard_gitlogs.html'
    @method_decorator(login_required)
    def get(self, request, *args, **kwargs):
        """Shows logs of imports that happened as a result of a git import.

        Global staff may view all import logs; when a course_id is given,
        that course's staff and instructors may view its logs too.
        """
        course_id = kwargs.get('course_id')
        if course_id:
            course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
        # Set mongodb defaults even if it isn't defined in settings
        mongo_db = {
            'host': 'localhost',
            'user': '',
            'password': '',
            'db': 'xlog',
        }
        # Allow overrides
        if hasattr(settings, 'MONGODB_LOG'):
            for config_item in ['host', 'user', 'password', 'db', ]:
                mongo_db[config_item] = settings.MONGODB_LOG.get(
                    config_item, mongo_db[config_item])
        mongouri = 'mongodb://{user}:{password}@{host}/{db}'.format(**mongo_db)
        # NOTE(review): error_msg is passed to the template but never set;
        # consider populating it when the mongo connection fails.
        error_msg = ''
        # BUG FIX: previously `mdb` was left unbound when connect() raised
        # ConnectionError, so the unconditional `mdb.disconnect()` below
        # raised a NameError that masked the real connection problem.
        mdb = None
        try:
            if mongo_db['user'] and mongo_db['password']:
                mdb = mongoengine.connect(mongo_db['db'], host=mongouri)
            else:
                mdb = mongoengine.connect(mongo_db['db'], host=mongo_db['host'])
        except mongoengine.connection.ConnectionError:
            log.exception('Unable to connect to mongodb to save log, '
                          'please check MONGODB_LOG settings.')
        if course_id is None:
            # Require staff if not going to specific course
            if not request.user.is_staff:
                raise Http404
            cilset = CourseImportLog.objects.all().order_by('-created')
        else:
            try:
                course = get_course_by_id(course_id)
            except Exception: # pylint: disable=broad-except
                log.info('Cannot find course {0}'.format(course_id))
                raise Http404
            # Allow only course team, instructors, and staff
            if not (request.user.is_staff or
                    CourseInstructorRole(course.id).has_user(request.user) or
                    CourseStaffRole(course.id).has_user(request.user)):
                raise Http404
            log.debug('course_id={0}'.format(course_id))
            cilset = CourseImportLog.objects.filter(course_id=course_id).order_by('-created')
        log.debug('cilset length={0}'.format(len(cilset)))
        # Only disconnect when the connection actually succeeded.
        if mdb is not None:
            mdb.disconnect()
        context = {'cilset': cilset,
                   'course_id': course_id.to_deprecated_string() if course_id else None,
                   'error_msg': error_msg}
        return render_to_response(self.template_name, context)
| agpl-3.0 |
wolet/deepy | experiments/highway_networks/highway_layer.py | 5 | 1180 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from deepy.layers import NeuralLayer
from deepy.utils import build_activation
import theano.tensor as T
class HighwayLayer(NeuralLayer):
    """
    A highway network layer (see http://arxiv.org/abs/1505.00387).

    Computes ``y = H(x) * T(x) + x * (1 - T(x))`` where ``T`` is a learned
    transform gate and ``H`` a learned transform, both using the configured
    activation. Input and output dimensionality are identical.
    """
    def __init__(self, activation='relu', init=None, gate_bias=-5):
        """A negative gate_bias starts the layer close to the identity map."""
        super(HighwayLayer, self).__init__("highway")
        self.activation = activation
        self.init = init
        self.gate_bias = gate_bias
    def setup(self):
        """Allocate the transform and gate parameters."""
        # A highway layer never changes dimensionality.
        self.output_dim = self.input_dim
        self._act = build_activation(self.activation)
        self.W_h = self.create_weight(self.input_dim, self.input_dim, "h", initializer=self.init)
        self.W_t = self.create_weight(self.input_dim, self.input_dim, "t", initializer=self.init)
        self.B_h = self.create_bias(self.input_dim, "h")
        self.B_t = self.create_bias(self.input_dim, "t", value=self.gate_bias)
        self.register_parameters(self.W_h, self.B_h, self.W_t, self.B_t)
    def output(self, x):
        """Blend the transformed input with the raw input via the gate."""
        gate = self._act(T.dot(x, self.W_t) + self.B_t)
        transformed = self._act(T.dot(x, self.W_h) + self.B_h)
        return gate * transformed + (1 - gate) * x
| mit |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/services/webapp/pgsession.py | 1 | 8383 | # Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""PostgreSQL server side session storage for Zope3."""
__metaclass__ = type
import cPickle as pickle
import time
from UserDict import DictMixin
from lazr.restful.utils import get_current_browser_request
from storm.zope.interfaces import IZStorm
from zope.authentication.interfaces import IUnauthenticatedPrincipal
from zope.component import getUtility
from zope.interface import implements
from zope.session.interfaces import (
IClientIdManager,
ISessionData,
ISessionDataContainer,
ISessionPkgData,
)
from lp.services.helpers import ensure_unicode
# Time-unit multipliers, all expressed in seconds, used for the session
# container's resolution setting.
SECONDS = 1
MINUTES = 60 * SECONDS
HOURS = 60 * MINUTES
DAYS = 24 * HOURS
class PGSessionBase:
    """Mixin giving session classes access to the session database store."""
    # Name of the Storm store that holds the session tables.
    store_name = 'session'
    @property
    def store(self):
        """The Storm store for session data, looked up lazily via ZStorm."""
        return getUtility(IZStorm).get(self.store_name)
class PGSessionDataContainer(PGSessionBase):
    """An ISessionDataContainer that stores data in PostgreSQL
    PostgreSQL Schema:
        CREATE TABLE SessionData (
            client_id text PRIMARY KEY,
            last_accessed timestamp with time zone
                NOT NULL DEFAULT CURRENT_TIMESTAMP
            );
        CREATE INDEX sessiondata_last_accessed_idx ON SessionData(last_accessed);
        CREATE TABLE SessionPkgData (
            client_id text NOT NULL
                REFERENCES SessionData(client_id) ON DELETE CASCADE,
            product_id text NOT NULL,
            key text NOT NULL,
            pickle bytea NOT NULL,
            CONSTRAINT sessiondata_key UNIQUE (client_id, product_id, key)
            );
    Removing expired data needs to be done out of band.
    """
    implements(ISessionDataContainer)
    # If we have a low enough resolution, we can determine active users
    # using the session data.
    resolution = 9 * MINUTES
    session_data_table_name = 'SessionData'
    session_pkg_data_table_name = 'SessionPkgData'
    def __getitem__(self, client_id):
        """See zope.session.interfaces.ISessionDataContainer"""
        # A fresh wrapper per lookup; PGSessionData reads/writes the
        # database directly rather than caching in this container.
        return PGSessionData(self, client_id)
    def __setitem__(self, client_id, session_data):
        """See zope.session.interfaces.ISessionDataContainer"""
        # The SessionData / SessionPkgData objects know how to store
        # themselves.
        pass
class PGSessionData(PGSessionBase):
    """ISessionData implementation backed by the SessionData table."""

    implements(ISessionData)

    session_data_container = None

    # Required by ISessionData; refreshed to "now" on every construction.
    lastAccessTime = None

    # Set to True once _ensureClientId has synced the client id to both
    # the database and the browser cookie.
    _have_ensured_client_id = False

    def __init__(self, session_data_container, client_id):
        self.session_data_container = session_data_container
        self.client_id = ensure_unicode(client_id)
        self.lastAccessTime = time.time()

        # Update the last access time in the db if it is out of date.  The
        # resolution window keeps write traffic down for busy sessions.
        table_name = session_data_container.session_data_table_name
        query = """
            UPDATE %s SET last_accessed = CURRENT_TIMESTAMP
            WHERE client_id = ?
                AND last_accessed < CURRENT_TIMESTAMP - '%d seconds'::interval
            """ % (table_name, session_data_container.resolution)
        self.store.execute(query, (self.client_id,), noresult=True)

    def _ensureClientId(self):
        # Idempotent: only the first call per instance does any work.
        if self._have_ensured_client_id:
            return
        # We want to make sure the browser cookie and the database both know
        # about our client id. We're doing it lazily to try and keep anonymous
        # users from having a session.
        self.store.execute(
            "SELECT ensure_session_client_id(?)", (self.client_id,),
            noresult=True)
        request = get_current_browser_request()
        if request is not None:
            client_id_manager = getUtility(IClientIdManager)
            if IUnauthenticatedPrincipal.providedBy(request.principal):
                # it would be nice if this could be a monitored, logged
                # message instead of an instant-OOPS.
                assert (client_id_manager.namespace in request.cookies or
                        request.response.getCookie(
                            client_id_manager.namespace) is not None), (
                    'Session data should generally only be stored for '
                    'authenticated users, and for users who have just logged '
                    'out. If an unauthenticated user has just logged out, '
                    'they should have a session cookie set for ten minutes. '
                    'This should be plenty of time for passing notifications '
                    'about successfully logging out. Because this assertion '
                    'failed, it means that some code is trying to set '
                    'session data for an unauthenticated user who has been '
                    'logged out for more than ten minutes: something that '
                    'should not happen. The code setting the session data '
                    'should be reviewed; and failing that, the cookie '
                    'timeout after logout (set in '
                    'webapp.login) should perhaps be '
                    'increased a bit, if a ten minute fudge factor is not '
                    'enough to handle the vast majority of computers with '
                    'not-very-accurate system clocks. In an exceptional '
                    'case, the code may set the necessary cookies itself to '
                    'assert that yes, it *should* set the session for an '
                    'unauthenticated user. See the webapp.login module for '
                    'an example of this, as well.')
            else:
                # Authenticated user: (re-)issue the session cookie.
                client_id_manager.setRequestId(request, self.client_id)
        self._have_ensured_client_id = True

    def __getitem__(self, product_id):
        """Return an ISessionPkgData"""
        return PGSessionPkgData(self, product_id)

    def __setitem__(self, product_id, session_pkg_data):
        """See zope.session.interfaces.ISessionData

        This is a noop in the RDBMS implementation.
        """
        pass
class PGSessionPkgData(DictMixin, PGSessionBase):
    """Per-product session data mirrored in an in-memory cache.

    All rows for (client_id, product_id) are unpickled eagerly on
    construction; reads are then served from the cache, while writes and
    deletes go to both the cache and the database.
    """
    implements(ISessionPkgData)

    @property
    def store(self):
        # Reuse the owning PGSessionData's store instead of a fresh lookup.
        return self.session_data.store

    def __init__(self, session_data, product_id):
        self.session_data = session_data
        self.product_id = ensure_unicode(product_id)
        self.table_name = (
            session_data.session_data_container.session_pkg_data_table_name)
        self._populate()

    # Cache of unpickled values, filled by _populate().
    _data_cache = None

    def _populate(self):
        self._data_cache = {}
        query = """
            SELECT key, pickle FROM %s WHERE client_id = ?
                AND product_id = ?
            """ % self.table_name
        result = self.store.execute(query, (self.session_data.client_id,
                                            self.product_id))
        for key, pickled_value in result:
            # str() converts the bytea buffer into a pickleable byte string.
            value = pickle.loads(str(pickled_value))
            self._data_cache[key] = value

    def __getitem__(self, key):
        # Reads never touch the database after _populate().
        return self._data_cache[key]

    def __setitem__(self, key, value):
        key = ensure_unicode(key)
        pickled_value = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)

        # Storing data means this session is real: make sure the client id
        # exists in SessionData and in the browser cookie first.
        self.session_data._ensureClientId()
        self.store.execute(
            "SELECT set_session_pkg_data(?, ?, ?, ?)",
            (self.session_data.client_id,
             self.product_id, key, pickled_value),
            noresult=True)

        # Store the value in the cache too
        self._data_cache[key] = value

    def __delitem__(self, key):
        """Delete an item.

        Note that this will never fail in order to avoid
        race conditions in code using the session machinery
        """
        try:
            del self._data_cache[key]
        except KeyError:
            # Not in the cache, then it won't be in the DB. Or if it is,
            # another process has inserted it and we should keep our grubby
            # fingers out of it.
            return
        query = """
            DELETE FROM %s WHERE client_id = ? AND product_id = ? AND key = ?
            """ % self.table_name
        self.store.execute(
            query,
            (self.session_data.client_id,
             self.product_id, ensure_unicode(key)),
            noresult=True)

    def keys(self):
        # DictMixin derives the rest of the mapping protocol from this.
        return self._data_cache.keys()
# Module-level shared container instance (presumably registered elsewhere as
# the ISessionDataContainer utility — registration is not visible here).
data_container = PGSessionDataContainer()
| agpl-3.0 |
pluskid/mxnet | example/reinforcement-learning/parallel_actor_critic/envs.py | 15 | 1089 | import numpy as np
class Atari8080Preprocessor(object):
    """Turn raw 210x160x3 Atari frames into flat 80x80 difference vectors.

    The preprocessor keeps the previous processed frame so each call
    returns the temporal difference, which encodes motion for the policy.
    """

    def __init__(self):
        # Previous preprocessed frame; None until the first frame is seen.
        self.prev = None
        self.obs_size = 80 * 80

    def reset(self):
        """Forget the previous frame (call at episode boundaries)."""
        self.prev = None

    def preprocess(self, img):
        """
        Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) (1 x input_size)
        float vector holding the difference from the previous frame (zeros
        for the first frame after a reset).
        """
        # Crop, down-sample, erase background and set foreground to 1.
        # See https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
        img = img[35:195]       # crop to the 160-row play area
        img = img[::2, ::2, 0]  # downsample by factor 2, keep one channel
        img[img == 144] = 0     # erase background (type 1)
        img[img == 109] = 0     # erase background (type 2)
        img[img != 0] = 1       # paddles/ball -> 1
        # BUGFIX: `np.float` was a deprecated alias for builtin `float` and
        # was removed in NumPy 1.24; astype(float) yields the same float64.
        curr = np.expand_dims(img.astype(float).ravel(), axis=0)
        # Subtract the last preprocessed image.
        diff = (curr - self.prev if self.prev is not None
                else np.zeros((1, curr.shape[1])))
        self.prev = curr
        return diff
class IdentityPreprocessor(object):
    """No-op preprocessor: observations pass through unchanged."""

    def __init__(self, obs_size):
        # Size of the (already flat) observation vector.
        self.obs_size = obs_size

    def reset(self):
        """Nothing to clear for the identity transform."""
        pass

    def preprocess(self, x):
        """Return *x* untouched."""
        return x
| apache-2.0 |
snyderr/robotframework | utest/reporting/test_jsexecutionresult.py | 7 | 4033 | import unittest
from robot.utils.asserts import assert_true, assert_equal
from test_jsmodelbuilders import remap
from robot.reporting.jsexecutionresult import (JsExecutionResult,
_KeywordRemover, StringIndex)
from robot.reporting.jsmodelbuilders import SuiteBuilder, JsBuildingContext
from robot.result import TestSuite
class TestRemoveDataNotNeededInReport(unittest.TestCase):
    """Tests for `_KeywordRemover`: removing keyword entries and then
    compacting the string table of the JS suite model."""

    def test_remove_keywords(self):
        model = self._create_suite_model()
        expected = self._get_expected_suite_model(model)
        result = _KeywordRemover().remove_keywords(model)
        assert_equal(result, expected)
        self._verify_model_contains_no_keywords(result)

    def _create_suite_model(self):
        # The context is kept so its string table can be used by
        # _verify_model_contains_no_keywords when remapping indices.
        self.context = JsBuildingContext()
        return SuiteBuilder(self.context).build(self._get_suite())

    def _get_suite(self):
        # Build a suite with keywords at suite, sub-suite and test level so
        # removal is exercised at every nesting depth.
        suite = TestSuite(name='root', doc='sdoc', metadata={'m': 'v'})
        suite.keywords.create(kwname='keyword')
        sub = suite.suites.create(name='suite', metadata={'a': '1', 'b': '2'})
        sub.keywords.create(kwname='keyword')
        t1 = sub.tests.create(name='test', tags=['t1'])
        t1.keywords.create(kwname='keyword')
        t1.keywords.create(kwname='keyword')
        t2 = sub.tests.create(name='test', tags=['t1', 't2'])
        t2.keywords.create(kwname='keyword')
        return suite

    def _get_expected_suite_model(self, suite):
        # In a suite tuple, slot [-4] holds sub-suites, [-3] tests and [-2]
        # keywords; removal empties the keyword slot at every level.
        suite = list(suite)
        suite[-4] = tuple(self._get_expected_suite_model(s) for s in suite[-4])
        suite[-3] = tuple(self._get_expected_test_model(t) for t in suite[-3])
        suite[-2] = ()
        return tuple(suite)

    def _get_expected_test_model(self, test):
        # The last slot of a test tuple holds its keywords.
        test = list(test)
        test[-1] = ()
        return tuple(test)

    def _verify_model_contains_no_keywords(self, model, mapped=False):
        # String indices are remapped to real strings once, at the top level
        # only; nested tuples are already mapped when recursed into.
        if not mapped:
            model = remap(model, self.context.strings)
        assert_true('keyword' not in model, 'Not all keywords removed')
        for item in model:
            if isinstance(item, tuple):
                self._verify_model_contains_no_keywords(item, mapped=True)

    def test_remove_unused_strings(self):
        strings = ('', 'hei', 'hoi')
        model = (1, StringIndex(0), 42, StringIndex(2), -1, None)
        model, strings = _KeywordRemover().remove_unused_strings(model, strings)
        # Only values wrapped in StringIndex keep their strings, and the
        # surviving indices are renumbered to match the compacted table.
        assert_equal(strings, ('', 'hoi'))
        assert_equal(model, (1, 0, 42, 1, -1, None))

    def test_remove_unused_strings_nested(self):
        strings = tuple(' abcde')
        model = (StringIndex(0), StringIndex(1), 2, 3, StringIndex(4), 5,
                 (0, StringIndex(1), 2, StringIndex(3), 4, 5))
        model, strings = _KeywordRemover().remove_unused_strings(model, strings)
        assert_equal(strings, tuple(' acd'))
        assert_equal(model, (0, 1, 2, 3, 3, 5, (0, 1, 2, 2, 4, 5)))

    def test_through_jsexecutionresult(self):
        # End-to-end: keyword removal and string compaction through the
        # public JsExecutionResult API.
        suite = (0, StringIndex(1), 2, 3, 4, StringIndex(5),
                 ((0, 1, 2, StringIndex(3), 4, 5, (), (), ('suite', 'kws'), 9),),
                 ((0, 1, 2, StringIndex(3), 4, 5, ('test', 'kws')),
                  (0, StringIndex(1), 2, 3, 4, 5, ('test', 'kws'))),
                 ('suite', 'kws'), 9)
        exp_s = (0, 0, 2, 3, 4, 2,
                 ((0, 1, 2, 1, 4, 5, (), (), (), 9),),
                 ((0, 1, 2, 1, 4, 5, ()),
                  (0, 0, 2, 3, 4, 5, ())),
                 (), 9)
        result = JsExecutionResult(suite=suite, strings=tuple(' ABCDEF'),
                                   errors=(1, 2), statistics={}, basemillis=0,
                                   min_level='DEBUG')
        assert_equal(result.data['errors'], (1, 2))
        result.remove_data_not_needed_in_report()
        assert_equal(result.strings, tuple('ACE'))
        assert_equal(result.suite, exp_s)
        assert_equal(result.min_level, 'DEBUG')
        assert_true('errors' not in result.data)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
dparlevliet/zelenka-report-storage | server-db/twisted/web/tap.py | 35 | 7882 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for creating a service which runs a web server.
"""
import os
# Twisted Imports
from twisted.web import server, static, twcgi, script, demo, distrib, wsgi
from twisted.internet import interfaces, reactor
from twisted.python import usage, reflect, threadpool
from twisted.spread import pb
from twisted.application import internet, service, strports
class Options(usage.Options):
    """
    Define the options accepted by the I{twistd web} plugin.
    """
    synopsis = "[web options]"

    optParameters = [["port", "p", None, "strports description of the port to "
                      "start the server on."],
                     ["logfile", "l", None, "Path to web CLF (Combined Log Format) log file."],
                     ["https", None, None, "Port to listen on for Secure HTTP."],
                     ["certificate", "c", "server.pem", "SSL certificate to use for HTTPS. "],
                     ["privkey", "k", "server.pem", "SSL certificate to use for HTTPS."],
                     ]

    optFlags = [["personal", "",
                 "Instead of generating a webserver, generate a "
                 "ResourcePublisher which listens on the port given by "
                 "--port, or ~/%s " % (distrib.UserDirectory.userSocketName,) +
                 "if --port is not specified."],
                ["notracebacks", "n", "Do not display tracebacks in broken web pages. " +
                 "Displaying tracebacks to users may be security risk!"],
                ]

    # Shell-completion metadata for the file-taking options.
    compData = usage.Completions(
        optActions={"logfile" : usage.CompleteFiles("*.log"),
                    "certificate" : usage.CompleteFiles("*.pem"),
                    "privkey" : usage.CompleteFiles("*.pem")}
        )

    longdesc = """\
This starts a webserver.  If you specify no arguments, it will be a
demo webserver that has the Test class from twisted.web.demo in it."""

    def __init__(self):
        usage.Options.__init__(self)
        self['indexes'] = []
        self['root'] = None

    def opt_index(self, indexName):
        """
        Add the name of a file used to check for directory indexes.
        [default: index, index.html]
        """
        self['indexes'].append(indexName)

    opt_i = opt_index

    def opt_user(self):
        """
        Makes a server with ~/public_html and ~/.twistd-web-pb support for
        users.
        """
        self['root'] = distrib.UserDirectory()

    opt_u = opt_user

    def opt_path(self, path):
        """
        <path> is either a specific file or a directory to be set as the root
        of the web server. Use this if you have a directory full of HTML, cgi,
        epy, or rpy files or any other files that you want to be served up raw.
        """
        self['root'] = static.File(os.path.abspath(path))
        # Default extension handlers; --processor can add more afterwards.
        self['root'].processors = {
            '.cgi': twcgi.CGIScript,
            '.epy': script.PythonScript,
            '.rpy': script.ResourceScript,
            }

    def opt_processor(self, proc):
        """
        `ext=class' where `class' is added as a Processor for files ending
        with `ext'.
        """
        # Processors hang off the static.File root, so --path must come first.
        if not isinstance(self['root'], static.File):
            raise usage.UsageError("You can only use --processor after --path.")
        ext, klass = proc.split('=', 1)
        self['root'].processors[ext] = reflect.namedClass(klass)

    def opt_class(self, className):
        """
        Create a Resource subclass with a zero-argument constructor.
        """
        classObj = reflect.namedClass(className)
        self['root'] = classObj()

    def opt_resource_script(self, name):
        """
        An .rpy file to be used as the root resource of the webserver.
        """
        self['root'] = script.ResourceScriptWrapper(name)

    def opt_wsgi(self, name):
        """
        The FQPN of a WSGI application object to serve as the root resource of
        the webserver.
        """
        try:
            application = reflect.namedAny(name)
        except (AttributeError, ValueError):
            raise usage.UsageError("No such WSGI application: %r" % (name,))
        # WSGI apps run in a thread pool whose lifecycle is tied to the
        # reactor: started once it runs, stopped at shutdown.
        pool = threadpool.ThreadPool()
        reactor.callWhenRunning(pool.start)
        reactor.addSystemEventTrigger('after', 'shutdown', pool.stop)
        self['root'] = wsgi.WSGIResource(reactor, pool, application)

    def opt_mime_type(self, defaultType):
        """
        Specify the default mime-type for static files.
        """
        if not isinstance(self['root'], static.File):
            raise usage.UsageError("You can only use --mime_type after --path.")
        self['root'].defaultType = defaultType

    opt_m = opt_mime_type

    def opt_allow_ignore_ext(self):
        """
        Specify whether or not a request for 'foo' should return 'foo.ext'
        """
        if not isinstance(self['root'], static.File):
            raise usage.UsageError("You can only use --allow_ignore_ext "
                                   "after --path.")
        self['root'].ignoreExt('*')

    def opt_ignore_ext(self, ext):
        """
        Specify an extension to ignore.  These will be processed in order.
        """
        if not isinstance(self['root'], static.File):
            raise usage.UsageError("You can only use --ignore_ext "
                                   "after --path.")
        self['root'].ignoreExt(ext)

    def postOptions(self):
        """
        Set up conditional defaults and check for dependencies.

        If SSL is not available but an HTTPS server was configured, raise a
        L{UsageError} indicating that this is not possible.

        If no server port was supplied, select a default appropriate for the
        other options supplied.
        """
        if self['https']:
            # Import only to verify that SSL support is installed.
            try:
                from twisted.internet.ssl import DefaultOpenSSLContextFactory
            except ImportError:
                raise usage.UsageError("SSL support not installed")
        if self['port'] is None:
            if self['personal']:
                # Personal servers listen on a per-user UNIX socket.
                path = os.path.expanduser(
                    os.path.join('~', distrib.UserDirectory.userSocketName))
                self['port'] = 'unix:' + path
            else:
                self['port'] = 'tcp:8080'
def makePersonalServerFactory(site):
    """
    Build a Perspective Broker factory that answers I{distrib} requests
    for the given site.

    @type site: L{twisted.web.server.Site}
    @rtype: L{twisted.internet.protocol.Factory}
    """
    publisher = distrib.ResourcePublisher(site)
    return pb.PBServerFactory(publisher)
def makeService(config):
    """
    Build the web service described by *config* (an L{Options} instance):
    a L{service.MultiService} containing either a personal (distrib)
    server or a regular site listener, plus an HTTPS listener when the
    ``https`` option is set.
    """
    s = service.MultiService()
    if config['root']:
        root = config['root']
        if config['indexes']:
            config['root'].indexNames = config['indexes']
    else:
        # This really ought to be web.Admin or something
        root = demo.Test()

    if isinstance(root, static.File):
        # Let .rpy scripts under the root reach the service collection.
        root.registry.setComponent(interfaces.IServiceCollection, s)

    if config['logfile']:
        site = server.Site(root, logPath=config['logfile'])
    else:
        site = server.Site(root)

    site.displayTracebacks = not config["notracebacks"]

    if config['personal']:
        # Personal mode: publish the site over PB instead of plain HTTP.
        personal = strports.service(
            config['port'], makePersonalServerFactory(site))
        personal.setServiceParent(s)
    else:
        if config['https']:
            # postOptions already verified that SSL support is importable.
            from twisted.internet.ssl import DefaultOpenSSLContextFactory
            i = internet.SSLServer(int(config['https']), site,
                          DefaultOpenSSLContextFactory(config['privkey'],
                                                       config['certificate']))
            i.setServiceParent(s)
        strports.service(config['port'], site).setServiceParent(s)
    return s
| lgpl-3.0 |
CyanogenMod/android_external_chromium_org | tools/telemetry/third_party/pyserial/serial/urlhandler/protocol_socket.py | 141 | 10154 | #! python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# This module implements a simple socket based client.
# It does not support changing any port parameters and will silently ignore any
# requests to do so.
#
# The purpose of this module is that applications using pySerial can connect to
# TCP/IP to serial port converters that do not support RFC 2217.
#
# (C) 2001-2011 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# URL format: socket://<host>:<port>[/option[/option...]]
# options:
# - "debug" print diagnostic messages
from serial.serialutil import *
import time
import socket
import logging
# map log level names to constants. used in fromURL()
LOGGER_LEVELS = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    }

# Socket timeout (seconds): blocking recv/send calls wake up this often so
# read()/write() can honour their own timeouts and liveness checks.
POLL_TIMEOUT = 2
class SocketSerial(SerialBase):
    """Serial port implementation for plain sockets.

    Connects to a TCP/IP-to-serial converter.  Plain sockets cannot carry
    port configuration, so parameter changes are silently ignored and the
    modem-control lines are emulated with dummy values.
    """

    # Accepted values for SerialBase's baudrate setting (never transmitted).
    BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
                 9600, 19200, 38400, 57600, 115200)

    def open(self):
        """Open port with current settings. This may throw a SerialException
        if the port cannot be opened."""
        self.logger = None
        if self._port is None:
            raise SerialException("Port must be configured before it can be used.")
        if self._isOpen:
            raise SerialException("Port is already open.")
        try:
            # XXX in future replace with create_connection (py >=2.6)
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.connect(self.fromURL(self.portstr))
        except Exception, msg:
            self._socket = None
            raise SerialException("Could not open port %s: %s" % (self.portstr, msg))

        # A finite timeout makes blocking socket calls return periodically
        # so read()/write() can enforce their own deadlines.
        self._socket.settimeout(POLL_TIMEOUT)  # used for write timeout support :/

        # not that there anything to configure...
        self._reconfigurePort()
        # all things set up get, now a clean start
        self._isOpen = True
        if not self._rtscts:
            self.setRTS(True)
            self.setDTR(True)
        self.flushInput()
        self.flushOutput()

    def _reconfigurePort(self):
        """Set communication parameters on opened port. for the socket://
        protocol all settings are ignored!"""
        if self._socket is None:
            raise SerialException("Can only operate on open ports")
        if self.logger:
            self.logger.info('ignored port configuration change')

    def close(self):
        """Close port"""
        if self._isOpen:
            if self._socket:
                try:
                    self._socket.shutdown(socket.SHUT_RDWR)
                    self._socket.close()
                except:
                    # ignore errors.
                    pass
                self._socket = None
            self._isOpen = False
            # in case of quick reconnects, give the server some time
            time.sleep(0.3)

    def makeDeviceName(self, port):
        # Socket ports are addressed by URL only; numeric names make no sense.
        raise SerialException("there is no sensible way to turn numbers into URLs")

    def fromURL(self, url):
        """extract host and port from an URL string"""
        if url.lower().startswith("socket://"): url = url[9:]
        try:
            # is there a "path" (our options)?
            if '/' in url:
                # cut away options
                url, options = url.split('/', 1)
                # process options now, directly altering self
                for option in options.split('/'):
                    if '=' in option:
                        option, value = option.split('=', 1)
                    else:
                        value = None
                    if option == 'logging':
                        logging.basicConfig()   # XXX is that good to call it here?
                        self.logger = logging.getLogger('pySerial.socket')
                        self.logger.setLevel(LOGGER_LEVELS[value])
                        self.logger.debug('enabled logging')
                    else:
                        raise ValueError('unknown option: %r' % (option,))
            # get host and port
            host, port = url.split(':', 1) # may raise ValueError because of unpacking
            port = int(port)               # and this if it's not a number
            if not 0 <= port < 65536: raise ValueError("port not in range 0...65535")
        except ValueError, e:
            raise SerialException('expected a string in the form "[rfc2217://]<host>:<port>[/option[/option...]]": %s' % e)
        return (host, port)

    #  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -  -

    def inWaiting(self):
        """Return the number of characters currently in the input buffer."""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            # set this one to debug as the function could be called often...
            self.logger.debug('WARNING: inWaiting returns dummy value')
        return 0 # hmmm, see comment in read()

    def read(self, size=1):
        """Read size bytes from the serial port. If a timeout is set it may
        return less characters as requested. With no timeout it will block
        until the requested number of bytes is read."""
        if not self._isOpen: raise portNotOpenError
        data = bytearray()
        if self._timeout is not None:
            timeout = time.time() + self._timeout
        else:
            timeout = None
        while len(data) < size and (timeout is None or time.time() < timeout):
            try:
                # an implementation with internal buffer would be better
                # performing...
                t = time.time()
                block = self._socket.recv(size - len(data))
                duration = time.time() - t
                if block:
                    data.extend(block)
                else:
                    # no data -> EOF (connection probably closed)
                    break
            except socket.timeout:
                # just need to get out of recv from time to time to check if
                # still alive
                continue
            except socket.error, e:
                # connection fails -> terminate loop
                raise SerialException('connection failed (%s)' % e)
        return bytes(data)

    def write(self, data):
        """Output the given string over the serial port. Can block if the
        connection is blocked. May raise SerialException if the connection is
        closed."""
        if not self._isOpen: raise portNotOpenError
        try:
            self._socket.sendall(to_bytes(data))
        except socket.error, e:
            # XXX what exception if socket connection fails
            raise SerialException("socket connection failed: %s" % e)
        return len(data)

    def flushInput(self):
        """Clear input buffer, discarding all that is in the buffer."""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('ignored flushInput')

    def flushOutput(self):
        """Clear output buffer, aborting the current output and
        discarding all that is in the buffer."""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('ignored flushOutput')

    def sendBreak(self, duration=0.25):
        """Send break condition. Timed, returns to idle state after given
        duration."""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('ignored sendBreak(%r)' % (duration,))

    def setBreak(self, level=True):
        """Set break: Controls TXD. When active, to transmitting is
        possible."""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('ignored setBreak(%r)' % (level,))

    def setRTS(self, level=True):
        """Set terminal status line: Request To Send"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('ignored setRTS(%r)' % (level,))

    def setDTR(self, level=True):
        """Set terminal status line: Data Terminal Ready"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('ignored setDTR(%r)' % (level,))

    def getCTS(self):
        """Read terminal status line: Clear To Send"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('returning dummy for getCTS()')
        return True

    def getDSR(self):
        """Read terminal status line: Data Set Ready"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('returning dummy for getDSR()')
        return True

    def getRI(self):
        """Read terminal status line: Ring Indicator"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('returning dummy for getRI()')
        return False

    def getCD(self):
        """Read terminal status line: Carrier Detect"""
        if not self._isOpen: raise portNotOpenError
        if self.logger:
            self.logger.info('returning dummy for getCD()')
        return True

    # - - - platform specific - - -
    # None so far
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, that provide the new I/O
# library, derive from io.RawIOBase
try:
    import io
except ImportError:
    # classic version with our own file-like emulation
    class Serial(SocketSerial, FileLike):
        pass
else:
    # io library present
    class Serial(SocketSerial, io.RawIOBase):
        pass
# simple client test: connect to a converter on localhost:7000 and do a
# trivial write/read round trip.
if __name__ == '__main__':
    import sys
    s = Serial('socket://localhost:7000')
    sys.stdout.write('%s\n' % s)

    sys.stdout.write("write...\n")
    s.write("hello\n")
    s.flush()
    sys.stdout.write("read: %s\n" % s.read(5))

    s.close()
| bsd-3-clause |
bearbin/box-server | requests/packages/charade/langhungarianmodel.py | 184 | 12761 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
# Prober model for Hungarian text in the ISO-8859-2 (Latin-2) encoding.
# 'charToOrderMap' and 'precedenceMatrix' reference tables defined earlier
# in this module; consumed by the single-byte charset prober.
Latin2HungarianModel = {
  'charToOrderMap': Latin2_HungarianCharToOrderMap,
  'precedenceMatrix': HungarianLangModel,
  'mTypicalPositiveRatio': 0.947368,
  'keepEnglishLetter': True,
  'charsetName': "ISO-8859-2"
}
# Prober model for Hungarian text in the windows-1250 encoding; shares the
# language (precedence) matrix with the Latin-2 model above, differing only
# in the byte-to-order map and charset name.
Win1250HungarianModel = {
  'charToOrderMap': win1250HungarianCharToOrderMap,
  'precedenceMatrix': HungarianLangModel,
  'mTypicalPositiveRatio': 0.947368,
  'keepEnglishLetter': True,
  'charsetName': "windows-1250"
}
# flake8: noqa
| mit |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/multiprocessing/util.py | 59 | 7839 | #
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import itertools
import weakref
import atexit
import threading # we want threading to install it's
# cleanup function before multiprocessing does
from multiprocessing.process import current_process, active_children
# Public API of this module (``from multiprocessing.util import *``).
__all__ = [
    'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
    'log_to_stderr', 'get_temp_dir', 'register_after_fork',
    'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
    'SUBDEBUG', 'SUBWARNING',
    ]
#
# Logging
#
# Custom log levels: SUBDEBUG (5) is finer-grained than DEBUG, and
# SUBWARNING (25) sits between INFO (20) and WARNING (30).
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
# Package logger; created lazily by get_logger(), None until then.
_logger = None
# Becomes True once log_to_stderr() has installed a stderr handler.
_log_to_stderr = False
def sub_debug(msg, *args):
    """Log *msg* at SUBDEBUG level; a no-op until logging is enabled."""
    if not _logger:
        return
    _logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
    """Log *msg* at DEBUG level; a no-op until logging is enabled."""
    if not _logger:
        return
    _logger.log(DEBUG, msg, *args)
def info(msg, *args):
    """Log *msg* at INFO level; a no-op until logging is enabled."""
    if not _logger:
        return
    _logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
    """Log *msg* at SUBWARNING level; a no-op until logging is enabled."""
    if not _logger:
        return
    _logger.log(SUBWARNING, msg, *args)
def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging, atexit
    # Hold the logging module's global lock so two threads cannot both
    # perform the lazy first-time configuration below.
    logging._acquireLock()
    try:
        if not _logger:
            _logger = logging.getLogger(LOGGER_NAME)
            # Do not forward records to the root logger's handlers.
            _logger.propagate = 0
            logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
            logging.addLevelName(SUBWARNING, 'SUBWARNING')
            # XXX multiprocessing should cleanup before logging
            # Re-register _exit_function so it lands at the end of the
            # atexit handler list (atexit runs handlers LIFO), making it
            # run before the logging module's own shutdown handler.
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                # Older Pythons lack atexit.unregister(); manipulate the
                # private handler list directly to achieve the same move.
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))
    finally:
        logging._releaseLock()
    return _logger
def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    global _log_to_stderr
    import logging
    # Make sure the package logger exists, then attach a stderr handler
    # that formats records with the standard multiprocessing format.
    logger = get_logger()
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(DEFAULT_LOGGING_FORMAT))
    logger.addHandler(stream_handler)
    if level:
        logger.setLevel(level)
    _log_to_stderr = True
    return _logger
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
    # get name of a temp directory which will be automatically cleaned up
    # The directory is created lazily, once per process, and cached on the
    # current process object.
    if current_process()._tempdir is None:
        import shutil, tempfile
        tempdir = tempfile.mkdtemp(prefix='pymp-')
        info('created temp directory %s', tempdir)
        # exitpriority=-100 makes the rmtree run very late at shutdown,
        # after finalizers that may still be using files in the directory.
        Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
        current_process()._tempdir = tempdir
    return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
# Registry of objects to fix up in a freshly forked child process.
# Keys are (registration_index, id(obj), callback) tuples so callbacks run
# in registration order; values are held weakly so registration does not
# keep the objects alive.
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
    '''
    Run the callbacks registered with register_after_fork(), in
    registration order, inside a newly started child process.

    A callback that raises is logged and skipped so one broken callback
    cannot prevent the remaining ones from running.
    '''
    items = list(_afterfork_registry.items())
    items.sort()
    for (index, ident, func), obj in items:
        try:
            func(obj)
        # 'except E as e' replaces the Python-2-only 'except E, e' form;
        # it is valid on Python 2.6+ as well as Python 3.
        except Exception as e:
            info('after forker raised exception %s', e)
def register_after_fork(obj, func):
    '''
    Arrange for func(obj) to be called in a child process after a fork.

    The registration is keyed by a monotonically increasing counter so
    callbacks run in registration order; obj is referenced weakly.
    '''
    # next(counter) replaces the Python-2-only counter.next() idiom;
    # the builtin works on Python 2.6+ and Python 3.
    _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
#
# Finalization using weakrefs
#
# Maps (exitpriority, creation_index) keys to live Finalize instances.
_finalizer_registry = {}
# Monotonic counter giving each finalizer a creation order for tie-breaking
# among finalizers that share the same exit priority.
_finalizer_counter = itertools.count()
class Finalize(object):
    '''
    Class which supports object finalization using weakrefs

    The callback fires at most once: either when *obj* is garbage
    collected, when the instance is called explicitly, or at interpreter
    exit via _run_finalizers() (ordered by *exitpriority*).
    '''
    def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
        assert exitpriority is None or type(exitpriority) is int
        if obj is not None:
            # Registering self as the weakref callback makes finalization
            # run automatically when obj is garbage collected.
            self._weakref = weakref.ref(obj, self)
        else:
            # Without a referent the finalizer can only fire at exit, so
            # an exit priority is mandatory.
            assert exitpriority is not None
        self._callback = callback
        self._args = args
        self._kwargs = kwargs or {}
        # next(counter) replaces the Python-2-only counter.next() idiom.
        self._key = (exitpriority, next(_finalizer_counter))
        _finalizer_registry[self._key] = self
    def __call__(self, wr=None):
        '''
        Run the callback unless it has already been called or cancelled
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            sub_debug('finalizer no longer registered')
        else:
            sub_debug('finalizer calling %s with args %s and kwargs %s',
                      self._callback, self._args, self._kwargs)
            res = self._callback(*self._args, **self._kwargs)
            # Drop every reference so the callback and its arguments can
            # be collected and the finalizer cannot fire a second time.
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None
            return res
    def cancel(self):
        '''
        Cancel finalization of the object
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            # Already run or already cancelled.
            pass
        else:
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None
    def still_active(self):
        '''
        Return whether this finalizer is still waiting to invoke callback
        '''
        return self._key in _finalizer_registry
    def __repr__(self):
        '''
        Return a debugging representation of the finalizer's state.
        '''
        try:
            obj = self._weakref()
        except (AttributeError, TypeError):
            # _weakref is missing (keyword-less construction) or already
            # cleared to None after the callback ran.
            obj = None
        if obj is None:
            return '<Finalize object, dead>'
        x = '<Finalize object, callback=%s' % \
            getattr(self._callback, '__name__', self._callback)
        if self._args:
            x += ', args=' + str(self._args)
        if self._kwargs:
            x += ', kwargs=' + str(self._kwargs)
        if self._key[0] is not None:
            # Bug fix: the label was previously misspelt 'exitprority'.
            x += ', exitpriority=' + str(self._key[0])
        return x + '>'
def _run_finalizers(minpriority=None):
    '''
    Run all finalizers whose exit priority is not None and at least minpriority

    Finalizers with highest priority are called first; finalizers with
    the same priority will be called in reverse order of creation.
    '''
    def _eligible(entry):
        # entry is a ((exitpriority, counter), finalizer) pair; only
        # finalizers with a real exit priority may run here.
        priority = entry[0][0]
        if priority is None:
            return False
        return minpriority is None or priority >= minpriority
    # Highest (priority, creation-index) first; creation index breaks ties
    # so equal-priority finalizers run newest-first.
    pending = sorted(
        (entry for entry in _finalizer_registry.items() if _eligible(entry)),
        reverse=True)
    for key, finalizer in pending:
        sub_debug('calling %s', finalizer)
        try:
            finalizer()
        except Exception:
            import traceback
            traceback.print_exc()
    if minpriority is None:
        # A full run leaves nothing worth keeping registered.
        _finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
    '''
    Returns true if the process is shutting down
    '''
    # During interpreter teardown module globals may already have been
    # reset to None; treat that state as "exiting" too.
    if _exiting is None:
        return True
    return _exiting
# Shutdown flag consulted by is_exiting(); see _exit_function() below.
_exiting = False
def _exit_function():
    # Shut the process down in order: run high-priority finalizers,
    # terminate daemonic children, join the remaining children, then run
    # whatever finalizers are left.
    # NOTE(review): _exiting is declared global here but never assigned in
    # this function — TODO confirm whether it should be set to True.
    global _exiting
    info('process shutting down')
    debug('running all "atexit" finalizers with priority >= 0')
    _run_finalizers(0)
    for p in active_children():
        if p._daemonic:
            info('calling terminate() for daemon %s', p.name)
            p._popen.terminate()
    for p in active_children():
        info('calling join() for process %s', p.name)
        p.join()
    debug('running the remaining "atexit" finalizers')
    _run_finalizers()
# Run the cleanup sequence automatically at interpreter shutdown.
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
    # A lock wrapper that is replaced by a fresh, unlocked threading.Lock
    # in a forked child, avoiding deadlock on a lock the parent held at
    # fork time.
    def __init__(self):
        self._lock = threading.Lock()
        self.acquire = self._lock.acquire
        self.release = self._lock.release
        # Re-running __init__ in the child gives it a brand-new lock.
        register_after_fork(self, ForkAwareThreadLock.__init__)
class ForkAwareLocal(threading.local):
    # Thread-local storage whose contents are wiped in a forked child, so
    # the child never sees per-thread state inherited from the parent.
    def __init__(self):
        register_after_fork(self, lambda obj : obj.__dict__.clear())
    def __reduce__(self):
        # Pickle as a fresh, empty instance; the contents are not portable
        # across processes.
        return type(self), ()
| apache-2.0 |
mheap/ansible | lib/ansible/modules/cloud/ovirt/ovirt_cluster.py | 8 | 31829 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module metadata: community-supported module in preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_cluster
short_description: Module to manage clusters in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage clusters in oVirt/RHV"
options:
name:
description:
- "Name of the cluster to manage."
required: true
state:
description:
- "Should the cluster be present or absent"
choices: ['present', 'absent']
default: present
data_center:
description:
- "Datacenter name where cluster reside."
description:
description:
- "Description of the cluster."
comment:
description:
- "Comment of the cluster."
network:
description:
- "Management network of cluster to access cluster hosts."
ballooning:
description:
- "If I(True) enable memory balloon optimization. Memory balloon is used to
re-distribute / reclaim the host memory based on VM needs
in a dynamic way."
virt:
description:
- "If I(True), hosts in this cluster will be used to run virtual machines."
gluster:
description:
- "If I(True), hosts in this cluster will be used as Gluster Storage
server nodes, and not for running virtual machines."
- "By default the cluster is created for virtual machine hosts."
threads_as_cores:
description:
- "If I(True) the exposed host threads would be treated as cores
which can be utilized by virtual machines."
ksm:
description:
- "I I(True) MoM enables to run Kernel Same-page Merging I(KSM) when
necessary and when it can yield a memory saving benefit that
outweighs its CPU cost."
ksm_numa:
description:
- "If I(True) enables KSM C(ksm) for best berformance inside NUMA nodes."
ha_reservation:
description:
- "If I(True) enable the oVirt/RHV to monitor cluster capacity for highly
available virtual machines."
trusted_service:
description:
- "If (True) enable integration with an OpenAttestation server."
vm_reason:
description:
- "If I(True) enable an optional reason field when a virtual machine
is shut down from the Manager, allowing the administrator to
provide an explanation for the maintenance."
host_reason:
description:
- "If I(True) enable an optional reason field when a host is placed
into maintenance mode from the Manager, allowing the administrator
to provide an explanation for the maintenance."
memory_policy:
description:
- "I(disabled) - Disables memory page sharing."
- "I(server) - Sets the memory page sharing threshold to 150% of the system memory on each host."
- "I(desktop) - Sets the memory page sharing threshold to 200% of the system memory on each host."
choices: ['disabled', 'server', 'desktop']
rng_sources:
description:
- "List that specify the random number generator devices that all hosts in the cluster will use."
- "Supported generators are: I(hwrng) and I(random)."
spice_proxy:
description:
- "The proxy by which the SPICE client will connect to virtual machines."
- "The address must be in the following format: I(protocol://[host]:[port])"
fence_enabled:
description:
- "If I(True) enables fencing on the cluster."
- "Fencing is enabled by default."
fence_skip_if_sd_active:
description:
- "If I(True) any hosts in the cluster that are Non Responsive
and still connected to storage will not be fenced."
fence_skip_if_connectivity_broken:
description:
- "If I(True) fencing will be temporarily disabled if the percentage
of hosts in the cluster that are experiencing connectivity issues
is greater than or equal to the defined threshold."
- "The threshold can be specified by C(fence_connectivity_threshold)."
fence_connectivity_threshold:
description:
- "The threshold used by C(fence_skip_if_connectivity_broken)."
resilience_policy:
description:
- "The resilience policy defines how the virtual machines are prioritized in the migration."
- "Following values are supported:"
- "C(do_not_migrate) - Prevents virtual machines from being migrated. "
- "C(migrate) - Migrates all virtual machines in order of their defined priority."
- "C(migrate_highly_available) - Migrates only highly available virtual machines to prevent overloading other hosts."
choices: ['do_not_migrate', 'migrate', 'migrate_highly_available']
migration_bandwidth:
description:
- "The bandwidth settings define the maximum bandwidth of both outgoing and incoming migrations per host."
- "Following bandwidth options are supported:"
- "C(auto) - Bandwidth is copied from the I(rate limit) [Mbps] setting in the data center host network QoS."
- "C(hypervisor_default) - Bandwidth is controlled by local VDSM setting on sending host."
- "C(custom) - Defined by user (in Mbps)."
choices: ['auto', 'hypervisor_default', 'custom']
migration_bandwidth_limit:
description:
- "Set the I(custom) migration bandwidth limit."
- "This parameter is used only when C(migration_bandwidth) is I(custom)."
migration_auto_converge:
description:
- "If I(True) auto-convergence is used during live migration of virtual machines."
- "Used only when C(migration_policy) is set to I(legacy)."
- "Following options are supported:"
- "C(true) - Override the global setting to I(true)."
- "C(false) - Override the global setting to I(false)."
- "C(inherit) - Use value which is set globally."
choices: ['true', 'false', 'inherit']
migration_compressed:
description:
- "If I(True) compression is used during live migration of the virtual machine."
- "Used only when C(migration_policy) is set to I(legacy)."
- "Following options are supported:"
- "C(true) - Override the global setting to I(true)."
- "C(false) - Override the global setting to I(false)."
- "C(inherit) - Use value which is set globally."
choices: ['true', 'false', 'inherit']
migration_policy:
description:
- "A migration policy defines the conditions for live migrating
virtual machines in the event of host failure."
- "Following policies are supported:"
- "C(legacy) - Legacy behavior of 3.6 version."
- "C(minimal_downtime) - Virtual machines should not experience any significant downtime."
- "C(suspend_workload) - Virtual machines may experience a more significant downtime."
- "C(post_copy) - Virtual machines should not experience any significant downtime.
If the VM migration is not converging for a long time, the migration will be switched to post-copy.
Added in version I(2.4)."
choices: ['legacy', 'minimal_downtime', 'suspend_workload', 'post_copy']
serial_policy:
description:
- "Specify a serial number policy for the virtual machines in the cluster."
- "Following options are supported:"
- "C(vm) - Sets the virtual machine's UUID as its serial number."
- "C(host) - Sets the host's UUID as the virtual machine's serial number."
- "C(custom) - Allows you to specify a custom serial number in C(serial_policy_value)."
serial_policy_value:
description:
- "Allows you to specify a custom serial number."
- "This parameter is used only when C(serial_policy) is I(custom)."
scheduling_policy:
description:
- "Name of the scheduling policy to be used for cluster."
scheduling_policy_properties:
description:
- "Custom scheduling policy properties of the cluster."
- "These optional properties override the properties of the
scheduling policy specified by the C(scheduling_policy) parameter."
version_added: "2.6"
cpu_arch:
description:
- "CPU architecture of cluster."
choices: ['x86_64', 'ppc64', 'undefined']
cpu_type:
description:
- "CPU codename. For example I(Intel SandyBridge Family)."
switch_type:
description:
- "Type of switch to be used by all networks in given cluster.
Either I(legacy) which is using linux brigde or I(ovs) using
Open vSwitch."
choices: ['legacy', 'ovs']
compatibility_version:
description:
- "The compatibility version of the cluster. All hosts in this
cluster must support at least this compatibility version."
mac_pool:
description:
- "MAC pool to be used by this cluster."
- "C(Note:)"
- "This is supported since oVirt version 4.1."
version_added: 2.4
external_network_providers:
description:
- "List of references to the external network providers available
in the cluster. If the automatic deployment of the external
network provider is supported, the networks of the referenced
network provider are available on every host in the cluster."
- "External network provider is described by following dictionary:"
- "C(name) - Name of the external network provider. Either C(name)
or C(id) is required."
- "C(id) - ID of the external network provider. Either C(name) or
C(id) is required."
- "This is supported since oVirt version 4.2."
version_added: 2.5
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create cluster
- ovirt_cluster:
data_center: mydatacenter
name: mycluster
cpu_type: Intel SandyBridge Family
description: mycluster
compatibility_version: 4.0
# Create virt service cluster:
- ovirt_cluster:
data_center: mydatacenter
name: mycluster
cpu_type: Intel Nehalem Family
description: mycluster
switch_type: legacy
compatibility_version: 4.0
ballooning: true
gluster: false
threads_as_cores: true
ha_reservation: true
trusted_service: false
host_reason: false
vm_reason: true
ksm_numa: true
memory_policy: server
rng_sources:
- hwrng
- random
# Create cluster with default network provider
- ovirt_cluster:
name: mycluster
data_center: Default
cpu_type: Intel SandyBridge Family
external_network_providers:
- name: ovirt-provider-ovn
# Remove cluster
- ovirt_cluster:
state: absent
name: mycluster
'''
RETURN = '''
id:
description: ID of the cluster which is managed
returned: On success if cluster is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
cluster:
description: "Dictionary of all the cluster attributes. Cluster attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/cluster."
type: dict
returned: On success if cluster is found.
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
get_id_by_name,
)
class ClustersModule(BaseModule):
    """Builds and compares oVirt/RHV cluster entities from module params."""
    def __get_major(self, full_version):
        # Extract the major version number from an 'X.Y' string or an
        # otypes.Version object; None passes through unchanged.
        if full_version is None:
            return None
        if isinstance(full_version, otypes.Version):
            return full_version.major
        return int(full_version.split('.')[0])
    def __get_minor(self, full_version):
        # Extract the minor version number from an 'X.Y' string or an
        # otypes.Version object; None passes through unchanged.
        if full_version is None:
            return None
        if isinstance(full_version, otypes.Version):
            return full_version.minor
        return int(full_version.split('.')[1])
    def param(self, name, default=None):
        """Return the Ansible module parameter *name*, or *default*."""
        return self._module.params.get(name, default)
    def _get_memory_policy(self):
        # Map the symbolic memory policy to its overcommit percentage;
        # returns None for an unset/unknown policy.
        memory_policy = self.param('memory_policy')
        if memory_policy == 'desktop':
            return 200
        elif memory_policy == 'server':
            return 150
        elif memory_policy == 'disabled':
            return 100
    def _get_policy_id(self):
        # These are hardcoded IDs, once there is API, please fix this.
        # legacy - 00000000-0000-0000-0000-000000000000
        # minimal downtime - 80554327-0569-496b-bdeb-fcbbf52b827b
        # suspend workload if needed - 80554327-0569-496b-bdeb-fcbbf52b827c
        # post copy - a7aeedb2-8d66-4e51-bb22-32595027ce71
        migration_policy = self.param('migration_policy')
        if migration_policy == 'legacy':
            return '00000000-0000-0000-0000-000000000000'
        elif migration_policy == 'minimal_downtime':
            return '80554327-0569-496b-bdeb-fcbbf52b827b'
        elif migration_policy == 'suspend_workload':
            return '80554327-0569-496b-bdeb-fcbbf52b827c'
        elif migration_policy == 'post_copy':
            return 'a7aeedb2-8d66-4e51-bb22-32595027ce71'
    def _get_sched_policy(self):
        # Resolve the scheduling policy name to its SDK object; raises if
        # the name was given but no such policy exists.
        sched_policy = None
        if self.param('scheduling_policy'):
            sched_policies_service = self._connection.system_service().scheduling_policies_service()
            sched_policy = search_by_name(sched_policies_service, self.param('scheduling_policy'))
            if not sched_policy:
                raise Exception("Scheduling policy '%s' was not found" % self.param('scheduling_policy'))
        return sched_policy
    def _get_mac_pool(self):
        # Look up the requested MAC pool by name; None when unset or not
        # found.
        mac_pool = None
        if self._module.params.get('mac_pool'):
            mac_pool = search_by_name(
                self._connection.system_service().mac_pools_service(),
                self._module.params.get('mac_pool'),
            )
        return mac_pool
    def _get_external_network_providers(self):
        # Normalize the parameter to a list (empty when unset).
        return self.param('external_network_providers') or []
    def _get_external_network_provider_id(self, external_provider):
        # Prefer the explicit 'id'; otherwise resolve the provider's
        # 'name' through the engine.
        return external_provider.get('id') or get_id_by_name(
            self._connection.system_service().openstack_network_providers_service(),
            external_provider.get('name')
        )
    def _get_external_network_providers_entity(self):
        # Build SDK ExternalProvider references; returns None when the
        # parameter was not supplied at all (meaning "leave unchanged").
        if self.param('external_network_providers') is not None:
            return [otypes.ExternalProvider(id=self._get_external_network_provider_id(external_provider))
                    for external_provider in self.param('external_network_providers')]
    def build_entity(self):
        """Construct the otypes.Cluster to create or update.

        Each keyword below is populated only when its parameters were
        supplied, so omitted options are left untouched by the engine.
        """
        sched_policy = self._get_sched_policy()
        return otypes.Cluster(
            name=self.param('name'),
            comment=self.param('comment'),
            description=self.param('description'),
            ballooning_enabled=self.param('ballooning'),
            gluster_service=self.param('gluster'),
            virt_service=self.param('virt'),
            threads_as_cores=self.param('threads_as_cores'),
            ha_reservation=self.param('ha_reservation'),
            trusted_service=self.param('trusted_service'),
            optional_reason=self.param('vm_reason'),
            maintenance_reason_required=self.param('host_reason'),
            scheduling_policy=otypes.SchedulingPolicy(
                id=sched_policy.id,
            ) if sched_policy else None,
            serial_number=otypes.SerialNumber(
                policy=otypes.SerialNumberPolicy(self.param('serial_policy')),
                value=self.param('serial_policy_value'),
            ) if (
                self.param('serial_policy') is not None or
                self.param('serial_policy_value') is not None
            ) else None,
            migration=otypes.MigrationOptions(
                auto_converge=otypes.InheritableBoolean(
                    self.param('migration_auto_converge'),
                ) if self.param('migration_auto_converge') else None,
                bandwidth=otypes.MigrationBandwidth(
                    assignment_method=otypes.MigrationBandwidthAssignmentMethod(
                        self.param('migration_bandwidth'),
                    ) if self.param('migration_bandwidth') else None,
                    custom_value=self.param('migration_bandwidth_limit'),
                ) if (
                    self.param('migration_bandwidth') or
                    self.param('migration_bandwidth_limit')
                ) else None,
                compressed=otypes.InheritableBoolean(
                    self.param('migration_compressed'),
                ) if self.param('migration_compressed') else None,
                policy=otypes.MigrationPolicy(
                    id=self._get_policy_id()
                ) if self.param('migration_policy') else None,
            ) if (
                self.param('migration_bandwidth') is not None or
                self.param('migration_bandwidth_limit') is not None or
                self.param('migration_auto_converge') is not None or
                self.param('migration_compressed') is not None or
                self.param('migration_policy') is not None
            ) else None,
            error_handling=otypes.ErrorHandling(
                on_error=otypes.MigrateOnError(
                    self.param('resilience_policy')
                ),
            ) if self.param('resilience_policy') else None,
            fencing_policy=otypes.FencingPolicy(
                enabled=self.param('fence_enabled'),
                skip_if_connectivity_broken=otypes.SkipIfConnectivityBroken(
                    enabled=self.param('fence_skip_if_connectivity_broken'),
                    threshold=self.param('fence_connectivity_threshold'),
                ) if (
                    self.param('fence_skip_if_connectivity_broken') is not None or
                    self.param('fence_connectivity_threshold') is not None
                ) else None,
                skip_if_sd_active=otypes.SkipIfSdActive(
                    enabled=self.param('fence_skip_if_sd_active'),
                ) if self.param('fence_skip_if_sd_active') is not None else None,
            ) if (
                self.param('fence_enabled') is not None or
                self.param('fence_skip_if_sd_active') is not None or
                self.param('fence_skip_if_connectivity_broken') is not None or
                self.param('fence_connectivity_threshold') is not None
            ) else None,
            display=otypes.Display(
                proxy=self.param('spice_proxy'),
            ) if self.param('spice_proxy') else None,
            required_rng_sources=[
                otypes.RngSource(rng) for rng in self.param('rng_sources')
            ] if self.param('rng_sources') else None,
            memory_policy=otypes.MemoryPolicy(
                over_commit=otypes.MemoryOverCommit(
                    percent=self._get_memory_policy(),
                ),
            ) if self.param('memory_policy') else None,
            ksm=otypes.Ksm(
                enabled=self.param('ksm'),
                merge_across_nodes=not self.param('ksm_numa'),
            ) if (
                self.param('ksm_numa') is not None or
                self.param('ksm') is not None
            ) else None,
            data_center=otypes.DataCenter(
                name=self.param('data_center'),
            ) if self.param('data_center') else None,
            management_network=otypes.Network(
                name=self.param('network'),
            ) if self.param('network') else None,
            cpu=otypes.Cpu(
                architecture=otypes.Architecture(
                    self.param('cpu_arch')
                ) if self.param('cpu_arch') else None,
                type=self.param('cpu_type'),
            ) if (
                self.param('cpu_arch') or self.param('cpu_type')
            ) else None,
            version=otypes.Version(
                major=self.__get_major(self.param('compatibility_version')),
                minor=self.__get_minor(self.param('compatibility_version')),
            ) if self.param('compatibility_version') else None,
            switch_type=otypes.SwitchType(
                self.param('switch_type')
            ) if self.param('switch_type') else None,
            mac_pool=otypes.MacPool(
                id=get_id_by_name(self._connection.system_service().mac_pools_service(), self.param('mac_pool'))
            ) if self.param('mac_pool') else None,
            external_network_providers=self._get_external_network_providers_entity(),
            custom_scheduling_policy_properties=[
                otypes.Property(
                    name=sp.get('name'),
                    value=str(sp.get('value')),
                ) for sp in self.param('scheduling_policy_properties') if sp
            ] if self.param('scheduling_policy_properties') is not None else None,
        )
    def _matches_entity(self, item, entity):
        # True when the parameter dict's id/name agree with the SDK
        # entity's id/name (equal() treats a None parameter as a match).
        return equal(item.get('id'), entity.id) and equal(item.get('name'), entity.name)
    def _update_check_external_network_providers(self, entity):
        # Compare the requested external network providers against the
        # ones attached to the existing cluster; True means "no update
        # needed".
        if self.param('external_network_providers') is None:
            return True
        if entity.external_network_providers is None:
            return not self.param('external_network_providers')
        entity_providers = self._connection.follow_link(entity.external_network_providers)
        entity_provider_ids = [provider.id for provider in entity_providers]
        entity_provider_names = [provider.name for provider in entity_providers]
        # Every requested provider must already be attached...
        for provider in self._get_external_network_providers():
            if provider.get('id'):
                if provider.get('id') not in entity_provider_ids:
                    return False
            elif provider.get('name') and provider.get('name') not in entity_provider_names:
                return False
        # ...and every attached provider must have been requested.
        for entity_provider in entity_providers:
            if not any([self._matches_entity(provider, entity_provider)
                        for provider in self._get_external_network_providers()]):
                return False
        return True
    def update_check(self, entity):
        """Return True when *entity* already matches every supplied
        parameter (equal() treats unset parameters as matching)."""
        sched_policy = self._get_sched_policy()
        migration_policy = getattr(entity.migration, 'policy', None)
        cluster_cpu = getattr(entity, 'cpu', dict())
        def check_custom_scheduling_policy_properties():
            # Every requested (name, value) property must already be set
            # on the cluster; extra existing properties are tolerated.
            if self.param('scheduling_policy_properties'):
                current = []
                if entity.custom_scheduling_policy_properties:
                    current = [(sp.name, str(sp.value)) for sp in entity.custom_scheduling_policy_properties]
                passed = [(sp.get('name'), str(sp.get('value'))) for sp in self.param('scheduling_policy_properties') if sp]
                for p in passed:
                    if p not in current:
                        return False
            return True
        return (
            check_custom_scheduling_policy_properties() and
            equal(self.param('comment'), entity.comment) and
            equal(self.param('description'), entity.description) and
            equal(self.param('switch_type'), str(entity.switch_type)) and
            equal(self.param('cpu_arch'), str(getattr(cluster_cpu, 'architecture', None))) and
            equal(self.param('cpu_type'), getattr(cluster_cpu, 'type', None)) and
            equal(self.param('ballooning'), entity.ballooning_enabled) and
            equal(self.param('gluster'), entity.gluster_service) and
            equal(self.param('virt'), entity.virt_service) and
            equal(self.param('threads_as_cores'), entity.threads_as_cores) and
            equal(self.param('ksm_numa'), not entity.ksm.merge_across_nodes) and
            equal(self.param('ksm'), entity.ksm.enabled) and
            equal(self.param('ha_reservation'), entity.ha_reservation) and
            equal(self.param('trusted_service'), entity.trusted_service) and
            equal(self.param('host_reason'), entity.maintenance_reason_required) and
            equal(self.param('vm_reason'), entity.optional_reason) and
            equal(self.param('spice_proxy'), getattr(entity.display, 'proxy', None)) and
            equal(self.param('fence_enabled'), entity.fencing_policy.enabled) and
            equal(self.param('fence_skip_if_sd_active'), entity.fencing_policy.skip_if_sd_active.enabled) and
            equal(self.param('fence_skip_if_connectivity_broken'), entity.fencing_policy.skip_if_connectivity_broken.enabled) and
            equal(self.param('fence_connectivity_threshold'), entity.fencing_policy.skip_if_connectivity_broken.threshold) and
            equal(self.param('resilience_policy'), str(entity.error_handling.on_error)) and
            equal(self.param('migration_bandwidth'), str(entity.migration.bandwidth.assignment_method)) and
            equal(self.param('migration_auto_converge'), str(entity.migration.auto_converge)) and
            equal(self.param('migration_compressed'), str(entity.migration.compressed)) and
            equal(self.param('serial_policy'), str(getattr(entity.serial_number, 'policy', None))) and
            equal(self.param('serial_policy_value'), getattr(entity.serial_number, 'value', None)) and
            equal(self.param('scheduling_policy'), getattr(self._connection.follow_link(entity.scheduling_policy), 'name', None)) and
            equal(self._get_policy_id(), getattr(migration_policy, 'id', None)) and
            equal(self._get_memory_policy(), entity.memory_policy.over_commit.percent) and
            equal(self.__get_minor(self.param('compatibility_version')), self.__get_minor(entity.version)) and
            equal(self.__get_major(self.param('compatibility_version')), self.__get_major(entity.version)) and
            equal(
                self.param('migration_bandwidth_limit') if self.param('migration_bandwidth') == 'custom' else None,
                entity.migration.bandwidth.custom_value
            ) and
            equal(
                sorted(self.param('rng_sources')) if self.param('rng_sources') else None,
                sorted([
                    str(source) for source in entity.required_rng_sources
                ])
            ) and
            equal(
                get_id_by_name(self._connection.system_service().mac_pools_service(), self.param('mac_pool'), raise_error=False),
                entity.mac_pool.id
            ) and
            self._update_check_external_network_providers(entity)
        )
def main():
    """Entry point of the ovirt_cluster Ansible module.

    Builds the argument spec, connects to the oVirt engine and then creates
    or removes the cluster depending on the ``state`` parameter.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None, required=True),
        ballooning=dict(default=None, type='bool', aliases=['balloon']),
        gluster=dict(default=None, type='bool'),
        virt=dict(default=None, type='bool'),
        threads_as_cores=dict(default=None, type='bool'),
        ksm_numa=dict(default=None, type='bool'),
        ksm=dict(default=None, type='bool'),
        ha_reservation=dict(default=None, type='bool'),
        trusted_service=dict(default=None, type='bool'),
        vm_reason=dict(default=None, type='bool'),
        host_reason=dict(default=None, type='bool'),
        memory_policy=dict(default=None, choices=['disabled', 'server', 'desktop'], aliases=['performance_preset']),
        rng_sources=dict(default=None, type='list'),
        spice_proxy=dict(default=None),
        fence_enabled=dict(default=None, type='bool'),
        fence_skip_if_sd_active=dict(default=None, type='bool'),
        fence_skip_if_connectivity_broken=dict(default=None, type='bool'),
        fence_connectivity_threshold=dict(default=None, type='int'),
        resilience_policy=dict(default=None, choices=['migrate_highly_available', 'migrate', 'do_not_migrate']),
        migration_bandwidth=dict(default=None, choices=['auto', 'hypervisor_default', 'custom']),
        migration_bandwidth_limit=dict(default=None, type='int'),
        migration_auto_converge=dict(default=None, choices=['true', 'false', 'inherit']),
        migration_compressed=dict(default=None, choices=['true', 'false', 'inherit']),
        migration_policy=dict(
            default=None,
            choices=['legacy', 'minimal_downtime', 'suspend_workload', 'post_copy']
        ),
        serial_policy=dict(default=None, choices=['vm', 'host', 'custom']),
        serial_policy_value=dict(default=None),
        scheduling_policy=dict(default=None),
        data_center=dict(default=None),
        description=dict(default=None),
        comment=dict(default=None),
        network=dict(default=None),
        cpu_arch=dict(default=None, choices=['ppc64', 'undefined', 'x86_64']),
        cpu_type=dict(default=None),
        switch_type=dict(default=None, choices=['legacy', 'ovs']),
        compatibility_version=dict(default=None),
        mac_pool=dict(default=None),
        external_network_providers=dict(default=None, type='list'),
        scheduling_policy_properties=dict(type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    if module._name == 'ovirt_clusters':
        module.deprecate("The 'ovirt_clusters' module is being renamed 'ovirt_cluster'", version=2.8)

    check_sdk(module)

    # Initialize before the try block: if create_connection() (or the 'auth'
    # lookup) fails, the finally clause would otherwise raise a NameError
    # and mask the original error.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        clusters_service = connection.system_service().clusters_service()
        clusters_module = ClustersModule(
            connection=connection,
            module=module,
            service=clusters_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = clusters_module.create()
        elif state == 'absent':
            ret = clusters_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out if the connection was actually established.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
| gpl-3.0 |
staute/shinken_deb | test/test_multi_hostgroups_def.py | 18 | 1655 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestConfig(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_multi_hostgroups_def.cfg')
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("will crash")
self.assertIsNot(host, None)
svc = self.sched.services.find_srv_by_name_and_hostname("will crash", "Crash")
self.assertIsNot(svc, None)
grp = self.sched.servicegroups.find_by_name("Crashed")
self.assertIsNot(grp, None)
self.assertIn(svc, grp.members)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
Panos-Bletsos/spark-cost-model-optimizer | examples/src/main/python/mllib/svm_with_sgd_example.py | 22 | 1846 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import SparkContext
# $example on$
from pyspark.mllib.classification import SVMWithSGD, SVMModel
from pyspark.mllib.regression import LabeledPoint
# $example off$
if __name__ == "__main__":
    sc = SparkContext(appName="PythonSVMWithSGDExample")

    # $example on$
    # Load and parse the data
    def parsePoint(line):
        """Parse a space-separated line: first value is the label, the
        remaining values are the features."""
        values = [float(x) for x in line.split(' ')]
        return LabeledPoint(values[0], values[1:])

    data = sc.textFile("data/mllib/sample_svm_data.txt")
    parsedData = data.map(parsePoint)

    # Build the model
    model = SVMWithSGD.train(parsedData, iterations=100)

    # Evaluating the model on training data.
    # Avoid the Python-2-only tuple-parameter lambda ``lambda (v, p): ...``
    # (a SyntaxError on Python 3); index the (label, prediction) pair instead.
    labelsAndPreds = parsedData.map(lambda p: (p.label, model.predict(p.features)))
    trainErr = labelsAndPreds.filter(lambda lp: lp[0] != lp[1]).count() / float(parsedData.count())
    print("Training Error = " + str(trainErr))

    # Save and load model
    model.save(sc, "target/tmp/pythonSVMWithSGDModel")
    sameModel = SVMModel.load(sc, "target/tmp/pythonSVMWithSGDModel")
    # $example off$
| apache-2.0 |
LinuxTeam-teilar/kpytool-configs | scripts/scripts.py | 1 | 1870 | #!/usr/bin/env python
# Copyright 2012 by Giorgos Tsiapaliokas <terietor@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
from os import getcwd, path, mkdir
from gitToConfigParser.gitToConfigParser import gitToConfigParser
from svnToConfigParser.svnToConfigParser import svnToConfigParser
from thirdParty.thirdparty import thirdParty
# Warn the user: the config directories are created relative to the
# current working directory.
print """
The config dirs will be created in the current dir,
if you aren't in the right dir, abort!!!
"""

# Re-running the script over existing configs would corrupt them, so make
# the user confirm explicitly before continuing.
raw_input("""
If this isn\'t the first time that you use this script.
It will mess up your config files! So don\'t use it \n
Press enter to continue...
""")

print 'please wait....!'

# Destination directory in which all generated config files are stored.
destPath = path.abspath(getcwd()) + '/config/'

# Create the config path if it does not exist yet.
if not path.exists(destPath):
    mkdir(destPath)

# XML index of all KDE projects, as published by projects.kde.org.
XML_SOURCE = 'https://projects.kde.org/kde_projects.xml'

# Generate config files for the git-hosted KDE projects.
w = gitToConfigParser(XML_SOURCE, destPath)
w.do()

# Create the cfg file for the projects which are on svn.
svnToConfigParser.do(destPath)

# Create the cfg file for the 3rd-party projects.
thirdParty.do(destPath)
abstract-open-solutions/OCB | addons/payment_ogone/models/ogone.py | 34 | 19078 | # -*- coding: utf-'8' "-*-"
from hashlib import sha1
import logging
from lxml import etree, objectify
from pprint import pformat
import time
from urllib import urlencode
import urllib2
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.addons.payment_ogone.data import ogone
from openerp.osv import osv, fields
from openerp.tools import float_round
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class PaymentAcquirerOgone(osv.Model):
    _inherit = 'payment.acquirer'

    def _get_ogone_urls(self, cr, uid, environment, context=None):
        """Return the Ogone endpoint URLs for the given environment.

        :param str environment: Ogone environment name, interpolated into
            the secure.ogone.com URLs (e.g. 'test' or 'prod').
        :return: dict mapping endpoint names to absolute URLs.
        """
        return {
            'ogone_standard_order_url': 'https://secure.ogone.com/ncol/%s/orderstandard_utf8.asp' % (environment,),
            'ogone_direct_order_url': 'https://secure.ogone.com/ncol/%s/orderdirect_utf8.asp' % (environment,),
            'ogone_direct_query_url': 'https://secure.ogone.com/ncol/%s/querydirect_utf8.asp' % (environment,),
            'ogone_afu_agree_url': 'https://secure.ogone.com/ncol/%s/AFU_agree.asp' % (environment,),
        }

    def _get_providers(self, cr, uid, context=None):
        # Register 'ogone' in the list of available payment providers.
        providers = super(PaymentAcquirerOgone, self)._get_providers(cr, uid, context=context)
        providers.append(['ogone', 'Ogone'])
        return providers

    _columns = {
        'ogone_pspid': fields.char('PSPID', required_if_provider='ogone'),
        'ogone_userid': fields.char('API User ID', required_if_provider='ogone'),
        'ogone_password': fields.char('API User Password', required_if_provider='ogone'),
        'ogone_shakey_in': fields.char('SHA Key IN', size=32, required_if_provider='ogone'),
        'ogone_shakey_out': fields.char('SHA Key OUT', size=32, required_if_provider='ogone'),
    }

    def _ogone_generate_shasign(self, acquirer, inout, values):
        """ Generate the shasign for incoming or outgoing communications.

        :param browse acquirer: the payment.acquirer browse record. It should
                                have a shakey in shaky out
        :param string inout: 'in' (openerp contacting ogone) or 'out' (ogone
                             contacting openerp). In this last case only some
                             fields should be contained (see e-Commerce basic)
        :param dict values: transaction values

        :return string: shasign
        """
        assert inout in ('in', 'out')
        assert acquirer.provider == 'ogone'
        key = getattr(acquirer, 'ogone_shakey_' + inout)

        def filter_key(key):
            # Outgoing digests only cover the whitelisted SHA-OUT parameters;
            # incoming digests cover every non-empty parameter.
            if inout == 'in':
                return True
            else:
                # SHA-OUT keys
                # source https://viveum.v-psp.com/Ncol/Viveum_e-Com-BAS_EN.pdf
                keys = [
                    'AAVADDRESS',
                    'AAVCHECK',
                    'AAVMAIL',
                    'AAVNAME',
                    'AAVPHONE',
                    'AAVZIP',
                    'ACCEPTANCE',
                    'ALIAS',
                    'AMOUNT',
                    'BIC',
                    'BIN',
                    'BRAND',
                    'CARDNO',
                    'CCCTY',
                    'CN',
                    'COMPLUS',
                    'CREATION_STATUS',
                    'CURRENCY',
                    'CVCCHECK',
                    'DCC_COMMPERCENTAGE',
                    'DCC_CONVAMOUNT',
                    'DCC_CONVCCY',
                    'DCC_EXCHRATE',
                    'DCC_EXCHRATESOURCE',
                    'DCC_EXCHRATETS',
                    'DCC_INDICATOR',
                    'DCC_MARGINPERCENTAGE',
                    'DCC_VALIDHOURS',
                    'DIGESTCARDNO',
                    'ECI',
                    'ED',
                    'ENCCARDNO',
                    'FXAMOUNT',
                    'FXCURRENCY',
                    'IBAN',
                    'IP',
                    'IPCTY',
                    'NBREMAILUSAGE',
                    'NBRIPUSAGE',
                    'NBRIPUSAGE_ALLTX',
                    'NBRUSAGE',
                    'NCERROR',
                    'NCERRORCARDNO',
                    'NCERRORCN',
                    'NCERRORCVC',
                    'NCERRORED',
                    'ORDERID',
                    'PAYID',
                    'PM',
                    'SCO_CATEGORY',
                    'SCORING',
                    'STATUS',
                    'SUBBRAND',
                    'SUBSCRIPTION_ID',
                    'TRXDATE',
                    'VC'
                ]
                return key.upper() in keys

        # Parameters must be concatenated as KEY=value<key> in alphabetical
        # order of the (uppercased) parameter name, then SHA1-hashed.
        items = sorted((k.upper(), v) for k, v in values.items())
        sign = ''.join('%s=%s%s' % (k, v, key) for k, v in items if v and filter_key(k))
        sign = sign.encode("utf-8")
        shasign = sha1(sign).hexdigest()
        return shasign

    def ogone_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
        """Build the POST parameters (including the SHASIGN) for the Ogone
        form-based ("standard order") payment flow."""
        base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        acquirer = self.browse(cr, uid, id, context=context)
        ogone_tx_values = dict(tx_values)
        temp_ogone_tx_values = {
            'PSPID': acquirer.ogone_pspid,
            'ORDERID': tx_values['reference'],
            # Ogone expects the amount in cents. Round before truncating:
            # int(1.15 * 100) == 114 because of float representation.
            'AMOUNT': '%d' % int(round(float_round(tx_values['amount'], 2) * 100)),
            'CURRENCY': tx_values['currency'] and tx_values['currency'].name or '',
            'LANGUAGE': partner_values['lang'],
            'CN': partner_values['name'],
            'EMAIL': partner_values['email'],
            'OWNERZIP': partner_values['zip'],
            'OWNERADDRESS': partner_values['address'],
            'OWNERTOWN': partner_values['city'],
            'OWNERCTY': partner_values['country'] and partner_values['country'].code or '',
            'OWNERTELNO': partner_values['phone'],
            'ACCEPTURL': '%s' % urlparse.urljoin(base_url, OgoneController._accept_url),
            'DECLINEURL': '%s' % urlparse.urljoin(base_url, OgoneController._decline_url),
            'EXCEPTIONURL': '%s' % urlparse.urljoin(base_url, OgoneController._exception_url),
            'CANCELURL': '%s' % urlparse.urljoin(base_url, OgoneController._cancel_url),
        }
        if ogone_tx_values.get('return_url'):
            # PARAMPLUS is echoed back by Ogone on return, carrying our URL.
            temp_ogone_tx_values['PARAMPLUS'] = 'return_url=%s' % ogone_tx_values.pop('return_url')
        shasign = self._ogone_generate_shasign(acquirer, 'in', temp_ogone_tx_values)
        temp_ogone_tx_values['SHASIGN'] = shasign
        ogone_tx_values.update(temp_ogone_tx_values)
        return partner_values, ogone_tx_values

    def ogone_get_form_action_url(self, cr, uid, id, context=None):
        # URL the payment form must POST to, per the acquirer's environment.
        acquirer = self.browse(cr, uid, id, context=context)
        return self._get_ogone_urls(cr, uid, acquirer.environment, context=context)['ogone_standard_order_url']
class PaymentTxOgone(osv.Model):
    _inherit = 'payment.transaction'

    # Ogone transaction status codes
    # (see https://secure.ogone.com/ncol/paymentinfos1.asp)
    _ogone_valid_tx_status = [5, 9]
    _ogone_wait_tx_status = [41, 50, 51, 52, 55, 56, 91, 92, 99]
    _ogone_pending_tx_status = [46]  # 3DS HTML response
    _ogone_cancel_tx_status = [1]

    _columns = {
        'ogone_3ds': fields.boolean('3DS Activated'),
        'ogone_3ds_html': fields.html('3DS HTML'),
        'ogone_complus': fields.char('Complus'),
        'ogone_payid': fields.char('PayID', help='Payment ID, generated by Ogone')
    }

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    def _ogone_form_get_tx_from_data(self, cr, uid, data, context=None):
        """ Given a data dict coming from ogone, verify it and find the related
        transaction record.

        :raise ValidationError: if mandatory fields are missing, the reference
            matches zero or several transactions, or the SHA signature check
            fails.
        """
        reference, pay_id, shasign = data.get('orderID'), data.get('PAYID'), data.get('SHASIGN')
        if not reference or not pay_id or not shasign:
            error_msg = 'Ogone: received data with missing reference (%s) or pay_id (%s) or shashign (%s)' % (reference, pay_id, shasign)
            _logger.error(error_msg)
            raise ValidationError(error_msg)

        # find tx -> @TDENOTE use paytid ?
        tx_ids = self.search(cr, uid, [('reference', '=', reference)], context=context)
        if not tx_ids or len(tx_ids) > 1:
            error_msg = 'Ogone: received data for reference %s' % (reference)
            if not tx_ids:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)

        # verify shasign to authenticate the notification
        shasign_check = self.pool['payment.acquirer']._ogone_generate_shasign(tx.acquirer_id, 'out', data)
        if shasign_check.upper() != shasign.upper():
            error_msg = 'Ogone: invalid shasign, received %s, computed %s, for data %s' % (shasign, shasign_check, data)
            _logger.error(error_msg)
            raise ValidationError(error_msg)

        return tx

    def _ogone_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
        """Return a list of (name, received, expected) tuples for feedback
        values that do not match the stored transaction."""
        invalid_parameters = []

        # TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
        if tx.acquirer_reference and data.get('PAYID') != tx.acquirer_reference:
            invalid_parameters.append(('PAYID', data.get('PAYID'), tx.acquirer_reference))
        # check what is bought
        if float_compare(float(data.get('amount', '0.0')), tx.amount, 2) != 0:
            invalid_parameters.append(('amount', data.get('amount'), '%.2f' % tx.amount))
        if data.get('currency') != tx.currency_id.name:
            invalid_parameters.append(('currency', data.get('currency'), tx.currency_id.name))

        return invalid_parameters

    def _ogone_form_validate(self, cr, uid, tx, data, context=None):
        """Update the transaction state according to the Ogone STATUS code.

        :return bool: True when the transaction is (or already was) validated.
        """
        if tx.state == 'done':
            _logger.warning('Ogone: trying to validate an already validated tx (ref %s)' % tx.reference)
            return True

        status = int(data.get('STATUS', '0'))
        if status in self._ogone_valid_tx_status:
            tx.write({
                'state': 'done',
                'date_validate': data['TRXDATE'],
                'acquirer_reference': data['PAYID'],
            })
            return True
        elif status in self._ogone_cancel_tx_status:
            tx.write({
                'state': 'cancel',
                'acquirer_reference': data.get('PAYID'),
            })
        elif status in self._ogone_pending_tx_status:
            tx.write({
                'state': 'pending',
                'acquirer_reference': data.get('PAYID'),
            })
        else:
            error = 'Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s' % {
                'error_str': data.get('NCERROR'),
                'error_code': data.get('NCERRORPLUS'),
                'error_msg': ogone.OGONE_ERROR_MAP.get(data.get('NCERRORPLUS')),
            }
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error,
                'acquirer_reference': data.get('PAYID'),
            })
        return False

    # --------------------------------------------------
    # S2S RELATED METHODS
    # --------------------------------------------------

    def ogone_s2s_create_alias(self, cr, uid, id, values, context=None):
        """ Create an alias at Ogone via batch.

        .. versionadded:: pre-v8 saas-3

        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        tx = self.browse(cr, uid, id, context=context)
        assert tx.type == 'server2server', 'Calling s2s dedicated method for a %s acquirer' % tx.type
        alias = 'OPENERP-%d-%d' % (tx.partner_id.id, tx.id)

        expiry_date = '%s%s' % (values['expiry_date_mm'], values['expiry_date_yy'][2:])
        line = 'ADDALIAS;%(alias)s;%(holder_name)s;%(number)s;%(expiry_date)s;%(brand)s;%(pspid)s'
        line = line % dict(values, alias=alias, expiry_date=expiry_date, pspid=tx.acquirer_id.ogone_pspid)

        tx_data = {
            'FILE_REFERENCE': 'OPENERP-NEW-ALIAS-%s' % time.time(),    # something unique,
            'TRANSACTION_CODE': 'ATR',
            'OPERATION': 'SAL',
            'NB_PAYMENTS': 1,   # even if we do not actually have any payment, ogone want it to not be 0
            'FILE': line,
            'REPLY_TYPE': 'XML',
            'PSPID': tx.acquirer_id.ogone_pspid,
            'USERID': tx.acquirer_id.ogone_userid,
            'PSWD': tx.acquirer_id.ogone_password,
            'PROCESS_MODE': 'CHECKANDPROCESS',
        }

        # TODO: fix URL computation
        request = urllib2.Request(tx.acquirer_id.ogone_afu_agree_url, urlencode(tx_data))
        result = urllib2.urlopen(request).read()

        try:
            tree = objectify.fromstring(result)
        except etree.XMLSyntaxError:
            _logger.exception('Invalid xml response from ogone')
            return None

        error_code = error_str = None
        if hasattr(tree, 'PARAMS_ERROR'):
            error_code = tree.NCERROR.text
            error_str = 'PARAMS ERROR: %s' % (tree.PARAMS_ERROR.text or '',)
        else:
            node = tree.FORMAT_CHECK
            error_node = getattr(node, 'FORMAT_CHECK_ERROR', None)
            if error_node is not None:
                error_code = error_node.NCERROR.text
                error_str = 'CHECK ERROR: %s' % (error_node.ERROR.text or '',)

        if error_code:
            error_msg = ogone.OGONE_ERROR_MAP.get(error_code)
            error = '%s\n\n%s: %s' % (error_str, error_code, error_msg)
            _logger.error(error)
            raise Exception(error)      # TODO specific exception

        tx.write({'partner_reference': alias})
        return True

    def ogone_s2s_generate_values(self, cr, uid, id, custom_values, context=None):
        """ Generate valid Ogone values for a s2s tx.

        .. versionadded:: pre-v8 saas-3

        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        tx = self.browse(cr, uid, id, context=context)
        tx_data = {
            'PSPID': tx.acquirer_id.ogone_pspid,
            'USERID': tx.acquirer_id.ogone_userid,
            'PSWD': tx.acquirer_id.ogone_password,
            'OrderID': tx.reference,
            # Amount in cents; round before truncating to avoid float
            # representation errors (int(1.15 * 100) == 114).
            'amount': '%d' % int(round(float_round(tx.amount, 2) * 100)),
            'CURRENCY': tx.currency_id.name,
            'LANGUAGE': tx.partner_lang,
            'OPERATION': 'SAL',
            'ECI': 2,   # Recurring (from MOTO)
            'ALIAS': tx.partner_reference,
            'RTIMEOUT': 30,
        }
        if custom_values.get('ogone_cvc'):
            tx_data['CVC'] = custom_values.get('ogone_cvc')
        if custom_values.pop('ogone_3ds', None):
            tx_data.update({
                'FLAG3D': 'Y',  # YEAH!!
            })
        if custom_values.get('ogone_complus'):
            tx_data['COMPLUS'] = custom_values.get('ogone_complus')
        if custom_values.get('ogone_accept_url'):
            # TODO: accept URL handling was never implemented.
            pass
        shasign = self.pool['payment.acquirer']._ogone_generate_shasign(tx.acquirer_id, 'in', tx_data)
        tx_data['SHASIGN'] = shasign
        return tx_data

    def ogone_s2s_feedback(self, cr, uid, data, context=None):
        """
        .. versionadded:: pre-v8 saas-3

        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        pass

    def ogone_s2s_execute(self, cr, uid, id, values, context=None):
        """Execute a server-to-server payment and poll its status.

        .. versionadded:: pre-v8 saas-3

        .. warning::

            Experimental code. You should not use it before OpenERP v8 official
            release.
        """
        tx = self.browse(cr, uid, id, context=context)

        tx_data = self.ogone_s2s_generate_values(cr, uid, id, values, context=context)
        _logger.info('Generated Ogone s2s data %s', pformat(tx_data))  # debug

        request = urllib2.Request(tx.acquirer_id.ogone_direct_order_url, urlencode(tx_data))
        result = urllib2.urlopen(request).read()
        _logger.info('Contacted Ogone direct order; result %s', result)  # debug

        tree = objectify.fromstring(result)
        payid = tree.get('PAYID')

        query_direct_data = dict(
            PSPID=tx.acquirer_id.ogone_pspid,
            USERID=tx.acquirer_id.ogone_userid,
            PSWD=tx.acquirer_id.ogone_password,
            ID=payid,
        )
        query_direct_url = 'https://secure.ogone.com/ncol/%s/querydirect.asp' % (tx.acquirer_id.environment,)

        tries = 2
        tx_done = False
        tx_status = False
        # Poll until the transaction reaches a final state or we run out of
        # tries. This must be 'and' (not 'or' as it used to be): with 'or',
        # a transaction stuck in a WAIT status would loop forever because
        # 'not tx_done' stays True.
        while not tx_done and tries > 0:
            try:
                tree = objectify.fromstring(result)
            except etree.XMLSyntaxError:
                # invalid response from ogone
                _logger.exception('Invalid xml response from ogone')
                raise

            # see https://secure.ogone.com/ncol/paymentinfos1.asp
            VALID_TX = [5, 9]
            WAIT_TX = [41, 50, 51, 52, 55, 56, 91, 92, 99]
            PENDING_TX = [46]  # 3DS HTML response
            # other status are errors...

            status = tree.get('STATUS')
            # Guard against both an empty ('') and a missing (None) STATUS
            # attribute; int(None) would raise a TypeError.
            status = int(status) if status else None

            if status in VALID_TX:
                tx_status = True
                tx_done = True

            elif status in PENDING_TX:
                html = str(tree.HTML_ANSWER)
                tx_data.update(ogone_3ds_html=html.decode('base64'))
                tx_status = False
                tx_done = True

            elif status in WAIT_TX:
                # Wait a bit before polling again. time.sleep() takes
                # seconds: the previous value of 1500 slept for 25 minutes.
                time.sleep(1.5)

                request = urllib2.Request(query_direct_url, urlencode(query_direct_data))
                result = urllib2.urlopen(request).read()
                _logger.debug('Contacted Ogone query direct; result %s', result)

            else:
                error_code = tree.get('NCERROR')
                if not ogone.retryable(error_code):
                    error_str = tree.get('NCERRORPLUS')
                    error_msg = ogone.OGONE_ERROR_MAP.get(error_code)
                    error = 'ERROR: %s\n\n%s: %s' % (error_str, error_code, error_msg)
                    _logger.info(error)
                    raise Exception(error)

            tries = tries - 1

        if not tx_done and tries == 0:
            raise Exception('Cannot get transaction status...')

        return tx_status
| agpl-3.0 |
xujun10110/pth-toolkit | lib/python2.7/site-packages/samba/external/testtools/tests/test_deferredruntest.py | 8 | 29167 | # Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
"""Tests for the DeferredRunTest single test execution logic."""
import os
import signal
from testtools import (
skipIf,
TestCase,
TestResult,
)
from testtools.content import (
text_content,
)
from testtools.helpers import try_import
from testtools.matchers import (
Equals,
KeysEqual,
MatchesException,
Raises,
)
from testtools.runtest import RunTest
from testtools.testresult.doubles import ExtendedTestResult
from testtools.tests.test_spinner import NeedsTwistedTestCase
assert_fails_with = try_import('testtools.deferredruntest.assert_fails_with')
AsynchronousDeferredRunTest = try_import(
'testtools.deferredruntest.AsynchronousDeferredRunTest')
flush_logged_errors = try_import(
'testtools.deferredruntest.flush_logged_errors')
SynchronousDeferredRunTest = try_import(
'testtools.deferredruntest.SynchronousDeferredRunTest')
defer = try_import('twisted.internet.defer')
failure = try_import('twisted.python.failure')
log = try_import('twisted.python.log')
DelayedCall = try_import('twisted.internet.base.DelayedCall')
class X(object):
    """Tests that we run as part of our tests, nested to avoid discovery."""

    class Base(TestCase):
        # Records the lifecycle phases in ``self.calls`` so that each
        # scenario subclass can assert exactly which phases ran.
        def setUp(self):
            super(X.Base, self).setUp()
            self.calls = ['setUp']
            self.addCleanup(self.calls.append, 'clean-up')

        def test_something(self):
            self.calls.append('test')

        def tearDown(self):
            self.calls.append('tearDown')
            super(X.Base, self).tearDown()

    class ErrorInSetup(Base):
        # setUp raises: the test body and tearDown never run, cleanups do.
        expected_calls = ['setUp', 'clean-up']
        expected_results = [('addError', RuntimeError)]

        def setUp(self):
            super(X.ErrorInSetup, self).setUp()
            raise RuntimeError("Error in setUp")

    class ErrorInTest(Base):
        # The test body raises: tearDown and cleanups still run.
        expected_calls = ['setUp', 'tearDown', 'clean-up']
        expected_results = [('addError', RuntimeError)]

        def test_something(self):
            raise RuntimeError("Error in test")

    class FailureInTest(Base):
        # An assertion failure is reported via addFailure, not addError.
        expected_calls = ['setUp', 'tearDown', 'clean-up']
        expected_results = [('addFailure', AssertionError)]

        def test_something(self):
            self.fail("test failed")

    class ErrorInTearDown(Base):
        expected_calls = ['setUp', 'test', 'clean-up']
        expected_results = [('addError', RuntimeError)]

        def tearDown(self):
            raise RuntimeError("Error in tearDown")

    class ErrorInCleanup(Base):
        expected_calls = ['setUp', 'test', 'tearDown', 'clean-up']
        expected_results = [('addError', ZeroDivisionError)]

        def test_something(self):
            self.calls.append('test')
            self.addCleanup(lambda: 1/0)

    class TestIntegration(NeedsTwistedTestCase):

        def assertResultsMatch(self, test, result):
            # The recorded events must be: startTest, then the scenario's
            # expected_results (error types compared by name within the
            # details), then stopTest.
            events = list(result._events)
            self.assertEqual(('startTest', test), events.pop(0))
            for expected_result in test.expected_results:
                result = events.pop(0)
                if len(expected_result) == 1:
                    self.assertEqual((expected_result[0], test), result)
                else:
                    self.assertEqual((expected_result[0], test), result[:2])
                    error_type = expected_result[1]
                    self.assertIn(error_type.__name__, str(result[2]))
            self.assertEqual([('stopTest', test)], events)

        def test_runner(self):
            # ``test_factory`` and ``runner`` are injected onto cloned
            # instances by make_integration_tests() below.
            result = ExtendedTestResult()
            test = self.test_factory('test_something', runTest=self.runner)
            test.run(result)
            self.assertEqual(test.calls, self.test_factory.expected_calls)
            self.assertResultsMatch(test, result)
def make_integration_tests():
    """Build a suite running every X scenario under each RunTest flavour.

    Each generated case is a clone of ``X.TestIntegration.test_runner``
    with a unique id and the scenario/runner pair attached.
    """
    from unittest import TestSuite
    from testtools import clone_test_with_new_id

    runner_flavours = [
        ('RunTest', RunTest),
        ('SynchronousDeferredRunTest', SynchronousDeferredRunTest),
        ('AsynchronousDeferredRunTest', AsynchronousDeferredRunTest),
    ]
    scenarios = [
        X.ErrorInSetup,
        X.ErrorInTest,
        X.ErrorInTearDown,
        X.FailureInTest,
        X.ErrorInCleanup,
    ]
    template = X.TestIntegration('test_runner')

    def make_case(flavour_name, flavour, scenario):
        # Give each combination a descriptive, unique test id.
        case = clone_test_with_new_id(
            template,
            '%s(%s, %s)' % (template.id(), flavour_name, scenario.__name__))
        case.test_factory = scenario
        case.runner = flavour
        return case

    return TestSuite(
        make_case(name, flavour, scenario)
        for name, flavour in runner_flavours
        for scenario in scenarios)
class TestSynchronousDeferredRunTest(NeedsTwistedTestCase):
    """Tests for SynchronousDeferredRunTest: Deferreds returned from the
    fixture methods are waited for synchronously."""

    def make_result(self):
        # ExtendedTestResult records every event for later inspection.
        return ExtendedTestResult()

    def make_runner(self, test):
        return SynchronousDeferredRunTest(test, test.exception_handlers)

    def test_success(self):
        # A test returning an already-fired successful Deferred is reported
        # as a plain success.
        class SomeCase(TestCase):
            def test_success(self):
                return defer.succeed(None)
        test = SomeCase('test_success')
        runner = self.make_runner(test)
        result = self.make_result()
        runner.run(result)
        self.assertThat(
            result._events, Equals([
                ('startTest', test),
                ('addSuccess', test),
                ('stopTest', test)]))

    def test_failure(self):
        # A Deferred firing with a failure is reported via addFailure
        # (event details are stripped before comparing).
        class SomeCase(TestCase):
            def test_failure(self):
                return defer.maybeDeferred(self.fail, "Egads!")
        test = SomeCase('test_failure')
        runner = self.make_runner(test)
        result = self.make_result()
        runner.run(result)
        self.assertThat(
            [event[:2] for event in result._events], Equals([
                ('startTest', test),
                ('addFailure', test),
                ('stopTest', test)]))

    def test_setUp_followed_by_test(self):
        # A Deferred returned by setUp is waited for before the test method
        # runs; the test's own failure is then reported normally.
        class SomeCase(TestCase):
            def setUp(self):
                super(SomeCase, self).setUp()
                return defer.succeed(None)

            def test_failure(self):
                return defer.maybeDeferred(self.fail, "Egads!")
        test = SomeCase('test_failure')
        runner = self.make_runner(test)
        result = self.make_result()
        runner.run(result)
        self.assertThat(
            [event[:2] for event in result._events], Equals([
                ('startTest', test),
                ('addFailure', test),
                ('stopTest', test)]))
class TestAsynchronousDeferredRunTest(NeedsTwistedTestCase):
    def make_reactor(self):
        # Imported lazily: Twisted is treated as optional by this module
        # (see the try_import calls at the top of the file).
        from twisted.internet import reactor
        return reactor
    def make_result(self):
        # ExtendedTestResult records every event for later inspection.
        return ExtendedTestResult()
    def make_runner(self, test, timeout=None):
        # Build the runner under test; ``timeout`` is in seconds and
        # defaults to the short value from make_timeout().
        if timeout is None:
            timeout = self.make_timeout()
        return AsynchronousDeferredRunTest(
            test, test.exception_handlers, timeout=timeout)
    def make_timeout(self):
        # A short 5ms timeout keeps these reactor-driven tests fast.
        return 0.005
    def test_setUp_returns_deferred_that_fires_later(self):
        # setUp can return a Deferred that might fire at any time.
        # AsynchronousDeferredRunTest will not go on to running the test until
        # the Deferred returned by setUp actually fires.
        call_log = []
        marker = object()
        d = defer.Deferred().addCallback(call_log.append)

        class SomeCase(TestCase):
            def setUp(self):
                super(SomeCase, self).setUp()
                call_log.append('setUp')
                return d

            def test_something(self):
                call_log.append('test')

        def fire_deferred():
            # The test body must not have started before setUp's Deferred
            # fires.
            self.assertThat(call_log, Equals(['setUp']))
            d.callback(marker)

        test = SomeCase('test_something')
        timeout = self.make_timeout()
        runner = self.make_runner(test, timeout=timeout)
        result = self.make_result()
        reactor = self.make_reactor()
        reactor.callLater(timeout, fire_deferred)
        runner.run(result)
        # The marker is appended by the Deferred's own callback, proving the
        # ordering setUp -> deferred fired -> test.
        self.assertThat(call_log, Equals(['setUp', marker, 'test']))
    def test_calls_setUp_test_tearDown_in_sequence(self):
        # setUp, the test method and tearDown can all return
        # Deferreds. AsynchronousDeferredRunTest will make sure that each of
        # these are run in turn, only going on to the next stage once the
        # Deferred from the previous stage has fired.
        call_log = []
        a = defer.Deferred()
        a.addCallback(lambda x: call_log.append('a'))
        b = defer.Deferred()
        b.addCallback(lambda x: call_log.append('b'))
        c = defer.Deferred()
        c.addCallback(lambda x: call_log.append('c'))

        class SomeCase(TestCase):
            def setUp(self):
                super(SomeCase, self).setUp()
                call_log.append('setUp')
                return a

            def test_success(self):
                call_log.append('test')
                return b

            def tearDown(self):
                super(SomeCase, self).tearDown()
                call_log.append('tearDown')
                return c

        test = SomeCase('test_success')
        timeout = self.make_timeout()
        runner = self.make_runner(test, timeout)
        result = self.make_result()
        reactor = self.make_reactor()

        # Each fire_* helper asserts the progress so far before letting the
        # next stage proceed.
        def fire_a():
            self.assertThat(call_log, Equals(['setUp']))
            a.callback(None)

        def fire_b():
            self.assertThat(call_log, Equals(['setUp', 'a', 'test']))
            b.callback(None)

        def fire_c():
            self.assertThat(
                call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown']))
            c.callback(None)

        # Stagger the callbacks within the runner's timeout window.
        reactor.callLater(timeout * 0.25, fire_a)
        reactor.callLater(timeout * 0.5, fire_b)
        reactor.callLater(timeout * 0.75, fire_c)
        runner.run(result)
        self.assertThat(
            call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown', 'c']))
    def test_async_cleanups(self):
        # Cleanups added with addCleanup can return
        # Deferreds. AsynchronousDeferredRunTest will run each of them in
        # turn.
        class SomeCase(TestCase):
            def test_whatever(self):
                pass
        test = SomeCase('test_whatever')
        call_log = []
        a = defer.Deferred().addCallback(lambda x: call_log.append('a'))
        b = defer.Deferred().addCallback(lambda x: call_log.append('b'))
        c = defer.Deferred().addCallback(lambda x: call_log.append('c'))
        test.addCleanup(lambda: a)
        test.addCleanup(lambda: b)
        test.addCleanup(lambda: c)

        # Each fire_* helper asserts the log state before firing the next
        # Deferred.
        def fire_a():
            self.assertThat(call_log, Equals([]))
            a.callback(None)

        def fire_b():
            self.assertThat(call_log, Equals(['a']))
            b.callback(None)

        def fire_c():
            self.assertThat(call_log, Equals(['a', 'b']))
            c.callback(None)
        timeout = self.make_timeout()
        reactor = self.make_reactor()
        reactor.callLater(timeout * 0.25, fire_a)
        reactor.callLater(timeout * 0.5, fire_b)
        reactor.callLater(timeout * 0.75, fire_c)
        runner = self.make_runner(test, timeout)
        result = self.make_result()
        runner.run(result)
        self.assertThat(call_log, Equals(['a', 'b', 'c']))
def test_clean_reactor(self):
    """If there's cruft left over in the reactor, the test fails."""
    reactor = self.make_reactor()
    timeout = self.make_timeout()
    class SomeCase(TestCase):
        def test_cruft(self):
            # Schedule a call that is still pending when the test ends.
            reactor.callLater(timeout * 10.0, lambda: None)
    test = SomeCase('test_cruft')
    runner = self.make_runner(test, timeout)
    result = self.make_result()
    runner.run(result)
    # The test body did nothing wrong itself, but the dirty reactor is
    # reported as an error.
    self.assertThat(
        [event[:2] for event in result._events],
        Equals(
            [('startTest', test),
             ('addError', test),
             ('stopTest', test)]))
    error = result._events[1][2]
    self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
def test_exports_reactor(self):
    """The reactor is set as an attribute on the test case."""
    reactor = self.make_reactor()
    timeout = self.make_timeout()
    class SomeCase(TestCase):
        def test_cruft(self):
            # The runner is expected to have exported its reactor as
            # ``self.reactor`` on the case under test.
            self.assertIs(reactor, self.reactor)
    test = SomeCase('test_cruft')
    runner = self.make_runner(test, timeout)
    result = TestResult()
    runner.run(result)
    self.assertEqual([], result.errors)
    self.assertEqual([], result.failures)
def test_unhandled_error_from_deferred(self):
    """If there's a Deferred with an unhandled error, the test fails.

    Each unhandled error is reported with a separate traceback detail.
    """
    class SomeCase(TestCase):
        def test_cruft(self):
            # Note we aren't returning the Deferred so that the error will
            # be unhandled.
            defer.maybeDeferred(lambda: 1/0)
            defer.maybeDeferred(lambda: 2/0)
    test = SomeCase('test_cruft')
    runner = self.make_runner(test)
    result = self.make_result()
    runner.run(result)
    # Swap the error details for a placeholder so the event list can be
    # compared with Equals; the details themselves are checked below.
    error = result._events[1][2]
    result._events[1] = ('addError', test, None)
    self.assertThat(result._events, Equals(
        [('startTest', test),
         ('addError', test, None),
         ('stopTest', test)]))
    self.assertThat(
        error, KeysEqual(
            'twisted-log',
            'unhandled-error-in-deferred',
            'unhandled-error-in-deferred-1',
        ))
def test_unhandled_error_from_deferred_combined_with_error(self):
    """If there's a Deferred with an unhandled error, the test fails.

    Each unhandled error is reported with a separate traceback, and a
    synchronous error raised by the test is still reported as well.
    """
    class SomeCase(TestCase):
        def test_cruft(self):
            # Note we aren't returning the Deferred so that the error will
            # be unhandled.
            defer.maybeDeferred(lambda: 1/0)
            # Synchronous error raised directly by the test body.
            2 / 0
    test = SomeCase('test_cruft')
    runner = self.make_runner(test)
    result = self.make_result()
    runner.run(result)
    # Swap the error details for a placeholder so the event list can be
    # compared with Equals; the details themselves are checked below.
    error = result._events[1][2]
    result._events[1] = ('addError', test, None)
    self.assertThat(result._events, Equals(
        [('startTest', test),
         ('addError', test, None),
         ('stopTest', test)]))
    self.assertThat(
        error, KeysEqual(
            'traceback',
            'twisted-log',
            'unhandled-error-in-deferred',
        ))
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_keyboard_interrupt_stops_test_run(self):
    """A SIGINT during a test run stops the test and no more tests run."""
    SIGINT = getattr(signal, 'SIGINT', None)
    if not SIGINT:
        raise self.skipTest("SIGINT unavailable")
    class SomeCase(TestCase):
        def test_pause(self):
            # Never fires: keeps the test running until the signal lands.
            return defer.Deferred()
    test = SomeCase('test_pause')
    reactor = self.make_reactor()
    timeout = self.make_timeout()
    runner = self.make_runner(test, timeout * 5)
    result = self.make_result()
    # Deliver SIGINT to ourselves while the test is still waiting.
    reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
    self.assertThat(lambda:runner.run(result),
        Raises(MatchesException(KeyboardInterrupt)))
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_fast_keyboard_interrupt_stops_test_run(self):
    """A SIGINT delivered as soon as the reactor starts also stops the run."""
    SIGINT = getattr(signal, 'SIGINT', None)
    if not SIGINT:
        raise self.skipTest("SIGINT unavailable")
    class SomeCase(TestCase):
        def test_pause(self):
            # Never fires: keeps the test running until the signal lands.
            return defer.Deferred()
    test = SomeCase('test_pause')
    reactor = self.make_reactor()
    timeout = self.make_timeout()
    runner = self.make_runner(test, timeout * 5)
    result = self.make_result()
    # Unlike the test above, deliver the signal immediately on startup.
    reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
    self.assertThat(lambda:runner.run(result),
        Raises(MatchesException(KeyboardInterrupt)))
def test_timeout_causes_test_error(self):
    """If a test times out, it reports itself as having failed with a
    TimeoutError."""
    class SomeCase(TestCase):
        def test_pause(self):
            # A Deferred that never fires, forcing the runner's timeout.
            return defer.Deferred()
    test = SomeCase('test_pause')
    runner = self.make_runner(test)
    result = self.make_result()
    runner.run(result)
    error = result._events[1][2]
    self.assertThat(
        [event[:2] for event in result._events], Equals(
            [('startTest', test),
             ('addError', test),
             ('stopTest', test)]))
    self.assertIn('TimeoutError', str(error['traceback']))
def test_convenient_construction(self):
    """AsynchronousDeferredRunTest.make_factory returns a factory with the
    same API as the RunTest constructor, honouring reactor and timeout."""
    # Sentinels are enough: the factory only stores these objects.
    reactor = object()
    timeout = object()
    handler = object()
    factory = AsynchronousDeferredRunTest.make_factory(reactor, timeout)
    runner = factory(self, [handler])
    self.assertIs(reactor, runner._reactor)
    self.assertIs(timeout, runner._timeout)
    self.assertIs(self, runner.case)
    self.assertEqual([handler], runner.handlers)
def test_use_convenient_factory(self):
    """The factory from make_factory can actually be used as run_tests_with."""
    factory = AsynchronousDeferredRunTest.make_factory()
    class SomeCase(TestCase):
        run_tests_with = factory
        def test_something(self):
            pass
    case = SomeCase('test_something')
    # Just exercising run() — any exception here fails the test.
    case.run()
def test_convenient_construction_default_reactor(self):
    """make_factory accepts reactor as a keyword, defaulting the rest."""
    reactor = object()
    handler = object()
    factory = AsynchronousDeferredRunTest.make_factory(reactor=reactor)
    runner = factory(self, [handler])
    self.assertIs(reactor, runner._reactor)
    self.assertIs(self, runner.case)
    self.assertEqual([handler], runner.handlers)
def test_convenient_construction_default_timeout(self):
    """make_factory accepts timeout as a keyword, defaulting the rest."""
    timeout = object()
    handler = object()
    factory = AsynchronousDeferredRunTest.make_factory(timeout=timeout)
    runner = factory(self, [handler])
    self.assertIs(timeout, runner._timeout)
    self.assertIs(self, runner.case)
    self.assertEqual([handler], runner.handlers)
def test_convenient_construction_default_debugging(self):
    """make_factory accepts the debug flag and passes it to the runner."""
    handler = object()
    factory = AsynchronousDeferredRunTest.make_factory(debug=True)
    runner = factory(self, [handler])
    self.assertIs(self, runner.case)
    self.assertEqual([handler], runner.handlers)
    self.assertEqual(True, runner._debug)
def test_deferred_error(self):
    """A test returning a failing Deferred is reported as an error."""
    class SomeTest(TestCase):
        def test_something(self):
            return defer.maybeDeferred(lambda: 1/0)
    test = SomeTest('test_something')
    runner = self.make_runner(test)
    result = self.make_result()
    runner.run(result)
    self.assertThat(
        [event[:2] for event in result._events],
        Equals([
            ('startTest', test),
            ('addError', test),
            ('stopTest', test)]))
    error = result._events[1][2]
    self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
def test_only_addError_once(self):
    """Even if the reactor is unclean and the test raises an error and the
    cleanups raise errors, addError is only called once per test; all the
    failures are accumulated as details on that single error.
    """
    reactor = self.make_reactor()
    class WhenItRains(TestCase):
        def it_pours(self):
            # Add a dirty cleanup.
            self.addCleanup(lambda: 3 / 0)
            # Dirty the reactor.
            from twisted.internet.protocol import ServerFactory
            reactor.listenTCP(0, ServerFactory())
            # Unhandled error.
            defer.maybeDeferred(lambda: 2 / 0)
            # Actual error.
            raise RuntimeError("Excess precipitation")
    test = WhenItRains('it_pours')
    runner = self.make_runner(test)
    result = self.make_result()
    runner.run(result)
    self.assertThat(
        [event[:2] for event in result._events],
        Equals([
            ('startTest', test),
            ('addError', test),
            ('stopTest', test)]))
    error = result._events[1][2]
    self.assertThat(
        error, KeysEqual(
            'traceback',
            'traceback-1',
            'traceback-2',
            'twisted-log',
            'unhandled-error-in-deferred',
        ))
def test_log_err_is_error(self):
    """An error logged during the test run is recorded as an error in the
    test result."""
    class LogAnError(TestCase):
        def test_something(self):
            try:
                1/0
            except ZeroDivisionError:
                f = failure.Failure()
                # Log the failure rather than raising or returning it.
                log.err(f)
    test = LogAnError('test_something')
    runner = self.make_runner(test)
    result = self.make_result()
    runner.run(result)
    self.assertThat(
        [event[:2] for event in result._events],
        Equals([
            ('startTest', test),
            ('addError', test),
            ('stopTest', test)]))
    error = result._events[1][2]
    self.assertThat(error, KeysEqual('logged-error', 'twisted-log'))
def test_log_err_flushed_is_success(self):
    """A logged error that is flushed with flush_logged_errors does not
    fail the test."""
    class LogAnError(TestCase):
        def test_something(self):
            try:
                1/0
            except ZeroDivisionError:
                f = failure.Failure()
                log.err(f)
            # Flushing marks the logged error as expected/handled.
            flush_logged_errors(ZeroDivisionError)
    test = LogAnError('test_something')
    runner = self.make_runner(test)
    result = self.make_result()
    runner.run(result)
    self.assertThat(
        result._events,
        Equals([
            ('startTest', test),
            ('addSuccess', test, {'twisted-log': text_content('')}),
            ('stopTest', test)]))
def test_log_in_details(self):
    """Messages logged during the test appear in the 'twisted-log' detail
    attached to the test's error."""
    class LogAnError(TestCase):
        def test_something(self):
            log.msg("foo")
            1/0
    test = LogAnError('test_something')
    runner = self.make_runner(test)
    result = self.make_result()
    runner.run(result)
    self.assertThat(
        [event[:2] for event in result._events],
        Equals([
            ('startTest', test),
            ('addError', test),
            ('stopTest', test)]))
    error = result._events[1][2]
    self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
def test_debugging_unchanged_during_test_by_default(self):
    """By default the runner does not alter Deferred/DelayedCall debug
    settings while the test runs."""
    # Snapshot the debug flags before, then again inside the test.
    debugging = [(defer.Deferred.debug, DelayedCall.debug)]
    class SomeCase(TestCase):
        def test_debugging_enabled(self):
            debugging.append((defer.Deferred.debug, DelayedCall.debug))
    test = SomeCase('test_debugging_enabled')
    runner = AsynchronousDeferredRunTest(
        test, handlers=test.exception_handlers,
        reactor=self.make_reactor(), timeout=self.make_timeout())
    runner.run(self.make_result())
    self.assertEqual(debugging[0], debugging[1])
def test_debugging_enabled_during_test_with_debug_flag(self):
    """With debug=True the runner turns on Deferred and DelayedCall
    debugging for the duration of the test and restores both flags after.
    """
    self.patch(defer.Deferred, 'debug', False)
    self.patch(DelayedCall, 'debug', False)
    debugging = []
    class SomeCase(TestCase):
        def test_debugging_enabled(self):
            # Snapshot the debug flags while the test is running.
            debugging.append((defer.Deferred.debug, DelayedCall.debug))
    test = SomeCase('test_debugging_enabled')
    runner = AsynchronousDeferredRunTest(
        test, handlers=test.exception_handlers,
        reactor=self.make_reactor(), timeout=self.make_timeout(),
        debug=True)
    runner.run(self.make_result())
    # Debugging was on while the test ran...
    self.assertEqual([(True, True)], debugging)
    # ...and both flags are restored afterwards.  (Bug fix: the original
    # asserted defer.Deferred.debug twice and never checked
    # DelayedCall.debug.)
    self.assertEqual(False, defer.Deferred.debug)
    self.assertEqual(False, DelayedCall.debug)
class TestAssertFailsWith(NeedsTwistedTestCase):
    """Tests for `assert_fails_with`."""

    # Use the synchronous Deferred runner when Twisted is available.
    if SynchronousDeferredRunTest is not None:
        run_tests_with = SynchronousDeferredRunTest

    def test_assert_fails_with_success(self):
        """assert_fails_with fails the test if it's given a Deferred that
        succeeds."""
        marker = object()
        d = assert_fails_with(defer.succeed(marker), RuntimeError)
        def check_result(failure):
            failure.trap(self.failureException)
            self.assertThat(
                str(failure.value),
                Equals("RuntimeError not raised (%r returned)" % (marker,)))
        # Success path must not be taken; the errback checks the message.
        d.addCallbacks(
            lambda x: self.fail("Should not have succeeded"), check_result)
        return d

    def test_assert_fails_with_success_multiple_types(self):
        """With several expected exception types, the failure message names
        all of them when the Deferred succeeds."""
        marker = object()
        d = assert_fails_with(
            defer.succeed(marker), RuntimeError, ZeroDivisionError)
        def check_result(failure):
            failure.trap(self.failureException)
            self.assertThat(
                str(failure.value),
                Equals("RuntimeError, ZeroDivisionError not raised "
                       "(%r returned)" % (marker,)))
        d.addCallbacks(
            lambda x: self.fail("Should not have succeeded"), check_result)
        return d

    def test_assert_fails_with_wrong_exception(self):
        """Failing with an unexpected exception type fails the test and the
        message names both the actual and the expected types."""
        d = assert_fails_with(
            defer.maybeDeferred(lambda: 1/0), RuntimeError, KeyboardInterrupt)
        def check_result(failure):
            failure.trap(self.failureException)
            lines = str(failure.value).splitlines()
            # Only the first two lines are stable; the rest is a traceback.
            self.assertThat(
                lines[:2],
                Equals([
                    ("ZeroDivisionError raised instead of RuntimeError, "
                     "KeyboardInterrupt:"),
                    " Traceback (most recent call last):",
                ]))
        d.addCallbacks(
            lambda x: self.fail("Should not have succeeded"), check_result)
        return d

    def test_assert_fails_with_expected_exception(self):
        """assert_fails_with calls back with the value of the failure if
        it's one of the expected types of failures."""
        try:
            1/0
        except ZeroDivisionError:
            f = failure.Failure()
        d = assert_fails_with(defer.fail(f), ZeroDivisionError)
        return d.addCallback(self.assertThat, Equals(f.value))

    def test_custom_failure_exception(self):
        """If assert_fails_with is passed a 'failureException' keyword
        argument, then it will raise that instead of `AssertionError`."""
        class CustomException(Exception):
            pass
        marker = object()
        d = assert_fails_with(
            defer.succeed(marker), RuntimeError,
            failureException=CustomException)
        def check_result(failure):
            failure.trap(CustomException)
            self.assertThat(
                str(failure.value),
                Equals("RuntimeError not raised (%r returned)" % (marker,)))
        return d.addCallbacks(
            lambda x: self.fail("Should not have succeeded"), check_result)
def test_suite():
    """Build the suite for this module plus the generated integration tests."""
    from unittest import TestLoader, TestSuite
    loader = TestLoader()
    suite = TestSuite()
    suite.addTest(loader.loadTestsFromName(__name__))
    suite.addTest(make_integration_tests())
    return suite
| bsd-2-clause |
windinthew/audacity | lib-src/lv2/serd/waflib/Runner.py | 330 | 4483 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import random,atexit
try:
from queue import Queue
except ImportError:
from Queue import Queue
from waflib import Utils,Task,Errors,Logs
GAP=10
class TaskConsumer(Utils.threading.Thread):
	"""Daemon thread that executes tasks pulled from its ``ready`` queue."""
	def __init__(self):
		Utils.threading.Thread.__init__(self)
		# Queue of work items: Task objects or callables taking the consumer.
		self.ready=Queue()
		# Daemon so a stuck consumer cannot block interpreter shutdown.
		self.setDaemon(1)
		self.start()
	def run(self):
		# Any exception (including the None sentinel being "called" in
		# loop()) silently terminates the thread.
		try:
			self.loop()
		except Exception:
			pass
	def loop(self):
		while 1:
			tsk=self.ready.get()
			if not isinstance(tsk,Task.TaskBase):
				# Non-task items are treated as callables (e.g. the setq
				# re-binding functions used by Parallel).
				tsk(self)
			else:
				tsk.process()
pool=Queue()
def get_pool():
	"""Return a consumer thread, reusing an idle one from the module-level
	pool when possible, otherwise spawning a fresh TaskConsumer."""
	try:
		consumer = pool.get(False)
	except Exception:
		consumer = TaskConsumer()
	return consumer
def put_pool(x):
	# Return a consumer thread to the module-level pool for later reuse.
	pool.put(x)
def _free_resources():
	# At interpreter exit: drain the pool, wake every consumer with a None
	# sentinel (None is not a TaskBase, so the consumer's loop "calls" it,
	# raising and ending the thread), then join them all.
	global pool
	lst=[]
	while pool.qsize():
		lst.append(pool.get())
	for x in lst:
		x.ready.put(None)
	for x in lst:
		x.join()
	pool=None
atexit.register(_free_resources)
class Parallel(object):
	"""Schedule the tasks obtained from the build context for execution,
	distributing them to a pool of consumer threads."""
	def __init__(self,bld,j=2):
		# maximum number of consumer threads to use
		self.numjobs=j
		# the build context
		self.bld=bld
		# tasks that may be executed immediately
		self.outstanding=[]
		# tasks waiting for other tasks to complete first
		self.frozen=[]
		# queue through which consumers hand finished tasks back
		self.out=Queue(0)
		# number of tasks currently handed to the consumers
		self.count=0
		# number of tasks processed so far (also used for deadlock detection)
		self.processed=1
		self.stop=False
		self.error=[]
		# iterator yielding groups of tasks, set by the build context
		self.biter=None
		self.dirty=False
	def get_next_task(self):
		# Return the next runnable task, or None when none is outstanding.
		if not self.outstanding:
			return None
		return self.outstanding.pop(0)
	def postpone(self,tsk):
		# Park a task whose dependencies are not ready; inserting at a
		# random end varies the retry order between attempts.
		if random.randint(0,1):
			self.frozen.insert(0,tsk)
		else:
			self.frozen.append(tsk)
	def refill_task_list(self):
		# Throttle: wait for consumers when too many tasks are in flight.
		while self.count>self.numjobs*GAP:
			self.get_out()
		while not self.outstanding:
			if self.count:
				self.get_out()
			elif self.frozen:
				# If no task completed since the last visit (processed count
				# unchanged), the frozen tasks can never unblock: deadlock.
				try:
					cond=self.deadlock==self.processed
				except AttributeError:
					pass
				else:
					if cond:
						msg='check the build order for the tasks'
						for tsk in self.frozen:
							if not tsk.run_after:
								msg='check the methods runnable_status'
								break
						lst=[]
						for tsk in self.frozen:
							lst.append('%s\t-> %r'%(repr(tsk),[id(x)for x in tsk.run_after]))
						raise Errors.WafError('Deadlock detected: %s%s'%(msg,''.join(lst)))
				self.deadlock=self.processed
			if self.frozen:
				# Retry the frozen tasks.
				self.outstanding+=self.frozen
				self.frozen=[]
			elif not self.count:
				# Nothing pending at all: pull the next group of tasks.
				self.outstanding.extend(self.biter.next())
				self.total=self.bld.total()
				break
	def add_more_tasks(self,tsk):
		# Tasks may create additional tasks at runtime via 'more_tasks'.
		if getattr(tsk,'more_tasks',None):
			self.outstanding+=tsk.more_tasks
			self.total+=len(tsk.more_tasks)
	def get_out(self):
		# Block until a consumer returns a finished task.
		tsk=self.out.get()
		if not self.stop:
			self.add_more_tasks(tsk)
		self.count-=1
		self.dirty=True
		return tsk
	def error_handler(self,tsk):
		# Record a failed task; stop the build unless 'keep' is set.
		if not self.bld.keep:
			self.stop=True
		self.error.append(tsk)
	def add_task(self,tsk):
		# Hand a task to the consumer threads, creating the pool lazily.
		try:
			self.pool
		except AttributeError:
			self.init_task_pool()
		self.ready.put(tsk)
	def init_task_pool(self):
		# Borrow consumer threads and rebind them to our shared 'ready'
		# queue by sending each a callable (see TaskConsumer.loop).
		pool=self.pool=[get_pool()for i in range(self.numjobs)]
		self.ready=Queue(0)
		def setq(consumer):
			consumer.ready=self.ready
		for x in pool:
			x.ready.put(setq)
		return pool
	def free_task_pool(self):
		# Detach the consumers from our queue and return them to the
		# module-level pool for reuse by later builds.
		def setq(consumer):
			consumer.ready=Queue(0)
			self.out.put(self)
		try:
			pool=self.pool
		except AttributeError:
			pass
		else:
			for x in pool:
				self.ready.put(setq)
			for x in pool:
				self.get_out()
			for x in pool:
				put_pool(x)
			self.pool=[]
	def start(self):
		"""Main loop: feed runnable tasks to the consumers until the build
		completes, fails, or is stopped."""
		self.total=self.bld.total()
		while not self.stop:
			self.refill_task_list()
			tsk=self.get_next_task()
			if not tsk:
				if self.count:
					# Tasks are still running; wait for them to finish.
					continue
				else:
					break
			if tsk.hasrun:
				self.processed+=1
				continue
			if self.stop:
				break
			try:
				st=tsk.runnable_status()
			except Exception:
				self.processed+=1
				tsk.err_msg=Utils.ex_stack()
				if not self.stop and self.bld.keep:
					# 'keep' mode: mark as skipped and continue the build.
					tsk.hasrun=Task.SKIPPED
					if self.bld.keep==1:
						if Logs.verbose>1 or not self.error:
							self.error.append(tsk)
						self.stop=True
					else:
						if Logs.verbose>1:
							self.error.append(tsk)
					continue
				tsk.hasrun=Task.EXCEPTION
				self.error_handler(tsk)
				continue
			if st==Task.ASK_LATER:
				self.postpone(tsk)
			elif st==Task.SKIP_ME:
				self.processed+=1
				tsk.hasrun=Task.SKIPPED
				self.add_more_tasks(tsk)
			else:
				# Task is runnable: dispatch it.
				tsk.position=(self.processed,self.total)
				self.count+=1
				tsk.master=self
				self.processed+=1
				if self.numjobs==1:
					# Single-job mode runs the task inline, no threads.
					tsk.process()
				else:
					self.add_task(tsk)
		# Drain in-flight tasks after an error so the consumers settle.
		while self.error and self.count:
			self.get_out()
		assert(self.count==0 or self.stop)
		self.free_task_pool()
| gpl-2.0 |
kimkvn/weather-react | node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py | 1869 | 1247 | # Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies so large structure that small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
  """Raised when deepcopy encounters a type it does not support."""
  pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
  """Deep copy operation on gyp objects such as strings, ints, dicts
  and lists. More than twice as fast as copy.deepcopy but much less
  generic."""
  try:
    return _deepcopy_dispatch[type(x)](x)
  except KeyError:
    # Bug fix: the original wrote  'msg part 1 ' + 'msg part 2' % type(x),
    # so '%' applied only to the second literal (which has no conversion
    # specifier) and the raise itself failed with "not all arguments
    # converted during string formatting".  Implicit string concatenation
    # makes '%' format the full message as intended.
    raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy '
                'or expand simple_copy support.' % type(x))
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
  # Immutable (atomic) values can be shared, so return them unchanged.
  return x
# Register the atomic types (Python 2 only: includes long and unicode).
for x in (type(None), int, long, float,
          bool, str, unicode, type):
  d[x] = _deepcopy_atomic
def _deepcopy_list(x):
  """Copy a list by recursively deep-copying every element."""
  return list(map(deepcopy, x))
d[list] = _deepcopy_list
def _deepcopy_dict(x):
  # Copy a dict, deep-copying both keys and values (Python 2 iteritems).
  y = {}
  for key, value in x.iteritems():
    y[deepcopy(key)] = deepcopy(value)
  return y
d[dict] = _deepcopy_dict
del d
| mit |
PanneauLED/Teensy-Files | Panel_Website/node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 1788 | 1435 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
	"""Return the unicode string for a code point, using a surrogate pair
	for astral (non-BMP) code points; return 'Error' for anything else.
	(Python 2: relies on unichr.)"""
	if codePoint >= 0x0000 and codePoint <= 0xFFFF:
		return unichr(codePoint)
	elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
		# Surrogate-pair formulae; see the links at the top of the file.
		highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
		lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
		return unichr(highSurrogate) + unichr(lowSurrogate)
	else:
		return 'Error'
def hexify(codePoint):
	"""Format a code point as an uppercase 'U+XXXXXX' string, zero-padded
	to at least six hex digits."""
	return 'U+' + format(codePoint, 'X').zfill(6)
def writeFile(filename, contents):
	# Echo the filename so generation progress is visible (Python 2 print).
	print filename
	# Write the stripped contents with a single trailing newline.
	with open(filename, 'w') as f:
		f.write(contents.strip() + '\n')
# Build one record per Unicode scalar value: the code point, its decoded
# string form, and its UTF-8 byte sequence viewed through latin1.
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
	# Skip non-scalar values (the surrogate range).
	if codePoint >= 0xD800 and codePoint <= 0xDFFF:
		continue
	symbol = unisymbol(codePoint)
	# Encode to UTF-8, then decode as latin1 so each byte maps to one
	# character; see http://stackoverflow.com/a/17199950/96656
	bytes = symbol.encode('utf8').decode('latin1')
	data.append({
		'codePoint': codePoint,
		'decoded': symbol,
		'encoded': bytes
	});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
	r'\\u([a-fA-F0-9]{4})',
	lambda match: r'\u{}'.format(match.group(1).upper()),
	jsonData
)
writeFile('data.json', jsonData)
| mit |
gregdek/ansible | lib/ansible/modules/notification/hipchat.py | 77 | 6205 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hipchat
version_added: "1.2"
short_description: Send a message to Hipchat.
description:
- Send a message to a Hipchat room, with options to control the formatting.
options:
token:
description:
- API token.
required: true
room:
description:
- ID or name of the room.
required: true
from:
description:
- Name the message will appear to be sent from. Max length is 15
characters - above this it will be truncated.
default: Ansible
msg:
description:
- The message body.
required: true
color:
description:
- Background color for the message.
default: yellow
choices: [ "yellow", "red", "green", "purple", "gray", "random" ]
msg_format:
description:
- Message format.
default: text
choices: [ "text", "html" ]
notify:
description:
- If true, a notification will be triggered for users in the room.
type: bool
default: 'yes'
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
version_added: 1.5.1
api:
description:
- API url if using a self-hosted hipchat server. For Hipchat API version
2 use the default URI with C(/v2) instead of C(/v1).
default: 'https://api.hipchat.com/v1'
version_added: 1.6.0
author:
- Shirou Wakayama (@shirou)
- Paul Bourdel (@pb8226)
'''
EXAMPLES = '''
- hipchat:
room: notif
msg: Ansible task finished
# Use Hipchat API version 2
- hipchat:
api: https://api.hipchat.com/v2/
token: OAUTH2_TOKEN
room: notify
msg: Ansible task finished
'''
# ===========================================
# HipChat module specific support methods.
#
import json
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.six.moves.urllib.request import pathname2url
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
DEFAULT_URI = "https://api.hipchat.com/v1"
MSG_URI_V1 = "/rooms/message"
NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
                color='yellow', notify=False, api=MSG_URI_V1):
    '''Send a message to a HipChat v1 API server.

    Exits the module early (changed=False) in check mode; fails the module
    on any non-200 HTTP status.  ``api`` is the base URL of the server
    (callers pass the full base; the default here is only the path suffix —
    presumably never used directly, verify against main()).
    '''
    params = {}
    params['room_id'] = room
    params['from'] = msg_from[:15]  # max length is 15
    params['message'] = msg
    params['message_format'] = msg_format
    params['color'] = color
    params['api'] = api
    # v1 expects notify as 0/1 rather than a boolean.
    params['notify'] = int(notify)
    # v1 authenticates via an auth_token query parameter.
    url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
    data = urlencode(params)
    if module.check_mode:
        # In check mode, exit before actually sending the message
        module.exit_json(changed=False)
    response, info = fetch_url(module, url, data=data)
    if info['status'] == 200:
        return response.read()
    else:
        module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
                color='yellow', notify=False, api=NOTIFY_URI_V2):
    '''Send a message to a HipChat v2 API server.

    Exits the module early (changed=False) in check mode; fails the module
    on any HTTP status other than 200/204.  Note msg_from is accepted for
    signature parity with send_msg_v1 but is not used by the v2 payload.
    '''
    # v2 authenticates with a bearer token and a JSON body.
    headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
    body = dict()
    body['message'] = msg
    body['color'] = color
    body['message_format'] = msg_format
    body['notify'] = notify
    POST_URL = api + NOTIFY_URI_V2
    # Room may be a name with special characters; URL-escape it.
    url = POST_URL.replace('{id_or_name}', pathname2url(room))
    data = json.dumps(body)
    if module.check_mode:
        # In check mode, exit before actually sending the message
        module.exit_json(changed=False)
    response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
    # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows
    # 204 to be the expected result code.
    if info['status'] in [200, 204]:
        return response.read()
    else:
        module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
# ===========================================
# Module execution.
#
def main():
    """Ansible module entry point: parse parameters and dispatch to the
    HipChat v1 or v2 sender based on the configured API URL."""
    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=True, no_log=True),
            room=dict(required=True),
            msg=dict(required=True),
            msg_from=dict(default="Ansible", aliases=['from']),
            color=dict(default="yellow", choices=["yellow", "red", "green",
                                                  "purple", "gray", "random"]),
            msg_format=dict(default="text", choices=["text", "html"]),
            notify=dict(default=True, type='bool'),
            validate_certs=dict(default='yes', type='bool'),
            api=dict(default=DEFAULT_URI),
        ),
        supports_check_mode=True
    )
    token = module.params["token"]
    room = str(module.params["room"])
    msg = module.params["msg"]
    msg_from = module.params["msg_from"]
    color = module.params["color"]
    msg_format = module.params["msg_format"]
    notify = module.params["notify"]
    api = module.params["api"]
    try:
        # A '/v2' segment in the API URL selects the v2 protocol.
        if api.find('/v2') != -1:
            send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
        else:
            send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
    except Exception as e:
        module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
    changed = True
    module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
if __name__ == '__main__':
    main()
| gpl-3.0 |
ebsaral/django-rest-framework | tests/test_serializer.py | 64 | 10280 | # coding: utf-8
from __future__ import unicode_literals
import pickle
import pytest
from rest_framework import serializers
from rest_framework.compat import unicode_repr
from .utils import MockObject
# Tests for core functionality.
# -----------------------------
class TestSerializer:
    """Core Serializer behaviour: validation, errors, partial data."""

    def setup(self):
        # A minimal two-field serializer shared by all tests in this class.
        class ExampleSerializer(serializers.Serializer):
            char = serializers.CharField()
            integer = serializers.IntegerField()
        self.Serializer = ExampleSerializer

    def test_valid_serializer(self):
        serializer = self.Serializer(data={'char': 'abc', 'integer': 123})
        assert serializer.is_valid()
        assert serializer.validated_data == {'char': 'abc', 'integer': 123}
        assert serializer.errors == {}

    def test_invalid_serializer(self):
        serializer = self.Serializer(data={'char': 'abc'})
        assert not serializer.is_valid()
        assert serializer.validated_data == {}
        assert serializer.errors == {'integer': ['This field is required.']}

    def test_partial_validation(self):
        # partial=True allows missing fields without raising errors.
        serializer = self.Serializer(data={'char': 'abc'}, partial=True)
        assert serializer.is_valid()
        assert serializer.validated_data == {'char': 'abc'}
        assert serializer.errors == {}

    def test_empty_serializer(self):
        serializer = self.Serializer()
        assert serializer.data == {'char': '', 'integer': None}

    def test_missing_attribute_during_serialization(self):
        # Serializing an object lacking the declared fields raises.
        class MissingAttributes:
            pass
        instance = MissingAttributes()
        serializer = self.Serializer(instance)
        with pytest.raises(AttributeError):
            serializer.data
class TestValidateMethod:
    """The object-level validate() hook can raise non-field or field errors."""

    def test_non_field_error_validate_method(self):
        class ExampleSerializer(serializers.Serializer):
            char = serializers.CharField()
            integer = serializers.IntegerField()

            def validate(self, attrs):
                # A plain message becomes a non_field_errors entry.
                raise serializers.ValidationError('Non field error')
        serializer = ExampleSerializer(data={'char': 'abc', 'integer': 123})
        assert not serializer.is_valid()
        assert serializer.errors == {'non_field_errors': ['Non field error']}

    def test_field_error_validate_method(self):
        class ExampleSerializer(serializers.Serializer):
            char = serializers.CharField()
            integer = serializers.IntegerField()

            def validate(self, attrs):
                # A dict maps the message onto a specific field.
                raise serializers.ValidationError({'char': 'Field error'})
        serializer = ExampleSerializer(data={'char': 'abc', 'integer': 123})
        assert not serializer.is_valid()
        assert serializer.errors == {'char': ['Field error']}
class TestBaseSerializer:
    """BaseSerializer with hand-written to_representation/to_internal_value."""

    def setup(self):
        class ExampleSerializer(serializers.BaseSerializer):
            def to_representation(self, obj):
                # name + domain -> single email field on output.
                return {
                    'id': obj['id'],
                    'email': obj['name'] + '@' + obj['domain']
                }

            def to_internal_value(self, data):
                # Split the email back into name and domain on input.
                name, domain = str(data['email']).split('@')
                return {
                    'id': int(data['id']),
                    'name': name,
                    'domain': domain,
                }
        self.Serializer = ExampleSerializer

    def test_serialize_instance(self):
        instance = {'id': 1, 'name': 'tom', 'domain': 'example.com'}
        serializer = self.Serializer(instance)
        assert serializer.data == {'id': 1, 'email': 'tom@example.com'}

    def test_serialize_list(self):
        instances = [
            {'id': 1, 'name': 'tom', 'domain': 'example.com'},
            {'id': 2, 'name': 'ann', 'domain': 'example.com'},
        ]
        serializer = self.Serializer(instances, many=True)
        assert serializer.data == [
            {'id': 1, 'email': 'tom@example.com'},
            {'id': 2, 'email': 'ann@example.com'}
        ]

    def test_validate_data(self):
        data = {'id': 1, 'email': 'tom@example.com'}
        serializer = self.Serializer(data=data)
        assert serializer.is_valid()
        assert serializer.validated_data == {
            'id': 1,
            'name': 'tom',
            'domain': 'example.com'
        }

    def test_validate_list(self):
        data = [
            {'id': 1, 'email': 'tom@example.com'},
            {'id': 2, 'email': 'ann@example.com'},
        ]
        serializer = self.Serializer(data=data, many=True)
        assert serializer.is_valid()
        assert serializer.validated_data == [
            {'id': 1, 'name': 'tom', 'domain': 'example.com'},
            {'id': 2, 'name': 'ann', 'domain': 'example.com'}
        ]
class TestStarredSource:
    """
    Tests for `source='*'` argument, which is used for nested representations.

    For example:

        nested_field = NestedField(source='*')
    """
    # Nested input shape shared by both tests.
    data = {
        'nested1': {'a': 1, 'b': 2},
        'nested2': {'c': 3, 'd': 4}
    }

    def setup(self):
        class NestedSerializer1(serializers.Serializer):
            a = serializers.IntegerField()
            b = serializers.IntegerField()

        class NestedSerializer2(serializers.Serializer):
            c = serializers.IntegerField()
            d = serializers.IntegerField()

        class TestSerializer(serializers.Serializer):
            # source='*' maps each nested serializer onto the whole object.
            nested1 = NestedSerializer1(source='*')
            nested2 = NestedSerializer2(source='*')
        self.Serializer = TestSerializer

    def test_nested_validate(self):
        """
        A nested representation is validated into a flat internal object.
        """
        serializer = self.Serializer(data=self.data)
        assert serializer.is_valid()
        assert serializer.validated_data == {
            'a': 1,
            'b': 2,
            'c': 3,
            'd': 4
        }

    def test_nested_serialize(self):
        """
        An object can be serialized into a nested representation.
        """
        instance = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
        serializer = self.Serializer(instance)
        assert serializer.data == self.data
class TestIncorrectlyConfigured:
    """A helpful error is raised when a field name matches no attribute."""

    def test_incorrect_field_name(self):
        class ExampleSerializer(serializers.Serializer):
            incorrect_name = serializers.IntegerField()

        class ExampleObject:
            def __init__(self):
                self.correct_name = 123
        instance = ExampleObject()
        serializer = ExampleSerializer(instance)
        with pytest.raises(AttributeError) as exc_info:
            serializer.data
        msg = str(exc_info.value)
        # The error message should name both the field and the serializer.
        assert msg.startswith(
            "Got AttributeError when attempting to get a value for field `incorrect_name` on serializer `ExampleSerializer`.\n"
            "The serializer field might be named incorrectly and not match any attribute or key on the `ExampleObject` instance.\n"
            "Original exception text was:"
        )
class TestUnicodeRepr:
    """repr() of a serializer must cope with non-ASCII instance reprs."""

    def test_unicode_repr(self):
        class ExampleSerializer(serializers.Serializer):
            example = serializers.CharField()

        class ExampleObject:
            def __init__(self):
                self.example = '한국'

            def __repr__(self):
                return unicode_repr(self.example)
        instance = ExampleObject()
        serializer = ExampleSerializer(instance)
        repr(serializer)  # Should not error.
class TestNotRequiredOutput:
    """Interaction of required=False and defaults with missing output values."""

    def test_not_required_output_for_dict(self):
        """
        'required=False' should allow a dictionary key to be missing in output.
        """
        class ExampleSerializer(serializers.Serializer):
            omitted = serializers.CharField(required=False)
            included = serializers.CharField()
        serializer = ExampleSerializer(data={'included': 'abc'})
        serializer.is_valid()
        assert serializer.data == {'included': 'abc'}

    def test_not_required_output_for_object(self):
        """
        'required=False' should allow an object attribute to be missing in output.
        """
        class ExampleSerializer(serializers.Serializer):
            omitted = serializers.CharField(required=False)
            included = serializers.CharField()

            def create(self, validated_data):
                return MockObject(**validated_data)
        serializer = ExampleSerializer(data={'included': 'abc'})
        serializer.is_valid()
        serializer.save()
        assert serializer.data == {'included': 'abc'}

    def test_default_required_output_for_dict(self):
        """
        'default="something"' should require dictionary key.

        We need to handle this as the field will have an implicit
        'required=False', but it should still have a value.
        """
        class ExampleSerializer(serializers.Serializer):
            omitted = serializers.CharField(default='abc')
            included = serializers.CharField()
        serializer = ExampleSerializer({'included': 'abc'})
        with pytest.raises(KeyError):
            serializer.data

    def test_default_required_output_for_object(self):
        """
        'default="something"' should require object attribute.

        We need to handle this as the field will have an implicit
        'required=False', but it should still have a value.
        """
        class ExampleSerializer(serializers.Serializer):
            omitted = serializers.CharField(default='abc')
            included = serializers.CharField()
        instance = MockObject(included='abc')
        serializer = ExampleSerializer(instance)
        with pytest.raises(AttributeError):
            serializer.data
class TestCacheSerializerData:
    def test_cache_serializer_data(self):
        """
        Pickling serializer output drops the serializer machinery but
        round-trips the plain data itself.
        """
        class ExampleSerializer(serializers.Serializer):
            field1 = serializers.CharField()
            field2 = serializers.CharField()

        ser = ExampleSerializer({'field1': 'a', 'field2': 'b'})
        restored = pickle.loads(pickle.dumps(ser.data))
        assert restored == {'field1': 'a', 'field2': 'b'}
| bsd-2-clause |
sexroute/commandergenius | project/jni/python/src/Lib/test/test_extcall.py | 55 | 6263 | """Doctest for method/function calls.
We're going the use these types for extra testing
>>> from UserList import UserList
>>> from UserDict import UserDict
We're defining four helper functions
>>> def e(a,b):
... print a, b
>>> def f(*a, **k):
... print a, test_support.sortdict(k)
>>> def g(x, *y, **z):
... print x, y, test_support.sortdict(z)
>>> def h(j=1, a=2, h=3):
... print j, a, h
Argument list examples
>>> f()
() {}
>>> f(1)
(1,) {}
>>> f(1, 2)
(1, 2) {}
>>> f(1, 2, 3)
(1, 2, 3) {}
>>> f(1, 2, 3, *(4, 5))
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *[4, 5])
(1, 2, 3, 4, 5) {}
>>> f(1, 2, 3, *UserList([4, 5]))
(1, 2, 3, 4, 5) {}
Here we add keyword arguments
>>> f(1, 2, 3, **{'a':4, 'b':5})
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *[4, 5], **{'a':6, 'b':7})
(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **{'a':8, 'b': 9})
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
>>> f(1, 2, 3, **UserDict(a=4, b=5))
(1, 2, 3) {'a': 4, 'b': 5}
>>> f(1, 2, 3, *(4, 5), **UserDict(a=6, b=7))
(1, 2, 3, 4, 5) {'a': 6, 'b': 7}
>>> f(1, 2, 3, x=4, y=5, *(6, 7), **UserDict(a=8, b=9))
(1, 2, 3, 6, 7) {'a': 8, 'b': 9, 'x': 4, 'y': 5}
Examples with invalid arguments (TypeErrors). We're also testing the function
names in the exception messages.
Verify clearing of SF bug #733667
>>> e(c=4)
Traceback (most recent call last):
...
TypeError: e() got an unexpected keyword argument 'c'
>>> g()
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*())
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(*(), **{})
Traceback (most recent call last):
...
TypeError: g() takes at least 1 argument (0 given)
>>> g(1)
1 () {}
>>> g(1, 2)
1 (2,) {}
>>> g(1, 2, 3)
1 (2, 3) {}
>>> g(1, 2, 3, *(4, 5))
1 (2, 3, 4, 5) {}
>>> class Nothing: pass
...
>>> g(*Nothing())
Traceback (most recent call last):
...
TypeError: g() argument after * must be a sequence, not instance
>>> class Nothing:
... def __len__(self): return 5
...
>>> g(*Nothing())
Traceback (most recent call last):
...
TypeError: g() argument after * must be a sequence, not instance
>>> class Nothing():
... def __len__(self): return 5
... def __getitem__(self, i):
... if i<3: return i
... else: raise IndexError(i)
...
>>> g(*Nothing())
0 (1, 2) {}
>>> class Nothing:
... def __init__(self): self.c = 0
... def __iter__(self): return self
... def next(self):
... if self.c == 4:
... raise StopIteration
... c = self.c
... self.c += 1
... return c
...
>>> g(*Nothing())
0 (1, 2, 3) {}
Make sure that the function doesn't stomp the dictionary
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> d2 = d.copy()
>>> g(1, d=4, **d)
1 () {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> d == d2
True
What about willful misconduct?
>>> def saboteur(**kw):
... kw['x'] = 'm'
... return kw
>>> d = {}
>>> kw = saboteur(a=1, **d)
>>> d
{}
>>> g(1, 2, 3, **{'x': 4, 'y': 5})
Traceback (most recent call last):
...
TypeError: g() got multiple values for keyword argument 'x'
>>> f(**{1:2})
Traceback (most recent call last):
...
TypeError: f() keywords must be strings
>>> h(**{'e': 2})
Traceback (most recent call last):
...
TypeError: h() got an unexpected keyword argument 'e'
>>> h(*h)
Traceback (most recent call last):
...
TypeError: h() argument after * must be a sequence, not function
>>> dir(*h)
Traceback (most recent call last):
...
TypeError: dir() argument after * must be a sequence, not function
>>> None(*h)
Traceback (most recent call last):
...
TypeError: NoneType object argument after * must be a sequence, \
not function
>>> h(**h)
Traceback (most recent call last):
...
TypeError: h() argument after ** must be a mapping, not function
>>> dir(**h)
Traceback (most recent call last):
...
TypeError: dir() argument after ** must be a mapping, not function
>>> None(**h)
Traceback (most recent call last):
...
TypeError: NoneType object argument after ** must be a mapping, \
not function
>>> dir(b=1, **{'b': 1})
Traceback (most recent call last):
...
TypeError: dir() got multiple values for keyword argument 'b'
Another helper function
>>> def f2(*a, **b):
... return a, b
>>> d = {}
>>> for i in xrange(512):
... key = 'k%d' % i
... d[key] = i
>>> a, b = f2(1, *(2,3), **d)
>>> len(a), len(b), b == d
(3, 512, True)
>>> class Foo:
... def method(self, arg1, arg2):
... return arg1+arg2
>>> x = Foo()
>>> Foo.method(*(x, 1, 2))
3
>>> Foo.method(x, *(1, 2))
3
>>> Foo.method(*(1, 2, 3))
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
>>> Foo.method(1, *[2, 3])
Traceback (most recent call last):
...
TypeError: unbound method method() must be called with Foo instance as \
first argument (got int instance instead)
A PyCFunction that takes only positional parameters should allow an
empty keyword dictionary to pass without a complaint, but raise a
TypeError if the dictionary is not empty
>>> try:
... silence = id(1, *{})
... True
... except:
... False
True
>>> id(1, **{'foo': 1})
Traceback (most recent call last):
...
TypeError: id() takes no keyword arguments
"""
from test import test_support
def test_main():
    """Run this module's doctests via the regrtest doctest helper."""
    from test import test_extcall # self import
    test_support.run_doctest(test_extcall, True)

if __name__ == '__main__':
    test_main()
| lgpl-2.1 |
rspavel/spack | var/spack/repos/builtin/packages/ncbi-toolkit/package.py | 5 | 3169 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from glob import glob
from spack import *
class NcbiToolkit(AutotoolsPackage):
    """NCBI C++ Toolkit"""

    homepage = "https://www.ncbi.nlm.nih.gov/IEB/ToolBox/CPP_DOC/"
    url = "ftp://ftp.ncbi.nih.gov/toolbox/ncbi_tools++/CURRENT/ncbi_cxx--22_0_0.tar.gz"

    version('22_0_0', sha256='ef39429bbc7f13c44c0d327432d9cfb430f9f20d10d825e6b2c4ddd7ccce457f',
            url='ftp://ftp.ncbi.nih.gov/toolbox/ncbi_tools++/ARCHIVE/2019/Mar_28_2019/ncbi_cxx--22_0_0.tar.gz')
    version('21_0_0', sha256='48cc3ae24ca63d1ab1be148e7525e8c5b9f4eaa5eb36d172800784b640a84a4f',
            url='ftp://ftp.ncbi.nih.gov/toolbox/ncbi_tools++/ARCHIVE/2018/Apr_2_2018/ncbi_cxx--21_0_0.tar.gz')

    variant('debug', default=False,
            description='Build debug versions of libs and apps')

    # third-party libraries required by the toolkit
    depends_on('boost@1.35.0:')
    depends_on('bzip2')
    depends_on('jpeg')
    depends_on('libpng')
    depends_on('libtiff')
    depends_on('libxml2')
    depends_on('libxslt@1.1.14:')
    depends_on('lzo')
    depends_on('pcre')
    depends_on('giflib')
    depends_on('sqlite@3.6.6:')
    depends_on('zlib')
    depends_on('samtools')
    depends_on('bamtools')

    def configure_args(self):
        """Assemble the argument list passed to ./configure."""
        args = ['--without-sybase', '--without-fastcgi']
        # debug libs/apps are only built when the +debug variant is active
        if '+debug' not in self.spec:
            args += ['--without-debug']
        return args

    def patch(self):
        """Fix libjpeg boolean usage and adapt sources to newer Boost APIs."""
        # libjpeg expects its own TRUE macro, not the C++ 'true' literal
        with working_dir(join_path('src', 'util', 'image')):
            filter_file(r'jpeg_start_compress(&cinfo, true)',
                        'jpeg_start_compress(&cinfo, TRUE)',
                        'image_io_jpeg.cpp', string=True)
        # TODO: Convert these substitutions into BOOST_VERSION preprocessor
        # patches to send upstream.
        if self.spec.satisfies('^boost@1.69:'):
            with working_dir(join_path('include', 'corelib')):
                filter_file(r'(boost::unit_test::decorator::collector)',
                            r'\1_t', 'test_boost.hpp')
        if self.spec.satisfies('^boost@1.70:'):
            with working_dir(join_path('include', 'corelib')):
                filter_file(('unit_test::ut_detail::'
                             'ignore_unused_variable_warning'),
                            'ignore_unused', 'test_boost.hpp', string=True)
            with working_dir(join_path('src', 'corelib')):
                for file_ in ['test_boost.cpp', 'teamcity_boost.cpp']:
                    filter_file(
                        r'(void log_build_info\s*\(.*ostream&[^)]*)\);',
                        r'\1, bool log_build_info = true);', file_)
                    filter_file(r'(::log_build_info\(.*ostream.*&[^)]+)\)',
                                r'\1, bool log_build_info)', file_)
                    filter_file(r'(log_build_info\(ostr)\)', r'\1, true)',
                                file_)

    def build(self, spec, prefix):
        """Run 'make all_r' inside the generated *MT64/build directory."""
        with working_dir(join_path(glob(
                '*MT64')[0], 'build')):
            make('all_r')
| lgpl-2.1 |
miketry0313/test | Tools/autotest/autotest.py | 6 | 14074 | #!/usr/bin/env python
# APM automatic test suite
# Andrew Tridgell, October 2011
import pexpect, os, sys, shutil, atexit
import optparse, fnmatch, time, glob, traceback, signal
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pysim'))
import util
os.environ['PYTHONUNBUFFERED'] = '1'
os.putenv('TMPDIR', util.reltopdir('tmp'))
def get_default_params(atype):
    '''Capture the default parameter file for vehicle type *atype*.

    Boots the SIL simulator with a wiped EEPROM so MAVProxy saves the
    pristine defaults, then archives the .parm file into ../buildlogs.
    Returns True.
    '''
    sil = util.start_SIL(atype, wipe=True)
    mavproxy = util.start_MAVProxy_SIL(atype)
    print("Dumping defaults")
    idx = mavproxy.expect(['Please Run Setup', 'Saved [0-9]+ parameters to (\S+)'])
    if idx == 0:
        # we need to restart it after eeprom erase
        util.pexpect_close(mavproxy)
        util.pexpect_close(sil)
        sil = util.start_SIL(atype)
        mavproxy = util.start_MAVProxy_SIL(atype)
        idx = mavproxy.expect('Saved [0-9]+ parameters to (\S+)')
    # the expect() capture group holds the path MAVProxy wrote
    parmfile = mavproxy.match.group(1)
    dest = util.reltopdir('../buildlogs/%s-defaults.parm' % atype)
    shutil.copy(parmfile, dest)
    util.pexpect_close(mavproxy)
    util.pexpect_close(sil)
    print("Saved defaults for %s to %s" % (atype, dest))
    return True
def dump_logs(atype, logname=None):
    '''Dump the vehicle's DataFlash logs to ../buildlogs/<logname>.flashlog.

    *logname* defaults to the vehicle type. Returns True.
    '''
    print("Dumping logs for %s" % atype)
    if logname is None:
        logname = atype
    sil = util.start_SIL(atype)
    logfile = util.reltopdir('../buildlogs/%s.flashlog' % logname)
    log = open(logfile, mode='w')
    # setup=True drops us into the CLI menu where the 'logs' command lives
    mavproxy = util.start_MAVProxy_SIL(atype, setup=True, logfile=log)
    mavproxy.send('\n\n\n')
    print("navigating menus")
    mavproxy.expect(']')
    mavproxy.send("logs\n")
    mavproxy.expect("logs enabled:")
    lognums = []
    i = mavproxy.expect(["No logs", "(\d+) logs"])
    if i == 0:
        numlogs = 0
    else:
        numlogs = int(mavproxy.match.group(1))
    # first pass: collect the log numbers listed by the menu
    for i in range(numlogs):
        mavproxy.expect("Log (\d+)")
        lognums.append(int(mavproxy.match.group(1)))
    mavproxy.expect("Log]")
    # second pass: dump each log; output is captured via the logfile above
    for i in range(numlogs):
        print("Dumping log %u (i=%u)" % (lognums[i], i))
        mavproxy.send("dump %u\n" % lognums[i])
        mavproxy.expect("logs enabled:", timeout=120)
        mavproxy.expect("Log]")
    util.pexpect_close(mavproxy)
    util.pexpect_close(sil)
    log.close()
    print("Saved log for %s to %s" % (atype, logfile))
    return True
def build_all():
    '''Run Tools/scripts/build_all.sh; return True on success.'''
    print("Running build_all.sh")
    rc = util.run_cmd(util.reltopdir('Tools/scripts/build_all.sh'), dir=util.reltopdir('.'))
    if rc != 0:
        print("Failed build_all.sh")
        return False
    return True
def build_binaries():
    '''Run Tools/scripts/build_binaries.sh; return True on success.

    The script is copied to the tree root before running because it
    switches git branches while executing, which could otherwise rewrite
    the script underneath itself.
    '''
    print("Running build_binaries.sh")
    # shutil is already imported at module level; the old function-local
    # 'import shutil' was redundant and has been removed.
    orig = util.reltopdir('Tools/scripts/build_binaries.sh')
    copy = util.reltopdir('./build_binaries.sh')
    shutil.copyfile(orig, copy)
    shutil.copymode(orig, copy)
    if util.run_cmd(copy, dir=util.reltopdir('.')) != 0:
        print("Failed build_binaries.sh")
        return False
    return True
def build_examples():
    '''Run Tools/scripts/build_examples.sh; return True on success.'''
    print("Running build_examples.sh")
    script = util.reltopdir('Tools/scripts/build_examples.sh')
    if util.run_cmd(script, dir=util.reltopdir('.')) != 0:
        print("Failed build_examples.sh")
        return False
    return True
def build_parameters():
    '''Regenerate parameter metadata via param_parse.py; return True on success.'''
    print("Running param_parse.py")
    ok = util.run_cmd(util.reltopdir('Tools/autotest/param_metadata/param_parse.py'), dir=util.reltopdir('.')) == 0
    if not ok:
        print("Failed param_parse.py")
    return ok
def convert_gpx():
    '''Convert all buildlogs .tlog files to GPX/KML/KMZ plus a PNG track image.

    Returns True. Non-critical external tools (gpsbabel, zip) are run with
    checkfail=False so a missing tool does not abort the test run.
    '''
    # glob is already imported at module level; the old function-local
    # 'import glob' was redundant and has been removed.
    for m in glob.glob(util.reltopdir("../buildlogs/*.tlog")):
        util.run_cmd(util.reltopdir("../mavlink/pymavlink/tools/mavtogpx.py") + " --nofixcheck " + m)
        gpx = m + '.gpx'
        kml = m + '.kml'
        util.run_cmd('gpsbabel -i gpx -f %s -o kml,units=m,floating=1,extrude=1 -F %s' % (gpx, kml), checkfail=False)
        util.run_cmd('zip %s.kmz %s.kml' % (m, m), checkfail=False)
        util.run_cmd("mavflightview.py --imagefile=%s.png %s" % (m, m))
    return True
def test_prerequesites():
    '''check we have the right directories and tools to run tests'''
    print("Testing prerequesites")
    # all run artifacts are collected under ../buildlogs; make sure it exists
    util.mkdir_p(util.reltopdir('../buildlogs'))
    return True
def alarm_handler(signum, frame):
    '''handle test timeout: record the failure, publish results, kill everything'''
    global results, opts
    try:
        # record the timeout as a failed step before tearing down
        results.add('TIMEOUT', '<span class="failed-text">FAILED</span>', opts.timeout)
        util.pexpect_close_all()
        convert_gpx()
        write_fullresults()
        # kill the whole process group so no simulator children linger
        os.killpg(0, signal.SIGKILL)
    except Exception:
        pass
    sys.exit(1)
############## main program #############
parser = optparse.OptionParser("autotest")
parser.add_option("--skip", type='string', default='', help='list of steps to skip (comma separated)')
parser.add_option("--list", action='store_true', default=False, help='list the available steps')
parser.add_option("--viewerip", default=None, help='IP address to send MAVLink and fg packets to')
parser.add_option("--map", action='store_true', default=False, help='show map')
parser.add_option("--experimental", default=False, action='store_true', help='enable experimental tests')
parser.add_option("--timeout", default=3000, type='int', help='maximum runtime in seconds')

opts, args = parser.parse_args()

# vehicle-specific test modules (found via the pysim sys.path entry above)
import arducopter, arduplane, apmrover2

# ordered list of all known steps; positional args select a subset by fnmatch
steps = [
    'prerequesites',
    'build.All',
    'build.Binaries',
    'build.Examples',
    'build.Parameters',
    'build2560.ArduPlane',
    'build.ArduPlane',
    'defaults.ArduPlane',
    'fly.ArduPlane',
    'logs.ArduPlane',
    'build2560.APMrover2',
    'build.APMrover2',
    'defaults.APMrover2',
    'drive.APMrover2',
    'logs.APMrover2',
    'build2560.ArduCopter',
    'build.ArduCopter',
    'defaults.ArduCopter',
    'fly.ArduCopter',
    'logs.ArduCopter',
    'fly.CopterAVC',
    'logs.CopterAVC',
    'convertgpx',
    ]

skipsteps = opts.skip.split(',')

# ensure we catch timeouts
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(opts.timeout)

if opts.list:
    for step in steps:
        print(step)
    sys.exit(0)
def skip_step(step):
    '''Return True when *step* matches any --skip pattern (case-insensitive fnmatch).'''
    lowered = step.lower()
    return any(fnmatch.fnmatch(lowered, pattern.lower()) for pattern in skipsteps)
def run_step(step):
    '''Dispatch a single named step; returns the step's boolean result.

    Raises RuntimeError for an unknown step name.
    '''
    if step == "prerequesites":
        return test_prerequesites()
    # SIL (software-in-the-loop) builds
    if step == 'build.ArduPlane':
        return util.build_SIL('ArduPlane')
    if step == 'build.APMrover2':
        return util.build_SIL('APMrover2')
    if step == 'build.ArduCopter':
        return util.build_SIL('ArduCopter')
    # AVR mega2560 hardware builds
    if step == 'build2560.ArduCopter':
        return util.build_AVR('ArduCopter', board='mega2560')
    if step == 'build2560.ArduPlane':
        return util.build_AVR('ArduPlane', board='mega2560')
    if step == 'build2560.APMrover2':
        return util.build_AVR('APMrover2', board='mega2560')
    # default-parameter capture
    if step == 'defaults.ArduPlane':
        return get_default_params('ArduPlane')
    if step == 'defaults.ArduCopter':
        return get_default_params('ArduCopter')
    if step == 'defaults.APMrover2':
        return get_default_params('APMrover2')
    # DataFlash log dumps
    if step == 'logs.ArduPlane':
        return dump_logs('ArduPlane')
    if step == 'logs.ArduCopter':
        return dump_logs('ArduCopter')
    if step == 'logs.CopterAVC':
        return dump_logs('ArduCopter', 'CopterAVC')
    if step == 'logs.APMrover2':
        return dump_logs('APMrover2')
    # simulated flight/drive tests
    if step == 'fly.ArduCopter':
        return arducopter.fly_ArduCopter(viewerip=opts.viewerip, map=opts.map)
    if step == 'fly.CopterAVC':
        return arducopter.fly_CopterAVC(viewerip=opts.viewerip, map=opts.map)
    if step == 'fly.ArduPlane':
        return arduplane.fly_ArduPlane(viewerip=opts.viewerip, map=opts.map)
    if step == 'drive.APMrover2':
        return apmrover2.drive_APMrover2(viewerip=opts.viewerip, map=opts.map)
    # tree-wide builds and documentation
    if step == 'build.All':
        return build_all()
    if step == 'build.Binaries':
        return build_binaries()
    if step == 'build.Examples':
        return build_examples()
    if step == 'build.Parameters':
        return build_parameters()
    if step == 'convertgpx':
        return convert_gpx()
    raise RuntimeError("Unknown step %s" % step)
class TestResult(object):
    '''Outcome of a single test step: name, result markup, runtime in seconds.'''

    def __init__(self, name, result, elapsed):
        self.name = name
        self.result = result
        # elapsed time is pre-formatted to one decimal place for the report
        self.elapsed = "{:.1f}".format(elapsed)
class TestFile(object):
    '''A named artifact file attached to the results page.'''

    def __init__(self, name, fname):
        self.name, self.fname = name, fname
class TestResults(object):
    '''Accumulates step outcomes, artifact files and images for the web report.'''

    def __init__(self):
        self.date = time.asctime()
        # record the tree state the run was made from
        self.githash = util.run_cmd('git rev-parse HEAD', output=True, dir=util.reltopdir('.')).strip()
        self.tests = []
        self.files = []
        self.images = []

    def add(self, name, result, elapsed):
        '''add a result'''
        self.tests.append(TestResult(name, result, elapsed))

    def addfile(self, name, fname):
        '''add a result file'''
        self.files.append(TestFile(name, fname))

    def addimage(self, name, fname):
        '''add a result image'''
        self.images.append(TestFile(name, fname))

    def addglob(self, name, pattern):
        '''add every buildlogs file matching *pattern*'''
        # glob is already imported at module level; the old function-local
        # 'import glob' was redundant and has been removed.
        for f in glob.glob(util.reltopdir('../buildlogs/%s' % pattern)):
            self.addfile(name, os.path.basename(f))

    def addglobimage(self, name, pattern):
        '''add every buildlogs image matching *pattern*'''
        for f in glob.glob(util.reltopdir('../buildlogs/%s' % pattern)):
            self.addimage(name, os.path.basename(f))
def write_webresults(results):
    '''Render the HTML report templates with *results* into ../buildlogs.'''
    from pymavlink.generator import mavtemplate
    t = mavtemplate.MAVTemplate()
    # render every HTML template from the web directory
    for h in glob.glob(util.reltopdir('Tools/autotest/web/*.html')):
        html = util.loadfile(h)
        f = open(util.reltopdir("../buildlogs/%s" % os.path.basename(h)), mode='w')
        t.write(f, html, results)
        f.close()
    # copy the static images alongside the rendered pages
    for f in glob.glob(util.reltopdir('Tools/autotest/web/*.png')):
        shutil.copy(f, util.reltopdir('../buildlogs/%s' % os.path.basename(f)))
def write_fullresults():
    '''Attach the standard artifact set to the global results, then render the report.'''
    global results
    results.addglob("Google Earth track", '*.kmz')
    results.addfile('Full Logs', 'autotest-output.txt')
    results.addglob('DataFlash Log', '*.flashlog')
    results.addglob("MAVLink log", '*.tlog')
    results.addglob("GPX track", '*.gpx')
    # every vehicle produces the same four build artifacts
    for vehicle in ('ArduPlane', 'ArduCopter', 'APMrover2'):
        results.addfile('%s build log' % vehicle, '%s.txt' % vehicle)
        results.addfile('%s code size' % vehicle, '%s.sizes.txt' % vehicle)
        results.addfile('%s stack sizes' % vehicle, '%s.framesizes.txt' % vehicle)
        results.addfile('%s defaults' % vehicle, '%s-defaults.parm' % vehicle)
    results.addglob('APM:Libraries documentation', 'docs/libraries/index.html')
    results.addglob('APM:Plane documentation', 'docs/ArduPlane/index.html')
    results.addglob('APM:Copter documentation', 'docs/ArduCopter/index.html')
    results.addglob('APM:Rover documentation', 'docs/APMrover2/index.html')
    results.addglobimage("Flight Track", '*.png')
    write_webresults(results)
# global accumulator for this run's results
results = TestResults()

def run_tests(steps):
    '''Run the given list of steps, recording pass/fail and timing for each.

    Returns True when every non-skipped step passed.
    NOTE: this file targets the Python 2 runtime (see the
    'except Exception, msg' syntax below).
    '''
    global results
    passed = True
    failed = []
    for step in steps:
        # close any pexpect children left over from the previous step
        util.pexpect_close_all()
        if skip_step(step):
            continue
        t1 = time.time()
        print(">>>> RUNNING STEP: %s at %s" % (step, time.asctime()))
        try:
            if not run_step(step):
                print(">>>> FAILED STEP: %s at %s" % (step, time.asctime()))
                passed = False
                failed.append(step)
                results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
                continue
        except Exception, msg:
            # a crash in a step is recorded as a failure, not a run abort
            passed = False
            failed.append(step)
            print(">>>> FAILED STEP: %s at %s (%s)" % (step, time.asctime(), msg))
            traceback.print_exc(file=sys.stdout)
            results.add(step, '<span class="failed-text">FAILED</span>', time.time() - t1)
            continue
        results.add(step, '<span class="passed-text">PASSED</span>', time.time() - t1)
        print(">>>> PASSED STEP: %s at %s" % (step, time.asctime()))
    if not passed:
        print("FAILED %u tests: %s" % (len(failed), failed))
    util.pexpect_close_all()
    write_fullresults()
    return passed
util.mkdir_p(util.reltopdir('../buildlogs'))

# only one autotest instance may run at a time
lck = util.lock_file(util.reltopdir('../buildlogs/autotest.lck'))
if lck is None:
    print("autotest is locked - exiting")
    sys.exit(0)

atexit.register(util.pexpect_close_all)

if len(args) > 0:
    # allow a wildcard list of steps
    matched = []
    for a in args:
        for s in steps:
            if fnmatch.fnmatch(s.lower(), a.lower()):
                matched.append(s)
    steps = matched

try:
    if not run_tests(steps):
        sys.exit(1)
except KeyboardInterrupt:
    util.pexpect_close_all()
    sys.exit(1)
except Exception:
    # make sure we kill off any children
    util.pexpect_close_all()
    raise
| gpl-3.0 |
sunze/py_flask | venv/lib/python3.4/site-packages/sqlalchemy/util/_collections.py | 56 | 27842 | # util/_collections.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
from __future__ import absolute_import
import weakref
import operator
from .compat import threading, itertools_filterfalse, string_types
from . import py2k
import types
import collections
EMPTY_SET = frozenset()
class AbstractKeyedTuple(tuple):
    __slots__ = ()

    def keys(self):
        """Return a list of string key names for this :class:`.KeyedTuple`.

        .. seealso::

            :attr:`.KeyedTuple._fields`

        """
        # _fields is supplied by concrete subclasses (KeyedTuple, _LW)
        return list(self._fields)
class KeyedTuple(AbstractKeyedTuple):
    """``tuple`` subclass that adds labeled names.

    E.g.::

        >>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"])
        >>> k.one
        1
        >>> k.two
        2

    Result rows returned by :class:`.Query` that contain multiple
    ORM entities and/or column expressions make use of this
    class to return rows.

    :class:`.KeyedTuple` behaves similarly to
    ``collections.namedtuple()``, but is architected differently: it
    does not create a custom subtype per set of keys; each instance
    simply receives its labels at construction time.  This avoids the
    subtype-creation complexity and overhead that the :class:`.Query`
    use case does not need.

    .. versionchanged:: 0.8
        Compatibility methods with ``collections.namedtuple()`` have been
        added including :attr:`.KeyedTuple._fields` and
        :meth:`.KeyedTuple._asdict`.

    .. seealso::

        :ref:`ormtutorial_querying`

    """

    def __new__(cls, vals, labels=None):
        self = tuple.__new__(cls, vals)
        if labels:
            # each label becomes an instance attribute aliasing its value
            self.__dict__.update(zip(labels, vals))
        else:
            labels = []
        self.__dict__['_labels'] = labels
        return self

    @property
    def _fields(self):
        """Return a tuple of string key names for this :class:`.KeyedTuple`.

        Provides compatibility with ``collections.namedtuple()``.

        .. versionadded:: 0.8

        .. seealso::

            :meth:`.KeyedTuple.keys`

        """
        # None labels mark anonymous columns and are excluded
        return tuple(label for label in self._labels if label is not None)

    def __setattr__(self, key, value):
        raise AttributeError("Can't set attribute: %s" % key)

    def _asdict(self):
        """Return the contents of this :class:`.KeyedTuple` as a dictionary.

        Provides compatibility with ``collections.namedtuple()``, except
        that the returned dictionary is **not** ordered.

        .. versionadded:: 0.8

        """
        return {key: self.__dict__[key] for key in self.keys()}
class _LW(AbstractKeyedTuple):
    __slots__ = ()

    def __new__(cls, vals):
        return tuple.__new__(cls, vals)

    def __reduce__(self):
        # pickle as a plain KeyedTuple: the anonymous lightweight
        # subclasses generated at runtime can't be located by pickle
        return KeyedTuple, (list(self), self._real_fields)

    def _asdict(self):
        """Return the contents of this :class:`.KeyedTuple` as a dictionary."""
        result = dict(zip(self._real_fields, self))
        result.pop(None, None)
        return result
class ImmutableContainer(object):
    """Mixin that makes item/attribute mutation raise ``TypeError``."""

    def _immutable(self, *arg, **kw):
        raise TypeError("%s object is immutable" % self.__class__.__name__)

    __delitem__ = _immutable
    __setitem__ = _immutable
    __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
    """A ``dict`` on which every mutating operation raises ``TypeError``."""

    clear = ImmutableContainer._immutable
    pop = ImmutableContainer._immutable
    popitem = ImmutableContainer._immutable
    setdefault = ImmutableContainer._immutable
    update = ImmutableContainer._immutable

    def __new__(cls, *args):
        # populate in __new__: __init__ can't, since mutation is blocked
        new = dict.__new__(cls)
        dict.__init__(new, *args)
        return new

    def __init__(self, *args):
        pass

    def __reduce__(self):
        return immutabledict, (dict(self), )

    def union(self, d):
        """Return a new immutabledict of self merged with *d*."""
        if not d:
            return self
        if not self:
            return d if isinstance(d, immutabledict) else immutabledict(d)
        merged = immutabledict(self)
        dict.update(merged, d)
        return merged

    def __repr__(self):
        return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
    """Provide a __getattr__/__setattr__ interface over a dict."""

    # all instance state lives in the single slotted _data dict
    __slots__ = '_data',

    def __init__(self, data):
        object.__setattr__(self, '_data', data)

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(list(self._data.values()))

    def __add__(self, other):
        return list(self) + list(other)

    def __setitem__(self, key, object):
        self._data[key] = object

    def __getitem__(self, key):
        return self._data[key]

    def __delitem__(self, key):
        del self._data[key]

    def __setattr__(self, key, obj):
        self._data[key] = obj

    def __getstate__(self):
        # bugfix: this class declares __slots__, so instances have no
        # __dict__; the previous self.__dict__['_data'] raised
        # AttributeError and made Properties unpicklable.
        return {'_data': self._data}

    def __setstate__(self, state):
        # must bypass our own __setattr__, which writes *into* _data
        # rather than binding the attribute itself
        object.__setattr__(self, '_data', state['_data'])

    def __getattr__(self, key):
        try:
            return self._data[key]
        except KeyError:
            raise AttributeError(key)

    def __contains__(self, key):
        return key in self._data

    def as_immutable(self):
        """Return an immutable proxy for this :class:`.Properties`."""
        return ImmutableProperties(self._data)

    def update(self, value):
        self._data.update(value)

    def get(self, key, default=None):
        if key in self:
            return self[key]
        else:
            return default

    def keys(self):
        return list(self._data)

    def values(self):
        return list(self._data.values())

    def items(self):
        return list(self._data.items())

    def has_key(self, key):
        return key in self._data

    def clear(self):
        self._data.clear()
class OrderedProperties(Properties):
    """A :class:`.Properties` whose backing store is an :class:`.OrderedDict`,
    so attribute iteration follows insertion order."""

    __slots__ = ()

    def __init__(self):
        super(OrderedProperties, self).__init__(OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
    """Provide immutable dict/object attribute to an underlying dictionary.

    Read access behaves like :class:`.Properties`; any mutation raises
    ``TypeError`` via :class:`.ImmutableContainer`.
    """

    __slots__ = ()
class OrderedDict(dict):
    """A dict that returns keys/values/items in the order they were added."""

    # invariant: _list holds every key exactly once, in insertion order
    __slots__ = '_list',

    def __reduce__(self):
        return OrderedDict, (self.items(),)

    def __init__(self, ____sequence=None, **kwargs):
        # the unusual parameter name avoids colliding with a 'sequence' kwarg
        self._list = []
        if ____sequence is None:
            if kwargs:
                self.update(**kwargs)
        else:
            self.update(____sequence, **kwargs)

    def clear(self):
        self._list = []
        dict.clear(self)

    def copy(self):
        return self.__copy__()

    def __copy__(self):
        return OrderedDict(self)

    def sort(self, *arg, **kw):
        # reorders iteration order only; the mapping itself is unchanged
        self._list.sort(*arg, **kw)

    def update(self, ____sequence=None, **kwargs):
        if ____sequence is not None:
            if hasattr(____sequence, 'keys'):
                for key in ____sequence.keys():
                    self.__setitem__(key, ____sequence[key])
            else:
                for key, value in ____sequence:
                    self[key] = value
        if kwargs:
            self.update(kwargs)

    def setdefault(self, key, value):
        if key not in self:
            self.__setitem__(key, value)
            return value
        else:
            return self.__getitem__(key)

    def __iter__(self):
        return iter(self._list)

    def keys(self):
        return list(self)

    def values(self):
        return [self[key] for key in self._list]

    def items(self):
        return [(key, self[key]) for key in self._list]

    # Python 2 only: iterator variants of the accessors above
    if py2k:
        def itervalues(self):
            return iter(self.values())

        def iterkeys(self):
            return iter(self)

        def iteritems(self):
            return iter(self.items())

    def __setitem__(self, key, object):
        if key not in self:
            try:
                self._list.append(key)
            except AttributeError:
                # work around Python pickle loads() with
                # dict subclass (seems to ignore __setstate__?)
                self._list = [key]
        dict.__setitem__(self, key, object)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)

    def pop(self, key, *default):
        present = key in self
        value = dict.pop(self, key, *default)
        if present:
            self._list.remove(key)
        return value

    def popitem(self):
        item = dict.popitem(self)
        self._list.remove(item[0])
        return item
class OrderedSet(set):
    """A ``set`` subclass that remembers the insertion order of its elements.

    Order is tracked in a parallel ``_list``; all set operators are
    overridden to keep the two structures in sync.
    """

    def __init__(self, d=None):
        set.__init__(self)
        if d is not None:
            self._list = unique_list(d)
            set.update(self, self._list)
        else:
            self._list = []

    def add(self, element):
        if element not in self:
            self._list.append(element)
            set.add(self, element)

    def remove(self, element):
        # set first: a missing element raises KeyError before _list changes
        set.remove(self, element)
        self._list.remove(element)

    def insert(self, pos, element):
        if element in self:
            return
        self._list.insert(pos, element)
        set.add(self, element)

    def discard(self, element):
        if element not in self:
            return
        self._list.remove(element)
        set.remove(self, element)

    def clear(self):
        set.clear(self)
        self._list = []

    def __getitem__(self, key):
        return self._list[key]

    def __iter__(self):
        return iter(self._list)

    def __add__(self, other):
        return self.union(other)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self._list)

    __str__ = __repr__

    def update(self, iterable):
        for item in iterable:
            if item not in self:
                self._list.append(item)
                set.add(self, item)
        return self

    __ior__ = update

    def union(self, other):
        merged = self.__class__(self)
        merged.update(other)
        return merged

    __or__ = union

    def intersection(self, other):
        members = set(other)
        return self.__class__(a for a in self if a in members)

    __and__ = intersection

    def symmetric_difference(self, other):
        members = set(other)
        result = self.__class__(a for a in self if a not in members)
        result.update(a for a in members if a not in self)
        return result

    __xor__ = symmetric_difference

    def difference(self, other):
        members = set(other)
        return self.__class__(a for a in self if a not in members)

    __sub__ = difference

    def intersection_update(self, other):
        members = set(other)
        set.intersection_update(self, members)
        self._list = [a for a in self._list if a in members]
        return self

    __iand__ = intersection_update

    def symmetric_difference_update(self, other):
        set.symmetric_difference_update(self, other)
        # rebuild order: surviving originals first, then other's survivors
        self._list = [a for a in self._list if a in self]
        self._list += [a for a in other._list if a in self]
        return self

    __ixor__ = symmetric_difference_update

    def difference_update(self, other):
        set.difference_update(self, other)
        self._list = [a for a in self._list if a in self]
        return self

    __isub__ = difference_update
class IdentitySet(object):
    """A set that considers only object id() for uniqueness.

    This strategy has edge cases for builtin types- it's possible to have
    two 'foo' strings in one of these sets, for example.  Use sparingly.
    """

    # Collection class used for the binary operations below; subclasses
    # (e.g. OrderedIdentitySet) may substitute an ordered variant.
    _working_set = set

    def __init__(self, iterable=None):
        # Maps id(obj) -> obj.  Identity-keyed storage means members are
        # never required to be hashable and __eq__ is never invoked.
        self._members = dict()
        if iterable:
            for o in iterable:
                self.add(o)

    def add(self, value):
        self._members[id(value)] = value

    def __contains__(self, value):
        return id(value) in self._members

    def remove(self, value):
        # Raises KeyError if absent, mirroring set.remove.
        del self._members[id(value)]

    def discard(self, value):
        try:
            self.remove(value)
        except KeyError:
            pass

    def pop(self):
        try:
            pair = self._members.popitem()
            return pair[1]
        except KeyError:
            raise KeyError('pop from an empty set')

    def clear(self):
        self._members.clear()

    def __cmp__(self, other):
        raise TypeError('cannot compare sets using cmp()')

    def __eq__(self, other):
        if isinstance(other, IdentitySet):
            return self._members == other._members
        else:
            return False

    def __ne__(self, other):
        if isinstance(other, IdentitySet):
            return self._members != other._members
        else:
            return True

    def issubset(self, iterable):
        other = type(self)(iterable)
        if len(self) > len(other):
            return False
        # Any id of ours missing from `other` disproves the subset relation.
        for m in itertools_filterfalse(other._members.__contains__,
                                       iter(self._members.keys())):
            return False
        return True

    def __le__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.issubset(other)

    def __lt__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return len(self) < len(other) and self.issubset(other)

    def issuperset(self, iterable):
        other = type(self)(iterable)
        if len(self) < len(other):
            return False
        for m in itertools_filterfalse(self._members.__contains__,
                                       iter(other._members.keys())):
            return False
        return True

    def __ge__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.issuperset(other)

    def __gt__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return len(self) > len(other) and self.issuperset(other)

    def union(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        members = self._member_id_tuples()
        other = _iter_id(iterable)
        result._members.update(self._working_set(members).union(other))
        return result

    def __or__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.union(other)

    def update(self, iterable):
        self._members = self.union(iterable)._members

    def __ior__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.update(other)
        return self

    def difference(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        members = self._member_id_tuples()
        other = _iter_id(iterable)
        result._members.update(self._working_set(members).difference(other))
        return result

    def __sub__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.difference(other)

    def difference_update(self, iterable):
        self._members = self.difference(iterable)._members

    def __isub__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.difference_update(other)
        return self

    def intersection(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        members = self._member_id_tuples()
        other = _iter_id(iterable)
        result._members.update(self._working_set(members).intersection(other))
        return result

    def __and__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.intersection(other)

    def intersection_update(self, iterable):
        self._members = self.intersection(iterable)._members

    def __iand__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        self.intersection_update(other)
        return self

    def symmetric_difference(self, iterable):
        result = type(self)()
        # testlib.pragma exempt:__hash__
        members = self._member_id_tuples()
        other = _iter_id(iterable)
        result._members.update(
            self._working_set(members).symmetric_difference(other))
        return result

    def _member_id_tuples(self):
        return ((id(v), v) for v in self._members.values())

    def __xor__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        return self.symmetric_difference(other)

    def symmetric_difference_update(self, iterable):
        self._members = self.symmetric_difference(iterable)._members

    def __ixor__(self, other):
        if not isinstance(other, IdentitySet):
            return NotImplemented
        # Bug fix: previously called symmetric_difference() and discarded
        # the result, leaving self unchanged; ^= must update in place.
        self.symmetric_difference_update(other)
        return self

    def copy(self):
        return type(self)(iter(self._members.values()))

    __copy__ = copy

    def __len__(self):
        return len(self._members)

    def __iter__(self):
        return iter(self._members.values())

    def __hash__(self):
        raise TypeError('set objects are unhashable')

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, list(self._members.values()))
class WeakSequence(object):
    """A sequence of weak references to objects.

    Dead references are pruned automatically via the weakref callback, so
    iteration yields only objects that are still alive.
    """

    def __init__(self, __elements=()):
        self._storage = [
            weakref.ref(element, self._remove) for element in __elements
        ]

    def append(self, item):
        """Append a weak reference to *item*."""
        self._storage.append(weakref.ref(item, self._remove))

    def _remove(self, ref):
        # weakref finalizer callback: drop the now-dead reference.
        self._storage.remove(ref)

    def __len__(self):
        return len(self._storage)

    def __iter__(self):
        # Skip references whose target has been collected but whose
        # callback has not yet fired.
        return (obj for obj in
                (ref() for ref in self._storage) if obj is not None)

    def __getitem__(self, index):
        try:
            obj = self._storage[index]
        except IndexError:
            # Bug fix: list indexing raises IndexError, not KeyError, so the
            # previous ``except KeyError`` handler could never fire.
            raise IndexError("Index %s out of range" % index)
        else:
            return obj()
class OrderedIdentitySet(IdentitySet):
    # An IdentitySet that preserves insertion order by backing the member
    # mapping with an OrderedDict and performing set math via OrderedSet.
    class _working_set(OrderedSet):
        # a testing pragma: exempt the OIDS working set from the test suite's
        # "never call the user's __hash__" assertions.  this is a big hammer,
        # but it's safe here: IDS operates on (id, instance) tuples in the
        # working set.
        __sa_hash_exempt__ = True

    def __init__(self, iterable=None):
        IdentitySet.__init__(self)
        # OrderedDict keeps ids in insertion order, so iteration is ordered.
        self._members = OrderedDict()
        if iterable:
            for o in iterable:
                self.add(o)
class PopulateDict(dict):
    """A dict which populates missing values via a creation function.

    Note the creation function takes a key, unlike
    collections.defaultdict.
    """

    def __init__(self, creator):
        self.creator = creator

    def __missing__(self, key):
        value = self.creator(key)
        self[key] = value
        return value
# Define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
# At this point, these are mostly historical, things
# used to be more complicated.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
populate_column_dict = PopulateDict
# Cached itemgetter callables keyed by tuple index, and property wrappers
# around them (consumed by lightweight_named_tuple's generated classes).
_getters = PopulateDict(operator.itemgetter)
_property_getters = PopulateDict(
    lambda idx: property(operator.itemgetter(idx)))
def unique_list(seq, hashfunc=None):
    """Return the elements of *seq* de-duplicated, preserving order.

    :param seq: iterable of elements.
    :param hashfunc: optional callable deriving the dedup key from an
      element; by default the element itself is the key.
    """
    seen = set()
    seen_add = seen.add
    if not hashfunc:
        return [x for x in seq
                if x not in seen
                and not seen_add(x)]
    # Compute hashfunc once per element; the previous version invoked it
    # twice for every newly-seen element.
    result = []
    for x in seq:
        key = hashfunc(x)
        if key not in seen:
            seen_add(key)
            result.append(x)
    return result
class UniqueAppender(object):
    """Appends items to a collection ensuring uniqueness.

    Additional appends() of the same object are ignored.  Membership is
    determined by identity (``is a``) not equality (``==``).
    """

    def __init__(self, data, via=None):
        self.data = data
        self._unique = {}
        if via:
            self._data_appender = getattr(data, via)
        elif hasattr(data, 'append'):
            self._data_appender = data.append
        elif hasattr(data, 'add'):
            self._data_appender = data.add

    def append(self, item):
        key = id(item)
        if key in self._unique:
            return
        self._data_appender(item)
        self._unique[key] = True

    def __iter__(self):
        return iter(self.data)
def coerce_generator_arg(arg):
    """If *arg* is a one-element sequence holding a generator, expand the
    generator into a list; otherwise return *arg* unchanged."""
    is_single_generator = (
        len(arg) == 1 and isinstance(arg[0], types.GeneratorType))
    return list(arg[0]) if is_single_generator else arg
def to_list(x, default=None):
    # Coerce *x* into a list: None -> default, string/scalar -> [x],
    # list -> unchanged, any other iterable -> list(x).
    if x is None:
        return default
    # NOTE(review): collections.Iterable was removed in Python 3.10; on
    # modern interpreters this needs collections.abc.Iterable (via the
    # project's compat layer) -- confirm supported Python versions.
    if not isinstance(x, collections.Iterable) or isinstance(x, string_types):
        return [x]
    elif isinstance(x, list):
        return x
    else:
        return list(x)
def has_intersection(set_, iterable):
    """return True if any items of set_ are present in iterable.

    Goes through special effort to ensure __hash__ is not called
    on items in iterable that don't support it.
    """
    # TODO: optimize, write in C, etc.
    hashable = (item for item in iterable if item.__hash__)
    return bool(set_.intersection(hashable))
def to_set(x):
    """Coerce *x* to a set; None becomes an empty set."""
    if x is None:
        return set()
    if isinstance(x, set):
        return x
    return set(to_list(x))
def to_column_set(x):
    """Coerce *x* to a column_set; None becomes an empty column_set."""
    if x is None:
        return column_set()
    if isinstance(x, column_set):
        return x
    return column_set(to_list(x))
def update_copy(d, _new=None, **kw):
    """Copy the given dict and update with the given values.

    The original mapping is never mutated; *_new* (if truthy) is applied
    first, then any keyword arguments.
    """
    copied = d.copy()
    if _new:
        copied.update(_new)
    copied.update(**kw)
    return copied
def flatten_iterator(x):
    """Given an iterator of which further sub-elements may also be
    iterators, flatten the sub-elements into a single iterator.

    Strings are treated as atoms even though they are iterable.
    """
    for element in x:
        if hasattr(element, '__iter__') and not isinstance(element, str):
            for sub_element in flatten_iterator(element):
                yield sub_element
        else:
            yield element
class LRUCache(dict):
    """Dictionary with 'squishy' removal of least
    recently used items.

    Note that either get() or [] should be used here, but
    generally its not safe to do an "in" check first as the dictionary
    can change subsequent to that call.
    """

    def __init__(self, capacity=100, threshold=.5):
        # capacity: soft cap on entries; pruning begins only once the size
        # exceeds capacity * (1 + threshold), then trims back to capacity.
        self.capacity = capacity
        self.threshold = threshold
        # Monotonic access stamp.  Each entry is stored as the mutable
        # list [key, value, stamp] so the stamp can be updated in place.
        self._counter = 0
        self._mutex = threading.Lock()

    def _inc_counter(self):
        # Deliberately unlocked: a race only produces duplicate stamps,
        # which merely perturbs eviction order, never correctness.
        self._counter += 1
        return self._counter

    def get(self, key, default=None):
        item = dict.get(self, key, default)
        if item is not default:
            # Hit: refresh the access stamp and unwrap the stored value.
            item[2] = self._inc_counter()
            return item[1]
        else:
            return default

    def __getitem__(self, key):
        item = dict.__getitem__(self, key)
        item[2] = self._inc_counter()
        return item[1]

    def values(self):
        # Unwrap the [key, value, stamp] records into plain values.
        return [i[1] for i in dict.values(self)]

    def setdefault(self, key, value):
        if key in self:
            return self[key]
        else:
            self[key] = value
            return value

    def __setitem__(self, key, value):
        item = dict.get(self, key)
        if item is None:
            item = [key, value, self._inc_counter()]
            dict.__setitem__(self, key, item)
        else:
            item[1] = value
        self._manage_size()

    def _manage_size(self):
        # Best-effort pruning: if another thread is already pruning, skip
        # rather than block the writer.
        if not self._mutex.acquire(False):
            return
        try:
            while len(self) > self.capacity + self.capacity * self.threshold:
                # Sort most-recently-used first; evict everything beyond
                # `capacity` entries.
                by_counter = sorted(dict.values(self),
                                    key=operator.itemgetter(2),
                                    reverse=True)
                for item in by_counter[self.capacity:]:
                    try:
                        del self[item[0]]
                    except KeyError:
                        # deleted elsewhere; skip
                        continue
        finally:
            self._mutex.release()
# LRU cache of generated named-tuple classes, keyed by (name, *fields).
_lw_tuples = LRUCache(100)
def lightweight_named_tuple(name, fields):
    # Build (or fetch from the cache) a minimal named-tuple-like class.
    # NOTE(review): _LW (the tuple base class) and _property_getters are
    # defined elsewhere in this module -- not visible in this chunk.
    hash_ = (name, ) + tuple(fields)
    tp_cls = _lw_tuples.get(hash_)
    if tp_cls:
        return tp_cls
    # One positional-read property per non-None field; __slots__ keeps the
    # generated instances free of a per-instance __dict__.
    tp_cls = type(
        name, (_LW,),
        dict([
            (field, _property_getters[idx])
            for idx, field in enumerate(fields) if field is not None
        ] + [('__slots__', ())])
    )
    tp_cls._real_fields = fields
    tp_cls._fields = tuple([f for f in fields if f is not None])
    _lw_tuples[hash_] = tp_cls
    return tp_cls
class ScopedRegistry(object):
    """A Registry that can store one or multiple instances of a single
    class on the basis of a "scope" function.

    The object implements ``__call__`` as the "getter", so by
    calling ``myregistry()`` the contained object is returned
    for the current scope.

    :param createfunc:
      a callable that returns a new object to be placed in the registry

    :param scopefunc:
      a callable that will return a key to store/retrieve an object.
    """

    def __init__(self, createfunc, scopefunc):
        """Construct a new :class:`.ScopedRegistry`.

        :param createfunc: A creation function that will generate
          a new value for the current scope, if none is present.

        :param scopefunc: A function that returns a hashable
          token representing the current scope (such as, current
          thread identifier).
        """
        self.createfunc = createfunc
        self.scopefunc = scopefunc
        self.registry = {}

    def __call__(self):
        scope_key = self.scopefunc()
        if scope_key in self.registry:
            return self.registry[scope_key]
        # setdefault guards against another thread having created the
        # value between the check and the store.
        return self.registry.setdefault(scope_key, self.createfunc())

    def has(self):
        """Return True if an object is present in the current scope."""
        return self.scopefunc() in self.registry

    def set(self, obj):
        """Set the value for the current scope."""
        self.registry[self.scopefunc()] = obj

    def clear(self):
        """Clear the current scope, if any."""
        self.registry.pop(self.scopefunc(), None)
class ThreadLocalRegistry(ScopedRegistry):
    """A :class:`.ScopedRegistry` that uses a ``threading.local()``
    variable for storage.
    """

    def __init__(self, createfunc):
        self.createfunc = createfunc
        self.registry = threading.local()

    def __call__(self):
        local = self.registry
        if not hasattr(local, "value"):
            local.value = self.createfunc()
        return local.value

    def has(self):
        return hasattr(self.registry, "value")

    def set(self, obj):
        self.registry.value = obj

    def clear(self):
        if hasattr(self.registry, "value"):
            del self.registry.value
def _iter_id(iterable):
"""Generator: ((id(o), o) for o in iterable)."""
for item in iterable:
yield id(item), item
| mit |
mxOBS/deb-pkg_trusty_chromium-browser | chrome/tools/inconsistent-eol.py | 185 | 4330 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Find and fix files with inconsistent line endings.
This script requires 'dos2unix.exe' and 'unix2dos.exe' from Cygwin; they
must be in the user's PATH.
Arg: Either one or more files to examine, or (with --file-list) one or more
files that themselves contain lists of files. The argument(s) passed to
this script, as well as the paths in the file if any, may be relative or
absolute Windows-style paths (with either type of slash). The list might
be generated with 'find -type f' or extracted from a gcl change listing,
for example.
"""
import errno
import logging
import optparse
import subprocess
import sys
# Whether to produce excessive debugging output for each file in the list.
# Flip to True to have main() configure DEBUG-level logging.
DEBUGGING = False
class Error(Exception):
    """Script-local error type, raised when an EOL conversion fails."""
def CountChars(text, str):
    """Count the number of instances of the given string in the text."""
    pieces = text.split(str)
    occurrences = len(pieces) - 1
    logging.debug(occurrences)
    return occurrences
def PrevailingEOLName(crlf, cr, lf):
    """Describe the most common line ending.

    Args:
      crlf: How many CRLF (\r\n) sequences are in the file.
      cr: How many CR (\r) characters are in the file, excluding CRLF sequences.
      lf: How many LF (\n) characters are in the file, excluding CRLF sequences.

    Returns:
      A string describing the most common of the three line endings.
    """
    # Ties break in the order cr, crlf, lf -- same as the comparisons below.
    winner = max(crlf, cr, lf)
    if winner == cr:
        return 'cr'
    elif winner == crlf:
        return 'crlf'
    else:
        return 'lf'
def FixEndings(file, crlf, cr, lf):
    """Change the file's line endings to CRLF or LF, whichever is more common.

    Args:
      file: path of the file to rewrite in place.
      crlf, cr, lf: line-ending counts as produced by CountChars.

    Raises:
      Error: if the conversion tool exits with a nonzero status.
    """
    most = max(crlf, cr, lf)
    # Pass the command as an argument list with the default shell=False so
    # filenames containing spaces or shell metacharacters are handled
    # safely; the previous shell=True string interpolation broke on them.
    if most == crlf:
        result = subprocess.call(['unix2dos.exe', file])
        if result:
            raise Error('Error running unix2dos.exe %s' % file)
    else:
        result = subprocess.call(['dos2unix.exe', file])
        if result:
            raise Error('Error running dos2unix.exe %s' % file)
def ProcessFiles(filelist, options=None):
    """Fix line endings in each file in the filelist list.

    Args:
      filelist: iterable of file paths (possibly with surrounding
        whitespace, e.g. lines read from a list file).
      options: parsed command-line options.  Bug fix: this function
        previously read a module global `options` that was never defined
        (main() kept its parse result local), so the --force-lf branch
        raised NameError at runtime.  The options are now passed in
        explicitly; when None, --force-lf behavior is disabled.
    """
    for filename in filelist:
        filename = filename.strip()
        logging.debug(filename)
        try:
            # Open in binary mode to preserve existing line endings.
            text = open(filename, 'rb').read()
        except IOError as e:  # 'as' form is valid on Python 2.6+ and 3.
            if e.errno != errno.ENOENT:
                raise
            logging.warning('File %s not found.' % filename)
            continue
        crlf = CountChars(text, '\r\n')
        cr = CountChars(text, '\r') - crlf
        lf = CountChars(text, '\n') - crlf

        if options is not None and options.force_lf:
            if crlf > 0 or cr > 0:
                print('%s: forcing to LF' % filename)
                # Fudge the counts to force switching to LF.
                FixEndings(filename, 0, 0, 1)
        else:
            # Only rewrite files that actually mix ending styles.
            if ((crlf > 0 and cr > 0) or
                (crlf > 0 and lf > 0) or
                (lf > 0 and cr > 0)):
                print('%s: mostly %s' % (filename,
                                         PrevailingEOLName(crlf, cr, lf)))
                FixEndings(filename, crlf, cr, lf)


def process(options, args):
    """Process the files named by args (or listed inside them)."""
    if not args:
        raise Error('No files given.')

    if options.file_list:
        for arg in args:
            filelist = open(arg, 'r').readlines()
            ProcessFiles(filelist, options)
    else:
        ProcessFiles(args, options)
    return 0


def main():
    """Parse command-line options and dispatch to process()."""
    if DEBUGGING:
        debug_level = logging.DEBUG
    else:
        debug_level = logging.INFO
    logging.basicConfig(level=debug_level,
                        format='%(asctime)s %(levelname)-7s: %(message)s',
                        datefmt='%H:%M:%S')

    option_parser = optparse.OptionParser()
    option_parser.add_option("", "--file-list", action="store_true",
                             default=False,
                             help="Treat the arguments as files containing "
                                  "lists of files to examine, rather than as "
                                  "the files to be checked.")
    option_parser.add_option("", "--force-lf", action="store_true",
                             default=False,
                             help="Force any files with CRLF to LF instead.")
    options, args = option_parser.parse_args()
    return process(options, args)


if '__main__' == __name__:
    sys.exit(main())
| bsd-3-clause |
spivachuk/sovrin-node | indy_client/agent/helper.py | 2 | 2543 | import os
from plenum.common.signer_did import DidSigner
from plenum.common.util import friendlyToRaw, rawToFriendly
from indy_client.client.wallet.wallet import Wallet
from indy_common.config_util import getConfig
from stp_core.crypto.util import ed25519PkToCurve25519
def processInvAccept(wallet, msg):
    # Placeholder: invitation-accept handling is not implemented yet.
    pass
def rawVerkeyToPubkey(raw_verkey):
    # Convert a raw ed25519 verification key to its curve25519 public key.
    return ed25519PkToCurve25519(raw_verkey)
def friendlyVerkeyToPubkey(verkey):
    """Convert a friendly-encoded ed25519 verkey into a friendly-encoded
    curve25519 public key."""
    raw_verkey = friendlyToRaw(verkey)
    raw_pubkey = ed25519PkToCurve25519(raw_verkey)
    return rawToFriendly(raw_pubkey)
def getClaimVersionFileName(agentName):
    """Derive the schema-version file name for the given agent name
    (spaces become dashes, lowercased)."""
    slug = agentName.replace(" ", "-").lower()
    return "{}-schema-version.txt".format(slug)
def updateAndGetNextClaimVersionNumber(basedirpath, fileName):
    """Read, bump and persist the claim schema version number.

    When the version file is absent it is created with the initial value
    0.01; otherwise the stored value is incremented by 0.001 and written
    back.  Returns the resulting version number.
    """
    versionFilePath = '{}/{}'.format(basedirpath, fileName)
    if not os.path.isfile(versionFilePath):
        version = 0.01
        with open(versionFilePath, mode='w') as fp:
            fp.write(str(version))
        return version
    # get version number from file, increment it and update the file
    with open(versionFilePath, mode='r+') as fp:
        version = float(fp.read()) + 0.001
        fp.seek(0)
        fp.write(str(version))
        fp.truncate()
    return version
def build_wallet_core(wallet_name, seed_file):
    """Create a Wallet named *wallet_name*.

    The signing seed is read from *seed_file* under the CLI base directory
    when that file exists; otherwise a default seed is derived from the
    wallet name zero-padded to 32 characters.
    """
    config = getConfig()
    base_dir = os.path.expanduser(config.CLI_BASE_DIR)
    seed_path = '{}/{}'.format(base_dir, seed_file)

    # Default seed: wallet name padded with '0' to 32 characters.
    seed = wallet_name + '0' * (32 - len(wallet_name))
    # If a seed file is available, prefer its contents.
    if os.path.isfile(seed_path):
        with open(seed_path, mode='r+') as fp:
            seed = fp.read().strip(' \t\n\r')

    wallet = Wallet(wallet_name)
    wallet.addIdentifier(
        signer=DidSigner(seed=bytes(seed, encoding='utf-8')))
    return wallet
async def bootstrap_schema(agent, attrib_def_name, schema_name, schema_version, p_prime, q_prime):
    # Publish the schema and its issuer keys via the agent, returning the
    # new schema id.  Revocation-registry publishing is not wired up yet.
    schema_id = await agent.publish_schema(attrib_def_name,
                                           schema_name=schema_name,
                                           schema_version=schema_version)
    # Issuer keys are published for the schema; the returned key pair is
    # not needed by callers here.
    _, _ = await agent.publish_issuer_keys(schema_id, p_prime=p_prime, q_prime=q_prime)
    # TODO not fully implemented yet!
    # await agent.publish_revocation_registry(schema_id=schema_id)
    return schema_id
def buildAgentWallet(name, seed):
    """Create a Wallet for an agent, seeded with the given signing seed."""
    agent_wallet = Wallet(name)
    agent_wallet.addIdentifier(signer=DidSigner(seed=seed))
    return agent_wallet
| apache-2.0 |
j00bar/ansible | lib/ansible/modules/files/stat.py | 19 | 18851 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: stat
version_added: "1.3"
short_description: retrieve file or file system status
description:
- Retrieves facts for a file similar to the linux/unix 'stat' command.
options:
path:
description:
- The full path of the file/object to get the facts of
required: true
default: null
follow:
description:
- Whether to follow symlinks
required: false
default: no
get_md5:
description:
- Whether to return the md5 sum of the file. Will return None if we're
unable to use md5 (Common for FIPS-140 compliant systems)
required: false
default: yes
get_checksum:
description:
- Whether to return a checksum of the file (default sha1)
required: false
default: yes
version_added: "1.8"
checksum_algorithm:
description:
- Algorithm to determine checksum of file. Will throw an error if the
host is unable to use specified algorithm.
required: false
choices: [ 'sha1', 'sha224', 'sha256', 'sha384', 'sha512' ]
default: sha1
aliases: [ 'checksum_algo', 'checksum' ]
version_added: "2.0"
get_mime:
description:
- Use file magic and return data about the nature of the file. this uses
the 'file' utility found on most Linux/Unix systems.
- This will add both `mime_type` and 'charset' fields to the return, if possible.
- In 2.3 this option changed from 'mime' to 'get_mime' and the default changed to 'Yes'
required: false
choices: [ Yes, No ]
default: Yes
version_added: "2.1"
aliases: [ 'mime', 'mime_type', 'mime-type' ]
get_attributes:
description:
- Get file attributes using lsattr tool if present.
required: false
default: True
version_added: "2.3"
aliases: [ 'attributes', 'attr' ]
author: "Bruce Pennypacker (@bpennypacker)"
'''
EXAMPLES = '''
# Obtain the stats of /etc/foo.conf, and check that the file still belongs
# to 'root'. Fail otherwise.
- stat:
path: /etc/foo.conf
register: st
- fail:
msg: "Whoops! file ownership has changed"
when: st.stat.pw_name != 'root'
# Determine if a path exists and is a symlink. Note that if the path does
# not exist, and we test sym.stat.islnk, it will fail with an error. So
# therefore, we must test whether it is defined.
# Run this to understand the structure, the skipped ones do not pass the
# check performed by 'when'
- stat:
path: /path/to/something
register: sym
- debug:
msg: "islnk isn't defined (path doesn't exist)"
when: sym.stat.islnk is not defined
- debug:
msg: "islnk is defined (path must exist)"
when: sym.stat.islnk is defined
- debug:
msg: "Path exists and is a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk
- debug:
msg: "Path exists and isn't a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk == False
# Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true.
- stat:
path: /path/to/something
register: p
- debug:
msg: "Path exists and is a directory"
when: p.stat.isdir is defined and p.stat.isdir
# Don't do md5 checksum
- stat:
path: /path/to/myhugefile
get_md5: no
# Use sha256 to calculate checksum
- stat:
path: /path/to/something
checksum_algorithm: sha256
'''
RETURN = '''
stat:
description: dictionary containing all the stat data, some platforms might add additional fields
returned: success
type: dictionary
contains:
exists:
description: if the destination path actually exists or not
returned: success
type: boolean
sample: True
path:
description: The full path of the file/object to get the facts of
returned: success and if path exists
type: string
sample: '/path/to/file'
mode:
description: Unix permissions of the file in octal
returned: success, path exists and user can read stats
type: octal
sample: 1755
isdir:
description: Tells you if the path is a directory
returned: success, path exists and user can read stats
type: boolean
sample: False
ischr:
description: Tells you if the path is a character device
returned: success, path exists and user can read stats
type: boolean
sample: False
isblk:
description: Tells you if the path is a block device
returned: success, path exists and user can read stats
type: boolean
sample: False
isreg:
description: Tells you if the path is a regular file
returned: success, path exists and user can read stats
type: boolean
sample: True
isfifo:
description: Tells you if the path is a named pipe
returned: success, path exists and user can read stats
type: boolean
sample: False
islnk:
description: Tells you if the path is a symbolic link
returned: success, path exists and user can read stats
type: boolean
sample: False
issock:
description: Tells you if the path is a unix domain socket
returned: success, path exists and user can read stats
type: boolean
sample: False
uid:
description: Numeric id representing the file owner
returned: success, path exists and user can read stats
type: int
sample: 1003
gid:
description: Numeric id representing the group of the owner
returned: success, path exists and user can read stats
type: int
sample: 1003
size:
description: Size in bytes for a plain file, amount of data for some special files
returned: success, path exists and user can read stats
type: int
sample: 203
inode:
description: Inode number of the path
returned: success, path exists and user can read stats
type: int
sample: 12758
dev:
description: Device the inode resides on
returned: success, path exists and user can read stats
type: int
sample: 33
nlink:
description: Number of links to the inode (hard links)
returned: success, path exists and user can read stats
type: int
sample: 1
atime:
description: Time of last access
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
mtime:
description: Time of last modification
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
ctime:
description: Time of last metadata update or creation (depends on OS)
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
wusr:
description: Tells you if the owner has write permission
returned: success, path exists and user can read stats
type: boolean
sample: True
rusr:
description: Tells you if the owner has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xusr:
description: Tells you if the owner has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
wgrp:
description: Tells you if the owner's group has write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
rgrp:
description: Tells you if the owner's group has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xgrp:
description: Tells you if the owner's group has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
woth:
description: Tells you if others have write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
roth:
description: Tells you if others have read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xoth:
description: Tells you if others have execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
isuid:
description: Tells you if the invoking user's id matches the owner's id
returned: success, path exists and user can read stats
type: boolean
sample: False
isgid:
description: Tells you if the invoking user's group id matches the owner's group id
returned: success, path exists and user can read stats
type: boolean
sample: False
lnk_source:
description: Original path
returned: success, path exists and user can read stats and the path is a symbolic link
type: string
sample: /home/foobar/21102015-1445431274-908472971
md5:
description: md5 hash of the path
returned: success, path exists and user can read stats and path
supports hashing and md5 is supported
type: string
sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
checksum:
description: hash of the path
returned: success, path exists, user can read stats, path supports
hashing and supplied checksum algorithm is available
type: string
sample: 50ba294cdf28c0d5bcde25708df53346825a429f
pw_name:
description: User name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: httpd
gr_name:
description: Group name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: www-data
mime_type:
description: file magic data or mime-type
returned: success, path exists and user can read stats and
installed python supports it and the `mime` option was true, will
return 'unknown' on error.
type: string
sample: PDF document, version 1.2
charset:
description: file character set or encoding
returned: success, path exists and user can read stats and
installed python supports it and the `mime` option was true, will
return 'unknown' on error.
type: string
sample: us-ascii
readable:
description: Tells you if the invoking user has the right to read the path
returned: success, path exists and user can read the path
type: boolean
sample: False
version_added: 2.2
writeable:
description: Tells you if the invoking user has the right to write the path
returned: success, path exists and user can write the path
type: boolean
sample: False
version_added: 2.2
executable:
description: Tells you if the invoking user has the execute the path
returned: success, path exists and user can execute the path
type: boolean
sample: False
version_added: 2.2
attributes:
description: list of file attributes
returned: success, path exists and user can execute the path
type: boolean
sample: [ immutable, extent ]
version_added: 2.3
'''
import errno
import grp
import os
import pwd
import stat
# import module snippets
from ansible.module_utils.basic import AnsibleModule, format_attributes
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_bytes
def format_output(module, path, st):
    """Map an os.stat()/os.lstat() result onto the flat dict returned
    under the 'stat' key.

    The *module* argument is unused here but kept for interface
    compatibility with callers.
    """
    mode = st.st_mode

    # back to ansible
    output = {
        'exists': True,
        'path': path,
        'mode': "%04o" % stat.S_IMODE(mode),
        'isdir': stat.S_ISDIR(mode),
        'ischr': stat.S_ISCHR(mode),
        'isblk': stat.S_ISBLK(mode),
        'isreg': stat.S_ISREG(mode),
        'isfifo': stat.S_ISFIFO(mode),
        'islnk': stat.S_ISLNK(mode),
        'issock': stat.S_ISSOCK(mode),
        'uid': st.st_uid,
        'gid': st.st_gid,
        'size': st.st_size,
        'inode': st.st_ino,
        'dev': st.st_dev,
        'nlink': st.st_nlink,
        'atime': st.st_atime,
        'mtime': st.st_mtime,
        'ctime': st.st_ctime,
        'wusr': bool(mode & stat.S_IWUSR),
        'rusr': bool(mode & stat.S_IRUSR),
        'xusr': bool(mode & stat.S_IXUSR),
        'wgrp': bool(mode & stat.S_IWGRP),
        'rgrp': bool(mode & stat.S_IRGRP),
        'xgrp': bool(mode & stat.S_IXGRP),
        'woth': bool(mode & stat.S_IWOTH),
        'roth': bool(mode & stat.S_IROTH),
        'xoth': bool(mode & stat.S_IXOTH),
        'isuid': bool(mode & stat.S_ISUID),
        'isgid': bool(mode & stat.S_ISGID),
    }

    # Platform dependant flags: copy each stat attribute across only when
    # the current platform's stat_result actually provides it.
    optional_fields = (
        # Some Linux
        ('st_blocks', 'blocks'),
        ('st_blksize', 'block_size'),
        ('st_rdev', 'device_type'),
        ('st_flags', 'flags'),
        # Some Berkley based
        ('st_gen', 'generation'),
        ('st_birthtime', 'birthtime'),
        # RISCOS
        ('st_ftype', 'file_type'),
        ('st_attrs', 'attrs'),
        ('st_obtype', 'object_type'),
        # OS X
        ('st_rsize', 'real_size'),
        ('st_creator', 'creator'),
        ('st_type', 'file_type'),
    )
    for attr_name, out_key in optional_fields:
        if hasattr(st, attr_name):
            output[out_key] = getattr(st, attr_name)

    return output
def main():
    """Entry point for the stat module.

    Stats the requested path and exits with a ``stat`` fact dict built by
    format_output(), optionally augmented with owner/group names, checksums,
    mime type and file attributes.  A missing path is not an error: it yields
    ``{'exists': False}``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True, type='path'),
            follow=dict(default='no', type='bool'),
            get_md5=dict(default='yes', type='bool'),
            get_checksum=dict(default='yes', type='bool'),
            get_mime=dict(default=True, type='bool', aliases=['mime', 'mime_type', 'mime-type']),
            get_attributes=dict(default=True, type='bool', aliases=['attributes', 'attr']),
            checksum_algorithm=dict(default='sha1', type='str',
                                    choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
                                    aliases=['checksum_algo', 'checksum']),
        ),
        supports_check_mode=True
    )

    path = module.params.get('path')
    b_path = to_bytes(path, errors='surrogate_or_strict')
    follow = module.params.get('follow')
    get_mime = module.params.get('get_mime')
    get_attr = module.params.get('get_attributes')
    get_md5 = module.params.get('get_md5')
    get_checksum = module.params.get('get_checksum')
    checksum_algorithm = module.params.get('checksum_algorithm')

    # main stat data; follow=True resolves symlinks first
    try:
        if follow:
            st = os.stat(b_path)
        else:
            st = os.lstat(b_path)
    except OSError:
        e = get_exception()
        if e.errno == errno.ENOENT:
            # A missing path is a normal result, not a module failure.
            output = {'exists': False}
            module.exit_json(changed=False, stat=output)
        module.fail_json(msg=e.strerror)

    # process base results
    output = format_output(module, path, st)

    # resolved permissions for the invoking user
    for perm in [('readable', os.R_OK), ('writeable', os.W_OK), ('executable', os.X_OK)]:
        output[perm[0]] = os.access(path, perm[1])

    # symlink info
    if output.get('islnk'):
        output['lnk_source'] = os.path.realpath(path)

    # Owner/group names are best effort: a uid/gid without a passwd/group
    # entry raises KeyError, which we ignore.  The previous bare "except:"
    # also swallowed SystemExit and KeyboardInterrupt, hence the narrowing.
    try:  # user data
        pw = pwd.getpwuid(st.st_uid)
        output['pw_name'] = pw.pw_name
    except KeyError:
        pass
    try:  # group data
        grp_info = grp.getgrgid(st.st_gid)
        output['gr_name'] = grp_info.gr_name
    except KeyError:
        pass

    # checksums only make sense for readable regular files
    if output.get('isreg') and output.get('readable'):
        if get_md5:
            # Will fail on FIPS-140 compliant systems
            try:
                output['md5'] = module.md5(path)
            except ValueError:
                output['md5'] = None
        if get_checksum:
            output['checksum'] = module.digest_from_file(path, checksum_algorithm)

    # try to get mime data if requested; best effort, any exec/parse problem
    # leaves the 'unknown' defaults in place (but no longer hides SystemExit
    # or KeyboardInterrupt as the old bare "except:" did)
    if get_mime:
        output['mimetype'] = output['charset'] = 'unknown'
        mimecmd = module.get_bin_path('file')
        if mimecmd:
            mimecmd = [mimecmd, '-i', path]
            try:
                rc, out, err = module.run_command(mimecmd)
                if rc == 0:
                    mimetype, charset = out.split(':')[1].split(';')
                    output['mimetype'] = mimetype.strip()
                    output['charset'] = charset.split('=')[1].strip()
            except Exception:
                pass

    # try to get attr data (chattr-style attributes / version)
    if get_attr:
        output['version'] = None
        output['attributes'] = []
        output['attr_flags'] = ''
        out = module.get_file_attributes(path)
        for x in ('version', 'attributes', 'attr_flags'):
            if x in out:
                output[x] = out[x]

    module.exit_json(changed=False, stat=output)
# Run the module when invoked directly (as Ansible does).
if __name__ == '__main__':
    main()
| gpl-3.0 |
rahushen/ansible | lib/ansible/modules/cloud/amazon/route53_facts.py | 39 | 12906 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: route53_facts
short_description: Retrieves route53 details using AWS methods
description:
- Gets various details related to Route53 zone, record set or health check details
version_added: "2.0"
options:
query:
description:
- specifies the query action to take
required: True
choices: [
'change',
'checker_ip_range',
'health_check',
'hosted_zone',
'record_sets',
'reusable_delegation_set',
]
change_id:
description:
- The ID of the change batch request.
The value that you specify here is the value that
ChangeResourceRecordSets returned in the Id element
when you submitted the request.
required: false
hosted_zone_id:
description:
- The Hosted Zone ID of the DNS zone
required: false
max_items:
description:
- Maximum number of items to return for various get/list requests
required: false
next_marker:
description:
- "Some requests such as list_command: hosted_zones will return a maximum
number of entries - EG 100 or the number specified by max_items.
If the number of entries exceeds this maximum another request can be sent
using the NextMarker entry from the first response to get the next page
of results"
required: false
delegation_set_id:
description:
- The DNS Zone delegation set ID
required: false
start_record_name:
description:
- "The first name in the lexicographic ordering of domain names that you want
the list_command: record_sets to start listing from"
required: false
type:
description:
- The type of DNS record
required: false
choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS' ]
dns_name:
description:
- The first name in the lexicographic ordering of domain names that you want
the list_command to start listing from
required: false
resource_id:
description:
- The ID/s of the specified resource/s
required: false
aliases: ['resource_ids']
health_check_id:
description:
- The ID of the health check
required: false
hosted_zone_method:
description:
- "This is used in conjunction with query: hosted_zone.
It allows for listing details, counts or tags of various
hosted zone details."
required: false
choices: [
'details',
'list',
'list_by_name',
'count',
'tags',
]
default: 'list'
health_check_method:
description:
- "This is used in conjunction with query: health_check.
It allows for listing details, counts or tags of various
health check details."
required: false
choices: [
'list',
'details',
'status',
'failure_reason',
'count',
'tags',
]
default: 'list'
author: Karen Cheng(@Etherdaemon)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple example of listing all hosted zones
- name: List all hosted zones
route53_facts:
query: hosted_zone
register: hosted_zones
# Getting a count of hosted zones
- name: Return a count of all hosted zones
route53_facts:
query: hosted_zone
hosted_zone_method: count
register: hosted_zone_count
- name: List the first 20 resource record sets in a given hosted zone
route53_facts:
profile: account_name
query: record_sets
hosted_zone_id: ZZZ1111112222
max_items: 20
register: record_sets
- name: List first 20 health checks
route53_facts:
query: health_check
health_check_method: list
max_items: 20
register: health_checks
- name: Get health check last failure_reason
route53_facts:
query: health_check
health_check_method: failure_reason
health_check_id: 00000000-1111-2222-3333-12345678abcd
register: health_check_failure_reason
- name: Retrieve reusable delegation set details
route53_facts:
query: reusable_delegation_set
delegation_set_id: delegation id
register: delegation_sets
- name: setup of example for using next_marker
route53_facts:
query: hosted_zone
max_items: 1
register: first_facts
- name: example for using next_marker
route53_facts:
query: hosted_zone
next_marker: "{{ first_facts.NextMarker }}"
max_items: 1
when: "{{ 'NextMarker' in first_facts }}"
'''
try:
import boto
import botocore
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
def get_hosted_zone(client, module):
    """Describe a single hosted zone; ``hosted_zone_id`` is mandatory."""
    zone_id = module.params.get('hosted_zone_id')
    if not zone_id:
        module.fail_json(msg="Hosted Zone Id is required")
    return client.get_hosted_zone(Id=zone_id)
def reusable_delegation_set_details(client, module):
    """Get one reusable delegation set, or list them when no id was given."""
    delegation_set_id = module.params.get('delegation_set_id')
    if delegation_set_id:
        return client.get_reusable_delegation_set(DelegationSetId=delegation_set_id)
    # Listing path: forward the optional paging parameters only when set.
    params = {}
    if module.params.get('max_items'):
        params['MaxItems'] = module.params.get('max_items')
    if module.params.get('next_marker'):
        params['Marker'] = module.params.get('next_marker')
    return client.list_reusable_delegation_sets(**params)
def list_hosted_zones(client, module):
    """List hosted zones, forwarding the optional paging/filter parameters."""
    option_map = (
        ('max_items', 'MaxItems'),
        ('next_marker', 'Marker'),
        ('delegation_set_id', 'DelegationSetId'),
    )
    params = {api_key: module.params.get(opt)
              for opt, api_key in option_map if module.params.get(opt)}
    return client.list_hosted_zones(**params)
def list_hosted_zones_by_name(client, module):
    """List hosted zones ordered by name, forwarding the optional filters."""
    option_map = (
        ('hosted_zone_id', 'HostedZoneId'),
        ('dns_name', 'DNSName'),
        ('max_items', 'MaxItems'),
    )
    params = {api_key: module.params.get(opt)
              for opt, api_key in option_map if module.params.get(opt)}
    return client.list_hosted_zones_by_name(**params)
def change_details(client, module):
    """Describe one change batch request; ``change_id`` is mandatory."""
    change_id = module.params.get('change_id')
    if not change_id:
        module.fail_json(msg="change_id is required")
    return client.get_change(Id=change_id)
def checker_ip_range_details(client, module):
    """Return the Route53 health-checker IP ranges (``module`` is unused)."""
    return client.get_checker_ip_ranges()
def get_count(client, module):
    """Count health checks or hosted zones, depending on the query type."""
    if module.params.get('query') == 'health_check':
        return client.get_health_check_count()
    return client.get_hosted_zone_count()
def get_health_check(client, module):
    """Fetch details, status or last failure reason for one health check."""
    health_check_id = module.params.get('health_check_id')
    if not health_check_id:
        module.fail_json(msg="health_check_id is required")
    # Dispatch table instead of an if/elif chain; only these three methods
    # are routed here by health_check_details().
    dispatch = {
        'details': client.get_health_check,
        'failure_reason': client.get_health_check_last_failure_reason,
        'status': client.get_health_check_status,
    }
    api_call = dispatch[module.params.get('health_check_method')]
    return api_call(HealthCheckId=health_check_id)
def get_resource_tags(client, module):
    """List tags for health-check or hosted-zone resources."""
    resource_ids = module.params.get('resource_id')
    if not resource_ids:
        module.fail_json(msg="resource_id or resource_ids is required")
    if module.params.get('query') == 'health_check':
        resource_type = 'healthcheck'
    else:
        resource_type = 'hostedzone'
    return client.list_tags_for_resources(ResourceIds=resource_ids,
                                          ResourceType=resource_type)
def list_health_checks(client, module):
    """List health checks, forwarding the optional paging parameters."""
    params = {}
    for opt, api_key in (('max_items', 'MaxItems'), ('next_marker', 'Marker')):
        value = module.params.get(opt)
        if value:
            params[api_key] = value
    return client.list_health_checks(**params)
def record_sets_details(client, module):
    """List resource record sets of a hosted zone; ``hosted_zone_id`` is mandatory."""
    zone_id = module.params.get('hosted_zone_id')
    if not zone_id:
        module.fail_json(msg="Hosted Zone Id is required")
    params = {'HostedZoneId': zone_id}
    if module.params.get('max_items'):
        params['MaxItems'] = module.params.get('max_items')
    start_name = module.params.get('start_record_name')
    if start_name:
        params['StartRecordName'] = start_name
    record_type = module.params.get('type')
    # StartRecordType is only meaningful together with StartRecordName.
    if record_type and not start_name:
        module.fail_json(msg="start_record_name must be specified if type is set")
    elif record_type:
        params['StartRecordType'] = record_type
    return client.list_resource_record_sets(**params)
def health_check_details(client, module):
    """Dispatch ``health_check_method`` to the matching helper function."""
    dispatch = {
        'list': list_health_checks,
        'details': get_health_check,
        'status': get_health_check,
        'failure_reason': get_health_check,
        'count': get_count,
        'tags': get_resource_tags,
    }
    handler = dispatch[module.params.get('health_check_method')]
    return handler(client, module)
def hosted_zone_details(client, module):
    """Dispatch ``hosted_zone_method`` to the matching helper function."""
    dispatch = {
        'details': get_hosted_zone,
        'list': list_hosted_zones,
        'list_by_name': list_hosted_zones_by_name,
        'count': get_count,
        'tags': get_resource_tags,
    }
    handler = dispatch[module.params.get('hosted_zone_method')]
    return handler(client, module)
def main():
    """Module entry point.

    Builds the argument spec, opens a boto3 Route53 client and dispatches to
    the helper matching the requested ``query``, exiting with the raw AWS
    response as the module result.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        query=dict(choices=[
            'change',
            'checker_ip_range',
            'health_check',
            'hosted_zone',
            'record_sets',
            'reusable_delegation_set',
        ], required=True),
        change_id=dict(),
        hosted_zone_id=dict(),
        max_items=dict(type='str'),
        next_marker=dict(),
        delegation_set_id=dict(),
        start_record_name=dict(),
        type=dict(choices=[
            'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS'
        ]),
        dns_name=dict(),
        resource_id=dict(type='list', aliases=['resource_ids']),
        health_check_id=dict(),
        hosted_zone_method=dict(choices=[
            'details',
            'list',
            'list_by_name',
            'count',
            'tags'
        ], default='list'),
        health_check_method=dict(choices=[
            'list',
            'details',
            'status',
            'failure_reason',
            'count',
            'tags',
        ], default='list'),
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['hosted_zone_method', 'health_check_method'],
        ],
    )

    # Validate Requirements.  boto3 provides the connection and boto (v2)
    # provides the NoAuthHandlerFound exception caught below, so BOTH must be
    # importable.  The old "not (HAS_BOTO or HAS_BOTO3)" test let execution
    # continue with only one library installed and then crashed with a
    # NameError in the except clause instead of a clean module failure.
    if not (HAS_BOTO and HAS_BOTO3):
        module.fail_json(msg='boto and boto3 are required.')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        route53 = boto3_conn(module, conn_type='client', resource='route53', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg="Can't authorize connection - %s " % str(e))

    # Map each query type to its handler; all share the (client, module) signature.
    invocations = {
        'change': change_details,
        'checker_ip_range': checker_ip_range_details,
        'health_check': health_check_details,
        'hosted_zone': hosted_zone_details,
        'record_sets': record_sets_details,
        'reusable_delegation_set': reusable_delegation_set_details,
    }
    results = invocations[module.params.get('query')](route53, module)
    module.exit_json(**results)
# Run the module when invoked directly (as Ansible does).
if __name__ == '__main__':
    main()
| gpl-3.0 |
piMoll/SEILAPLAN | lib/reportlab/graphics/testdrawings.py | 2 | 9433 | #!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history https://hg.reportlab.com/hg-public/reportlab/log/tip/src/reportlab/graphics/testdrawings.py
__version__='3.3.0'
__doc__="""Defines some standard drawings to use as test cases
This contains a number of routines to generate test drawings
for reportlab/graphics. For now they are contrived, but we will expand them
to try and trip up any parser. Feel free to add more.
"""
from reportlab.graphics.shapes import *
from reportlab.lib import colors
def getDrawing1():
    """Hello World, on a rectangular background"""
    drawing = Drawing(400, 200)
    # Yellow backdrop first so the text renders on top of it.
    drawing.add(Rect(50, 50, 300, 100, fillColor=colors.yellow))
    drawing.add(String(180, 100, 'Hello World', fillColor=colors.red))
    return drawing
def getDrawing2():
    """This demonstrates the basic shapes. There are
    no groups or references. Each solid shape should have
    a purple fill."""
    drawing = Drawing(400, 200)  # , fillColor=colors.purple)
    drawing.add(Line(10, 10, 390, 190))
    # Three purple circles along the horizontal mid line.
    for centre_x in (100, 200, 300):
        drawing.add(Circle(centre_x, 100, 20, fillColor=colors.purple))
    drawing.add(Wedge(330, 100, 40, -10, 40, fillColor=colors.purple))
    # Zig-zag polyline: x from 120 to 200 step 10, y alternating 10/20.
    zigzag = []
    for index, x in enumerate(range(120, 210, 10)):
        zigzag.extend([x, 10 if index % 2 == 0 else 20])
    drawing.add(PolyLine(zigzag))
    drawing.add(Polygon([300, 20, 350, 20, 390, 80, 300, 75, 330, 40]))
    drawing.add(Ellipse(50, 150, 40, 20))
    # Square-cornered rectangle with a fat red stroke.
    drawing.add(Rect(120, 150, 60, 30,
                     strokeWidth=10,
                     strokeColor=colors.red,
                     fillColor=colors.yellow))
    # Round-cornered rectangle.
    drawing.add(Rect(220, 150, 60, 30, 10, 10))
    drawing.add(String(10, 50, 'Basic Shapes', fillColor=colors.black))
    return drawing
##def getDrawing2():
## """This drawing uses groups. Each group has two circles and a comment.
## The line style is set at group level and should be red for the left,
## bvlue for the right."""
## D = Drawing(400, 200)
##
## Group1 = Group()
##
## Group1.add(String(50, 50, 'Group 1', fillColor=colors.black))
## Group1.add(Circle(75,100,25))
## Group1.add(Circle(125,100,25))
## D.add(Group1)
##
## Group2 = Group(
## String(250, 50, 'Group 2', fillColor=colors.black),
## Circle(275,100,25),
## Circle(325,100,25)#,
##def getDrawing2():
## """This drawing uses groups. Each group has two circles and a comment.
## The line style is set at group level and should be red for the left,
## bvlue for the right."""
## D = Drawing(400, 200)
##
## Group1 = Group()
##
## Group1.add(String(50, 50, 'Group 1', fillColor=colors.black))
## Group1.add(Circle(75,100,25))
## Group1.add(Circle(125,100,25))
## D.add(Group1)
##
## Group2 = Group(
## String(250, 50, 'Group 2', fillColor=colors.black),
## Circle(275,100,25),
## Circle(325,100,25)#,
##
## #group attributes
## #strokeColor=colors.blue
## )
## D.add(Group2)
## return D
##
##
##def getDrawing3():
## """This uses a named reference object. The house is a 'subroutine'
## the basic brick colored walls are defined, but the roof and window
## color are undefined and may be set by the container."""
##
## D = Drawing(400, 200, fill=colors.bisque)
##
##
## House = Group(
## Rect(2,20,36,30, fill=colors.bisque), #walls
## Polygon([0,20,40,20,20,5]), #roof
## Rect(8, 38, 8, 12), #door
## Rect(25, 38, 8, 7), #window
## Rect(8, 25, 8, 7), #window
## Rect(25, 25, 8, 7) #window
##
## )
## D.addDef('MyHouse', House)
##
## # one row all the same color
## D.add(String(20, 40, 'British Street...',fill=colors.black))
## for i in range(6):
## x = i * 50
## D.add(NamedReference('MyHouse',
## House,
## transform=translate(x, 40),
## fill = colors.brown
## )
## )
##
## # now do a row all different
## D.add(String(20, 120, 'Mediterranean Street...',fill=colors.black))
## x = 0
## for color in (colors.blue, colors.yellow, colors.orange,
## colors.red, colors.green, colors.chartreuse):
## D.add(NamedReference('MyHouse',
## House,
## transform=translate(x,120),
## fill = color,
## )
## )
## x = x + 50
## #..by popular demand, the mayor gets a big one at the end
## D.add(NamedReference('MyHouse',
## House,
## transform=mmult(translate(x,110), scale(1.2,1.2)),
## fill = color,
## )
## )
##
##
## return D
##
##def getDrawing4():
## """This tests that attributes are 'unset' correctly when
## one steps back out of a drawing node. All the circles are part of a
## group setting the line color to blue; the second circle explicitly
## sets it to red. Ideally, the third circle should go back to blue."""
## D = Drawing(400, 200)
##
##
## G = Group(
## Circle(100,100,20),
## Circle(200,100,20, stroke=colors.blue),
## Circle(300,100,20),
## stroke=colors.red,
## stroke_width=3,
## fill=colors.aqua
## )
## D.add(G)
##
##
## D.add(String(10,50, 'Stack Unwinding - should be red, blue, red'))
##
## return D
##
##
##def getDrawing5():
## """This Rotates Coordinate Axes"""
## D = Drawing(400, 200)
##
##
##
## Axis = Group(
## Line(0,0,100,0), #x axis
## Line(0,0,0,50), # y axis
## Line(0,10,10,10), #ticks on y axis
## Line(0,20,10,20),
## Line(0,30,10,30),
## Line(0,40,10,40),
## Line(10,0,10,10), #ticks on x axis
## Line(20,0,20,10),
## Line(30,0,30,10),
## Line(40,0,40,10),
## Line(50,0,50,10),
## Line(60,0,60,10),
## Line(70,0,70,10),
## Line(80,0,80,10),
## Line(90,0,90,10),
## String(20, 35, 'Axes', fill=colors.black)
## )
##
## D.addDef('Axes', Axis)
##
## D.add(NamedReference('Axis', Axis,
## transform=translate(10,10)))
## D.add(NamedReference('Axis', Axis,
## transform=mmult(translate(150,10),rotate(15)))
## )
## return D
##
##def getDrawing6():
## """This Rotates Text"""
## D = Drawing(400, 300, fill=colors.black)
##
## xform = translate(200,150)
## C = (colors.black,colors.red,colors.green,colors.blue,colors.brown,colors.gray, colors.pink,
## colors.lavender,colors.lime, colors.mediumblue, colors.magenta, colors.limegreen)
##
## for i in range(12):
## D.add(String(0, 0, ' - - Rotated Text', fill=C[i%len(C)], transform=mmult(xform, rotate(30*i))))
##
## return D
##
##def getDrawing7():
## """This defines and tests a simple UserNode0 (the trailing zero denotes
## an experimental method which is not part of the supported API yet).
## Each of the four charts is a subclass of UserNode which generates a random
## series when rendered."""
##
## class MyUserNode(UserNode0):
## import whrandom, math
##
##
## def provideNode(self, sender):
## """draw a simple chart that changes everytime it's drawn"""
## # print "here's a random number %s" % self.whrandom.random()
## #print "MyUserNode.provideNode being called by %s" % sender
## g = Group()
## #g._state = self._state # this is naughty
## PingoNode.__init__(g, self._state) # is this less naughty ?
## w = 80.0
## h = 50.0
## g.add(Rect(0,0, w, h, stroke=colors.black))
## N = 10.0
## x,y = (0,h)
## dx = w/N
## for ii in range(N):
## dy = (h/N) * self.whrandom.random()
## g.add(Line(x,y,x+dx, y-dy))
## x = x + dx
## y = y - dy
## return g
##
## D = Drawing(400,200, fill=colors.white) # AR - same size as others
##
## D.add(MyUserNode())
##
## graphcolor= [colors.green, colors.red, colors.brown, colors.purple]
## for ii in range(4):
## D.add(Group( MyUserNode(stroke=graphcolor[ii], stroke_width=2),
## transform=translate(ii*90,0) ))
##
## #un = MyUserNode()
## #print un.provideNode()
## return D
##
##def getDrawing8():
## """Test Path operations--lineto, curveTo, etc."""
## D = Drawing(400, 200, fill=None, stroke=colors.purple, stroke_width=2)
##
## xform = translate(200,100)
## C = (colors.black,colors.red,colors.green,colors.blue,colors.brown,colors.gray, colors.pink,
## colors.lavender,colors.lime, colors.mediumblue, colors.magenta, colors.limegreen)
## p = Path(50,50)
## p.lineTo(100,100)
## p.moveBy(-25,25)
## p.curveTo(150,125, 125,125, 200,50)
## p.curveTo(175, 75, 175, 98, 62, 87)
##
##
## D.add(p)
## D.add(String(10,30, 'Tests of path elements-lines and bezier curves-and text formating'))
## D.add(Line(220,150, 220,200, stroke=colors.red))
## D.add(String(220,180, "Text should be centered", text_anchor="middle") )
##
##
## return D
# When run as a script, just print the module docstring describing the
# available test drawings.
if __name__=='__main__':
    print(__doc__)
| gpl-2.0 |
allevin/PyGithub | tests/Issue50.py | 2 | 4558 | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from . import Framework
class Issue50(Framework.TestCase):  # https://github.com/jacquev6/PyGithub/issues/50
    # Regression tests for labels whose names contain spaces and URL-unsafe
    # characters.  Each test replays a recorded GitHub API interaction
    # (Framework.TestCase), so the exact sequence of API calls inside every
    # method must match the recording.

    def setUp(self):
        super().setUp()
        # Shared fixtures: the PyGithub repository, issue #50, and a label
        # name that requires URL escaping.
        self.repo = self.g.get_user().get_repo("PyGithub")
        self.issue = self.repo.get_issue(50)
        self.labelName = "Label with spaces and strange characters (&*#$)"

    def testCreateLabel(self):
        # Creating a label with an awkward name must round-trip unchanged.
        label = self.repo.create_label(self.labelName, "ffff00")
        self.assertEqual(label.name, self.labelName)

    def testGetLabel(self):
        # Fetching the label by its (escaped) name must return the same name.
        label = self.repo.get_label(self.labelName)
        self.assertEqual(label.name, self.labelName)

    def testGetLabels(self):
        # The awkward label must appear in the full label listing.
        self.assertListKeyEqual(
            self.repo.get_labels(),
            lambda l: l.name,
            [
                "Refactoring",
                "Public interface",
                "Functionalities",
                "Project management",
                "Bug",
                "Question",
                "RequestedByUser",
                self.labelName,
            ],
        )

    def testAddLabelToIssue(self):
        self.issue.add_to_labels(self.repo.get_label(self.labelName))

    def testRemoveLabelFromIssue(self):
        self.issue.remove_from_labels(self.repo.get_label(self.labelName))

    def testSetIssueLabels(self):
        # Replacing the whole label set must accept the awkward name.
        self.issue.set_labels(
            self.repo.get_label("Bug"),
            self.repo.get_label("RequestedByUser"),
            self.repo.get_label(self.labelName),
        )

    def testIssueLabels(self):
        # Labels from the cached issue attribute.
        self.assertListKeyEqual(
            self.issue.labels,
            lambda l: l.name,
            ["Bug", self.labelName, "RequestedByUser"],
        )

    def testIssueGetLabels(self):
        # Labels fetched via the dedicated API call.
        self.assertListKeyEqual(
            self.issue.get_labels(),
            lambda l: l.name,
            ["Bug", self.labelName, "RequestedByUser"],
        )

    def testGetIssuesWithLabel(self):
        # Filtering issues by the awkward label must work as well.
        self.assertListKeyEqual(
            self.repo.get_issues(labels=[self.repo.get_label(self.labelName)]),
            lambda i: i.number,
            [52, 50],
        )

    def testCreateIssueWithLabel(self):
        # An issue created with the label must carry it back in its label list.
        issue = self.repo.create_issue(
            "Issue created by PyGithub to test issue #50",
            labels=[self.repo.get_label(self.labelName)],
        )
        self.assertListKeyEqual(issue.labels, lambda l: l.name, [self.labelName])
        self.assertEqual(issue.number, 52)
| lgpl-3.0 |
tbs1980/densplot | densplot.py | 1 | 6416 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import logging
import math
import matplotlib.ticker as mtick
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from scipy.ndimage import gaussian_filter
def weighted_avg_and_std(values, weights):
    """
    Return the weighted average and standard deviation as a tuple.

    values, weights -- Numpy ndarrays with the same shape.
    http://stackoverflow.com/questions/2413522/weighted-standard-deviation-in-numpy
    """
    mean = np.average(values, weights=weights)
    # Weighted mean of the squared deviations; fast and numerically precise.
    variance = np.average((values - mean) ** 2, weights=weights)
    return (mean, math.sqrt(variance))
def densplot(samples,weights):
    """
    A function to plot the densities from MCMC chains.

    Renders a corner/triangle plot: 1-d weighted histograms of each
    parameter on the diagonal, and smoothed 2-d histograms with a
    1-sigma (68.3%) contour below the diagonal.  The upper triangle
    is removed.

    @param samples mcmc samples, shape (num_samples, num_params)
    @param weights log-weights for the samples (exponentiated before use)
    @return the 2-d array of matplotlib axes created for the figure
    """
    # sanity checks
    if samples.shape[0] != weights.shape[0] :
        raise ValueError("Number of rows in samples and weights should be equal")
    if samples.shape[0] < samples.shape[1] :
        raise ValueError("We require more samples than number of parameters")
    # define the dimensions of the figure from the number of panels
    num_params = samples.shape[1]
    majorFormatter = FormatStrFormatter('%4.2e')
    K = num_params
    factor = 1.5  # size of one side of one panel
    lbdim = 1.5 * factor  # size of left/bottom margin
    trdim = 1.5 * factor  # size of top/right margin
    whspace = 0.05  # w/hspace size
    plotdim = factor * K + factor * (K - 1.) * whspace
    dim = lbdim + plotdim + trdim
    # get the subplots, packed with no space between panels
    fig, axes = plt.subplots(num_params, num_params, sharex=False, sharey=False,
                             squeeze=False, figsize=(dim, dim))
    fig.subplots_adjust(wspace=0., hspace=0.)
    for ent_i in range(num_params):
        for ent_j in range(num_params):
            ax = axes[ent_i, ent_j]
            # start with every tick/label switched off; selected panels
            # re-enable them further down
            ax.tick_params(axis='both', which='both', bottom='off', top='off',
                           right='off', left='off', labelbottom='off', labeltop='off',
                           labelleft='off', labelright='off')
            # delete all the upper triangle
            if ent_j > ent_i :
                fig.delaxes(ax)
            # histogram of each parameter on the diagonal, binned over the
            # weighted mean +/- 4 sigma
            if ent_j == ent_i :
                mu, sig = weighted_avg_and_std(samples[:, ent_j], np.exp(weights))
                rmin = mu - 4.*sig
                rmax = mu + 4.*sig
                counts, bins, patchs = ax.hist(samples[:, ent_j], histtype='step',
                                               weights=np.exp(weights), normed=True, color='black',
                                               bins=20, range=(rmin, rmax))
                # NOTE(review): counts_smooth_sorted_flat is computed here but
                # never used in this (diagonal) branch.
                counts_smooth = gaussian_filter(counts, sigma=2)
                counts_smooth_sorted_flat = sorted(counts_smooth.flatten(), reverse=True)
                hist_axes = ax.axis()
                # red vertical lines at mu +/- sigma spanning the panel height
                ax.plot([mu+sig, mu+sig], [hist_axes[2], hist_axes[3]], color='r')
                ax.plot([mu-sig, mu-sig], [hist_axes[2], hist_axes[3]], color='r')
            # 2-d histogram for the cross-parameter (lower-triangle) panels
            if ent_j < ent_i :
                mu_i, sig_i = weighted_avg_and_std(samples[:, ent_i], np.exp(weights))
                mu_j, sig_j = weighted_avg_and_std(samples[:, ent_j], np.exp(weights))
                rmin_i = mu_i - 4.*sig_i
                rmax_i = mu_i + 4.*sig_i
                rmin_j = mu_j - 4.*sig_j
                rmax_j = mu_j + 4.*sig_j
                hist_range = ([rmin_i, rmax_i], [rmin_j, rmax_j])
                counts, ybins, xbins, image = ax.hist2d(samples[:, ent_i], samples[:, ent_j], weights=np.exp(weights),
                                                        normed=True, bins=40, range=hist_range, cmap='Greys')
                counts_smooth = gaussian_filter(counts, sigma=2)
                counts_smooth_sorted_flat = sorted(counts_smooth.flatten(), reverse=True)
                total = np.sum(counts_smooth_sorted_flat)
                # find the density level that encloses 68.268949% of the total
                # mass (1 sigma) by accumulating bins from densest downwards
                one_simga = total*0.68268949
                sum_counts = 0.
                level_1 = 0.
                for i in range(len(counts_smooth_sorted_flat)):
                    sum_counts += counts_smooth_sorted_flat[i]
                    if sum_counts >= one_simga:
                        level_1 = counts_smooth_sorted_flat[i]
                        break
                ax.contour(counts_smooth, extent=[rmin_i, rmax_i, rmin_j, rmax_j],
                           levels=[level_1], colors=['red'])
                # disabled 2-sigma contour variant kept below for reference
                """
                two_simga = total*0.95449974
                sum_counts = 0.
                level_2 = 0.
                for i in range(len(counts_smooth_sorted_flat)):
                    sum_counts += counts_smooth_sorted_flat[i]
                    if sum_counts >= two_simga:
                        level_2 = counts_smooth_sorted_flat[i]
                        break
                ax.contour(counts_smooth,extent=[rmin_i,rmax_i,rmin_j,rmax_j],
                    levels=[level_1,level_2],colors=['red','blue'])
                """
            # axis ticks and labels: bottom row gets rotated x tick labels on
            # the two interior ticks of the +/- 4 sigma range
            if ent_i == num_params - 1:
                ax.tick_params(axis='x', which='major', direction='in',
                               bottom='on', top='off', right='off', left='off',
                               labelbottom='on', labeltop='off', labelleft='off', labelright='off')
                mu_i, sig_i = weighted_avg_and_std(samples[:, ent_i], np.exp(weights))
                rmin_i = mu_i - 4.*sig_i
                rmax_i = mu_i + 4.*sig_i
                x_ticks = np.linspace(rmin_i, rmax_i, 4)
                ax.set_xticks(x_ticks[1:3])
                ax.xaxis.set_major_formatter(majorFormatter)
                [l.set_rotation(45) for l in ax.get_xticklabels()]
            # first column (except the diagonal panel) gets y tick labels
            if ent_j == 0 and ent_j != ent_i:
                ax.tick_params(axis='y', which='major', direction='in',
                               bottom='off', top='off', right='off', left='on',
                               labelbottom='off', labeltop='off', labelleft='on', labelright='off')
                mu_j, sig_j = weighted_avg_and_std(samples[:, ent_j], np.exp(weights))
                rmin_j = mu_j - 4.*sig_j
                rmax_j = mu_j + 4.*sig_j
                y_ticks = np.linspace(rmin_j, rmax_j, 4)
                ax.set_yticks(y_ticks[1:3])
                ax.xaxis.set_major_formatter(majorFormatter)
                ax.yaxis.set_major_formatter(majorFormatter)
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                [l.set_rotation(45) for l in ax.get_yticklabels()]
    return axes
| apache-2.0 |
Fat-Zer/FreeCAD_sf_master | src/Mod/Fem/femtaskpanels/task_constraint_initialflowvelocity.py | 12 | 5415 | # ***************************************************************************
# * Copyright (c) 2017 Markus Hovorka <m.hovorka@live.de> *
# * Copyright (c) 2020 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM constraint initial flow velocity task panel for the document object"
__author__ = "Markus Hovorka, Bernd Hahnebach"
__url__ = "https://www.freecadweb.org"
## @package task_constraint_initialflowvelocity
# \ingroup FEM
# \brief task panel for constraint initial flow velocity object
import FreeCAD
import FreeCADGui
from FreeCAD import Units
from femtools import femutils
from femtools import membertools
class _TaskPanel(object):
    """Task panel for the FEM initial flow velocity constraint object.

    Mirrors the VelocityX/Y/Z values (and their *Enabled flags) of the
    document object into the Qt widget and back.
    """

    # Axis suffixes used to address VelocityX/Y/Z and the matching widgets.
    _AXES = ("X", "Y", "Z")
    # Velocity values are displayed and parsed in this unit.
    _UNIT = "m/s"

    def __init__(self, obj):
        self._obj = obj
        self._paramWidget = FreeCADGui.PySideUic.loadUi(
            FreeCAD.getHomePath() + "Mod/Fem/Resources/ui/InitialFlowVelocity.ui")
        self._initParamWidget()
        self.form = [self._paramWidget]
        # Locate the analysis' mesh and the part it was generated from, so
        # open()/close can toggle their visibility.
        analysis = obj.getParentGroup()
        self._mesh = None
        self._part = None
        if analysis is not None:
            self._mesh = membertools.get_single_member(analysis, "Fem::FemMeshObject")
        if self._mesh is not None:
            self._part = femutils.get_part_to_mesh(self._mesh)
        self._partVisible = None
        self._meshVisible = None

    def open(self):
        # Remember the current visibility, then show the part (for face
        # selection) and hide the mesh while the panel is open.
        if self._mesh is not None and self._part is not None:
            self._meshVisible = self._mesh.ViewObject.isVisible()
            self._partVisible = self._part.ViewObject.isVisible()
            self._mesh.ViewObject.hide()
            self._part.ViewObject.show()

    def reject(self):
        # Discard widget changes and restore the scene.
        FreeCADGui.ActiveDocument.resetEdit()
        self._restoreVisibility()
        return True

    def accept(self):
        # Commit widget changes to the document object and restore the scene.
        self._applyWidgetChanges()
        self._obj.Document.recompute()
        FreeCADGui.ActiveDocument.resetEdit()
        self._restoreVisibility()
        return True

    def _restoreVisibility(self):
        # Put mesh and part back into the state recorded by open().
        if self._mesh is None or self._part is None:
            return
        for view_obj, was_visible in (
            (self._mesh.ViewObject, self._meshVisible),
            (self._part.ViewObject, self._partVisible),
        ):
            if was_visible:
                view_obj.show()
            else:
                view_obj.hide()

    def _initParamWidget(self):
        # Mirror the document object into the widgets.  The checkbox means
        # "unspecified", i.e. the inverse of the Velocity*Enabled flag.
        for axis in self._AXES:
            value = getattr(self._obj, "Velocity" + axis)
            text_field = getattr(self._paramWidget, "velocity%sTxt" % axis)
            text_field.setText(str(value) + self._UNIT)
            checkbox = getattr(self._paramWidget, "velocity%sBox" % axis)
            checkbox.setChecked(not getattr(self._obj, "Velocity%sEnabled" % axis))

    def _applyWidgetChanges(self):
        # Write the widget state back; a velocity component is only parsed
        # and stored when its "unspecified" checkbox is unchecked.
        for axis in self._AXES:
            checkbox = getattr(self._paramWidget, "velocity%sBox" % axis)
            enabled = not checkbox.isChecked()
            setattr(self._obj, "Velocity%sEnabled" % axis, enabled)
            if enabled:
                text_field = getattr(self._paramWidget, "velocity%sTxt" % axis)
                quantity = Units.Quantity(text_field.text())
                setattr(self._obj, "Velocity" + axis,
                        float(quantity.getValueAs(self._UNIT)))
| lgpl-2.1 |
mstreatfield/anim-studio-tools | miki/sources/miki/builder.py | 5 | 7907 | #
# Dr. D Studios
# ---------------------------------------------------
"""
Handle building the documentation
"""
import os
import re
import sys
import errno
import shutil
import subprocess
import cStringIO as StringIO
from sphinx import main as sphinx_main
from rodin import logging, terminal
from . import sphinx_config
from . import structure
from . import errors
# Documentation output formats accepted in the ``targets`` argument of ``build``.
VALID_TARGETS = ["html", "pdf", "latex", "doxygen", "changes"]
def build(source, destination, defaults=None, targets=None, verbose=False, doxyfile=None, clean=False):
    """
    Build documentation in source outputting to destination directory.
    :param source: directory - the docs directory containing conf.py
    :param destination: directory to build in (e.g. _build)
    :param defaults: dictionary of default values to pass to Sphinx configuration (e.g. project name).
    :param targets: list of output targets to build (html, pdf, latex etc). If None a default set is used.
    :param verbose: if True will output more information as build proceeds
    :param doxyfile: if set will attempt to build doxygen documentation sources first using doxyfile
    :param clean: if True will remove each targets output directory before build
    :raises BuilderError: on fail
    .. note::
        Sub-folders per documentation type will be made under the destination directory.
    .. versionchanged:: 1.0
        Miki now reports all warnings and errors regardless of whether verbose argument is True or False.
    """
    log = logging.get_logger()
    # Matches build-output lines that must always be surfaced to the user.
    important_message_reg = re.compile("(WARNING|ERROR|FAILED)")
    # Sphinx likes elements to be interpreted as relative to conf.py so change directory for build to source
    # wrap in a try,finally block to ensure directory gets changed back even if there are errors
    current_directory = os.getcwd()
    os.chdir(source)
    try:
        config_file = structure.get_config_file(source)
        if not os.path.isfile(config_file):
            raise errors.BuilderError("Cannot build - required config file not found at expected location: %s" % config_file)
        # update configuration with passed arguments
        if defaults:
            log.info("Adding defaults to configuration: %s" % defaults)
            sphinx_config.__dict__.update(defaults)
            sphinx_config.__dict__.update(sphinx_config.compute_derived_values(**defaults))
        # build targets
        if targets is None:
            targets = ["html", "pdf"]
        if doxyfile:
            # NOTE(review): this mutates the caller's ``targets`` list when one
            # was passed in — TODO confirm that is intended.
            targets.insert(0, "doxygen")
        for target in targets:
            if target not in VALID_TARGETS:
                raise errors.BuilderError("Invalid target '%s' specified - must be one of %s." % (target, ", ".join(VALID_TARGETS)))
            # Per-target output directory under the destination.
            output = os.path.join(destination, target)
            if clean and os.path.exists(output):
                log.info("Cleaning existing output for %s target %s" % (target, output))
                shutil.rmtree(output)
            log.info("Building %s in %s" % (target, output))
            try:
                os.makedirs(output)
            except OSError, e:
                # An already-existing output directory is fine; anything else is fatal.
                if e.errno == errno.EEXIST:
                    pass
                else:
                    raise
            # At present the pdf builder breaks the standard code-block directive
            # so it is added in on a need only basis
            if target == "pdf":
                sphinx_config.extensions.append("rst2pdf.pdfbuilder")
            else:
                if "rst2pdf.pdfbuilder" in sphinx_config.extensions:
                    sphinx_config.extensions.remove("rst2pdf.pdfbuilder")
            if target == "doxygen":
                # read doxyfile contents
                # NOTE(review): assumes ``defaults`` is a dict when a doxygen
                # target is built — would raise if defaults is None; TODO confirm.
                fd = open(doxyfile, "r")
                contents = fd.read()
                fd.close()
                # doxygen will take the last specified argument as the main one when encountering the same argument
                # more than once, so can just append the overrides
                contents = contents.split("\n")
                contents.append("PROJECT_NAME = %s" % defaults.get("project", "Unknown"))
                contents.append("OUTPUT_DIRECTORY = %s" % output)
                contents.append("GENERATE_XML = YES")
                contents.append("XML_OUTPUT = xml")
                contents.append("CREATE_SUBDIRS = NO")
                contents = "\n".join(contents)
                # now run doxygen in a subprocess
                p = subprocess.Popen(["doxygen", "-"],
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT,
                                     cwd=os.path.dirname(doxyfile))
                # NOTE(review): ``output`` is rebound here from the directory
                # path to the (stdout, stderr) tuple from communicate().
                output = p.communicate(input=contents)
                if verbose:
                    print output[0]
                if p.returncode != 0:
                    raise errors.BuilderError("Doxygen build failed: %s" % output[0])
            else:
                # Sphinx build
                args = ["sphinx-build"]
                args.extend(["-b", target])
                args.append(source)
                args.append(output)
                # redirect output if not verbose
                # Both stdout and stderr are captured into the same buffer so
                # they can be scanned for WARNING/ERROR/FAILED lines below.
                saved_out = sys.stdout
                saved_err = sys.stderr
                logging.silence("rst2pdf")
                stdout = StringIO.StringIO()
                stderr = stdout
                sys.stdout = stdout
                sys.stderr = stderr
                try:
                    result = sphinx_main(args)
                except Exception, error:
                    # NOTE(review): exceptions from sphinx are swallowed here;
                    # failures are detected by scanning the captured output
                    # below ("Exception" substring / FAILED lines) — confirm.
                    pass
                finally:
                    # Always restore the real stdout/stderr before parsing.
                    sys.stdout = saved_out
                    sys.stderr = saved_err
                output = stdout.getvalue()
                # parse output for errors and warnings
                failed = False
                if "Exception" in output:
                    log.exception(output)
                else:
                    lines = output.split(os.linesep)
                    for line in lines:
                        match = important_message_reg.search(line)
                        if match:
                            if match.group(1) == 'WARNING':
                                log.warning(line)
                            elif match.group(1) in ('ERROR', 'FAILED'):
                                log.error(line)
                                failed = True
                        elif verbose:
                            log.info(line)
                # handle errors
                if failed:
                    raise errors.BuilderError("Errors occurred during build. Use -l/--loud as build argument for full output.")
    finally:
        # change directory back
        os.chdir(current_directory)
# Copyright 2008-2012 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios)
#
# This file is part of anim-studio-tools.
#
# anim-studio-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# anim-studio-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with anim-studio-tools. If not, see <http://www.gnu.org/licenses/>.
| gpl-3.0 |
matrix-org/synapse | synapse/config/_base.py | 1 | 31995 | # Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import errno
import os
from collections import OrderedDict
from hashlib import sha256
from textwrap import dedent
from typing import Any, Iterable, List, MutableMapping, Optional, Union
import attr
import jinja2
import pkg_resources
import yaml
from synapse.util.templates import _create_mxc_to_http_filter, _format_ts_filter
class ConfigError(Exception):
    """Raised when the configuration cannot be parsed.

    Args:
        msg: human-readable description of the problem.
        path: optional sequence of keys locating the problem within the
            configuration, where that is known.
    """

    def __init__(self, msg: str, path: Optional[Iterable[str]] = None):
        self.path = path
        self.msg = msg
# We split these messages out to allow packages to override with package
# specific instructions.
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS = """\
Please opt in or out of reporting anonymized homeserver usage statistics, by
setting the `report_stats` key in your config file to either True or False.
"""
# Encouragement text used wherever the user is prompted about report_stats
# (appended to the instructions above, and shown by --generate-config).
MISSING_REPORT_STATS_SPIEL = """\
We would really appreciate it if you could help our project out by reporting
anonymized usage statistics from your homeserver. Only very basic aggregate
data (e.g. number of users) will be reported, but it helps us to track the
growth of the Matrix community, and helps us to make Matrix a success, as well
as to convince other networks that they should peer with us.
Thank you.
"""
# Error text raised when the mandatory `server_name` option is absent.
MISSING_SERVER_NAME = """\
Missing mandatory `server_name` config option.
"""
# Prepended verbatim to every generated configuration file.
CONFIG_FILE_HEADER = """\
# Configuration file for Synapse.
#
# This is a YAML file: see [1] for a quick introduction. Note in particular
# that *indentation is important*: all the elements of a list or dictionary
# should have the same indentation.
#
# [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html
"""
def path_exists(file_path):
    """Check if a file exists.

    Unlike os.path.exists, this throws an exception if there is an error
    checking if the file exists (for example, if there is a perms error on
    the parent dir).

    Returns:
        bool: True if the file exists; False if not.
    """
    try:
        os.stat(file_path)
    except OSError as e:
        # Only "no such file" means a clean False; anything else (e.g. a
        # permission error on the parent directory) is propagated.
        if e.errno != errno.ENOENT:
            raise e
        return False
    return True
class Config:
    """
    A configuration section, containing configuration keys and values.
    Attributes:
        section (str): The section title of this config object, such as
            "tls" or "logger". This is used to refer to it on the root
            logger (for example, `config.tls.some_option`). Must be
            defined in subclasses.
    """

    # Must be overridden by each subclass; names this section on the RootConfig.
    section = None

    def __init__(self, root_config=None):
        # The owning RootConfig; used by __getattr__ for cross-section lookups.
        self.root = root_config
        # Get the path to the default Synapse template directory
        self.default_template_dir = pkg_resources.resource_filename(
            "synapse", "res/templates"
        )

    def __getattr__(self, item: str) -> Any:
        """
        Try and fetch a configuration option that does not exist on this class.
        This is so that existing configs that rely on `self.value`, where value
        is actually from a different config section, continue to work.
        """
        # These two names are looked up by invoke_all()/hasattr(); delegating
        # them would make every section appear to implement them.
        if item in ["generate_config_section", "read_config"]:
            raise AttributeError(item)
        if self.root is None:
            raise AttributeError(item)
        else:
            return self.root._get_unclassed_config(self.section, item)

    @staticmethod
    def parse_size(value):
        """Parse a size given as an int (returned unchanged) or a string with
        an optional "K"/"M" suffix into a number of bytes."""
        if isinstance(value, int):
            return value
        sizes = {"K": 1024, "M": 1024 * 1024}
        size = 1
        suffix = value[-1]
        if suffix in sizes:
            value = value[:-1]
            size = sizes[suffix]
        return int(value) * size

    @staticmethod
    def parse_duration(value: Union[str, int]) -> int:
        """Convert a duration as a string or integer to a number of milliseconds.
        If an integer is provided it is treated as milliseconds and is unchanged.
        String durations can have a suffix of 's', 'm', 'h', 'd', 'w', or 'y'.
        No suffix is treated as milliseconds.
        Args:
            value: The duration to parse.
        Returns:
            The number of milliseconds in the duration.
        """
        if isinstance(value, int):
            return value
        second = 1000
        minute = 60 * second
        hour = 60 * minute
        day = 24 * hour
        week = 7 * day
        year = 365 * day
        sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year}
        size = 1
        suffix = value[-1]
        if suffix in sizes:
            value = value[:-1]
            size = sizes[suffix]
        return int(value) * size

    @staticmethod
    def abspath(file_path):
        # Falsy values (None, "") are passed through unchanged.
        return os.path.abspath(file_path) if file_path else file_path

    @classmethod
    def path_exists(cls, file_path):
        return path_exists(file_path)

    @classmethod
    def check_file(cls, file_path, config_name):
        """Check that a configured file path is set and accessible, returning
        its absolute path.
        Raises:
            ConfigError: if file_path is None or cannot be stat'ed.
        """
        if file_path is None:
            raise ConfigError("Missing config for %s." % (config_name,))
        try:
            os.stat(file_path)
        except OSError as e:
            raise ConfigError(
                "Error accessing file '%s' (config for %s): %s"
                % (file_path, config_name, e.strerror)
            )
        return cls.abspath(file_path)

    @classmethod
    def ensure_directory(cls, dir_path):
        """Create dir_path (including parents) if it does not exist, returning
        its absolute path.
        Raises:
            ConfigError: if the path exists but is not a directory.
        """
        dir_path = cls.abspath(dir_path)
        try:
            os.makedirs(dir_path)
        except OSError as e:
            # Already existing is fine; anything else propagates.
            if e.errno != errno.EEXIST:
                raise
        if not os.path.isdir(dir_path):
            raise ConfigError("%s is not a directory" % (dir_path,))
        return dir_path

    @classmethod
    def read_file(cls, file_path, config_name):
        """Deprecated: call read_file directly"""
        return read_file(file_path, (config_name,))

    def read_template(self, filename: str) -> jinja2.Template:
        """Load a template file from disk.
        This function will attempt to load the given template from the default Synapse
        template directory.
        Files read are treated as Jinja templates. The templates is not rendered yet
        and has autoescape enabled.
        Args:
            filename: A template filename to read.
        Raises:
            ConfigError: if the file's path is incorrect or otherwise cannot be read.
        Returns:
            A jinja2 template.
        """
        return self.read_templates([filename])[0]

    def read_templates(
        self,
        filenames: List[str],
        custom_template_directory: Optional[str] = None,
    ) -> List[jinja2.Template]:
        """Load a list of template files from disk using the given variables.
        This function will attempt to load the given templates from the default Synapse
        template directory. If `custom_template_directory` is supplied, that directory
        is tried first.
        Files read are treated as Jinja templates. The templates are not rendered yet
        and have autoescape enabled.
        Args:
            filenames: A list of template filenames to read.
            custom_template_directory: A directory to try to look for the templates
                before using the default Synapse template directory instead.
        Raises:
            ConfigError: if the file's path is incorrect or otherwise cannot be read.
        Returns:
            A list of jinja2 templates.
        """
        search_directories = [self.default_template_dir]
        # The loader will first look in the custom template directory (if specified) for the
        # given filename. If it doesn't find it, it will use the default template dir instead
        if custom_template_directory:
            # Check that the given template directory exists
            if not self.path_exists(custom_template_directory):
                raise ConfigError(
                    "Configured template directory does not exist: %s"
                    % (custom_template_directory,)
                )
            # Search the custom template directory as well
            search_directories.insert(0, custom_template_directory)
        # TODO: switch to synapse.util.templates.build_jinja_env
        loader = jinja2.FileSystemLoader(search_directories)
        env = jinja2.Environment(
            loader=loader,
            autoescape=jinja2.select_autoescape(),
        )
        # Update the environment with our custom filters
        env.filters.update(
            {
                "format_ts": _format_ts_filter,
                # `public_baseurl` is not defined on this class, so it resolves
                # through __getattr__ to another config section at call time.
                "mxc_to_http": _create_mxc_to_http_filter(self.public_baseurl),
            }
        )
        # Load the templates
        return [env.get_template(filename) for filename in filenames]
class RootConfig:
    """
    Holder of an application's configuration.
    What configuration this object holds is defined by `config_classes`, a list
    of Config classes that will be instantiated and given the contents of a
    configuration file to read. They can then be accessed on this class by their
    section name, defined in the Config or dynamically set to be the name of the
    class, lower-cased and with "Config" removed.
    """

    # List of Config subclasses to instantiate; populated by application-specific
    # subclasses of RootConfig.
    config_classes = []

    def __init__(self):
        # Maps section name -> instantiated Config object, in declaration order.
        self._configs = OrderedDict()
        for config_class in self.config_classes:
            if config_class.section is None:
                raise ValueError("%r requires a section name" % (config_class,))
            try:
                conf = config_class(self)
            except Exception as e:
                raise Exception("Failed making %s: %r" % (config_class.section, e))
            self._configs[config_class.section] = conf

    def __getattr__(self, item: str) -> Any:
        """
        Redirect lookups on this object either to config objects, or values on
        config objects, so that `config.tls.blah` works, as well as legacy uses
        of things like `config.server_name`. It will first look up the config
        section name, and then values on those config classes.
        """
        if item in self._configs.keys():
            return self._configs[item]
        return self._get_unclassed_config(None, item)

    def _get_unclassed_config(self, asking_section: Optional[str], item: str):
        """
        Fetch a config value from one of the instantiated config classes that
        has not been fetched directly.
        Args:
            asking_section: If this check is coming from a Config child, which
                one? This section will not be asked if it has the value.
            item: The configuration value key.
        Raises:
            AttributeError if no config classes have the config key. The body
            will contain what sections were checked.
        """
        for key, val in self._configs.items():
            # Skip the section that asked, to avoid infinite recursion through
            # its own __getattr__.
            if key == asking_section:
                continue
            if item in dir(val):
                return getattr(val, item)
        raise AttributeError(item, "not found in %s" % (list(self._configs.keys()),))

    def invoke_all(self, func_name: str, *args, **kwargs) -> MutableMapping[str, Any]:
        """
        Invoke a function on all instantiated config objects this RootConfig is
        configured to use.
        Args:
            func_name: Name of function to invoke
            *args
            **kwargs
        Returns:
            ordered dictionary of config section name and the result of the
            function from it.
        """
        res = OrderedDict()
        for name, config in self._configs.items():
            # Sections without the hook are simply skipped.
            if hasattr(config, func_name):
                res[name] = getattr(config, func_name)(*args, **kwargs)
        return res

    @classmethod
    def invoke_all_static(cls, func_name: str, *args, **kwargs):
        """
        Invoke a static function on config objects this RootConfig is
        configured to use.
        Args:
            func_name: Name of function to invoke
            *args
            **kwargs
        Returns:
            ordered dictionary of config section name and the result of the
            function from it.
        """
        # NOTE: unlike invoke_all, results are discarded here despite the
        # docstring's "Returns" section — callers use it for side effects only.
        for config in cls.config_classes:
            if hasattr(config, func_name):
                getattr(config, func_name)(*args, **kwargs)

    def generate_config(
        self,
        config_dir_path,
        data_dir_path,
        server_name,
        generate_secrets=False,
        report_stats=None,
        open_private_ports=False,
        listeners=None,
        tls_certificate_path=None,
        tls_private_key_path=None,
    ):
        """
        Build a default configuration file
        This is used when the user explicitly asks us to generate a config file
        (eg with --generate_config).
        Args:
            config_dir_path (str): The path where the config files are kept. Used to
                create filenames for things like the log config and the signing key.
            data_dir_path (str): The path where the data files are kept. Used to create
                filenames for things like the database and media store.
            server_name (str): The server name. Used to initialise the server_name
                config param, but also used in the names of some of the config files.
            generate_secrets (bool): True if we should generate new secrets for things
                like the macaroon_secret_key. If False, these parameters will be left
                unset.
            report_stats (bool|None): Initial setting for the report_stats setting.
                If None, report_stats will be left unset.
            open_private_ports (bool): True to leave private ports (such as the non-TLS
                HTTP listener) open to the internet.
            listeners (list(dict)|None): A list of descriptions of the listeners
                synapse should start with each of which specifies a port (str), a list of
                resources (list(str)), tls (bool) and type (str). For example:
                [{
                    "port": 8448,
                    "resources": [{"names": ["federation"]}],
                    "tls": True,
                    "type": "http",
                },
                {
                    "port": 443,
                    "resources": [{"names": ["client"]}],
                    "tls": False,
                    "type": "http",
                }],
            database (str|None): The database type to configure, either `psycopg2`
                or `sqlite3`. (NOTE: not currently a parameter of this method.)
            tls_certificate_path (str|None): The path to the tls certificate.
            tls_private_key_path (str|None): The path to the tls private key.
        Returns:
            str: the yaml config file
        """
        # Concatenate each section's generated YAML under the standard header.
        return CONFIG_FILE_HEADER + "\n\n".join(
            dedent(conf)
            for conf in self.invoke_all(
                "generate_config_section",
                config_dir_path=config_dir_path,
                data_dir_path=data_dir_path,
                server_name=server_name,
                generate_secrets=generate_secrets,
                report_stats=report_stats,
                open_private_ports=open_private_ports,
                listeners=listeners,
                tls_certificate_path=tls_certificate_path,
                tls_private_key_path=tls_private_key_path,
            ).values()
        )

    @classmethod
    def load_config(cls, description, argv):
        """Parse the commandline and config files
        Doesn't support config-file-generation: used by the worker apps.
        Returns: Config object.
        """
        config_parser = argparse.ArgumentParser(description=description)
        cls.add_arguments_to_parser(config_parser)
        obj, _ = cls.load_config_with_parser(config_parser, argv)
        return obj

    @classmethod
    def add_arguments_to_parser(cls, config_parser):
        """Adds all the config flags to an ArgumentParser.
        Doesn't support config-file-generation: used by the worker apps.
        Used for workers where we want to add extra flags/subcommands.
        Args:
            config_parser (ArgumentParser): App description
        """
        config_parser.add_argument(
            "-c",
            "--config-path",
            action="append",
            metavar="CONFIG_FILE",
            help="Specify config file. Can be given multiple times and"
            " may specify directories containing *.yaml files.",
        )
        config_parser.add_argument(
            "--keys-directory",
            metavar="DIRECTORY",
            help="Where files such as certs and signing keys are stored when"
            " their location is not given explicitly in the config."
            " Defaults to the directory containing the last config file",
        )
        # Let each section contribute its own command-line arguments.
        cls.invoke_all_static("add_arguments", config_parser)

    @classmethod
    def load_config_with_parser(cls, parser, argv):
        """Parse the commandline and config files with the given parser
        Doesn't support config-file-generation: used by the worker apps.
        Used for workers where we want to add extra flags/subcommands.
        Args:
            parser (ArgumentParser)
            argv (list[str])
        Returns:
            tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed
            config object and the parsed argparse.Namespace object from
            `parser.parse_args(..)`
        """
        obj = cls()
        config_args = parser.parse_args(argv)
        config_files = find_config_files(search_paths=config_args.config_path)
        if not config_files:
            parser.error("Must supply a config file.")
        # Ancillary files default to living next to the last config file given.
        if config_args.keys_directory:
            config_dir_path = config_args.keys_directory
        else:
            config_dir_path = os.path.dirname(config_files[-1])
        config_dir_path = os.path.abspath(config_dir_path)
        data_dir_path = os.getcwd()
        config_dict = read_config_files(config_files)
        obj.parse_config_dict(
            config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path
        )
        obj.invoke_all("read_arguments", config_args)
        return obj, config_args

    @classmethod
    def load_or_generate_config(cls, description, argv):
        """Parse the commandline and config files
        Supports generation of config files, so is used for the main homeserver app.
        Returns: Config object, or None if --generate-config or --generate-keys was set
        """
        parser = argparse.ArgumentParser(description=description)
        parser.add_argument(
            "-c",
            "--config-path",
            action="append",
            metavar="CONFIG_FILE",
            help="Specify config file. Can be given multiple times and"
            " may specify directories containing *.yaml files.",
        )
        generate_group = parser.add_argument_group("Config generation")
        generate_group.add_argument(
            "--generate-config",
            action="store_true",
            help="Generate a config file, then exit.",
        )
        generate_group.add_argument(
            "--generate-missing-configs",
            "--generate-keys",
            action="store_true",
            help="Generate any missing additional config files, then exit.",
        )
        generate_group.add_argument(
            "-H", "--server-name", help="The server name to generate a config file for."
        )
        generate_group.add_argument(
            "--report-stats",
            action="store",
            help="Whether the generated config reports anonymized usage statistics.",
            choices=["yes", "no"],
        )
        generate_group.add_argument(
            "--config-directory",
            "--keys-directory",
            metavar="DIRECTORY",
            help=(
                "Specify where additional config files such as signing keys and log"
                " config should be stored. Defaults to the same directory as the last"
                " config file."
            ),
        )
        generate_group.add_argument(
            "--data-directory",
            metavar="DIRECTORY",
            help=(
                "Specify where data such as the media store and database file should be"
                " stored. Defaults to the current working directory."
            ),
        )
        generate_group.add_argument(
            "--open-private-ports",
            action="store_true",
            help=(
                "Leave private ports (such as the non-TLS HTTP listener) open to the"
                " internet. Do not use this unless you know what you are doing."
            ),
        )
        cls.invoke_all_static("add_arguments", parser)
        config_args = parser.parse_args(argv)
        config_files = find_config_files(search_paths=config_args.config_path)
        if not config_files:
            parser.error(
                "Must supply a config file.\nA config file can be automatically"
                ' generated using "--generate-config -H SERVER_NAME'
                ' -c CONFIG-FILE"'
            )
        if config_args.config_directory:
            config_dir_path = config_args.config_directory
        else:
            config_dir_path = os.path.dirname(config_files[-1])
        config_dir_path = os.path.abspath(config_dir_path)
        data_dir_path = os.getcwd()
        generate_missing_configs = config_args.generate_missing_configs
        obj = cls()
        if config_args.generate_config:
            if config_args.report_stats is None:
                parser.error(
                    "Please specify either --report-stats=yes or --report-stats=no\n\n"
                    + MISSING_REPORT_STATS_SPIEL
                )
            # Config generation only supports a single config file.
            (config_path,) = config_files
            if not path_exists(config_path):
                print("Generating config file %s" % (config_path,))
                if config_args.data_directory:
                    data_dir_path = config_args.data_directory
                else:
                    data_dir_path = os.getcwd()
                data_dir_path = os.path.abspath(data_dir_path)
                server_name = config_args.server_name
                if not server_name:
                    raise ConfigError(
                        "Must specify a server_name to a generate config for."
                        " Pass -H server.name."
                    )
                config_str = obj.generate_config(
                    config_dir_path=config_dir_path,
                    data_dir_path=data_dir_path,
                    server_name=server_name,
                    report_stats=(config_args.report_stats == "yes"),
                    generate_secrets=True,
                    open_private_ports=config_args.open_private_ports,
                )
                if not path_exists(config_dir_path):
                    os.makedirs(config_dir_path)
                with open(config_path, "w") as config_file:
                    config_file.write(config_str)
                    config_file.write("\n\n# vim:ft=yaml")
                # Also generate ancillary files (signing keys etc.) for the
                # config we just wrote.
                config_dict = yaml.safe_load(config_str)
                obj.generate_missing_files(config_dict, config_dir_path)
                print(
                    (
                        "A config file has been generated in %r for server name"
                        " %r. Please review this file and customise it"
                        " to your needs."
                    )
                    % (config_path, server_name)
                )
                # Generation mode never returns a config object.
                return
            else:
                print(
                    (
                        "Config file %r already exists. Generating any missing config"
                        " files."
                    )
                    % (config_path,)
                )
                generate_missing_configs = True
        config_dict = read_config_files(config_files)
        if generate_missing_configs:
            obj.generate_missing_files(config_dict, config_dir_path)
            return None
        obj.parse_config_dict(
            config_dict, config_dir_path=config_dir_path, data_dir_path=data_dir_path
        )
        obj.invoke_all("read_arguments", config_args)
        return obj

    def parse_config_dict(self, config_dict, config_dir_path=None, data_dir_path=None):
        """Read the information from the config dict into this Config object.
        Args:
            config_dict (dict): Configuration data, as read from the yaml
            config_dir_path (str): The path where the config files are kept. Used to
                create filenames for things like the log config and the signing key.
            data_dir_path (str): The path where the data files are kept. Used to create
                filenames for things like the database and media store.
        """
        self.invoke_all(
            "read_config",
            config_dict,
            config_dir_path=config_dir_path,
            data_dir_path=data_dir_path,
        )

    def generate_missing_files(self, config_dict, config_dir_path):
        # Ask each section to generate any ancillary files it needs.
        self.invoke_all("generate_files", config_dict, config_dir_path)
def read_config_files(config_files):
    """Read the given YAML config files and merge them into one dict.

    Keys from later files override those from earlier ones.

    Args:
        config_files (iterable[str]): A list of the config files to read
    Returns: dict
    """
    merged = {}
    for config_file in config_files:
        with open(config_file) as file_stream:
            parsed = yaml.safe_load(file_stream)
        if not isinstance(parsed, dict):
            err = "File %r is empty or doesn't parse into a key-value map. IGNORING."
            print(err % (config_file,))
            continue
        merged.update(parsed)
    # These two options are mandatory in every configuration.
    if "server_name" not in merged:
        raise ConfigError(MISSING_SERVER_NAME)
    if "report_stats" not in merged:
        raise ConfigError(
            MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" + MISSING_REPORT_STATS_SPIEL
        )
    return merged
def find_config_files(search_paths):
    """Expand a list of config search paths into a list of config file paths.

    A path that is a plain file is kept as-is; a path that is a directory is
    expanded to the "*.yaml" files it contains, in sorted order.

    Args:
        search_paths(list(str)): A list of paths to search.
    Returns:
        list(str): A list of file paths.
    """
    config_files = []
    for config_path in search_paths or []:
        if not os.path.isdir(config_path):
            config_files.append(config_path)
            continue
        # We accept specifying directories as config paths, we search
        # inside that directory for all files matching *.yaml, and then
        # we apply them in *sorted* order.
        found = []
        for entry in os.listdir(config_path):
            entry_path = os.path.join(config_path, entry)
            if not os.path.isfile(entry_path):
                err = "Found subdirectory in config directory: %r. IGNORING."
                print(err % (entry_path,))
            elif not entry.endswith(".yaml"):
                err = (
                    "Found file in config directory that does not end in "
                    "'.yaml': %r. IGNORING."
                )
                print(err % (entry_path,))
            else:
                found.append(entry_path)
        config_files.extend(sorted(found))
    return config_files
@attr.s
class ShardedWorkerHandlingConfig:
    """Algorithm for choosing which instance is responsible for handling some
    sharded work.

    For example, the federation senders use this to determine which instances
    handles sending stuff to a given destination (which is used as the `key`
    below).
    """

    instances = attr.ib(type=List[str])

    def should_handle(self, instance_name: str, key: str) -> bool:
        """Whether this instance is responsible for handling the given key."""
        # An empty instance list means some other worker handles this work.
        return bool(self.instances) and self._get_instance(key) == instance_name

    def _get_instance(self, key: str) -> str:
        """Get the instance responsible for handling the given key.

        Note: For federation sending and pushers the config for which instance
        is sending is known only to the sender instance, so we don't expose this
        method by default.
        """
        if not self.instances:
            raise Exception("Unknown worker")
        if len(self.instances) == 1:
            return self.instances[0]
        # Map the key's hash to an index. Taking the modulo introduces a tiny
        # bias, but with a 256-bit hash it is negligible.
        digest = sha256(key.encode("utf8")).digest()
        index = int.from_bytes(digest, byteorder="little") % (len(self.instances))
        return self.instances[index]
@attr.s
class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig):
    """A `ShardedWorkerHandlingConfig` variant for config options where every
    instance knows which instances are responsible for the sharded work, so
    the instance lookup can be exposed publicly.
    """

    def __attrs_post_init__(self):
        # Here an empty instance list is a configuration error, not a
        # "handled elsewhere" signal.
        if not self.instances:
            raise Exception("Got empty list of instances for shard config")

    def get_instance(self, key: str) -> str:
        """Return the instance responsible for handling the given key."""
        return self._get_instance(key)
def read_file(file_path: Any, config_path: Iterable[str]) -> str:
    """Check the given file exists, and read it into a string

    If it does not, emit an error indicating the problem

    Args:
        file_path: the file to be read
        config_path: where in the configuration file_path came from, so that a useful
           error can be emitted if it does not exist.

    Returns:
        content of the file.

    Raises:
        ConfigError if there is a problem reading the file.
    """
    if not isinstance(file_path, str):
        # Interpolate the offending value into the message; previously the
        # "%r" placeholder was passed through uninterpolated, so users saw
        # the literal string "%r is not a string".
        raise ConfigError("%r is not a string" % (file_path,), config_path)

    try:
        # stat first so a missing file raises before we try to open it.
        os.stat(file_path)
        with open(file_path) as file_stream:
            return file_stream.read()
    except OSError as e:
        raise ConfigError("Error accessing file %r" % (file_path,), config_path) from e
# Explicit public API of this module; anything else is an implementation
# detail.
__all__ = [
    "Config",
    "RootConfig",
    "ShardedWorkerHandlingConfig",
    "RoutableShardedWorkerHandlingConfig",
    "read_file",
]
| apache-2.0 |
xaxa89/mitmproxy | mitmproxy/contrib/kaitaistruct/gif.py | 3 | 10294 | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
# The source was png.ksy from here - https://github.com/kaitai-io/kaitai_struct_formats/blob/562154250bea0081fed4e232751b934bc270a0c7/image/gif.ksy
import array
import struct
import zlib
from enum import Enum
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
class Gif(KaitaiStruct):
    """Kaitai-Struct parser for the GIF image format.

    NOTE: generated code -- edit the source .ksy and regenerate with
    kaitai-struct-compiler rather than changing this file by hand.
    """

    class BlockType(Enum):
        extension = 33
        local_image_descriptor = 44
        end_of_file = 59

    class ExtensionLabel(Enum):
        graphic_control = 249
        comment = 254
        application = 255

    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self.header = self._root.Header(self._io, self, self._root)
        self.logical_screen_descriptor = self._root.LogicalScreenDescriptor(self._io, self, self._root)
        if self.logical_screen_descriptor.has_color_table:
            # The global color table is parsed from its own sub-stream.
            self._raw_global_color_table = self._io.read_bytes((self.logical_screen_descriptor.color_table_size * 3))
            io = KaitaiStream(BytesIO(self._raw_global_color_table))
            self.global_color_table = self._root.ColorTable(io, self, self._root)
        self.blocks = []
        while not self._io.is_eof():
            self.blocks.append(self._root.Block(self._io, self, self._root))

    class ImageData(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.lzw_min_code_size = self._io.read_u1()
            self.subblocks = self._root.Subblocks(self._io, self, self._root)

    class ColorTableEntry(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.red = self._io.read_u1()
            self.green = self._io.read_u1()
            self.blue = self._io.read_u1()

    class LogicalScreenDescriptor(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.screen_width = self._io.read_u2le()
            self.screen_height = self._io.read_u2le()
            self.flags = self._io.read_u1()
            self.bg_color_index = self._io.read_u1()
            self.pixel_aspect_ratio = self._io.read_u1()

        @property
        def has_color_table(self):
            # Lazily computed, memoized in _m_has_color_table (generated idiom).
            if hasattr(self, '_m_has_color_table'):
                return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None
            self._m_has_color_table = (self.flags & 128) != 0
            return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None

        @property
        def color_table_size(self):
            if hasattr(self, '_m_color_table_size'):
                return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None
            self._m_color_table_size = (2 << (self.flags & 7))
            return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None

    class LocalImageDescriptor(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.left = self._io.read_u2le()
            self.top = self._io.read_u2le()
            self.width = self._io.read_u2le()
            self.height = self._io.read_u2le()
            self.flags = self._io.read_u1()
            if self.has_color_table:
                self._raw_local_color_table = self._io.read_bytes((self.color_table_size * 3))
                io = KaitaiStream(BytesIO(self._raw_local_color_table))
                self.local_color_table = self._root.ColorTable(io, self, self._root)
            self.image_data = self._root.ImageData(self._io, self, self._root)

        @property
        def has_color_table(self):
            if hasattr(self, '_m_has_color_table'):
                return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None
            self._m_has_color_table = (self.flags & 128) != 0
            return self._m_has_color_table if hasattr(self, '_m_has_color_table') else None

        @property
        def has_interlace(self):
            if hasattr(self, '_m_has_interlace'):
                return self._m_has_interlace if hasattr(self, '_m_has_interlace') else None
            self._m_has_interlace = (self.flags & 64) != 0
            return self._m_has_interlace if hasattr(self, '_m_has_interlace') else None

        @property
        def has_sorted_color_table(self):
            if hasattr(self, '_m_has_sorted_color_table'):
                return self._m_has_sorted_color_table if hasattr(self, '_m_has_sorted_color_table') else None
            self._m_has_sorted_color_table = (self.flags & 32) != 0
            return self._m_has_sorted_color_table if hasattr(self, '_m_has_sorted_color_table') else None

        @property
        def color_table_size(self):
            if hasattr(self, '_m_color_table_size'):
                return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None
            self._m_color_table_size = (2 << (self.flags & 7))
            return self._m_color_table_size if hasattr(self, '_m_color_table_size') else None

    class Block(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.block_type = self._root.BlockType(self._io.read_u1())
            # Dispatch on the block type byte; unknown types leave `body` unset.
            _on = self.block_type
            if _on == self._root.BlockType.extension:
                self.body = self._root.Extension(self._io, self, self._root)
            elif _on == self._root.BlockType.local_image_descriptor:
                self.body = self._root.LocalImageDescriptor(self._io, self, self._root)

    class ColorTable(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.entries = []
            while not self._io.is_eof():
                self.entries.append(self._root.ColorTableEntry(self._io, self, self._root))

    class Header(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            # 71, 73, 70 == b"GIF"
            self.magic = self._io.ensure_fixed_contents(struct.pack('3b', 71, 73, 70))
            self.version = self._io.read_bytes(3)

    class ExtGraphicControl(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.block_size = self._io.ensure_fixed_contents(struct.pack('1b', 4))
            self.flags = self._io.read_u1()
            self.delay_time = self._io.read_u2le()
            self.transparent_idx = self._io.read_u1()
            self.terminator = self._io.ensure_fixed_contents(struct.pack('1b', 0))

        @property
        def transparent_color_flag(self):
            if hasattr(self, '_m_transparent_color_flag'):
                return self._m_transparent_color_flag if hasattr(self, '_m_transparent_color_flag') else None
            self._m_transparent_color_flag = (self.flags & 1) != 0
            return self._m_transparent_color_flag if hasattr(self, '_m_transparent_color_flag') else None

        @property
        def user_input_flag(self):
            if hasattr(self, '_m_user_input_flag'):
                return self._m_user_input_flag if hasattr(self, '_m_user_input_flag') else None
            self._m_user_input_flag = (self.flags & 2) != 0
            return self._m_user_input_flag if hasattr(self, '_m_user_input_flag') else None

    class Subblock(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.num_bytes = self._io.read_u1()
            self.bytes = self._io.read_bytes(self.num_bytes)

    class ExtApplication(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.application_id = self._root.Subblock(self._io, self, self._root)
            self.subblocks = []
            # A zero-length subblock terminates the sequence (and is kept).
            while True:
                _ = self._root.Subblock(self._io, self, self._root)
                self.subblocks.append(_)
                if _.num_bytes == 0:
                    break

    class Subblocks(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.entries = []
            while True:
                _ = self._root.Subblock(self._io, self, self._root)
                self.entries.append(_)
                if _.num_bytes == 0:
                    break

    class Extension(KaitaiStruct):
        def __init__(self, _io, _parent=None, _root=None):
            self._io = _io
            self._parent = _parent
            self._root = _root if _root else self
            self.label = self._root.ExtensionLabel(self._io.read_u1())
            _on = self.label
            if _on == self._root.ExtensionLabel.application:
                self.body = self._root.ExtApplication(self._io, self, self._root)
            elif _on == self._root.ExtensionLabel.comment:
                self.body = self._root.Subblocks(self._io, self, self._root)
            elif _on == self._root.ExtensionLabel.graphic_control:
                self.body = self._root.ExtGraphicControl(self._io, self, self._root)
            else:
                self.body = self._root.Subblocks(self._io, self, self._root)
| mit |
40123247/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/copy.py | 628 | 8905 | """Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copyreg import dispatch_table
import builtins
class Error(Exception):
    """Raised when an object cannot be copied (shallowly or deeply)."""
    pass

error = Error # backward compatibility

# module org.python.core does not exist in Brython, so lets just ignore
# this import request.
#try:
#    from org.python.core import PyStringMap
#except ImportError:
#    PyStringMap = None
PyStringMap = None

__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
    """Shallow copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.
    """
    cls = type(x)

    # Fast path: a copier registered in the type dispatch table.
    dispatcher = _copy_dispatch.get(cls)
    if dispatcher:
        return dispatcher(x)

    # A class-defined __copy__ hook wins over the reduce protocols.
    hook = getattr(cls, "__copy__", None)
    if hook:
        return hook(x)

    # Fall back to the pickle reduce protocol to describe the object.
    reductor = dispatch_table.get(cls)
    if reductor:
        rv = reductor(x)
    else:
        reduce_ex = getattr(x, "__reduce_ex__", None)
        if reduce_ex:
            rv = reduce_ex(2)
        else:
            reduce_hook = getattr(x, "__reduce__", None)
            if not reduce_hook:
                raise Error("un(shallow)copyable object of type %s" % cls)
            rv = reduce_hook()

    return _reconstruct(x, rv, 0)
# Registry mapping a type to its shallow-copy function; `d` is a short-lived
# alias used only while populating it.
_copy_dispatch = d = {}

def _copy_immutable(x):
    # Immutable objects are their own shallow copy.
    return x
for t in (type(None), int, float, bool, str, tuple,
          frozenset, type, range,
          types.BuiltinFunctionType, type(Ellipsis),
          types.FunctionType, weakref.ref):
    d[t] = _copy_immutable
t = getattr(types, "CodeType", None)
if t is not None:
    d[t] = _copy_immutable
# `complex`/`unicode` may be missing on some builds/implementations.
for name in ("complex", "unicode"):
    t = getattr(builtins, name, None)
    if t is not None:
        d[t] = _copy_immutable

def _copy_with_constructor(x):
    # Containers whose constructor accepts an iterable of their contents.
    return type(x)(x)
for t in (list, dict, set):
    d[t] = _copy_with_constructor

def _copy_with_copy_method(x):
    return x.copy()
if PyStringMap is not None:
    d[PyStringMap] = _copy_with_copy_method

del d
def deepcopy(x, memo=None, _nil=[]):
    """Deep copy operation on arbitrary Python objects.

    See the module's __doc__ string for more info.

    `_nil` is a deliberate unique sentinel (the mutable default is
    intentional): it lets us distinguish "not in memo" from a memoized None.
    """
    if memo is None:
        memo = {}
    d = id(x)
    y = memo.get(d, _nil)
    if y is not _nil:
        # Already copied during this pass (handles recursive objects).
        return y
    cls = type(x)
    copier = _deepcopy_dispatch.get(cls)
    if copier:
        y = copier(x, memo)
    else:
        try:
            issc = issubclass(cls, type)
        except TypeError: # cls is not a class (old Boost; see SF #502085)
            issc = 0
        if issc:
            # Classes themselves are treated as atomic (not copied).
            y = _deepcopy_atomic(x, memo)
        else:
            copier = getattr(x, "__deepcopy__", None)
            if copier:
                y = copier(memo)
            else:
                # Fall back to the pickle reduce protocol.
                reductor = dispatch_table.get(cls)
                if reductor:
                    rv = reductor(x)
                else:
                    reductor = getattr(x, "__reduce_ex__", None)
                    if reductor:
                        rv = reductor(2)
                    else:
                        reductor = getattr(x, "__reduce__", None)
                        if reductor:
                            rv = reductor()
                        else:
                            raise Error(
                                "un(deep)copyable object of type %s" % cls)
                y = _reconstruct(x, rv, 1, memo)
    # If is its own copy, don't memoize.
    if y is not x:
        memo[d] = y
        _keep_alive(x, memo) # Make sure x lives at least as long as d
    return y
# Registry mapping a type to its deep-copy function; `d` is a temporary alias.
_deepcopy_dispatch = d = {}

def _deepcopy_atomic(x, memo):
    # Atomic/immutable objects are returned as-is.
    return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
    d[complex] = _deepcopy_atomic
except NameError:
    pass
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
try:
    d[types.CodeType] = _deepcopy_atomic
except AttributeError:
    pass
d[type] = _deepcopy_atomic
d[range] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
def _deepcopy_list(x, memo):
    y = []
    # Memoize BEFORE recursing so self-referencing lists terminate.
    memo[id(x)] = y
    for a in x:
        y.append(deepcopy(a, memo))
    return y
d[list] = _deepcopy_list

def _deepcopy_tuple(x, memo):
    y = []
    for a in x:
        y.append(deepcopy(a, memo))
    # We're not going to put the tuple in the memo, but it's still important we
    # check for it, in case the tuple contains recursive mutable structures.
    try:
        return memo[id(x)]
    except KeyError:
        pass
    # If no element actually changed, reuse the original tuple.
    for i in range(len(x)):
        if x[i] is not y[i]:
            y = tuple(y)
            break
    else:
        y = x
    return y
d[tuple] = _deepcopy_tuple

def _deepcopy_dict(x, memo):
    y = {}
    # Memoize before recursing, as for lists.
    memo[id(x)] = y
    for key, value in x.items():
        y[deepcopy(key, memo)] = deepcopy(value, memo)
    return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
    d[PyStringMap] = _deepcopy_dict

def _deepcopy_method(x, memo): # Copy instance methods
    # Rebind the same function to a deep copy of the bound instance.
    return type(x)(x.__func__, deepcopy(x.__self__, memo))
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _reconstruct(x, info, deep, memo=None):
    """Rebuild a copy of `x` from a pickle-style reduce tuple `info`.

    `info` is (callable, args[, state[, listiter[, dictiter]]]); `deep`
    selects deep (1) vs shallow (0) copying of the components.
    """
    if isinstance(info, str):
        # A string reduce value means "copy as atomic".
        return x
    assert isinstance(info, tuple)
    if memo is None:
        memo = {}
    n = len(info)
    assert n in (2, 3, 4, 5)
    callable, args = info[:2]
    if n > 2:
        state = info[2]
    else:
        state = {}
    if n > 3:
        listiter = info[3]
    else:
        listiter = None
    if n > 4:
        dictiter = info[4]
    else:
        dictiter = None
    if deep:
        args = deepcopy(args, memo)
    y = callable(*args)
    # Memoize before restoring state so recursive references resolve to y.
    memo[id(x)] = y
    if state:
        if deep:
            state = deepcopy(state, memo)
        if hasattr(y, '__setstate__'):
            y.__setstate__(state)
        else:
            # A 2-tuple state is (instance dict, slot dict).
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            else:
                slotstate = None
            if state is not None:
                y.__dict__.update(state)
            if slotstate is not None:
                for key, value in slotstate.items():
                    setattr(y, key, value)
    if listiter is not None:
        for item in listiter:
            if deep:
                item = deepcopy(item, memo)
            y.append(item)
    if dictiter is not None:
        for key, value in dictiter:
            if deep:
                key = deepcopy(key, memo)
                value = deepcopy(value, memo)
            y[key] = value
    return y
# Clean up module namespace: the dispatch alias and the types import are
# implementation details and should not be exported.
del d
del types

# Helper for instance creation without calling __init__
class _EmptyClass:
    pass
| gpl-3.0 |
sikmir/QGIS | python/user.py | 15 | 3310 | # -*- coding: utf-8 -*-
"""
***************************************************************************
user.py
---------------------
Date : January 2015
Copyright : (C) 2015 by Nathan Woodrow
Email : woodrow dot nathan at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nathan Woodrow'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Nathan Woodrow'
import os
import sys
import glob
import traceback
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import Qgis, QgsApplication, QgsMessageLog
def load_user_expressions(path):
    """
    Load all user expressions from the given paths
    """
    # Loop over all .py files in `path` and import them.
    modules = glob.glob(path + "/*.py")
    names = [os.path.basename(f)[:-3] for f in modules]
    for name in names:
        if name == "__init__":
            continue
        # As user expression functions should be registered with qgsfunction
        # just importing the file is enough to get it to load the functions into QGIS
        try:
            __import__("expressions.{0}".format(name), locals(), globals())
        except Exception:
            # Previously a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; only genuine errors should be logged here.
            error = traceback.format_exc()
            msgtitle = QCoreApplication.translate("UserExpressions", "User expressions")
            msg = QCoreApplication.translate("UserExpressions", "The user expression {0} is not valid").format(name)
            QgsMessageLog.logMessage(msg + "\n" + error, msgtitle, Qgis.Warning)
# Module-level bootstrap: make sure the per-user python/expressions folder
# exists, is importable, and has its user expression files loaded.
userpythonhome = os.path.join(QgsApplication.qgisSettingsDirPath(), "python")
expressionspath = os.path.join(userpythonhome, "expressions")

sys.path.append(userpythonhome)

if not os.path.exists(expressionspath):
    os.makedirs(expressionspath)

# An empty __init__.py turns the folder into an importable package.
initfile = os.path.join(expressionspath, "__init__.py")
if not os.path.exists(initfile):
    open(initfile, "w").close()

# Starter file content offered to users creating a new expression function.
template = """from qgis.core import *
from qgis.gui import *

@qgsfunction(args='auto', group='Custom')
def my_sum(value1, value2, feature, parent):
    \"\"\"
    Calculates the sum of the two parameters value1 and value2.
    <h2>Example usage:</h2>
    <ul>
      <li>my_sum(5, 8) -> 13</li>
      <li>my_sum(\"field1\", \"field2\") -> 42</li>
    </ul>
    \"\"\"
    return value1 + value2
"""

default_expression_template = template

try:
    import expressions

    expressions.load = load_user_expressions
    expressions.load(expressionspath)
    expressions.template = template
except ImportError:
    # We get a import error and crash for some reason even if we make the expressions package
    # TODO Fix the crash on first load with no expressions folder
    # But for now it's not the end of the world if it doesn't load the first time
    pass
| gpl-2.0 |
ddico/server-tools | auth_dynamic_groups/model/res_users.py | 5 | 2423 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.models import Model
from openerp import SUPERUSER_ID
class ResUsers(Model):
    """Recompute membership of "dynamic" groups whenever a user logs in."""

    _inherit = 'res.users'

    def _login(self, db, login, password):
        # On every successful login (except the superuser's) refresh the
        # user's dynamic group membership.
        uid = super(ResUsers, self)._login(db, login, password)
        if uid and uid != SUPERUSER_ID:
            self.update_dynamic_groups(uid, db)
        return uid

    def update_dynamic_groups(self, uid, db):
        """Re-evaluate every dynamic group's condition for `uid` and rewrite
        the user's membership rows accordingly.

        Uses a dedicated, non-serialized cursor so the changes can be
        committed independently of the login transaction.
        """
        cr = self.pool._db.cursor(serialized=False)
        groups_obj = self.pool.get('res.groups')
        try:
            dynamic_groups = groups_obj.browse(
                cr, SUPERUSER_ID, groups_obj.search(
                    cr, SUPERUSER_ID, [('is_dynamic', '=', True)]))
            # Drop all current dynamic memberships, then re-add the ones
            # whose condition evaluates true for this user.
            if dynamic_groups:
                cr.execute(
                    'delete from res_groups_users_rel '
                    'where uid=%s and gid in %s',
                    (uid, tuple(dynamic_groups.ids))
                )
            for dynamic_group in dynamic_groups:
                if dynamic_group.eval_dynamic_group_condition(uid=uid):
                    cr.execute(
                        'insert into res_groups_users_rel (uid, gid) values '
                        '(%s, %s)',
                        (uid, dynamic_group.id))
            # groups_id was changed behind the ORM's back; drop cached value.
            self.invalidate_cache(cr, uid, ['groups_id'], [uid])
            # we really need a new transaction
            # pylint: disable=invalid-commit
            cr.commit()
        finally:
            cr.close()
| agpl-3.0 |
ttm/oscEmRede | venv/lib/python2.7/site-packages/networkx/algorithms/tests/test_mst.py | 44 | 4871 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestMST:
    """Tests for networkx minimum spanning tree algorithms (Kruskal & Prim)."""

    def setUp(self):
        # example from Wikipedia: http://en.wikipedia.org/wiki/Kruskal's_algorithm
        G=nx.Graph()
        edgelist = [(0,3,[('weight',5)]),
                    (0,1,[('weight',7)]),
                    (1,3,[('weight',9)]),
                    (1,2,[('weight',8)]),
                    (1,4,[('weight',7)]),
                    (3,4,[('weight',15)]),
                    (3,5,[('weight',6)]),
                    (2,4,[('weight',5)]),
                    (4,5,[('weight',8)]),
                    (4,6,[('weight',9)]),
                    (5,6,[('weight',11)])]
        G.add_edges_from(edgelist)
        self.G=G
        tree_edgelist = [(0,1,{'weight':7}),
                         (0,3,{'weight':5}),
                         (3,5,{'weight':6}),
                         (1,4,{'weight':7}),
                         (4,2,{'weight':5}),
                         (4,6,{'weight':9})]
        # Normalize to sorted (u, v, data) triples so comparisons are
        # independent of edge orientation and order.
        self.tree_edgelist=sorted((sorted((u, v))[0], sorted((u, v))[1], d)
                                  for u,v,d in tree_edgelist)

    def test_mst(self):
        T=nx.minimum_spanning_tree(self.G)
        assert_equal(T.edges(data=True),self.tree_edgelist)

    def test_mst_edges(self):
        edgelist=sorted(nx.minimum_spanning_edges(self.G))
        assert_equal(edgelist,self.tree_edgelist)

    def test_mst_disconnected(self):
        # A disconnected graph yields a spanning forest.
        G=nx.Graph()
        G.add_path([1,2])
        G.add_path([10,20])
        T=nx.minimum_spanning_tree(G)
        assert_equal(sorted(T.edges()),[(1, 2), (20, 10)])
        assert_equal(sorted(T.nodes()),[1, 2, 10, 20])

    def test_mst_isolate(self):
        # Isolated nodes are preserved in the result.
        G=nx.Graph()
        G.add_nodes_from([1,2])
        T=nx.minimum_spanning_tree(G)
        assert_equal(sorted(T.nodes()),[1, 2])
        assert_equal(sorted(T.edges()),[])

    def test_mst_attributes(self):
        # Graph/node/edge attributes must be carried over to the tree.
        G=nx.Graph()
        G.add_edge(1,2,weight=1,color='red',distance=7)
        G.add_edge(2,3,weight=1,color='green',distance=2)
        G.add_edge(1,3,weight=10,color='blue',distance=1)
        G.add_node(13,color='purple')
        G.graph['foo']='bar'
        T=nx.minimum_spanning_tree(G)
        assert_equal(T.graph,G.graph)
        assert_equal(T.node[13],G.node[13])
        assert_equal(T.edge[1][2],G.edge[1][2])

    def test_mst_edges_specify_weight(self):
        # The `weight` keyword selects which attribute drives the MST.
        G=nx.Graph()
        G.add_edge(1,2,weight=1,color='red',distance=7)
        G.add_edge(1,3,weight=30,color='blue',distance=1)
        G.add_edge(2,3,weight=1,color='green',distance=1)
        G.add_node(13,color='purple')
        G.graph['foo']='bar'
        T=nx.minimum_spanning_tree(G)
        assert_equal(sorted(T.nodes()),[1,2,3,13])
        assert_equal(sorted(T.edges()),[(1,2),(2,3)])
        T=nx.minimum_spanning_tree(G,weight='distance')
        assert_equal(sorted(T.edges()),[(1,3),(2,3)])
        assert_equal(sorted(T.nodes()),[1,2,3,13])

    def test_prim_mst(self):
        T=nx.prim_mst(self.G)
        assert_equal(T.edges(data=True),self.tree_edgelist)

    def test_prim_mst_edges(self):
        edgelist=sorted(nx.prim_mst_edges(self.G))
        edgelist=sorted((sorted((u, v))[0], sorted((u, v))[1], d)
                        for u,v,d in edgelist)
        assert_equal(edgelist,self.tree_edgelist)

    def test_prim_mst_disconnected(self):
        G=nx.Graph()
        G.add_path([1,2])
        G.add_path([10,20])
        T=nx.prim_mst(G)
        assert_equal(sorted(T.edges()),[(1, 2), (20, 10)])
        assert_equal(sorted(T.nodes()),[1, 2, 10, 20])

    def test_prim_mst_isolate(self):
        G=nx.Graph()
        G.add_nodes_from([1,2])
        T=nx.prim_mst(G)
        assert_equal(sorted(T.nodes()),[1, 2])
        assert_equal(sorted(T.edges()),[])

    def test_prim_mst_attributes(self):
        G=nx.Graph()
        G.add_edge(1,2,weight=1,color='red',distance=7)
        G.add_edge(2,3,weight=1,color='green',distance=2)
        G.add_edge(1,3,weight=10,color='blue',distance=1)
        G.add_node(13,color='purple')
        G.graph['foo']='bar'
        T=nx.prim_mst(G)
        assert_equal(T.graph,G.graph)
        assert_equal(T.node[13],G.node[13])
        assert_equal(T.edge[1][2],G.edge[1][2])

    def test_prim_mst_edges_specify_weight(self):
        G=nx.Graph()
        G.add_edge(1,2,weight=1,color='red',distance=7)
        G.add_edge(1,3,weight=30,color='blue',distance=1)
        G.add_edge(2,3,weight=1,color='green',distance=1)
        G.add_node(13,color='purple')
        G.graph['foo']='bar'
        T=nx.prim_mst(G)
        assert_equal(sorted(T.nodes()),[1,2,3,13])
        assert_equal(sorted(T.edges()),[(1,2),(2,3)])
        T=nx.prim_mst(G,weight='distance')
        assert_equal(sorted(T.edges()),[(1,3),(2,3)])
        assert_equal(sorted(T.nodes()),[1,2,3,13])
| gpl-3.0 |
superdesk/superdesk-core | superdesk/publish/formatters/idml_formatter/package/stories/story_table.py | 2 | 5330 | from lxml import etree
from .story import Story
class StoryTable(Story):
    """
    Story which represents `table` html tag.
    """

    # Defaults merged into the IDML <Table> element's attributes.
    TABLE_DEFAULTS = {
        "HeaderRowCount": "0",
        "FooterRowCount": "0",
        "AppliedTableStyle": "TableStyle/$ID/[Basic Table]",
        "TableDirection": "LeftToRightDirection",
    }
    # Defaults merged into each <Cell> element's attributes.
    CELL_DEFAULTS = {"CellType": "TextTypeCell", "AppliedCellStyle": "CellStyle/$ID/[None]"}

    def __init__(self, self_id, element, inner_page_width, attributes=None, markup_tag=None):
        # inner_page_width: usable page width; columns share it equally.
        self._inner_page_width = inner_page_width
        super().__init__(self_id, element, attributes, markup_tag)

    def _add_story(self):
        """Build the <Story> element wrapping the generated <Table>."""
        # merge Story attributes
        story_attributes = self.merge_attributes(self.STORY_DEFAULTS, self._attributes.get("Story", {}))
        story_attributes.update({"Self": self.self_id})
        # Story
        story = etree.SubElement(self._etree, "Story", attrib=story_attributes)
        if self._markup_tag:
            # XMLElement to tag a story
            table_container = etree.SubElement(
                story,
                "XMLElement",
                attrib={
                    "Self": "{}_{}".format(self.self_id, self._markup_tag.lower()),
                    "XMLContent": self.self_id,
                    "MarkupTag": "XMLTag/{}".format(self._markup_tag),
                },
            )
        else:
            table_container = story
        # create Table and insert it into table_container
        table_container.insert(1, self._create_table())
        # StoryPreference
        etree.SubElement(
            story,
            "StoryPreference",
            attrib=self.merge_attributes(self.STORYPREFERENCE_DEFAULTS, self._attributes.get("StoryPreference", {})),
        )
        return story

    def _create_table(self):
        """Translate the html <table> element into an IDML <Table> tree."""
        table_data = {}
        table_data["cells"] = self._element.xpath(".//td")
        table_data["rows_count"] = int(self._element.xpath("count(.//tr)"))
        # NOTE(review): assumes every row has the same number of <td> cells —
        # the column count is total cells divided by row count.
        table_data["columns_count"] = int(self._element.xpath("count(.//td)") / table_data["rows_count"])
        # Table
        table = etree.Element(
            "Table", attrib=self.merge_attributes(self.TABLE_DEFAULTS, self._attributes.get("Table", {}))
        )
        table.set("Self", "{}_table".format(self.self_id))
        table.set("BodyRowCount", str(table_data["rows_count"]))
        table.set("ColumnCount", str(table_data["columns_count"]))
        # Row(s)
        for i in range(table_data["rows_count"]):
            etree.SubElement(table, "Row", attrib={"Self": "{}_table_row{}".format(self.self_id, i), "Name": str(i)})
        # Column(s) share the available width equally.
        column_width = self._inner_page_width / table_data["columns_count"]
        for i in range(table_data["columns_count"]):
            etree.SubElement(
                table,
                "Column",
                attrib={
                    "Self": "{}_table_column{}".format(self.self_id, i),
                    "Name": str(i),
                    "SingleColumnWidth": str(column_width),
                },
            )
        # Cells
        cell_counter = 0
        for r in range(table_data["rows_count"]):
            for c in range(table_data["columns_count"]):
                # Cell
                cell = etree.SubElement(
                    table,
                    "Cell",
                    attrib=self.merge_attributes(
                        self.CELL_DEFAULTS,
                        {
                            "Self": "{}_table_i{}".format(self.self_id, cell_counter),
                            "Name": "{cell}:{row}".format(cell=c, row=r),
                        },
                    ),
                )
                # CharacterStyleRange(s) + <Br />
                for p in table_data["cells"][cell_counter].xpath(".//p"):
                    if cell.find("CharacterStyleRange") is not None:
                        etree.SubElement(cell, "Br")
                    cell[:] += self._handle_inline_tags(p)
                cell_counter += 1
        return table

    @property
    def length(self):
        # Tables have no meaningful text length; height is estimated via
        # guess_height instead.
        raise NotImplementedError

    @staticmethod
    def guess_height(story, inner_width):
        """Estimate the rendered height of a table story.

        Heuristic: each row's height is driven by its longest cell text
        relative to the column width, with a 20-unit minimum per row.
        """
        table_height = 0
        table_element = story._etree.xpath(".//Table")[0]
        column_count = int(table_element.get("ColumnCount"))
        row_count = int(table_element.get("BodyRowCount"))
        for row_number in range(row_count):
            highest_cell_len = None
            for column_number in range(column_count):
                try:
                    cell = story._etree.xpath('.//Cell[@Name="{}:{}"]'.format(column_number, row_number))[0]
                except IndexError:
                    continue
                else:
                    current_cell_len = 0
                    for content in cell.xpath(".//Content"):
                        current_cell_len += len(" ".join(etree.XPath(".//text()")(content)).strip())
                    if not highest_cell_len or current_cell_len > highest_cell_len:
                        highest_cell_len = current_cell_len
            # NOTE(review): if no cell was found for this row,
            # highest_cell_len stays None and this arithmetic would raise —
            # presumably every row has at least one cell; confirm upstream.
            row_height = highest_cell_len / (inner_width / column_count) * 90 + 10
            if row_height < 20:
                row_height = 20
            table_height += row_height
        return table_height
| agpl-3.0 |
gangadharkadam/shfr | frappe/model/naming.py | 11 | 5133 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now_datetime, cint
def set_new_name(doc):
    """Assign `doc.name` according to the DocType's autoname rule.

    Precedence: amendment naming, then the document's own `autoname`
    method, then the DocType `autoname` property, with a random hash as
    the final fallback. (Python 2 file: note the old-style raise below.)
    """
    if doc.name:
        return
    # amendments
    if getattr(doc, "amended_from", None):
        return _get_amended_name(doc)
    elif hasattr(doc, "run_method"):
        doc.run_method("autoname")
    if doc.name:
        return
    autoname = frappe.get_meta(doc.doctype).autoname
    # based on a field
    if autoname:
        if autoname.startswith('field:'):
            n = doc.get(autoname[6:])
            if not n:
                raise Exception, 'Name is required'
            doc.name = n.strip()
        elif autoname.startswith("naming_series:"):
            if not doc.naming_series:
                doc.naming_series = get_default_naming_series(doc.doctype)
            if not doc.naming_series:
                frappe.msgprint(frappe._("Naming Series mandatory"), raise_exception=True)
            doc.name = make_autoname(doc.naming_series+'.#####')
        # call the method!
        elif autoname=='Prompt':
            # set from __newname in save.py
            if not doc.name:
                frappe.throw(frappe._("Name not set via Prompt"))
        else:
            doc.name = make_autoname(autoname, doc.doctype)
    # default name for table
    elif doc.meta.istable:
        doc.name = make_autoname('hash', doc.doctype)
    elif doc.meta.issingle:
        # Single DocTypes are named after the DocType itself.
        doc.name = doc.doctype
    # unable to determine a name, use global series
    if not doc.name:
        doc.name = make_autoname('hash', doc.doctype)
    doc.name = doc.name.strip()
    validate_name(doc.doctype, doc.name)
def make_autoname(key, doctype=''):
    """
    Creates an autoname from the given key:

    **Autoname rules:**

        * The key is separated by '.'
        * '####' represents a series. The string before this part becomes the prefix:
          Example: ABC.#### creates a series ABC0001, ABC0002 etc
        * 'MM' represents the current month
        * 'YY' and 'YYYY' represent the current year

    *Example:*

        * DE/./.YY./.MM./.##### will create a series like
          DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
    """
    if key=="hash":
        return frappe.generate_hash(doctype)
    if not "#" in key:
        # Default to a 5-digit series suffix.
        key = key + ".#####"
    n = ''
    l = key.split('.')
    series_set = False
    today = now_datetime()
    for e in l:
        en = ''
        if e.startswith('#'):
            # Only the first '#'-part becomes the counter; any later
            # '#'-parts are dropped.
            if not series_set:
                digits = len(e)
                en = getseries(n, digits, doctype)
                series_set = True
        elif e=='YY':
            en = today.strftime('%y')
        elif e=='MM':
            en = today.strftime('%m')
        elif e=='DD':
            en = today.strftime("%d")
        elif e=='YYYY':
            en = today.strftime('%Y')
        else: en = e
        n+=en
    return n
def getseries(key, digits, doctype=''):
    """Return the next value of series `key`, zero-padded to `digits`.

    Uses `select ... for update` so concurrent transactions serialize on
    the series row.
    """
    # series created ?
    current = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (key,))
    if current and current[0][0] is not None:
        current = current[0][0]
        # yes, update it
        frappe.db.sql("update tabSeries set current = current+1 where name=%s", (key,))
        current = cint(current) + 1
    else:
        # no, create it
        frappe.db.sql("insert into tabSeries (name, current) values (%s, 1)", (key,))
        current = 1
    return ('%0'+str(digits)+'d') % current
def revert_series_if_last(key, name):
    """Roll the series counter for *key* back by one, but only when
    *name* carries the latest number that was handed out (so deleting
    the newest document lets its number be reused)."""
    if ".#" in key:
        prefix, tail = key.rsplit(".", 1)
        # the final segment must actually be the '#' series placeholder
        if "#" not in tail:
            return
    else:
        prefix = key

    # numeric part of the name, i.e. whatever follows the prefix
    serial = cint(name.replace(prefix, ""))

    row = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (prefix,))
    if row and row[0][0] == serial:
        frappe.db.sql("update tabSeries set current=current-1 where name=%s", prefix)
def get_default_naming_series(doctype):
    """get default value for `naming_series` property"""
    options = frappe.get_meta(doctype).get_field("naming_series").options or ""
    if not options:
        return None
    choices = options.split("\n")
    # first non-blank line wins (the first line may be an empty default)
    return choices[0] or choices[1]
def validate_name(doctype, name, case=None, merge=False):
    """Validate (and normalize) *name* for *doctype*; returns the
    normalized name.

    NOTE(review): the title/upper-cased and stripped value only takes
    effect if the caller uses the return value; the call site above
    appears to discard it -- confirm intended.  *merge* is unused here.
    """
    if not name: return 'No Name Specified for %s' % doctype
    # 'New <DocType>' is the placeholder name of an unsaved document
    if name.startswith('New '+doctype):
        frappe.throw(_('There were some errors setting the name, please contact the administrator'), frappe.NameError)
    # optional case normalization
    if case=='Title Case': name = name.title()
    if case=='UPPER CASE': name = name.upper()
    name = name.strip()

    # a non-single document must not be named after its own doctype
    if not frappe.get_meta(doctype).get("issingle") and doctype == name:
        frappe.throw(_("Name of {0} cannot be {1}").format(doctype, name), frappe.NameError)

    return name
def _get_amended_name(doc):
    """Set and return the name of an amended document: '<original>-<n>'."""
    suffix = 1
    prefix = doc.amended_from

    # if the source document was itself an amendment, strip its trailing
    # counter and continue the sequence from there
    if frappe.db.get_value(doc.doctype, doc.amended_from, "amended_from"):
        pieces = doc.amended_from.split('-')
        suffix = cint(pieces[-1]) + 1
        prefix = '-'.join(pieces[:-1])  # everything before the last hyphen

    doc.name = prefix + '-' + str(suffix)
    return doc.name
def append_number_if_name_exists(doc):
    """If doc.name is already taken, rename to 'name-<n>' where n is one
    more than the highest existing numeric suffix (or 1 if none exists)."""
    if frappe.db.exists(doc.doctype, doc.name):
        # Pass doc.name as a bound query parameter instead of interpolating
        # it into the SQL string (injection risk); only the table name,
        # which cannot be parameterized, is formatted in.
        last = frappe.db.sql("""select name from `tab{}`
            where name regexp %s
            order by name desc limit 1""".format(doc.doctype),
            (doc.name + "-[[:digit:]]+",))

        if last:
            # continue from the highest existing suffix
            count = str(cint(last[0][0].rsplit("-", 1)[1]) + 1)
        else:
            count = "1"

        doc.name = "{0}-{1}".format(doc.name, count)
| mit |
dumbbell/virt-manager | src/virtManager/nodedev.py | 3 | 1563 | #
# Copyright (C) 2011 Red Hat, Inc.
# Copyright (C) 2011 Cole Robinson <crobinso@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import virtinst
from virtManager.libvirtobject import vmmLibvirtObject
class vmmNodeDevice(vmmLibvirtObject):
    """virt-manager wrapper around a libvirt node-device object.

    Thin adapter: the XML description comes from the libvirt backend and
    is parsed (once, lazily cached) into a virtinst object.
    """
    def __init__(self, conn, backend, name):
        vmmLibvirtObject.__init__(self, conn)
        self.name = name
        self._backend = backend        # libvirt virNodeDevice handle
        self._virtinst_obj = None      # cached parse result

        # parse eagerly so XML errors surface at construction time
        self.get_virtinst_obj()

    def _XMLDesc(self, flags):
        # delegate XML retrieval to the libvirt backend
        return self._backend.XMLDesc(flags)

    def get_name(self):
        return self.name

    def is_active(self):
        # node devices are always considered active
        return True

    def get_virtinst_obj(self):
        """Return the parsed virtinst object, parsing on first use."""
        if not self._virtinst_obj:
            self._virtinst_obj = virtinst.NodeDeviceParser.parse(
                self._backend.XMLDesc(0))
        return self._virtinst_obj

# register this wrapper type with the object framework
vmmLibvirtObject.type_register(vmmNodeDevice)
| gpl-2.0 |
buildbot/supybot | src/ircutils.py | 1 | 25631 | ###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009,2011,2015 James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Provides a great number of useful utility functions for IRC. Things to muck
around with hostmasks, set bold or color on strings, IRC-case-insensitive
dicts, a nick class to handle nicks (so comparisons and hashing and whatnot
work in an IRC-case-insensitive fashion), and numerous other things.
"""
import re
import time
import random
import string
import textwrap
from cStringIO import StringIO as sio
from . import utils
def debug(s, *args):
    """Prints a debug string. Most likely replaced by our logging debug."""
    # Python 2 print statement; args are %-interpolated into s
    print '***', s % args
# a user hostmask looks like nick!user@host, with no whitespace anywhere
userHostmaskRe = re.compile(r'^\S+!\S+@\S+$')
def isUserHostmask(s):
    """Returns whether or not the string s is a valid User hostmask."""
    return bool(userHostmaskRe.match(s))

def isServerHostmask(s):
    """s => bool
    Returns True if s is a valid server hostmask (i.e. not a user one)."""
    return not isUserHostmask(s)
def nickFromHostmask(hostmask):
    """hostmask => nick
    Returns the nick from a user hostmask."""
    assert isUserHostmask(hostmask)
    return splitHostmask(hostmask)[0]

def userFromHostmask(hostmask):
    """hostmask => user
    Returns the user from a user hostmask."""
    assert isUserHostmask(hostmask)
    return splitHostmask(hostmask)[1]

def hostFromHostmask(hostmask):
    """hostmask => host
    Returns the host from a user hostmask."""
    assert isUserHostmask(hostmask)
    return splitHostmask(hostmask)[2]

def splitHostmask(hostmask):
    """hostmask => (nick, user, host)
    Returns the nick, user, host of a user hostmask."""
    assert isUserHostmask(hostmask)
    # split on the first '!' and '@' only; user/host may not contain them
    nick, rest = hostmask.split('!', 1)
    user, host = rest.split('@', 1)
    # intern() is the Python 2 builtin (sys.intern on Python 3); hostmask
    # parts recur constantly, so interning speeds up compares/dict lookups
    return (intern(nick), intern(user), intern(host))

def joinHostmask(nick, ident, host):
    """nick, user, host => hostmask
    Joins the nick, ident, host into a user hostmask."""
    assert nick and ident and host
    return intern('%s!%s@%s' % (nick, ident, host))
# Translation table implementing RFC 1459 case folding: ASCII uppercase
# plus the characters \[]~ fold to lowercase plus |{}^ respectively.
# (string.maketrans is the Python 2 API; str.maketrans on Python 3.)
_rfc1459trans = string.maketrans(string.ascii_uppercase + r'\[]~',
                                 string.ascii_lowercase + r'|{}^')
def toLower(s, casemapping=None):
    """s => s
    Returns the string s lowered according to IRC case rules.

    casemapping may be None/'rfc1459' (default) or 'ascii' (freenode).
    """
    if casemapping is None or casemapping == 'rfc1459':
        return s.translate(_rfc1459trans)
    elif casemapping == 'ascii': # freenode
        return s.lower()
    else:
        raise ValueError, 'Invalid casemapping: %r' % casemapping
def strEqual(nick1, nick2):
    """s1, s2 => bool
    Returns True if nick1 == nick2 according to IRC case rules."""
    assert isinstance(nick1, basestring)
    assert isinstance(nick2, basestring)
    return toLower(nick1) == toLower(nick2)

# historical alias: nick comparison is just IRC-insensitive string equality
nickEqual = strEqual
# special characters RFC 2812 allows in nicks besides letters/digits/-
_nickchars = r'[]\`_^{|}'
nickRe = re.compile(r'^[A-Za-z%s][-0-9A-Za-z%s]*$'
                    % (re.escape(_nickchars), re.escape(_nickchars)))
def isNick(s, strictRfc=True, nicklen=None):
    """s => bool
    Returns True if s is a valid IRC nick."""
    if strictRfc:
        # RFC 2812 grammar, optionally capped at the server's NICKLEN
        if not nickRe.match(s):
            return False
        return nicklen is None or len(s) <= nicklen
    # loose mode: accept anything that isn't a channel, isn't a hostmask
    # and contains neither spaces nor '!'
    return (not isChannel(s)
            and not isUserHostmask(s)
            and ' ' not in s
            and '!' not in s)
def isChannel(s, chantypes='#&+!', channellen=50):
    """s => bool
    Returns True if s is a valid IRC channel name."""
    if not s:
        # falsy input falls straight through, matching the original
        # short-circuiting 'and' chain (returns s itself)
        return s
    return (',' not in s
            and '\x07' not in s          # no BEL characters
            and s[0] in chantypes        # must start with a channel prefix
            and len(s) <= channellen
            and len(s.split(None, 1)) == 1)  # no whitespace anywhere
# cache of compiled glob patterns -> bound .match methods
_patternCache = utils.structures.CacheDict(1000)
def _hostmaskPatternEqual(pattern, hostmask):
    """Compile *pattern* (a glob, with IRC case folding) and match it
    against *hostmask*; compiled matchers are cached."""
    try:
        return _patternCache[pattern](hostmask) is not None
    except KeyError:
        # We make our own regexps, rather than use fnmatch, because fnmatch's
        # case-insensitivity is not IRC's case-insensitivity.
        fd = sio()
        for c in pattern:
            if c == '*':
                fd.write('.*')
            elif c == '?':
                fd.write('.')
            elif c in '[{':
                # [/{ (and below }/], |/\, ^/~) are equal under RFC 1459
                fd.write('[[{]')
            elif c in '}]':
                fd.write(r'[}\]]')
            elif c in '|\\':
                fd.write(r'[|\\]')
            elif c in '^~':
                fd.write('[~^]')
            else:
                fd.write(re.escape(c))
        fd.write('$')
        f = re.compile(fd.getvalue(), re.I).match
        _patternCache[pattern] = f
        return f(hostmask) is not None

# second-level cache keyed on the (pattern, hostmask) pair itself
_hostmaskPatternEqualCache = utils.structures.CacheDict(1000)
def hostmaskPatternEqual(pattern, hostmask):
    """pattern, hostmask => bool
    Returns True if hostmask matches the hostmask pattern pattern."""
    try:
        return _hostmaskPatternEqualCache[(pattern, hostmask)]
    except KeyError:
        b = _hostmaskPatternEqual(pattern, hostmask)
        _hostmaskPatternEqualCache[(pattern, hostmask)] = b
        return b
def banmask(hostmask):
    """Returns a properly generic banning hostmask for a hostmask.

    >>> banmask('nick!user@host.domain.tld')
    '*!*@*.domain.tld'

    >>> banmask('nick!user@10.0.0.1')
    '*!*@10.0.0.*'
    """
    assert isUserHostmask(hostmask)
    host = hostFromHostmask(hostmask)
    if utils.net.isIPV4(host):
        # IPv4: wildcard the last octet
        L = host.split('.')
        L[-1] = '*'
        return '*!*@' + '.'.join(L)
    elif utils.net.isIPV6(host):
        # IPv6: wildcard the last group
        L = host.split(':')
        L[-1] = '*'
        return '*!*@' + ':'.join(L)
    else:
        if len(host.split('.')) > 2: # If it is a subdomain
            # ban the whole subdomain: *.domain.tld
            return '*!*@*%s' % host[host.find('.'):]
        else:
            return '*!*@' + host
# mode letters that consume a parameter when being set (+) or unset (-)
_plusRequireArguments = 'ovhblkqe'
_minusRequireArguments = 'ovhbkqe'
def separateModes(args):
    """Separates modelines into single mode change tuples.  Basically, you
    should give it the .args of a MODE IrcMsg.

    Examples:

    >>> separateModes(['+ooo', 'jemfinch', 'StoneTable', 'philmes'])
    [('+o', 'jemfinch'), ('+o', 'StoneTable'), ('+o', 'philmes')]

    >>> separateModes(['+o-o', 'jemfinch', 'PeterB'])
    [('+o', 'jemfinch'), ('-o', 'PeterB')]

    >>> separateModes(['+s-o', 'test'])
    [('+s', None), ('-o', 'test')]

    >>> separateModes(['+sntl', '100'])
    [('+s', None), ('+n', None), ('+t', None), ('+l', 100)]
    """
    if not args:
        return []
    modeline = args[0]
    assert modeline[0] in '+-', 'Invalid args: %r' % args
    params = list(args[1:])
    changes = []
    sign = None
    for ch in modeline:
        if ch in '+-':
            # remember the current direction for subsequent mode chars
            sign = ch
            continue
        argSet = _plusRequireArguments if sign == '+' else _minusRequireArguments
        if ch in argSet:
            param = params.pop(0)
            try:
                # numeric parameters (e.g. +l limit) become ints
                param = int(param)
            except ValueError:
                pass
            changes.append((sign + ch, param))
        else:
            changes.append((sign + ch, None))
    return changes
def joinModes(modes):
    """[(mode, targetOrNone), ...] => args
    Joins modes of the same form as returned by separateModes."""
    chars = []
    params = []
    sign = '\x00'  # sentinel that never equals '+' or '-'
    for (mode, param) in modes:
        if param is not None:
            params.append(param)
        if mode[0] != sign:
            # direction changed: emit a new '+'/'-' marker
            sign = mode[0]
            chars.append(sign)
        chars.append(mode[1])
    # modeline first, then all parameters in order
    return [''.join(chars)] + params
def bold(s):
    """Returns the string s, bolded."""
    return '\x02{0}\x02'.format(s)

def reverse(s):
    """Returns the string s, reverse-videoed."""
    return '\x16{0}\x16'.format(s)

def underline(s):
    """Returns the string s, underlined."""
    return '\x1F{0}\x1F'.format(s)
# Definition of mircColors dictionary moved below because it became an IrcDict.
def mircColor(s, fg=None, bg=None):
    """Returns s with the appropriate mIRC color codes applied.

    fg and bg may be color names or numbers; both are looked up in the
    mircColors mapping (defined later in this module).
    """
    if fg is None and bg is None:
        return s
    elif bg is None:
        fg = mircColors[str(fg)]
        # zfill(2) so a leading digit in s cannot extend the color code
        return '\x03%s%s\x03' % (fg.zfill(2), s)
    elif fg is None:
        bg = mircColors[str(bg)]
        # According to the mirc color doc, a fg color MUST be specified if a
        # background color is specified.  So, we'll specify 00 (white) if the
        # user doesn't specify one.
        return '\x0300,%s%s\x03' % (bg.zfill(2), s)
    else:
        fg = mircColors[str(fg)]
        bg = mircColors[str(bg)]
        # No need to zfill fg because the comma delimits.
        return '\x03%s,%s%s\x03' % (fg, bg.zfill(2), s)
def canonicalColor(s, bg=False, shift=0):
    """Assigns an (fg, bg) canonical color pair to a string based on its hash
    value.  This means it might change between Python versions.  This pair can
    be used as a *parameter to mircColor.  The shift parameter is how much to
    right-shift the hash value initially.
    """
    h = hash(s) >> shift
    fg = h % 14 + 2  # +2 skips 0 (white) and 1 (black)
    if not bg:
        return (fg, None)
    # derive a background from higher bits, keeping fg/bg contrast
    bgc = (h >> 4) & 3  # the 5th and 6th least significant bits
    bgc += 8 if fg < 8 else 2
    return (fg, bgc)
def stripBold(s):
    """Returns the string s, with bold removed."""
    return s.replace('\x02', '')

# \x03 optionally followed by fg[,bg] (1-2 digits each) or just ,bg
_stripColorRe = re.compile(r'\x03(?:\d{1,2},\d{1,2}|\d{1,2}|,\d{1,2}|)')
def stripColor(s):
    """Returns the string s, with color removed."""
    return _stripColorRe.sub('', s)

def stripReverse(s):
    """Returns the string s, with reverse-video removed."""
    return s.replace('\x16', '')

def stripUnderline(s):
    """Returns the string s, with underlining removed."""
    # '\x1f' and '\x1F' are the same character; one replace suffices
    # (the original chained a redundant second replace).
    return s.replace('\x1f', '')

def stripFormatting(s):
    """Returns the string s, with all formatting removed."""
    # stripColor has to go first because of some strings, check the tests.
    s = stripColor(s)
    s = stripBold(s)
    s = stripReverse(s)
    s = stripUnderline(s)
    # finally drop the 'reset formatting' code ('\x0f' == '\x0F')
    return s.replace('\x0f', '')
_containsFormattingRe = re.compile(r'[\x02\x03\x16\x1f]')
def formatWhois(irc, replies, caller='', channel=''):
    """Returns a string describing the target of a WHOIS command.

    Arguments are:
    * irc: the irclib.Irc object on which the replies was received

    * replies: a dict mapping the reply codes ('311', '312', etc.) to their
      corresponding ircmsg.IrcMsg

    * caller: an optional nick specifying who requested the whois information

    * channel: an optional channel specifying where the reply will be sent

    If provided, caller and channel will be used to avoid leaking information
    that the caller/channel shouldn't be privy to.
    """
    hostmask = '@'.join(replies['311'].args[2:4])
    nick = replies['318'].args[1]
    user = replies['311'].args[-1]
    if _containsFormattingRe.search(user) and user[-1] != '\x0f':
        # For good measure, disable any formatting
        user = '%s\x0f' % user
    if '319' in replies:
        channels = replies['319'].args[-1].split()
        ops = []
        voices = []
        normal = []
        halfops = []
        for chan in channels:
            origchan = chan
            chan = chan.lstrip('@%+~!')
            # UnrealIRCd uses & for user modes and disallows it as a
            # channel-prefix, flying in the face of the RFC.  Have to
            # handle this specially when processing WHOIS response.
            testchan = chan.lstrip('&')
            if testchan != chan and irc.isChannel(testchan):
                chan = testchan
            # NOTE(review): diff is negative, so origchan[:diff] keeps the
            # prefix chars plus most of the channel name -- confirm the
            # membership tests below are intended to work on that slice.
            diff = len(chan) - len(origchan)
            modes = origchan[:diff]
            chanState = irc.state.channels.get(chan)
            # The user is in a channel the bot is in, so the ircd may have
            # responded with otherwise private data.
            if chanState:
                # Skip channels the callee isn't in.  This helps prevents
                # us leaking information when the channel is +s or the
                # target is +i
                if caller not in chanState.users:
                    continue
                # Skip +s channels the target is in only if the reply isn't
                # being sent to that channel
                # (fixed: this module is ircutils itself, so the original
                # 'ircutils.strEqual' reference raised NameError)
                if 's' in chanState.modes and \
                   not strEqual(channel or '', chan):
                    continue
            if not modes:
                normal.append(chan)
            elif utils.iter.any(lambda c: c in modes,('@', '&', '~', '!')):
                ops.append(chan[1:])
            elif utils.iter.any(lambda c: c in modes, ('%',)):
                halfops.append(chan[1:])
            elif utils.iter.any(lambda c: c in modes, ('+',)):
                voices.append(chan[1:])
        L = []
        # NOTE(review): 'format' below appears to be supybot's
        # utils.str.format (%L = list); it is not imported in this module's
        # visible import block -- confirm.
        if ops:
            L.append(format('is an op on %L', ops))
        if halfops:
            L.append(format('is a halfop on %L', halfops))
        if voices:
            L.append(format('is voiced on %L', voices))
        if normal:
            if L:
                L.append(format('is also on %L', normal))
            else:
                L.append(format('is on %L', normal))
    else:
        L = ['isn\'t on any non-secret channels']
    channels = format('%L', L)
    if '317' in replies:
        idle = utils.timeElapsed(replies['317'].args[2])
        signon = utils.str.timestamp(float(replies['317'].args[3]))
    else:
        idle = '<unknown>'
        signon = '<unknown>'
    if '312' in replies:
        server = replies['312'].args[2]
    else:
        server = '<unknown>'
    if '301' in replies:
        away = ' %s is away: %s.' % (nick, replies['301'].args[2])
    else:
        away = ''
    if '320' in replies:
        if replies['320'].args[2]:
            identify = ' identified'
        else:
            identify = ''
    else:
        identify = ''
    s = utils.str.format('%s (%s) has been%s on server %s since %s '
                         '(idle for %s) and %s.%s',
                         user, hostmask, identify, server, signon, idle,
                         channels, away)
    return s
class FormatContext(object):
    """Tracks which IRC formatting attributes are currently active."""
    def __init__(self):
        self.reset()

    def reset(self):
        """Clear every formatting attribute."""
        self.fg = None
        self.bg = None
        self.bold = False
        self.reverse = False
        self.underline = False

    def start(self, s):
        """Given a string, starts all the formatters in this context."""
        prefix = ''
        if self.fg is not None or self.bg is not None:
            # mircColor appends a closing \x03; slice it off so only the
            # opening color code is emitted
            prefix += mircColor('', fg=self.fg, bg=self.bg)[:-1]
        if self.underline:
            prefix += '\x1f'
        if self.reverse:
            prefix += '\x16'
        if self.bold:
            prefix += '\x02'
        return prefix + s

    def end(self, s):
        """Given a string, ends all the formatters in this context."""
        if any((self.bold, self.reverse, self.underline, self.fg, self.bg)):
            # a single 'reset' code closes everything at once
            s += '\x0f'
        return s
class FormatParser(object):
    """Scans a string for IRC control codes and computes the
    FormatContext that is active at the end of the string."""
    def __init__(self, s):
        # sio is cStringIO.StringIO (Python 2): used as a char stream
        self.fd = sio(s)
        self.last = None  # one-character pushback buffer

    def getChar(self):
        # return the pushed-back character, if any, else read one more
        if self.last is not None:
            c = self.last
            self.last = None
            return c
        else:
            return self.fd.read(1)

    def ungetChar(self, c):
        self.last = c

    def parse(self):
        """Consume the whole string, toggling context flags as codes appear."""
        context = FormatContext()
        c = self.getChar()
        while c:
            if c == '\x02':
                context.bold = not context.bold
            elif c == '\x16':
                context.reverse = not context.reverse
            elif c == '\x1f':
                context.underline = not context.underline
            elif c == '\x0f':
                # reset code clears everything
                context.reset()
            elif c == '\x03':
                self.getColor(context)
            c = self.getChar()
        return context

    def getInt(self):
        """Read a 0-15 color number; returns None if no digit was read."""
        i = 0
        setI = False
        c = self.getChar()
        while c.isdigit():
            j = i * 10
            j += int(c)
            if j >= 16:
                # mIRC colors stop at 15: push the extra digit back
                self.ungetChar(c)
                break
            else:
                setI = True
                i = j
            c = self.getChar()
        self.ungetChar(c)
        if setI:
            return i
        else:
            return None

    def getColor(self, context):
        # \x03 is followed by fg[,bg]; either part may be absent
        context.fg = self.getInt()
        c = self.getChar()
        if c == ',':
            context.bg = self.getInt()
        else:
            self.ungetChar(c)
def wrap(s, length):
    """textwrap.wrap, but carries IRC formatting state across line
    breaks so that each returned chunk stands on its own."""
    processed = []
    chunks = textwrap.wrap(s, length)
    context = None
    for chunk in chunks:
        if context is not None:
            # re-open the formatting that was active at the end of the
            # previous chunk
            chunk = context.start(chunk)
        context = FormatParser(chunk).parse()
        # close any formatting still open at the end of this chunk
        processed.append(context.end(chunk))
    return processed
def isValidArgument(s):
    """Returns whether s is strictly a valid argument for an IRC message."""
    # IRC message arguments may not contain CR, LF or NUL
    return not any(forbidden in s for forbidden in ('\r', '\n', '\x00'))
def safeArgument(s):
    """If s is unsafe for IRC, returns a safe version."""
    # Python 2: encode unicode to UTF-8, coerce non-strings to str
    if isinstance(s, unicode):
        s = s.encode('utf-8')
    elif not isinstance(s, basestring):
        debug('Got a non-string in safeArgument: %r', s)
        s = str(s)
    if isValidArgument(s):
        return s
    else:
        # contains CR/LF/NUL: fall back to repr(), which escapes them
        return repr(s)
def replyTo(msg):
    """Returns the appropriate target to send responses to msg."""
    target = msg.args[0]
    # channel messages get answered in the channel; private ones go
    # straight back to the sender's nick
    return target if isChannel(target) else msg.nick
def dccIP(ip):
    """Converts a dotted-quad IP string to the DCC integer form
    (big-endian: first octet is the most significant byte)."""
    assert utils.net.isIPV4(ip), \
           'argument must be a string ip in xxx.yyy.zzz.www format.'
    i = 0
    x = 256**3
    for quad in ip.split('.'):
        i += int(quad)*x
        # explicit floor division: plain '/' is identical on Python 2 but
        # would silently produce a float under Python 3
        x //= 256
    return i
def unDccIP(i):
    """Takes an integer DCC IP and return a normal string IP."""
    # 'long' is the Python 2 arbitrary-precision integer type
    assert isinstance(i, (int, long)), '%r is not an number.' % i
    L = []
    while len(L) < 4:
        # peel off octets least-significant first
        L.append(i % 256)
        # Python 2 integer division; would need '//' under Python 3
        i /= 256
    L.reverse()
    return '.'.join(utils.iter.imap(str, L))
class IrcString(str):
    """This class does case-insensitive comparison and hashing of nicks."""
    def __new__(cls, s=''):
        x = super(IrcString, cls).__new__(cls, s)
        # cache the IRC-case-folded form once; used by __eq__ and __hash__
        x.lowered = toLower(x)
        return x

    def __eq__(self, s):
        try:
            return toLower(s) == self.lowered
        except Exception:
            # s isn't string-like: treat as unequal rather than raising.
            # (A bare 'except:' here would also have swallowed
            # KeyboardInterrupt/SystemExit.)
            return False

    def __ne__(self, s):
        return not (self == s)

    def __hash__(self):
        # hash the folded form so equal-under-IRC strings collide
        return hash(self.lowered)
class IrcDict(utils.InsensitivePreservingDict):
    """Subclass of dict to make key comparison IRC-case insensitive."""
    def key(self, s):
        # normalization hook used by InsensitivePreservingDict
        if s is not None:
            s = toLower(s)
        return s

class CallableValueIrcDict(IrcDict):
    """IrcDict whose callable values are invoked on lookup (lazy values)."""
    def __getitem__(self, k):
        # super(IrcDict, ...) deliberately dispatches straight to
        # InsensitivePreservingDict.__getitem__
        v = super(IrcDict, self).__getitem__(k)
        if callable(v):
            v = v()
        return v

class IrcSet(utils.NormalizingSet):
    """A sets.Set using IrcStrings instead of regular strings."""
    def normalize(self, s):
        return IrcString(s)

    def __reduce__(self):
        # pickle support: rebuild the set from its member list
        return (self.__class__, (list(self),))
class FloodQueue(object):
    """Per-sender timeout queues used for flood detection: messages are
    enqueued under a key derived from the sender and expire after
    ``timeout`` seconds."""
    # default expiry; may be a number or a zero-argument callable
    timeout = 0
    def __init__(self, timeout=None, queues=None):
        if timeout is not None:
            self.timeout = timeout
        if queues is None:
            queues = IrcDict()
        self.queues = queues

    def __repr__(self):
        return 'FloodQueue(timeout=%r, queues=%s)' % (self.timeout,
                                                      repr(self.queues))

    def key(self, msg):
        # This really ought to be configurable without subclassing, but for
        # now, it works.
        # used to be msg.user + '@' + msg.host but that was too easily abused.
        return msg.host

    def getTimeout(self):
        # resolve the timeout, which may be a callable
        if callable(self.timeout):
            return self.timeout()
        else:
            return self.timeout

    def _getQueue(self, msg, insert=True):
        """Return the queue for msg's sender, creating it if *insert*."""
        key = self.key(msg)
        try:
            return self.queues[key]
        except KeyError:
            if insert:
                # python--
                # instancemethod.__repr__ calls the instance.__repr__, which
                # means that our __repr__ calls self.queues.__repr__, which
                # calls structures.TimeoutQueue.__repr__, which calls
                # getTimeout.__repr__, which calls our __repr__, which calls...
                getTimeout = lambda : self.getTimeout()
                q = utils.structures.TimeoutQueue(getTimeout)
                self.queues[key] = q
                return q
            else:
                return None

    def enqueue(self, msg, what=None):
        # record msg (or an arbitrary payload) against its sender's queue
        if what is None:
            what = msg
        q = self._getQueue(msg)
        q.enqueue(what)

    def len(self, msg):
        # number of unexpired entries for msg's sender
        q = self._getQueue(msg, insert=False)
        if q is not None:
            return len(q)
        else:
            return 0

    def has(self, msg, what=None):
        # True if an equal, unexpired payload is queued for msg's sender
        q = self._getQueue(msg, insert=False)
        if q is not None:
            if what is None:
                what = msg
            for elt in q:
                if elt == what:
                    return True
        return False
# Named mIRC color codes; an IrcDict so name lookups ignore case.
mircColors = IrcDict({
    'white': '0',
    'black': '1',
    'blue': '2',
    'green': '3',
    'red': '4',
    'brown': '5',
    'purple': '6',
    'orange': '7',
    'yellow': '8',
    'light green': '9',
    'teal': '10',
    'light blue': '11',
    'dark blue': '12',
    'pink': '13',
    'dark grey': '14',
    'light grey': '15',
    'dark gray': '14',
    'light gray': '15',
})

# We'll map integers to their string form so mircColor is simpler.
for (k, v) in mircColors.items():
    if k is not None: # Ignore empty string for None.
        sv = str(v)
        mircColors[sv] = sv
def standardSubstitute(irc, msg, text, env=None):
    """Do the standard set of substitutions on text, and return it"""
    # $channel falls back to 'somewhere' for private messages
    if isChannel(msg.args[0]):
        channel = msg.args[0]
    else:
        channel = 'somewhere'
    def randInt():
        return str(random.randint(-1000, 1000))
    def randDate():
        t = pow(2,30)*random.random()+time.time()/4.0
        return time.ctime(t)
    def randNick():
        # pick some nick other than the sender's, if the channel has one
        if channel != 'somewhere':
            L = list(irc.state.channels[channel].users)
            if len(L) > 1:
                n = msg.nick
                while n == msg.nick:
                    n = utils.iter.choice(L)
                return n
            else:
                return msg.nick
        else:
            return 'someone'
    ctime = time.strftime("%a %b %d %H:%M:%S %Y")
    localtime = time.localtime()
    gmtime = time.strftime("%a %b %d %H:%M:%S %Y", time.gmtime())
    # CallableValueIrcDict calls the rand* helpers lazily, at lookup time,
    # so each substitution of $randnick etc. gets a fresh value
    vars = CallableValueIrcDict({
        'who': msg.nick,
        'nick': msg.nick,
        'user': msg.user,
        'host': msg.host,
        'channel': channel,
        'botnick': irc.nick,
        'now': ctime, 'ctime': ctime,
        'utc': gmtime, 'gmt': gmtime,
        'randnick': randNick, 'randomnick': randNick,
        'randdate': randDate, 'randomdate': randDate,
        'rand': randInt, 'randint': randInt, 'randomint': randInt,
        'today': time.strftime('%d %b %Y', localtime),
        'year': localtime[0],
        'month': localtime[1],
        'monthname': time.strftime('%b', localtime),
        'date': localtime[2],
        'day': time.strftime('%A', localtime),
        'h': localtime[3], 'hr': localtime[3], 'hour': localtime[3],
        'm': localtime[4], 'min': localtime[4], 'minute': localtime[4],
        's': localtime[5], 'sec': localtime[5], 'second': localtime[5],
        'tz': time.strftime('%Z', localtime),
    })
    # caller-supplied variables override the standard set
    if env is not None:
        vars.update(env)
    t = string.Template(text)
    # restrict identifiers to plain alphanumerics (no underscores/dots)
    t.idpattern = '[a-zA-Z][a-zA-Z0-9]*'
    # safe_substitute leaves unknown $vars untouched instead of raising
    return t.safe_substitute(vars)
if __name__ == '__main__':
    # run this module's doctests when executed directly
    import sys, doctest
    doctest.testmod(sys.modules['__main__'])
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause |
AlbertoPeon/invenio | modules/webjournal/lib/elements/bfe_webjournal_info.py | 35 | 2238 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal element - prints journal info
"""
from invenio.webjournal_utils import \
parse_url_string, \
make_journal_url, \
get_current_issue, \
get_journal_css_url, \
get_journal_name_intl
def format_element(bfo, var=''):
    """
    Print several journal specific variables.
    @param var: the name of the desired variable. Can be one of: WEBJOURNAL_CSS_URL, WEBJOURNAL_NAME, WEBJOURNAL_NAME_INTL, WEBJOURNAL_CURRENT_ISSUE_NUMBER, WEBJOURNAL_ISSUE_NUMBER, WEBJOURNAL_URL
    """
    args = parse_url_string(bfo.user_info['uri'])
    journal_name = args["journal_name"]
    this_issue_number = args["issue"]

    if var == 'WEBJOURNAL_NAME':
        out = journal_name
    elif var == 'WEBJOURNAL_NAME_INTL':
        out = get_journal_name_intl(journal_name, bfo.lang)
    elif var == 'WEBJOURNAL_ISSUE_NUMBER':
        out = this_issue_number
    elif var == 'WEBJOURNAL_CURRENT_ISSUE_NUMBER':
        out = get_current_issue(bfo.lang, journal_name)
    elif var == 'WEBJOURNAL_URL':
        out = make_journal_url(bfo.user_info['uri'], {'ln': bfo.lang})
    elif var == 'WEBJOURNAL_CSS_URL':
        out = get_journal_css_url(journal_name)
    elif var == 'WEBJOURNAL_USER_LANG':
        out = bfo.lang
    else:
        # Unknown or empty variable name: return an empty string.  The
        # original chain left 'out' unbound for unrecognized names,
        # raising UnboundLocalError at the final return.
        out = ''
    return out
def escape_values(bfo):
    """
    Called by BibFormat in order to check if output of this element
    should be escaped.
    """
    # 0 == do not escape: this element only emits plain text and URLs
    return 0
| gpl-2.0 |
LeartS/odoo | addons/account/wizard/account_move_line_unreconcile_select.py | 385 | 1864 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_move_line_unreconcile_select(osv.osv_memory):
    """Wizard: the user picks an account, then an act_window opens
    listing that account's reconciled, posted move lines so they can
    be unreconciled."""
    _name = "account.move.line.unreconcile.select"
    _description = "Unreconciliation"
    _columns ={
        # account whose reconciled move lines will be shown
        'account_id': fields.many2one('account.account','Account',required=True),
        }
    def action_open_window(self, cr, uid, ids, context=None):
        """Return an act_window over account.move.line filtered to the
        selected account's reconciled, non-draft entries."""
        data = self.read(cr, uid, ids, context=context)[0]
        # account_id is an integer id from the many2one, safe to %d-format
        return {
            'domain': "[('account_id','=',%d),('reconcile_id','<>',False),('state','<>','draft')]" % data['account_id'],
            'name': 'Unreconciliation',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'view_id': False,
            'res_model': 'account.move.line',
            'type': 'ir.actions.act_window'
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jacobsenanaizabel/shoop | shoop/core/models/__init__.py | 6 | 3049 | # This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from .addresses import Address, SavedAddress, SavedAddressRole, SavedAddressStatus
from .taxes import CustomerTaxGroup, Tax, TaxClass
from .attributes import Attribute, AttributeType, AttributeVisibility
from .categories import Category, CategoryVisibility, CategoryStatus
from .counters import Counter, CounterType
from .contacts import Contact, ContactGroup, CompanyContact, PersonContact, AnonymousContact, Gender, get_person_contact
from .methods import ShippingMethod, PaymentMethod, MethodType, MethodStatus
from .manufacturers import Manufacturer
from .orders import Order, OrderStatus, OrderStatusRole, OrderLogEntry, PaymentStatus, ShippingStatus
from .order_lines import OrderLine, OrderLineTax, OrderLineType
from .payments import Payment
from .persistent_cache import PersistentCacheEntry
from .products import (
Product, ProductMode, StockBehavior, ProductCrossSellType, ShippingMode,
ProductType, ProductCrossSell, ProductAttribute
)
from .product_media import ProductMedia, ProductMediaKind
from .product_shops import ShopProduct, ProductVisibility
from .product_variation import (
ProductVariationLinkStatus, ProductVariationVariable, ProductVariationVariableValue, ProductVariationResult
)
from .product_packages import ProductPackageLink
from .shops import Shop, ShopStatus
from .shipments import Shipment, ShipmentProduct
from .suppliers import Supplier, SupplierType
from .supplied_products import SuppliedProduct
from .units import SalesUnit
# Public API of shoop.core.models (duplicate "Product" entry removed).
__all__ = [
    "Address",
    "AnonymousContact",
    "Attribute",
    "AttributeType",
    "AttributeVisibility",
    "Category",
    "CategoryStatus",
    "CategoryVisibility",
    "CompanyContact",
    "Contact",
    "ContactGroup",
    "Counter",
    "CounterType",
    "CustomerTaxGroup",
    "get_person_contact",
    "Gender",
    "Manufacturer",
    "MethodStatus",
    "MethodType",
    "Order",
    "OrderLine",
    "OrderLineTax",
    "OrderLineType",
    "OrderLogEntry",
    "OrderStatus",
    "OrderStatusRole",
    "Payment",
    "PaymentMethod",
    "PaymentStatus",
    "PersistentCacheEntry",
    "PersonContact",
    "Product",
    "ProductAttribute",
    "ProductCrossSell",
    "ProductCrossSellType",
    "ProductMedia",
    "ProductMediaKind",
    "ProductMode",
    "ProductPackageLink",
    "ProductType",
    "ProductVariationLinkStatus",
    "ProductVariationResult",
    "ProductVariationVariable",
    "ProductVariationVariableValue",
    "ProductVisibility",
    "SalesUnit",
    "SavedAddress",
    "SavedAddressRole",
    "SavedAddressStatus",
    "Shipment",
    "ShipmentProduct",
    "ShippingMethod",
    "ShippingMode",
    "ShippingStatus",
    "Shop",
    "ShopProduct",
    "ShopStatus",
    "StockBehavior",
    "SuppliedProduct",
    "Supplier",
    "SupplierType",
    "Tax",
    "TaxClass",
]
| agpl-3.0 |
Tehsmash/nova | nova/scheduler/weights/ram.py | 61 | 1614 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
RAM Weigher. Weigh hosts by their RAM usage.
The default is to spread instances across all hosts evenly. If you prefer
stacking, you can set the 'ram_weight_multiplier' option to a negative
number and the weighing has the opposite effect of the default.
"""
from oslo_config import cfg
from nova.scheduler import weights
# Weigher configuration: multiplier applied to each host's free RAM.
# Positive values spread instances across hosts; negative values stack.
ram_weight_opts = [
    cfg.FloatOpt('ram_weight_multiplier',
                 default=1.0,
                 help='Multiplier used for weighing ram. Negative '
                      'numbers mean to stack vs spread.'),
]

CONF = cfg.CONF
# register so CONF.ram_weight_multiplier is available to RAMWeigher
CONF.register_opts(ram_weight_opts)
class RAMWeigher(weights.BaseHostWeigher):
    """Weigh hosts by free RAM: more free memory -> higher weight (spread);
    a negative ram_weight_multiplier inverts this (stack)."""
    # NOTE(review): minval is consumed by the weights framework -- presumably
    # a lower bound on the raw weight; confirm in weights.BaseHostWeigher.
    minval = 0
    def weight_multiplier(self):
        """Override the weight multiplier."""
        return CONF.ram_weight_multiplier
    def _weigh_object(self, host_state, weight_properties):
        """Higher weights win. We want spreading to be the default."""
        return host_state.free_ram_mb
| apache-2.0 |
jamesbulpin/xcp-xen-4.1 | tools/pygrub/src/ExtLinuxConf.py | 2 | 6728 | #
# ExtLinuxConf.py - Simple syslinux config parsing
#
# Copyright 2010 Citrix Systems Ltd.
#
# This software may be freely redistributed under the terms of the GNU
# general public license.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import sys, re, os
import logging
import GrubConf
class ExtLinuxImage(object):
    """One syslinux/extlinux boot entry (a "label" stanza).

    Exposes title/root/kernel/args/initrd parsed from the stanza's lines.
    Lines are fed through set_from_line(); kernel/initrd are stored as
    (path-prefix, value) tuples for compatibility with GrubConf consumers.
    """
    def __init__(self, lines, path):
        self.reset(lines, path)

    def __repr__(self):
        return ("title: %s\n"
                " root: %s\n"
                " kernel: %s\n"
                " args: %s\n"
                " initrd: %s\n" %(self.title, self.root, self.kernel,
                                  self.args, self.initrd))

    def reset(self, lines, path):
        """(Re)initialize all fields, then apply every config line."""
        self._initrd = self._kernel = self._readonly = None
        self._args = ""
        self.title = ""
        self.lines = []
        self.path = path
        self.root = ""
        # Python 2: map() is eager, so this applies set_from_line to all lines.
        map(self.set_from_line, lines)

    def set_from_line(self, line, replace = None):
        """Parse a single directive line; optionally replace a stored line."""
        (com, arg) = GrubConf.grub_exact_split(line, 2)
        com = com.lower()

        # Special handling for mboot.c32
        if com.lower() == "append" and self.kernel is not None:
            (_,kernel) = self.kernel
            if kernel.endswith("mboot.c32"):
                kernel = None
                initrd = None

                modules = arg.split("---")

                if len(modules) == 3: # Assume Xen + Kernel + Initrd
                    (_,kernel,initrd) = modules
                elif len(modules) == 2: # Assume Kernel + Initrd
                    (kernel,initrd) = modules

                if kernel:
                    setattr(self, "kernel", kernel.strip())
                if initrd:
                    setattr(self, "initrd", initrd.strip())

                # Bypass regular self.commands handling
                com = None
            elif arg.find("initrd=") != -1:
                # BUG FIX: str.find() returns -1 when the substring is absent
                # (truthy!) and 0 when it starts the string (falsy!), so the
                # old bare "elif arg.find(...):" test was inverted for both
                # of those cases.  Compare against -1 explicitly.
                # find initrd image in append line
                args = arg.strip().split(" ")
                for a in args:
                    if a.lower().startswith("initrd="):
                        setattr(self, "initrd", a.replace("initrd=", ""))
                        arg = arg.replace(a, "")

        if com is not None and self.commands.has_key(com):
            if self.commands[com] is not None:
                setattr(self, self.commands[com], re.sub('^"(.+)"$', r"\1", arg.strip()))
            else:
                logging.info("Ignored image directive %s" %(com,))
        elif com is not None:
            logging.warning("Unknown image directive %s" %(com,))

        # now put the line in the list of lines
        if replace is None:
            self.lines.append(line)
        else:
            self.lines.pop(replace)
            self.lines.insert(replace, line)

    def set_kernel(self, val):
        # A bare path has no arguments; otherwise split "path args..." once.
        if val.find(" ") == -1:
            self._kernel = (None,val)
            self._args = None
            return
        (kernel, args) = val.split(None, 1)
        self._kernel = (None,kernel)
        self._args = args
    def get_kernel(self):
        return self._kernel
    def set_args(self, val):
        self._args = val
    def get_args(self):
        return self._args
    kernel = property(get_kernel, set_kernel)
    args = property(get_args, set_args)

    def set_initrd(self, val):
        self._initrd = (None,val)
    def get_initrd(self):
        return self._initrd
    initrd = property(get_initrd, set_initrd)

    def set_readonly(self, val):
        self._readonly = 1
    def get_readonly(self):
        return self._readonly
    readonly = property(get_readonly, set_readonly)

    # set up command handlers
    commands = {
        "label": "title",
        "kernel": "kernel",
        "append": "args"}
class ExtLinuxConfigFile(object):
    """Parsed representation of a whole extlinux/syslinux configuration.

    Holds a list of ExtLinuxImage entries plus global directives
    (default, timeout); unknown directives are logged, not fatal.
    """
    def __init__(self, fn = None):
        self.filename = fn
        self.images = []
        self.timeout = -1
        self._default = 0
        if fn is not None:
            self.parse()
    def new_image(self, title, lines):
        # ExtLinuxImage constructor doesn't have title but since path
        # is being used by get_{kernel|initrd} functions we pass
        # empty string rather than None (see lines above)
        return ExtLinuxImage(lines, "")
    def parse(self, buf = None):
        """Parse self.filename (or the given buffer) into images/directives.

        NOTE(review): when buf is given but self.filename is None,
        os.path.dirname(self.filename) below raises -- presumably callers
        always set a filename; confirm before relying on buf-only parsing.
        """
        if buf is None:
            if self.filename is None:
                raise ValueError, "No config file defined to parse!"
            f = open(self.filename, 'r')
            lines = f.readlines()
            f.close()
        else:
            lines = buf.split("\n")
        path = os.path.dirname(self.filename)
        img = []
        for l in lines:
            l = l.strip()
            # skip blank lines
            if len(l) == 0:
                continue
            # skip comments
            if l.startswith('#'):
                continue
            # new image
            if l.lower().startswith("label"):
                # flush the previous stanza before starting a new one
                if len(img) > 0:
                    self.add_image(ExtLinuxImage(img, path))
                img = [l]
                continue
            if len(img) > 0:
                img.append(l)
                continue
            # outside any stanza: a global directive
            (com, arg) = GrubConf.grub_exact_split(l, 2)
            com = com.lower()
            if self.commands.has_key(com):
                if self.commands[com] is not None:
                    setattr(self, self.commands[com], arg.strip())
                else:
                    logging.info("Ignored directive %s" %(com,))
            else:
                logging.warning("Unknown directive %s" %(com,))
        # flush the trailing stanza, if any
        if len(img) > 0:
            self.add_image(ExtLinuxImage(img, path))
    def hasPassword(self):
        # extlinux configs carry no password protection
        return False
    def hasPasswordAccess(self):
        return True
    def add_image(self, image):
        self.images.append(image)
    def _get_default(self):
        # Map the stored default title back to an image index (0 if absent).
        for i in range(len(self.images)):
            if self.images[i].title == self._default:
                return i
        return 0
    def _set_default(self, val):
        self._default = val
    default = property(_get_default, _set_default)
    # Global directives: value is the attribute to set, None means "ignore".
    commands = { "default": "default",
                 "timeout": "timeout",
                 "serial": None,
                 "prompt": None,
                 "display": None,
                 "f1": None,
                 "f2": None,
                 }
if __name__ == "__main__":
if sys.argv < 2:
raise RuntimeError, "Need a configuration file to read"
g = ExtLinuxConfigFile(sys.argv[1])
for i in g.images:
print i
print g.default
| gpl-2.0 |
Pluto-tv/chromium-crosswalk | tools/find_runtime_symbols/find_runtime_symbols.py | 102 | 6593 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Find symbols in a binary corresponding to given runtime virtual addresses.
Note that source file names are treated as symbols in this script while they
are actually not.
"""
import json
import logging
import os
import sys
from static_symbols import StaticSymbolsInFile
_BASE_PATH = os.path.dirname(os.path.abspath(__file__))
_TOOLS_LINUX_PATH = os.path.join(_BASE_PATH, os.pardir, 'linux')
sys.path.insert(0, _TOOLS_LINUX_PATH)
from procfs import ProcMaps # pylint: disable=F0401
try:
from collections import OrderedDict # pylint: disable=E0611
except ImportError:
_SIMPLEJSON_PATH = os.path.join(_BASE_PATH, os.pardir, os.pardir,
'third_party')
sys.path.insert(0, _SIMPLEJSON_PATH)
from simplejson import OrderedDict
# Symbol categories accepted by find_runtime_symbols() (keys of
# _INTERNAL_FINDERS below).
FUNCTION_SYMBOLS = 0
SOURCEFILE_SYMBOLS = 1
TYPEINFO_SYMBOLS = 2
# File names expected inside a prepared data directory (see
# RuntimeSymbolsInProcess.load).
_MAPS_FILENAME = 'maps'
_FILES_FILENAME = 'files.json'
class RuntimeSymbolsInProcess(object):
  """Resolves runtime virtual addresses against per-binary symbol tables.

  Instances are built from a "prepared data" directory via load().  The
  three find_* methods translate a runtime address into a procedure,
  source file or typeinfo symbol, returning None when nothing matches.
  """
  def __init__(self):
    self._maps = None
    self._static_symbols_in_filse = {}

  def _resolve(self, runtime_address, vma_kind, resolver_name):
    """Finds the VMA containing the address and delegates to the symbol
    table of the mapped file; None if no VMA or no table matches."""
    for vma in self._maps.iter(vma_kind):
      if not (vma.begin <= runtime_address < vma.end):
        continue
      static_symbols = self._static_symbols_in_filse.get(vma.name)
      if static_symbols:
        return getattr(static_symbols, resolver_name)(runtime_address, vma)
      return None
    return None

  def find_procedure(self, runtime_address):
    return self._resolve(runtime_address, ProcMaps.executable,
                         'find_procedure_by_runtime_address')

  def find_sourcefile(self, runtime_address):
    return self._resolve(runtime_address, ProcMaps.executable,
                         'find_sourcefile_by_runtime_address')

  def find_typeinfo(self, runtime_address):
    return self._resolve(runtime_address, ProcMaps.constants,
                         'find_typeinfo_by_runtime_address')

  @staticmethod
  def load(prepared_data_dir):
    """Builds a RuntimeSymbolsInProcess from a prepared data directory."""
    process = RuntimeSymbolsInProcess()
    with open(os.path.join(prepared_data_dir, _MAPS_FILENAME), mode='r') as f:
      process._maps = ProcMaps.load_file(f)
    with open(os.path.join(prepared_data_dir, _FILES_FILENAME), mode='r') as f:
      file_table = json.load(f)

    # pylint: disable=W0212
    for vma in process._maps.iter(ProcMaps.executable_and_constants):
      entry = file_table.get(vma.name)
      if not entry:
        continue
      symbols = StaticSymbolsInFile(vma.name)

      nm_entry = entry.get('nm')
      if nm_entry and nm_entry['format'] == 'bsd':
        with open(os.path.join(prepared_data_dir, nm_entry['file']), 'r') as f:
          symbols.load_nm_bsd(f, nm_entry['mangled'])

      readelf_entry = entry.get('readelf-e')
      if readelf_entry:
        with open(os.path.join(prepared_data_dir, readelf_entry['file']),
                  'r') as f:
          symbols.load_readelf_ew(f)

      decodedline_entry = entry.get('readelf-debug-decodedline-file')
      if decodedline_entry:
        with open(os.path.join(prepared_data_dir,
                               decodedline_entry['file']), 'r') as f:
          symbols.load_readelf_debug_decodedline_file(f)

      process._static_symbols_in_filse[vma.name] = symbols

    return process
def _find_runtime_function_symbols(symbols_in_process, addresses):
  """Maps each runtime address to its procedure name.

  Addresses may be ints or hex strings (Python 2 basestring).  Unresolved
  addresses map to their zero-padded hex representation.
  """
  result = OrderedDict()
  for address in addresses:
    if isinstance(address, basestring):
      address = int(address, 16)
    found = symbols_in_process.find_procedure(address)
    if found:
      result[address] = found.name
    else:
      result[address] = '0x%016x' % address
  return result
def _find_runtime_sourcefile_symbols(symbols_in_process, addresses):
  """Maps each runtime address to its source file name ('' if unknown)."""
  result = OrderedDict()
  for address in addresses:
    if isinstance(address, basestring):
      address = int(address, 16)
    found = symbols_in_process.find_sourcefile(address)
    if found:
      result[address] = found
    else:
      result[address] = ''
  return result
def _find_runtime_typeinfo_symbols(symbols_in_process, addresses):
  """Maps each runtime address to a typeinfo name.

  Address 0 maps to 'no typeinfo'; the 'typeinfo for ' prefix emitted by
  the demangler is stripped; unresolved addresses map to hex.
  """
  result = OrderedDict()
  for address in addresses:
    if isinstance(address, basestring):
      address = int(address, 16)
    if address == 0:
      result[address] = 'no typeinfo'
    else:
      found = symbols_in_process.find_typeinfo(address)
      if found:
        if found.startswith('typeinfo for '):
          # strip len('typeinfo for ') == 13 characters
          result[address] = found[13:]
        else:
          result[address] = found
      else:
        result[address] = '0x%016x' % address
  return result
# Dispatch table: symbol category constant -> finder implementation.
_INTERNAL_FINDERS = {
    FUNCTION_SYMBOLS: _find_runtime_function_symbols,
    SOURCEFILE_SYMBOLS: _find_runtime_sourcefile_symbols,
    TYPEINFO_SYMBOLS: _find_runtime_typeinfo_symbols,
    }
def find_runtime_symbols(symbol_type, symbols_in_process, addresses):
  """Resolves addresses using the finder selected by symbol_type.

  symbol_type is one of FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS or
  TYPEINFO_SYMBOLS; raises KeyError for anything else.
  """
  return _INTERNAL_FINDERS[symbol_type](symbols_in_process, addresses)
def main():
  """CLI entry: resolve function symbols for addresses read from stdin."""
  # FIX: Accept only .pre data
  if len(sys.argv) < 2:
    sys.stderr.write("""Usage:
%s /path/to/prepared_data_dir/ < addresses.txt
""" % sys.argv[0])
    return 1
  log = logging.getLogger('find_runtime_symbols')
  log.setLevel(logging.WARN)
  handler = logging.StreamHandler()
  handler.setLevel(logging.WARN)
  formatter = logging.Formatter('%(message)s')
  handler.setFormatter(formatter)
  log.addHandler(handler)
  prepared_data_dir = sys.argv[1]
  if not os.path.exists(prepared_data_dir):
    log.warn("Nothing found: %s" % prepared_data_dir)
    return 1
  if not os.path.isdir(prepared_data_dir):
    log.warn("Not a directory: %s" % prepared_data_dir)
    return 1
  symbols_in_process = RuntimeSymbolsInProcess.load(prepared_data_dir)
  # sys.stdin iterates hex-string lines; the finder converts them to ints.
  symbols_dict = find_runtime_symbols(FUNCTION_SYMBOLS,
                                      symbols_in_process,
                                      sys.stdin)
  for address, symbol in symbols_dict.iteritems():
    if symbol:
      print '%016x %s' % (address, symbol)
    else:
      print '%016x' % address
  return 0
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
CurryBoy/FirstDraft | BootstrapHTMLGenerator.py | 1 | 1854 | import sys
sys.path.append("..")
from ParseTree import *
class BootstrapHTMLGenerator(HTMLGenerator):
    """HTML generator producing a Bootstrap page whose elements are
    absolutely positioned from each node's bounding box.

    Element ids are strings of lowercase letters derived from the node's
    position in the tree; generateCSSRules fills self.cssRules and tags
    each node, generateCSSStyle renders the rules as CSS text.
    """
    def __init__(self):
        # id -> {css property: value}; populated during Generate().
        # (A stray trailing `pass` statement was removed -- dead code.)
        self.cssRules = {}

    def Generate(self,htmlTree):
        """Return the full HTML document for the given parse tree."""
        self.generateCSSRules(htmlTree.rootBodyNode,"a")
        style = self.generateCSSStyle(self.cssRules)
        style = "<style type='text/css'>%s</style>" % style
        bootstrap = "<link href='http://netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.min.css' rel='stylesheet'>" + "<script src='http://netdna.bootstrapcdn.com/bootstrap/3.0.0/js/bootstrap.min.js'></script>"
        body = "<body><div style='border: dashed; width: %dpx; height: %dpx'>%s</div></body>" % (htmlTree.rootBodyNode.boundingBox.width, htmlTree.rootBodyNode.boundingBox.height, str(htmlTree))
        html = "<!DOCTYPE html><html lang='en'><head><title>A First Draft</title> %s %s </head>%s</html>" % (bootstrap,style,body)
        return html

    def generateCSSRules(self,node,node_id):
        """Recursively record absolute-positioning rules and tag nodes."""
        node_id = node_id + "a"
        bbox = node.boundingBox
        rules = {}
        rules['position'] = 'absolute'
        rules['top'] = '%dpx' % bbox.offsetY
        rules['left'] = '%dpx' % bbox.offsetX
        rules['width'] = '%dpx' % bbox.width
        rules['height'] = '%dpx' % bbox.height
        self.cssRules[node_id] = rules
        node.tagAttributes = "id='%s'" % node_id
        # enumerate() already supplies the per-child offset; the separate
        # node_id_counter variable that duplicated it was removed.
        for i, child in enumerate(node.children):
            self.generateCSSRules(child, node_id + chr(ord(node_id[-1]) + i))

    def generateCSSStyle(self,rules):
        """Render the accumulated rules dict as a CSS stylesheet string."""
        style = ""
        for nid, rl in rules.iteritems():
            style += "#%s { \n" % nid
            for prop, val in rl.iteritems():
                style += "%s: %s;\n" % (prop,val)
            style += "}\n\n"
        return style
| apache-2.0 |
leihu0724/azure-sdk-for-python | azure/http/batchclient.py | 2 | 13880 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys
import uuid
from azure import (
_update_request_uri_query,
WindowsAzureError,
WindowsAzureBatchOperationError,
url_unquote,
DEFAULT_HTTP_TIMEOUT,
_ERROR_CANNOT_FIND_PARTITION_KEY,
_ERROR_CANNOT_FIND_ROW_KEY,
_ERROR_INCORRECT_TABLE_IN_BATCH,
_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH,
_ERROR_DUPLICATE_ROW_KEY_IN_BATCH,
_ERROR_BATCH_COMMIT_FAIL,
ETree,
_get_etree_text,
_etree_entity_feed_namespaces,
)
from azure.http import HTTPError, HTTPRequest, HTTPResponse
from azure.http.httpclient import _HTTPClient
from azure.storage import (
_update_storage_table_header,
METADATA_NS,
_sign_storage_table_request,
)
_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices'
# MIME multipart boundaries are assembled into a bytes request body below,
# so _new_boundary must return native str on Python 2 and bytes on Python 3.
if sys.version_info < (3,):
    def _new_boundary():
        # Returns a unique boundary token (str on Python 2).
        return str(uuid.uuid1())
else:
    def _new_boundary():
        # Returns a unique boundary token encoded to bytes (Python 3).
        return str(uuid.uuid1()).encode('utf-8')
class _BatchClient(_HTTPClient):
    '''
    This is the class that is used for batch operation for storage table
    service. It only supports one changeset.
    '''

    def __init__(self, service_instance, account_key, account_name,
                 protocol='http', timeout=DEFAULT_HTTP_TIMEOUT):
        _HTTPClient.__init__(self, service_instance, account_name=account_name,
                             account_key=account_key, protocol=protocol,
                             timeout=timeout)
        # Batch state: all requests in one batch must target the same table
        # and partition key, with distinct row keys.
        self.is_batch = False
        self.batch_requests = []
        self.batch_table = ''
        self.batch_partition_key = ''
        self.batch_row_keys = []

    def get_request_table(self, request):
        '''
        Extracts table name from request.uri. The request.uri has either
        "/mytable(...)" or "/mytable" format.

        request:
            the request to insert, update or delete entity
        '''
        if '(' in request.path:
            pos = request.path.find('(')
            return request.path[1:pos]
        else:
            return request.path[1:]

    def get_request_partition_key(self, request):
        '''
        Extracts PartitionKey from request.body if it is a POST request or from
        request.path if it is not a POST request. Only insert operation request
        is a POST request and the PartitionKey is in the request body.

        request:
            the request to insert, update or delete entity
        '''
        if request.method == 'POST':
            doc = ETree.fromstring(request.body)
            part_key = doc.find('./atom:content/m:properties/d:PartitionKey', _etree_entity_feed_namespaces)
            if part_key is None:
                raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)
            return _get_etree_text(part_key)
        else:
            uri = url_unquote(request.path)
            pos1 = uri.find('PartitionKey=\'')
            pos2 = uri.find('\',', pos1)
            if pos1 == -1 or pos2 == -1:
                raise WindowsAzureError(_ERROR_CANNOT_FIND_PARTITION_KEY)
            return uri[pos1 + len('PartitionKey=\''):pos2]

    def get_request_row_key(self, request):
        '''
        Extracts RowKey from request.body if it is a POST request or from
        request.path if it is not a POST request. Only insert operation request
        is a POST request and the Rowkey is in the request body.

        request:
            the request to insert, update or delete entity
        '''
        if request.method == 'POST':
            doc = ETree.fromstring(request.body)
            row_key = doc.find('./atom:content/m:properties/d:RowKey', _etree_entity_feed_namespaces)
            if row_key is None:
                raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)
            return _get_etree_text(row_key)
        else:
            uri = url_unquote(request.path)
            pos1 = uri.find('RowKey=\'')
            pos2 = uri.find('\')', pos1)
            if pos1 == -1 or pos2 == -1:
                raise WindowsAzureError(_ERROR_CANNOT_FIND_ROW_KEY)
            row_key = uri[pos1 + len('RowKey=\''):pos2]
            return row_key

    def validate_request_table(self, request):
        '''
        Validates that all requests have the same table name. Set the table
        name if it is the first request for the batch operation.

        request:
            the request to insert, update or delete entity
        '''
        if self.batch_table:
            if self.get_request_table(request) != self.batch_table:
                raise WindowsAzureError(_ERROR_INCORRECT_TABLE_IN_BATCH)
        else:
            self.batch_table = self.get_request_table(request)

    def validate_request_partition_key(self, request):
        '''
        Validates that all requests have the same PartitiionKey. Set the
        PartitionKey if it is the first request for the batch operation.

        request:
            the request to insert, update or delete entity
        '''
        if self.batch_partition_key:
            if self.get_request_partition_key(request) != \
                self.batch_partition_key:
                raise WindowsAzureError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
        else:
            self.batch_partition_key = self.get_request_partition_key(request)

    def validate_request_row_key(self, request):
        '''
        Validates that all requests have the different RowKey and adds RowKey
        to existing RowKey list.

        request:
            the request to insert, update or delete entity
        '''
        if self.batch_row_keys:
            if self.get_request_row_key(request) in self.batch_row_keys:
                raise WindowsAzureError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
        else:
            self.batch_row_keys.append(self.get_request_row_key(request))

    def begin_batch(self):
        '''
        Starts the batch operation. Intializes the batch variables

        is_batch:
            batch operation flag.
        batch_table:
            the table name of the batch operation
        batch_partition_key:
            the PartitionKey of the batch requests.
        batch_row_keys:
            the RowKey list of adding requests.
        batch_requests:
            the list of the requests.
        '''
        self.is_batch = True
        self.batch_table = ''
        self.batch_partition_key = ''
        self.batch_row_keys = []
        self.batch_requests = []

    def insert_request_to_batch(self, request):
        '''
        Adds request to batch operation.

        request:
            the request to insert, update or delete entity
        '''
        self.validate_request_table(request)
        self.validate_request_partition_key(request)
        self.validate_request_row_key(request)
        self.batch_requests.append(request)

    def commit_batch(self):
        ''' Resets batch flag and commits the batch requests. '''
        if self.is_batch:
            self.is_batch = False
            self.commit_batch_requests()

    def commit_batch_requests(self):
        ''' Commits the batch requests as one multipart/mixed POST. '''

        batch_boundary = b'batch_' + _new_boundary()
        changeset_boundary = b'changeset_' + _new_boundary()

        # Commits batch only the requests list is not empty.
        if self.batch_requests:
            request = HTTPRequest()
            request.method = 'POST'
            request.host = self.batch_requests[0].host
            request.path = '/$batch'
            request.headers = [
                ('Content-Type', 'multipart/mixed; boundary=' + \
                    batch_boundary.decode('utf-8')),
                ('Accept', 'application/atom+xml,application/xml'),
                ('Accept-Charset', 'UTF-8')]

            request.body = b'--' + batch_boundary + b'\n'
            request.body += b'Content-Type: multipart/mixed; boundary='
            request.body += changeset_boundary + b'\n\n'

            content_id = 1

            # Adds each request body to the POST data.
            for batch_request in self.batch_requests:
                request.body += b'--' + changeset_boundary + b'\n'
                request.body += b'Content-Type: application/http\n'
                request.body += b'Content-Transfer-Encoding: binary\n\n'

                request.body += batch_request.method.encode('utf-8')
                request.body += b' http://'
                request.body += batch_request.host.encode('utf-8')
                request.body += batch_request.path.encode('utf-8')
                request.body += b' HTTP/1.1\n'
                request.body += b'Content-ID: '
                request.body += str(content_id).encode('utf-8') + b'\n'
                content_id += 1

                # Add different headers for different type requests.
                if not batch_request.method == 'DELETE':
                    request.body += \
                        b'Content-Type: application/atom+xml;type=entry\n'
                    for name, value in batch_request.headers:
                        if name == 'If-Match':
                            request.body += name.encode('utf-8') + b': '
                            request.body += value.encode('utf-8') + b'\n'
                            break
                    request.body += b'Content-Length: '
                    request.body += str(len(batch_request.body)).encode('utf-8')
                    request.body += b'\n\n'
                    request.body += batch_request.body + b'\n'
                else:
                    for name, value in batch_request.headers:
                        # If-Match should be already included in
                        # batch_request.headers, but in case it is missing,
                        # just add it.
                        if name == 'If-Match':
                            request.body += name.encode('utf-8') + b': '
                            request.body += value.encode('utf-8') + b'\n\n'
                            break
                    else:
                        request.body += b'If-Match: *\n\n'

            request.body += b'--' + changeset_boundary + b'--' + b'\n'
            request.body += b'--' + batch_boundary + b'--'

            request.path, request.query = _update_request_uri_query(request)
            request.headers = _update_storage_table_header(request)
            auth = _sign_storage_table_request(request,
                                               self.account_name,
                                               self.account_key)
            request.headers.append(('Authorization', auth))

            # Submit the whole request as batch request.
            response = self.perform_request(request)
            if response.status >= 300:
                # BUG FIX: was "self.respheader" -- no such attribute is set
                # anywhere in this class; use the failed response's headers.
                raise HTTPError(response.status,
                                _ERROR_BATCH_COMMIT_FAIL,
                                response.headers,
                                response.body)

            # http://www.odata.org/documentation/odata-version-2-0/batch-processing/
            # The body of a ChangeSet response is either a response for all the
            # successfully processed change request within the ChangeSet,
            # formatted exactly as it would have appeared outside of a batch,
            # or a single response indicating a failure of the entire ChangeSet.
            responses = self._parse_batch_response(response.body)
            if responses and responses[0].status >= 300:
                self._report_batch_error(responses[0])

    def cancel_batch(self):
        ''' Resets the batch flag. '''
        self.is_batch = False

    def _parse_batch_response(self, body):
        ''' Splits the multipart body into per-changeset HTTPResponses. '''
        parts = body.split(b'--changesetresponse_')

        responses = []
        for part in parts:
            httpLocation = part.find(b'HTTP/')
            if httpLocation > 0:
                response = self._parse_batch_response_part(part[httpLocation:])
                responses.append(response)

        return responses

    def _parse_batch_response_part(self, part):
        ''' Parses one embedded HTTP response (status line, headers, body). '''
        lines = part.splitlines()

        # First line is the HTTP status/reason
        status, _, reason = lines[0].partition(b' ')[2].partition(b' ')

        # Followed by headers and body
        headers = []
        body = b''
        isBody = False
        for line in lines[1:]:
            if line == b'' and not isBody:
                isBody = True
            elif isBody:
                body += line
            else:
                headerName, _, headerVal = line.partition(b':')
                headers.append((headerName.lower(), headerVal))

        return HTTPResponse(int(status), reason.strip(), headers, body)

    def _report_batch_error(self, response):
        ''' Raises WindowsAzureBatchOperationError from an error response. '''
        doc = ETree.fromstring(response.body)

        code_element = doc.find('./m:code', _etree_entity_feed_namespaces)
        code = _get_etree_text(code_element) if code_element is not None else ''

        message_element = doc.find('./m:message', _etree_entity_feed_namespaces)
        message = _get_etree_text(message_element) if message_element is not None else ''

        raise WindowsAzureBatchOperationError(message, code)
| apache-2.0 |
dch312/numpy | numpy/testing/utils.py | 2 | 58176 | """
Utility function to facilitate testing.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import re
import operator
import warnings
from functools import partial
import shutil
import contextlib
from tempfile import mkdtemp
from .nosetester import import_nose
from numpy.core import float32, empty, arange, array_repr, ndarray
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
# Public API of numpy.testing.utils.
__all__ = ['assert_equal', 'assert_almost_equal', 'assert_approx_equal',
           'assert_array_equal', 'assert_array_less', 'assert_string_equal',
           'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
           'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
           'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
           'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
           'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
           'assert_allclose', 'IgnoreException']
# Module-level verbosity flag (0 = quiet); presumably consumed by the
# runner helpers exported above (runstring/measure) -- confirm before use.
verbose = 0
def assert_(val, msg=''):
    """
    Assert that works in release mode.
    Accepts callable msg to allow deferring evaluation until failure.

    The Python built-in ``assert`` does not work when executing code in
    optimized mode (the ``-O`` flag) - no byte-code is generated for it.
    For documentation on usage, refer to the Python documentation.

    """
    if val:
        return
    # Build the message lazily: msg may be a zero-argument callable whose
    # evaluation is deferred until the assertion actually fails.
    try:
        smsg = msg()
    except TypeError:
        smsg = msg
    raise AssertionError(smsg)
def gisnan(x):
    """like isnan, but always raise an error if type not supported instead of
    returning a TypeError object.

    Notes
    -----
    isnan and other ufunc sometimes return a NotImplementedType object instead
    of raising any exception. This function is a wrapper to make sure an
    exception is always raised.

    This should be removed once this problem is solved at the Ufunc level."""
    from numpy.core import isnan
    result = isnan(x)
    # Unsupported input types make the ufunc hand back the NotImplemented
    # singleton instead of raising; convert that into a TypeError.
    if result is NotImplemented:
        raise TypeError("isnan not supported for this type")
    return result
def gisfinite(x):
    """like isfinite, but always raise an error if type not supported instead of
    returning a TypeError object.

    Notes
    -----
    isfinite and other ufunc sometimes return a NotImplementedType object instead
    of raising any exception. This function is a wrapper to make sure an
    exception is always raised.

    This should be removed once this problem is solved at the Ufunc level."""
    from numpy.core import isfinite, errstate
    # Suppress the invalid-value FPE that isfinite can trigger on nan input.
    with errstate(invalid='ignore'):
        result = isfinite(x)
    # Unsupported input types yield the NotImplemented singleton; raise.
    if result is NotImplemented:
        raise TypeError("isfinite not supported for this type")
    return result
def gisinf(x):
    """like isinf, but always raise an error if type not supported instead of
    returning a TypeError object.

    Notes
    -----
    isinf and other ufunc sometimes return a NotImplementedType object instead
    of raising any exception. This function is a wrapper to make sure an
    exception is always raised.

    This should be removed once this problem is solved at the Ufunc level."""
    from numpy.core import isinf, errstate
    # Suppress the invalid-value FPE that isinf can trigger on nan input.
    with errstate(invalid='ignore'):
        result = isinf(x)
    # Unsupported input types yield the NotImplemented singleton; raise.
    if result is NotImplemented:
        raise TypeError("isinf not supported for this type")
    return result
def rand(*args):
    """Returns an array of random numbers with the given shape.

    This only uses the standard library, so it is useful for testing purposes.
    """
    import random
    from numpy.core import zeros, float64
    # Allocate the output, then fill every element through the flat view
    # with stdlib random values in [0, 1).
    out = zeros(args, float64)
    flat = out.flat
    for idx in range(len(flat)):
        flat[idx] = random.random()
    return out
# Platform-specific timing/memory helpers.  On Linux they read /proc; on
# other POSIX platforms jiffies is emulated with wall time and memusage is
# unimplemented; on Windows ('nt') memusage is redefined via win32pdh.
if sys.platform[:5]=='linux':
    def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),
                _load_time=[]):
        """ Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. See man 5 proc. """
        import time
        # Mutable default _load_time deliberately persists across calls: it
        # records first-call time for the wall-clock fallback below.
        if not _load_time:
            _load_time.append(time.time())
        try:
            f=open(_proc_pid_stat, 'r')
            l = f.readline().split(' ')
            f.close()
            # Field 14 (index 13) of /proc/<pid>/stat is utime in jiffies.
            return int(l[13])
        except:
            # /proc unavailable: fall back to elapsed wall time.
            return int(100*(time.time()-_load_time[0]))
    def memusage(_proc_pid_stat = '/proc/%s/stat'%(os.getpid())):
        """ Return virtual memory size in bytes of the running python.
        """
        try:
            f=open(_proc_pid_stat, 'r')
            l = f.readline().split(' ')
            f.close()
            # Field 23 (index 22) of /proc/<pid>/stat is vsize in bytes.
            return int(l[22])
        except:
            return
else:
    # os.getpid is not in all platforms available.
    # Using time is safe but inaccurate, especially when process
    # was suspended or sleeping.
    def jiffies(_load_time=[]):
        """ Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. [Emulation with time.time]. """
        import time
        if not _load_time:
            _load_time.append(time.time())
        return int(100*(time.time()-_load_time[0]))
    def memusage():
        """ Return memory usage of running python. [Not implemented]"""
        raise NotImplementedError
if os.name=='nt':
    # Code "stolen" from enthought/debug/memusage.py
    def GetPerformanceAttributes(object, counter, instance = None,
                                 inum=-1, format = None, machine=None):
        # NOTE: Many counters require 2 samples to give accurate results,
        # including "% Processor Time" (as by definition, at any instant, a
        # thread's CPU usage is either 0 or 100).  To read counters like this,
        # you should copy this function, but keep the counter open, and call
        # CollectQueryData() each time you need to know.
        # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
        # My older explanation for this was that the "AddCounter" process forced
        # the CPU to 100%, but the above makes more sense :)
        import win32pdh
        if format is None: format = win32pdh.PDH_FMT_LONG
        path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter) )
        hq = win32pdh.OpenQuery()
        try:
            hc = win32pdh.AddCounter(hq, path)
            try:
                win32pdh.CollectQueryData(hq)
                type, val = win32pdh.GetFormattedCounterValue(hc, format)
                return val
            finally:
                win32pdh.RemoveCounter(hc)
        finally:
            win32pdh.CloseQuery(hq)
    def memusage(processName="python", instance=0):
        # from win32pdhutil, part of the win32all package
        import win32pdh
        return GetPerformanceAttributes("Process", "Virtual Bytes",
                                        processName, instance,
                                        win32pdh.PDH_FMT_LONG, None)
def build_err_msg(arrays, err_msg, header='Items are not equal:',
                  verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
    """Assemble the multi-line failure message used by the assert_* helpers.

    A short, single-line err_msg is appended to the header line; a longer
    (or multi-line) one goes on its own line.  With verbose=True each
    array's repr is appended, labelled by `names`, truncated to its first
    three lines when very tall.
    """
    lines = ['\n' + header]
    if err_msg:
        fits_on_header = ('\n' not in err_msg
                          and len(err_msg) < 79 - len(header))
        if fits_on_header:
            lines = [lines[0] + ' ' + err_msg]
        else:
            lines.append(err_msg)
    if verbose:
        for idx, item in enumerate(arrays):
            # ndarrays honor the requested precision; anything else repr()s.
            if isinstance(item, ndarray):
                render = partial(array_repr, precision=precision)
            else:
                render = repr
            try:
                rendered = render(item)
            except:
                rendered = '[repr failed]'
            # Truncate very tall reprs to their first three lines.
            if rendered.count('\n') > 3:
                rendered = '\n'.join(rendered.splitlines()[:3]) + '...'
            lines.append(' %s: %s' % (names[idx], rendered))
    return '\n'.join(lines)
def assert_equal(actual,desired,err_msg='',verbose=True):
    """
    Raises an AssertionError if two objects are not equal.

    Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
    check that all elements of these objects are equal. An exception is raised
    at the first conflicting values.

    Parameters
    ----------
    actual : array_like
        The object to check.
    desired : array_like
        The expected object.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal.

    Examples
    --------
    >>> np.testing.assert_equal([4,5], [4,6])
    ...
    <type 'exceptions.AssertionError'>:
    Items are not equal:
    item=1
     ACTUAL: 5
     DESIRED: 6

    """
    # Dicts: same length, same keys, recursively equal values.
    if isinstance(desired, dict):
        if not isinstance(actual, dict) :
            raise AssertionError(repr(type(actual)))
        assert_equal(len(actual), len(desired), err_msg, verbose)
        for k, i in desired.items():
            if k not in actual :
                raise AssertionError(repr(k))
            assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose)
        return
    # Lists/tuples: same length, recursively equal items.
    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
        assert_equal(len(actual), len(desired), err_msg, verbose)
        for k in range(len(desired)):
            assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose)
        return
    from numpy.core import ndarray, isscalar, signbit
    from numpy.lib import iscomplexobj, real, imag
    # Arrays are delegated to assert_array_equal.
    if isinstance(actual, ndarray) or isinstance(desired, ndarray):
        return assert_array_equal(actual, desired, err_msg, verbose)
    msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
    # Handle complex numbers: separate into real/imag to handle
    # nan/inf/negative zero correctly
    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
    try:
        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
    except ValueError:
        usecomplex = False
    if usecomplex:
        if iscomplexobj(actual):
            actualr = real(actual)
            actuali = imag(actual)
        else:
            actualr = actual
            actuali = 0
        if iscomplexobj(desired):
            desiredr = real(desired)
            desiredi = imag(desired)
        else:
            desiredr = desired
            desiredi = 0
        try:
            assert_equal(actualr, desiredr)
            assert_equal(actuali, desiredi)
        except AssertionError:
            raise AssertionError(msg)
    # Inf/nan/negative zero handling
    try:
        # isscalar test to check cases such as [np.nan] != np.nan
        if isscalar(desired) != isscalar(actual):
            raise AssertionError(msg)
        # If one of desired/actual is not finite, handle it specially here:
        # check that both are nan if any is a nan, and test for equality
        # otherwise
        if not (gisfinite(desired) and gisfinite(actual)):
            isdesnan = gisnan(desired)
            isactnan = gisnan(actual)
            if isdesnan or isactnan:
                # nan compares unequal to everything, including itself:
                # treat "both nan" as equal, anything else as a failure.
                if not (isdesnan and isactnan):
                    raise AssertionError(msg)
            else:
                if not desired == actual:
                    raise AssertionError(msg)
            return
        elif desired == 0 and actual == 0:
            # Distinguish +0.0 from -0.0 via the sign bit.
            if not signbit(desired) == signbit(actual):
                raise AssertionError(msg)
    # If TypeError or ValueError raised while using isnan and co, just handle
    # as before
    except (TypeError, ValueError, NotImplementedError):
        pass
    # Explicitly use __eq__ for comparison, ticket #2552
    if not (desired == actual):
        raise AssertionError(msg)
def print_assert_equal(test_string, actual, desired):
    """
    Test if two objects are equal, and print an error message if test fails.

    The test is performed with ``actual == desired``.

    Parameters
    ----------
    test_string : str
        The message supplied to AssertionError.
    actual : object
        The object to test for equality against `desired`.
    desired : object
        The expected result.

    Examples
    --------
    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
    Traceback (most recent call last):
    ...
    AssertionError: Test XYZ of func xyz failed
    ACTUAL:
    [0, 1]
    DESIRED:
    [0, 2]
    """
    import pprint
    # Fast path: nothing to report when the objects compare equal.
    if actual == desired:
        return
    # Build the failure report with pretty-printed representations of both
    # objects, then raise with the accumulated text.
    report = StringIO()
    report.write(test_string)
    report.write(' failed\nACTUAL: \n')
    pprint.pprint(actual, report)
    report.write('DESIRED: \n')
    pprint.pprint(desired, report)
    raise AssertionError(report.getvalue())
def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
    """
    Raises an AssertionError if two items are not equal up to desired
    precision.
    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.
    The test is equivalent to ``abs(desired-actual) < 0.5 * 10**(-decimal)``.
    Given two objects (numbers or ndarrays), check that all elements of these
    objects are almost equal. An exception is raised at conflicting values.
    For ndarrays this delegates to assert_array_almost_equal
    Parameters
    ----------
    actual : array_like
        The object to check.
    desired : array_like
        The expected object.
    decimal : int, optional
        Desired precision, default is 7.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.
    Raises
    ------
    AssertionError
      If actual and desired are not equal up to specified precision.
    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
    Examples
    --------
    >>> import numpy.testing as npt
    >>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
    >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
    ...
    <type 'exceptions.AssertionError'>:
    Items are not equal:
     ACTUAL: 2.3333333333333002
     DESIRED: 2.3333333399999998
    >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
    ...                         np.array([1.0,2.33333334]), decimal=9)
    ...
    <type 'exceptions.AssertionError'>:
    Arrays are not almost equal
    <BLANKLINE>
    (mismatch 50.0%)
     x: array([ 1.        ,  2.33333333])
     y: array([ 1.        ,  2.33333334])
    """
    # Imports are local to keep module import cheap.
    from numpy.core import ndarray
    from numpy.lib import iscomplexobj, real, imag
    # Handle complex numbers: separate into real/imag to handle
    # nan/inf/negative zero correctly
    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
    try:
        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
    except ValueError:
        usecomplex = False
    # Error message is built lazily so the (possibly expensive) repr of the
    # operands is only computed on failure.
    def _build_err_msg():
        header = ('Arrays are not almost equal to %d decimals' % decimal)
        return build_err_msg([actual, desired], err_msg, verbose=verbose,
                             header=header)
    if usecomplex:
        # Compare real and imaginary parts independently; missing parts are
        # treated as 0.
        if iscomplexobj(actual):
            actualr = real(actual)
            actuali = imag(actual)
        else:
            actualr = actual
            actuali = 0
        if iscomplexobj(desired):
            desiredr = real(desired)
            desiredi = imag(desired)
        else:
            desiredr = desired
            desiredi = 0
        # Note: intentionally no return here; control falls through to the
        # generic checks below after the component-wise comparison.
        try:
            assert_almost_equal(actualr, desiredr, decimal=decimal)
            assert_almost_equal(actuali, desiredi, decimal=decimal)
        except AssertionError:
            raise AssertionError(_build_err_msg())
    # Arrays and sequences delegate to the elementwise variant.
    # NOTE(review): ``verbose`` is not forwarded here — confirm whether that
    # is intentional (the delegate then uses its default, True).
    if isinstance(actual, (ndarray, tuple, list)) \
            or isinstance(desired, (ndarray, tuple, list)):
        return assert_array_almost_equal(actual, desired, decimal, err_msg)
    try:
        # If one of desired/actual is not finite, handle it specially here:
        # check that both are nan if any is a nan, and test for equality
        # otherwise
        # (gisfinite/gisnan are exception-tolerant helpers defined elsewhere
        # in this module.)
        if not (gisfinite(desired) and gisfinite(actual)):
            if gisnan(desired) or gisnan(actual):
                if not (gisnan(desired) and gisnan(actual)):
                    raise AssertionError(_build_err_msg())
            else:
                if not desired == actual:
                    raise AssertionError(_build_err_msg())
            return
    except (NotImplementedError, TypeError):
        pass
    # Final scalar test: abs difference rounded to ``decimal`` places must be
    # exactly zero, i.e. abs(desired - actual) < 0.5 * 10**(-decimal).
    if round(abs(desired - actual), decimal) != 0 :
        raise AssertionError(_build_err_msg())
def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
    """
    Raises an AssertionError if two items are not equal up to significant
    digits.
    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.
    Given two numbers, check that they are approximately equal.
    Approximately equal is defined as the number of significant digits
    that agree.
    Parameters
    ----------
    actual : scalar
        The object to check.
    desired : scalar
        The expected object.
    significant : int, optional
        Desired precision, default is 7.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.
    Raises
    ------
    AssertionError
      If actual and desired are not equal up to specified precision.
    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
    Examples
    --------
    >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
                                       significant=8)
    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
                                       significant=8)
    ...
    <type 'exceptions.AssertionError'>:
    Items are not equal to 8 significant digits:
     ACTUAL: 1.234567e-021
     DESIRED: 1.2345672000000001e-021
    the evaluated condition that raises the exception is
    >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
    True
    """
    import numpy as np
    # Both operands are coerced to plain Python floats; exact equality
    # short-circuits before any scaling.
    (actual, desired) = map(float, (actual, desired))
    if desired==actual:
        return
    # Normalized the numbers to be in range (-10.0,10.0)
    # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
    # errstate suppresses the invalid-value warning log10 emits for 0/nan/inf
    # operands; those cases are caught by the finiteness checks below.
    with np.errstate(invalid='ignore'):
        scale = 0.5*(np.abs(desired) + np.abs(actual))
        scale = np.power(10, np.floor(np.log10(scale)))
    # Guard against scale == 0 (both operands zero); plain-float division by
    # zero raises ZeroDivisionError.
    try:
        sc_desired = desired/scale
    except ZeroDivisionError:
        sc_desired = 0.0
    try:
        sc_actual = actual/scale
    except ZeroDivisionError:
        sc_actual = 0.0
    msg = build_err_msg([actual, desired], err_msg,
                header='Items are not equal to %d significant digits:' %
                                 significant,
                verbose=verbose)
    try:
        # If one of desired/actual is not finite, handle it specially here:
        # check that both are nan if any is a nan, and test for equality
        # otherwise
        # (gisfinite/gisnan are helpers defined elsewhere in this module.)
        if not (gisfinite(desired) and gisfinite(actual)):
            if gisnan(desired) or gisnan(actual):
                if not (gisnan(desired) and gisnan(actual)):
                    raise AssertionError(msg)
            else:
                if not desired == actual:
                    raise AssertionError(msg)
            return
    except (TypeError, NotImplementedError):
        pass
    # Compare the scaled values: they agree to ``significant`` digits when
    # their difference is below 10**-(significant-1).
    if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)) :
        raise AssertionError(msg)
def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
                         header='', precision=6):
    # Shared engine behind assert_array_equal / assert_array_less /
    # assert_array_almost_equal / assert_allclose: validates shapes, checks
    # that nan/inf occur at the same positions in both operands, applies
    # ``comparison`` elementwise on the remaining values, and reports a
    # mismatch percentage on failure.
    from numpy.core import array, isnan, isinf, any, all, inf
    # copy=False: wrap without copying; subok=True preserves ndarray
    # subclasses (e.g. masked arrays).
    x = array(x, copy=False, subok=True)
    y = array(y, copy=False, subok=True)
    def isnumber(x):
        # True for bool/int/float/complex dtypes (dtype character codes);
        # object/string arrays skip the nan/inf handling below.
        return x.dtype.char in '?bhilqpBHILQPefdgFDG'
    def chk_same_position(x_id, y_id, hasval='nan'):
        """Handling nan/inf: check that x and y have the nan/inf at the same
        locations."""
        try:
            assert_array_equal(x_id, y_id)
        except AssertionError:
            msg = build_err_msg([x, y],
                                err_msg + '\nx and y %s location mismatch:' \
                                % (hasval), verbose=verbose, header=header,
                                names=('x', 'y'), precision=precision)
            raise AssertionError(msg)
    try:
        # 0-d operands are allowed to broadcast against any shape.
        cond = (x.shape==() or y.shape==()) or x.shape == y.shape
        if not cond:
            msg = build_err_msg([x, y],
                                err_msg
                                + '\n(shapes %s, %s mismatch)' % (x.shape,
                                                                  y.shape),
                                verbose=verbose, header=header,
                                names=('x', 'y'), precision=precision)
            if not cond :
                raise AssertionError(msg)
        if isnumber(x) and isnumber(y):
            x_isnan, y_isnan = isnan(x), isnan(y)
            x_isinf, y_isinf = isinf(x), isinf(y)
            # Validate that the special values are in the same place
            if any(x_isnan) or any(y_isnan):
                chk_same_position(x_isnan, y_isnan, hasval='nan')
            if any(x_isinf) or any(y_isinf):
                # Check +inf and -inf separately, since they are different
                chk_same_position(x == +inf, y == +inf, hasval='+inf')
                chk_same_position(x == -inf, y == -inf, hasval='-inf')
            # Combine all the special values
            x_id, y_id = x_isnan, y_isnan
            x_id |= x_isinf
            y_id |= y_isinf
            # Only do the comparison if actual values are left
            if all(x_id):
                return
            # chk_same_position above guarantees x_id == y_id elementwise,
            # so masking x with ~x_id and y with ~y_id is consistent.
            if any(x_id):
                val = comparison(x[~x_id], y[~y_id])
            else:
                val = comparison(x, y)
        else:
            val = comparison(x, y)
        # ``comparison`` may yield a plain bool (scalar operands) or a bool
        # array; normalize to a flat Python list for the mismatch count.
        if isinstance(val, bool):
            cond = val
            reduced = [0]
        else:
            reduced = val.ravel()
            cond = reduced.all()
            reduced = reduced.tolist()
        if not cond:
            # Percentage of elements where ``comparison`` returned False.
            match = 100-100.0*reduced.count(1)/len(reduced)
            msg = build_err_msg([x, y],
                                err_msg
                                + '\n(mismatch %s%%)' % (match,),
                                verbose=verbose, header=header,
                                names=('x', 'y'), precision=precision)
            if not cond :
                raise AssertionError(msg)
    except ValueError as e:
        # Unexpected ValueError (e.g. from an ambiguous array truth value):
        # re-raise with the full traceback embedded in the error header.
        import traceback
        efmt = traceback.format_exc()
        header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
        msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
                            names=('x', 'y'), precision=precision)
        raise ValueError(msg)
def assert_array_equal(x, y, err_msg='', verbose=True):
    """
    Raises an AssertionError if two array_like objects are not equal.

    The shapes must match (0-d operands broadcast) and every element must
    compare equal. Unlike ordinary numpy comparison semantics, NaNs count
    as equal when both objects have them in the same positions. The usual
    caution when comparing floating point numbers for exact equality still
    applies.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired objects are not equal.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    The first assert does not raise an exception:

    >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
    ...                               [np.exp(0),2.33333, np.nan])

    For floating point values that merely agree approximately, use
    `assert_allclose` or one of the nulp (number of floating point values)
    functions instead:

    >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
    ...                            [1, np.sqrt(np.pi)**2, np.nan],
    ...                            rtol=1e-10, atol=0)
    """
    # All the work happens in the shared comparison engine; ``operator.eq``
    # is the elementwise predicate.
    assert_array_compare(operator.eq, x, y,
                         err_msg=err_msg, verbose=verbose,
                         header='Arrays are not equal')
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
    """
    Raises an AssertionError if two objects are not equal up to desired
    precision.
    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.
    The test verifies identical shapes and verifies values with
    ``abs(desired-actual) < 0.5 * 10**(-decimal)``.
    Given two array_like objects, check that the shape is equal and all
    elements of these objects are almost equal. An exception is raised at
    shape mismatch or conflicting values. In contrast to the standard usage
    in numpy, NaNs are compared like numbers, no assertion is raised if
    both objects have NaNs in the same positions.
    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    decimal : int, optional
        Desired precision, default is 6.
    err_msg : str, optional
      The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.
    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.
    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
    Examples
    --------
    the first assert does not raise an exception
    >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
                                             [1.0,2.333,np.nan])
    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
    ...                                      [1.0,2.33339,np.nan], decimal=5)
    ...
    <type 'exceptions.AssertionError'>:
    AssertionError:
    Arrays are not almost equal
    <BLANKLINE>
    (mismatch 50.0%)
     x: array([ 1.     ,  2.33333,      NaN])
     y: array([ 1.     ,  2.33339,      NaN])
    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
    ...                                      [1.0,2.33333, 5], decimal=5)
    <type 'exceptions.ValueError'>:
    ValueError:
    Arrays are not almost equal
     x: array([ 1.     ,  2.33333,      NaN])
     y: array([ 1.     ,  2.33333,  5.     ])
    """
    from numpy.core import around, number, float_, result_type, array
    from numpy.core.numerictypes import issubdtype
    from numpy.core.fromnumeric import any as npany
    # Elementwise predicate handed to assert_array_compare: True where the
    # two values agree to ``decimal`` places.
    def compare(x, y):
        try:
            # NOTE(review): for numeric input assert_array_compare strips
            # nan/inf before calling us, so this branch appears reachable
            # only for non-numeric (e.g. object) arrays — confirm; for
            # multi-element bool arrays ``not xinfid == yinfid`` would be
            # ambiguous.
            if npany(gisinf(x)) or npany( gisinf(y)):
                xinfid = gisinf(x)
                yinfid = gisinf(y)
                if not xinfid == yinfid:
                    return False
                # if one item, x and y is +- inf
                if x.size == y.size == 1:
                    return x == y
                x = x[~xinfid]
                y = y[~yinfid]
        except (TypeError, NotImplementedError):
            pass
        # make sure y is an inexact type to avoid abs(MIN_INT); will cause
        # casting of x later.
        dtype = result_type(y, 1.)
        y = array(y, dtype=dtype, copy=False, subok=True)
        z = abs(x-y)
        if not issubdtype(z.dtype, number):
            z = z.astype(float_) # handle object arrays
        # Rounding the difference implements the documented criterion
        # abs(desired - actual) < 0.5 * 10**(-decimal).
        return around(z, decimal) <= 10.0**(-decimal)
    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
             header=('Arrays are not almost equal to %d decimals' % decimal),
             precision=decimal)
def assert_array_less(x, y, err_msg='', verbose=True):
    """
    Raises an AssertionError unless ``x < y`` holds elementwise.

    Given two array_like objects, check that the shape is equal and every
    element of the first object is strictly smaller than the corresponding
    element of the second. An exception is raised at shape mismatch or
    incorrectly ordered values; zero-dimensional operands broadcast. As
    elsewhere in this module, NaNs are compared positionally: no assertion
    is raised when both objects have NaNs in the same places.

    Parameters
    ----------
    x : array_like
        The smaller object to check.
    y : array_like
        The larger object to compare.
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired objects are not equal.

    See Also
    --------
    assert_array_equal: tests objects for equality
    assert_array_almost_equal: test objects for equality up to precision

    Examples
    --------
    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
    >>> np.testing.assert_array_less([1.0, 4.0], 3)
    ...
    <type 'exceptions.ValueError'>:
    Arrays are not less-ordered
    (mismatch 50.0%)
     x: array([ 1.,  4.])
     y: array(3)
    """
    # Strict elementwise ``<`` via the shared comparison engine.
    assert_array_compare(operator.lt, x, y,
                         err_msg=err_msg,
                         verbose=verbose,
                         header='Arrays are not less-ordered')
def runstring(astr, dict):
    # Execute the code string ``astr`` using ``dict`` as the globals
    # namespace.  (The parameter name intentionally shadows the ``dict``
    # builtin; it is kept for backward compatibility with callers that pass
    # it by keyword.)
    exec(astr, dict)
def assert_string_equal(actual, desired):
    """
    Test if two strings are equal.
    If the given strings are equal, `assert_string_equal` does nothing.
    If they are not equal, an AssertionError is raised, and the diff
    between the strings is shown.
    Parameters
    ----------
    actual : str
        The string to test for equality against the expected string.
    desired : str
        The expected string.
    Examples
    --------
    >>> np.testing.assert_string_equal('abc', 'abc')
    >>> np.testing.assert_string_equal('abc', 'abcd')
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    ...
    AssertionError: Differences in strings:
    - abc+ abcd?    +
    """
    # delay import of difflib to reduce startup time
    import difflib
    if not isinstance(actual, str) :
        raise AssertionError(repr(type(actual)))
    if not isinstance(desired, str):
        raise AssertionError(repr(type(desired)))
    # Fast path — NOTE(review): ``desired`` is interpreted as a regular
    # expression here (and again below), so regex metacharacters in it are
    # significant; confirm this is the intended contract.
    if re.match(r'\A'+desired+r'\Z', actual, re.M):
        return
    # Walk difflib.Differ output: '  ' lines are common context, '- '/'+ '
    # carry differing lines, '? ' annotates the preceding line.
    diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
    diff_list = []
    while diff:
        d1 = diff.pop(0)
        if d1.startswith('  '):
            continue
        if d1.startswith('- '):
            # Collect the '- ' line plus its paired '+ ' line and any '? '
            # annotations; Differ is relied on to emit them in this order.
            l = [d1]
            d2 = diff.pop(0)
            if d2.startswith('? '):
                l.append(d2)
                d2 = diff.pop(0)
            if not d2.startswith('+ ') :
                raise AssertionError(repr(d2))
            l.append(d2)
            d3 = diff.pop(0)
            if d3.startswith('? '):
                l.append(d3)
            else:
                diff.insert(0, d3)
            # Pair matches when the desired line (as a regex) matches the
            # actual line — then it is not reported as a difference.
            if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
                continue
            diff_list.extend(l)
            continue
        # Any other prefix (e.g. a bare '+ ' insertion) is surfaced as-is.
        raise AssertionError(repr(d1))
    if not diff_list:
        return
    msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
    if actual != desired :
        raise AssertionError(msg)
def rundocs(filename=None, raise_on_error=True):
    """
    Run doctests found in the given file.
    By default `rundocs` raises an AssertionError on failure.
    Parameters
    ----------
    filename : str
        The path to the file for which the doctests are run.
    raise_on_error : bool
        Whether to raise an AssertionError when a doctest fails. Default is
        True.
    Notes
    -----
    The doctests can be run by the user/developer by adding the ``doctests``
    argument to the ``test()`` call. For example, to run all tests (including
    doctests) for `numpy.lib`:
    >>> np.lib.test(doctests=True) #doctest: +SKIP
    """
    # NOTE(review): ``imp`` is deprecated on modern Pythons — confirm
    # whether this helper still needs to support them.
    import doctest, imp
    # Default to the caller's own file when no filename is given.
    if filename is None:
        f = sys._getframe(1)
        filename = f.f_globals['__file__']
    name = os.path.splitext(os.path.basename(filename))[0]
    path = [os.path.dirname(filename)]
    # Import the target as a fresh module so its doctests can be collected.
    file, pathname, description = imp.find_module(name, path)
    try:
        m = imp.load_module(name, file, pathname, description)
    finally:
        file.close()
    tests = doctest.DocTestFinder().find(m)
    runner = doctest.DocTestRunner(verbose=False)
    msg = []
    # With raise_on_error the runner's output is captured into ``msg`` so
    # it can be embedded in the AssertionError below.
    if raise_on_error:
        out = lambda s: msg.append(s)
    else:
        out = None
    for test in tests:
        runner.run(test, out=out)
    if runner.failures > 0 and raise_on_error:
        raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
def raises(*args,**kwargs):
    # Thin pass-through to ``nose.tools.raises``.  nose is imported lazily
    # (via import_nose, defined elsewhere in this module) so it remains an
    # optional dependency.
    nose = import_nose()
    return nose.tools.raises(*args,**kwargs)
def assert_raises(*args,**kwargs):
    """
    assert_raises(exception_class, callable, *args, **kwargs)
    Fail unless an exception of class exception_class is thrown
    by callable when invoked with arguments args and keyword
    arguments kwargs. If a different type of exception is
    thrown, it will not be caught, and the test case will be
    deemed to have suffered an error, exactly as for an
    unexpected exception.
    """
    # Lazy nose import keeps nose an optional dependency of this module.
    nose = import_nose()
    return nose.tools.assert_raises(*args,**kwargs)
# Module-level cache for the implementation selected by assert_raises_regex
# below; resolved once, on first use.
assert_raises_regex_impl = None
def assert_raises_regex(exception_class, expected_regexp,
                        callable_obj=None, *args, **kwargs):
    """
    Fail unless an exception of class exception_class and with message that
    matches expected_regexp is thrown by callable when invoked with arguments
    args and keyword arguments kwargs.
    Name of this function adheres to Python 3.2+ reference, but should work in
    all versions down to 2.6.
    """
    nose = import_nose()
    global assert_raises_regex_impl
    # Probe nose for the best available implementation, newest name first;
    # the chosen callable is cached for subsequent calls.
    if assert_raises_regex_impl is None:
        try:
            # Python 3.2+
            assert_raises_regex_impl = nose.tools.assert_raises_regex
        except AttributeError:
            try:
                # 2.7+
                assert_raises_regex_impl = nose.tools.assert_raises_regexp
            except AttributeError:
                # 2.6
                # This class is copied from Python2.7 stdlib almost verbatim
                class _AssertRaisesContext(object):
                    """A context manager used to implement TestCase.assertRaises* methods."""
                    def __init__(self, expected, expected_regexp=None):
                        self.expected = expected
                        self.expected_regexp = expected_regexp
                    def failureException(self, msg):
                        return AssertionError(msg)
                    def __enter__(self):
                        return self
                    def __exit__(self, exc_type, exc_value, tb):
                        # No exception raised inside the block: that is a
                        # failure for assertRaises-style checks.
                        if exc_type is None:
                            try:
                                exc_name = self.expected.__name__
                            except AttributeError:
                                exc_name = str(self.expected)
                            raise self.failureException(
                                "{0} not raised".format(exc_name))
                        if not issubclass(exc_type, self.expected):
                            # let unexpected exceptions pass through
                            return False
                        self.exception = exc_value # store for later retrieval
                        if self.expected_regexp is None:
                            return True
                        expected_regexp = self.expected_regexp
                        # NOTE(review): ``basestring`` exists only on
                        # Python 2; this 2.6 fallback would raise NameError
                        # on Python 3 — presumably unreachable there since
                        # the probes above succeed. Confirm.
                        if isinstance(expected_regexp, basestring):
                            expected_regexp = re.compile(expected_regexp)
                        if not expected_regexp.search(str(exc_value)):
                            raise self.failureException(
                                '"%s" does not match "%s"' %
                                (expected_regexp.pattern, str(exc_value)))
                        return True
                def impl(cls, regex, callable_obj, *a, **kw):
                    # With no callable, behave as a context manager.
                    mgr = _AssertRaisesContext(cls, regex)
                    if callable_obj is None:
                        return mgr
                    with mgr:
                        callable_obj(*a, **kw)
                assert_raises_regex_impl = impl
    return assert_raises_regex_impl(exception_class, expected_regexp,
                                    callable_obj, *args, **kwargs)
def decorate_methods(cls, decorator, testmatch=None):
"""
Apply a decorator to all methods in a class matching a regular expression.
The given decorator is applied to all public methods of `cls` that are
matched by the regular expression `testmatch`
(``testmatch.search(methodname)``). Methods that are private, i.e. start
with an underscore, are ignored.
Parameters
----------
cls : class
Class whose methods to decorate.
decorator : function
Decorator to apply to methods
testmatch : compiled regexp or str, optional
The regular expression. Default value is None, in which case the
nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
is used.
If `testmatch` is a string, it is compiled to a regular expression
first.
"""
if testmatch is None:
testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
else:
testmatch = re.compile(testmatch)
cls_attr = cls.__dict__
# delayed import to reduce startup time
from inspect import isfunction
methods = [_m for _m in cls_attr.values() if isfunction(_m)]
for function in methods:
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
continue
if testmatch.search(funcname) and not funcname.startswith('_'):
setattr(cls, funcname, decorator(function))
return
def measure(code_str,times=1,label=None):
    """
    Return elapsed time for executing code in the namespace of the caller.
    The supplied code string is compiled with the Python builtin ``compile``.
    The precision of the timing is 10 milli-seconds. If the code will execute
    fast on this timescale, it can be executed many times to get reasonable
    timing accuracy.
    Parameters
    ----------
    code_str : str
        The code to be timed.
    times : int, optional
        The number of times the code is executed. Default is 1. The code is
        only compiled once.
    label : str, optional
        A label to identify `code_str` with. This is passed into ``compile``
        as the second argument (for run-time error messages).
    Returns
    -------
    elapsed : float
        Total elapsed time in seconds for executing `code_str` `times` times.
    Examples
    --------
    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
    ...                            times=times)
    >>> print "Time for a single execution : ", etime / times, "s"
    Time for a single execution :  0.005 s
    """
    # Run the snippet in the caller's namespaces so it can use the caller's
    # variables and imports.
    frame = sys._getframe(1)
    locs, globs = frame.f_locals, frame.f_globals
    # ``label`` is embedded in the pseudo-filename so tracebacks from the
    # snippet identify the measurement.
    code = compile(code_str,
                   'Test name: %s ' % label,
                   'exec')
    i = 0
    # jiffies (defined elsewhere in this module) ticks in 1/100ths of a
    # second, hence the 0.01 conversion factor below.
    elapsed = jiffies()
    while i < times:
        i += 1
        exec(code, globs, locs)
    elapsed = jiffies() - elapsed
    return 0.01*elapsed
def _assert_valid_refcount(op):
"""
Check that ufuncs don't mishandle refcount of object `1`.
Used in a few regression tests.
"""
import numpy as np
a = np.arange(100 * 100)
b = np.arange(100*100).reshape(100, 100)
c = b
i = 1
rc = sys.getrefcount(i)
for j in range(15):
d = op(b, c)
assert_(sys.getrefcount(i) >= rc)
def assert_allclose(actual, desired, rtol=1e-7, atol=0,
                    err_msg='', verbose=True):
    """
    Raises an AssertionError if two objects are not equal up to desired
    tolerance.
    The test is equivalent to ``allclose(actual, desired, rtol, atol)``.
    It compares the difference between `actual` and `desired` to
    ``atol + rtol * abs(desired)``.
    .. versionadded:: 1.5.0
    Parameters
    ----------
    actual : array_like
        Array obtained.
    desired : array_like
        Array desired.
    rtol : float, optional
        Relative tolerance.
    atol : float, optional
        Absolute tolerance.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.
    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.
    See Also
    --------
    assert_array_almost_equal_nulp, assert_array_max_ulp
    Examples
    --------
    >>> x = [1e-5, 1e-3, 1e-1]
    >>> y = np.arccos(np.cos(x))
    >>> assert_allclose(x, y, rtol=1e-5, atol=0)
    """
    import numpy as np
    # Elementwise closeness predicate for assert_array_compare.
    # NOTE(review): relies on the private helper
    # ``np.core.numeric._allclose_points`` — fragile across numpy versions;
    # confirm it exists in the supported range.
    def compare(x, y):
        return np.core.numeric._allclose_points(x, y, rtol=rtol, atol=atol)
    # asanyarray preserves ndarray subclasses in the error display.
    actual, desired = np.asanyarray(actual), np.asanyarray(desired)
    header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
    assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
                         verbose=verbose, header=header)
def assert_array_almost_equal_nulp(x, y, nulp=1):
    """
    Compare two arrays relatively to their spacing.

    A fairly robust way to compare arrays whose amplitude varies: the
    assertion holds when, elementwise,
    ``abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))``.

    Parameters
    ----------
    x, y : array_like
        Input arrays.
    nulp : int, optional
        The maximum number of unit in the last place for tolerance (see Notes).
        Default is 1.

    Returns
    -------
    None

    Raises
    ------
    AssertionError
        If the spacing between `x` and `y` for one or more elements is larger
        than `nulp`.

    See Also
    --------
    assert_array_max_ulp : Check that all items of arrays differ in at most
        N Units in the Last Place.
    spacing : Return the distance between x and the nearest adjacent number.

    Examples
    --------
    >>> x = np.array([1., 1e-10, 1e-20])
    >>> eps = np.finfo(x.dtype).eps
    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
    Traceback (most recent call last):
      ...
    AssertionError: X and Y are not equal to 1 ULP (max is 2)
    """
    import numpy as np
    abs_x, abs_y = np.abs(x), np.abs(y)
    # Tolerance: nulp ULPs at the larger of the two magnitudes.
    tolerance = nulp * np.spacing(np.where(abs_x > abs_y, abs_x, abs_y))
    if np.all(np.abs(x - y) <= tolerance):
        return
    # Failure: for real input, report the worst observed ULP distance.
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        msg = "X and Y are not equal to %d ULP" % nulp
    else:
        max_nulp = np.max(nulp_diff(x, y))
        msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
    raise AssertionError(msg)
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
    """
    Check that all items of arrays differ in at most N Units in the Last Place.

    Parameters
    ----------
    a, b : array_like
        Input arrays to be compared.
    maxulp : int, optional
        The maximum number of units in the last place that elements of `a` and
        `b` can differ. Default is 1.
    dtype : dtype, optional
        Data-type to convert `a` and `b` to if given. Default is None.

    Returns
    -------
    ret : ndarray
        Array containing number of representable floating point numbers between
        items in `a` and `b`.

    Raises
    ------
    AssertionError
        If one or more elements differ by more than `maxulp`.

    See Also
    --------
    assert_array_almost_equal_nulp : Compare two arrays relatively to their
        spacing.

    Examples
    --------
    >>> a = np.linspace(0., 1., 100)
    >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
    """
    import numpy as np
    # nulp_diff (defined elsewhere in this module) yields the elementwise
    # count of representable floats between a and b.
    ulp_distances = nulp_diff(a, b, dtype)
    if not np.all(ulp_distances <= maxulp):
        raise AssertionError("Arrays are not almost equal up to %g ULP" %
                             maxulp)
    return ulp_distances
def nulp_diff(x, y, dtype=None):
    """For each item in x and y, return the number of representable floating
    points between them.

    Parameters
    ----------
    x : array_like
        first input array
    y : array_like
        second input array
    dtype : dtype, optional
        Data-type to convert both inputs to before comparing. If omitted,
        their common type is used.

    Returns
    -------
    nulp : array_like
        number of representable floating point numbers between each item in x
        and y.

    Examples
    --------
    # By definition, epsilon is the smallest number such as 1 + eps != 1, so
    # there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)
    1.0
    """
    import numpy as np
    # Honor an explicit dtype; otherwise let numpy infer one per operand.
    if dtype:
        x, y = np.array(x, dtype=dtype), np.array(y, dtype=dtype)
    else:
        x, y = np.array(x), np.array(y)
    t = np.common_type(x, y)
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        raise NotImplementedError("_nulp not implemented for complex array")
    # Cast both sides to the common type so their bit patterns are comparable.
    x = np.array(x, dtype=t)
    y = np.array(y, dtype=t)
    if x.shape != y.shape:
        raise ValueError("x and y do not have the same shape: %s - %s" % \
                         (x.shape, y.shape))
    # Distance in ULPs is the difference of the signed-magnitude integer
    # representations (integer_repr is defined elsewhere in this module).
    return np.abs(np.array(integer_repr(x) - integer_repr(y), dtype=t))
def _integer_repr(x, vdt, comp):
# Reinterpret binary representation of the float as sign-magnitude:
# take into account two-complement representation
# See also
# http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
rx = x.view(vdt)
if not (rx.size == 1):
rx[rx < 0] = comp - rx[rx<0]
else:
if rx < 0:
rx = comp - rx
return rx
def integer_repr(x):
    """Return the signed-magnitude interpretation of the binary representation of
    x.

    Only float32 and float64 arrays are supported; anything else raises
    ValueError.
    """
    import numpy as np
    # Dispatch on dtype with early returns; comp is the most negative value
    # of the matching signed integer type.
    if x.dtype == np.float32:
        return _integer_repr(x, np.int32, np.int32(-2**31))
    if x.dtype == np.float64:
        return _integer_repr(x, np.int64, np.int64(-2**63))
    raise ValueError("Unsupported dtype %s" % x.dtype)
# The following two classes are copied from python 2.6 warnings module (context
# manager)
class WarningMessage(object):
    """
    Holds the result of a single showwarning() call.

    Deprecated in 1.8.0

    Notes
    -----
    `WarningMessage` is copied from the Python 2.6 warnings module,
    so it can be used in NumPy with older Python versions.
    """
    # Attribute names mirroring the arguments of warnings.showwarning().
    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")
    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        # Cache the category name (or None) for __str__.
        self._category_name = category.__name__ if category else None
    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                "line : %r}" % (self.message, self._category_name,
                                self.filename, self.lineno, self.line))
class WarningManager(object):
    """
    A context manager that copies and restores the warnings filter upon
    exiting the context.

    The 'record' argument specifies whether warnings should be captured by a
    custom implementation of ``warnings.showwarning()`` and be appended to a
    list returned by the context manager. Otherwise None is returned by the
    context manager. The objects appended to the list are arguments whose
    attributes mirror the arguments to ``showwarning()``.

    The 'module' argument is to specify an alternative module to the module
    named 'warnings' and imported under that name. This argument is only useful
    when testing the warnings module itself.

    Deprecated in 1.8.0

    Notes
    -----
    `WarningManager` is a copy of the ``catch_warnings`` context manager
    from the Python 2.6 warnings module, with slight modifications.
    It is copied so it can be used in NumPy with older Python versions.
    """

    def __init__(self, record=False, module=None):
        self._record = record
        if module is None:
            # Look the warnings module up via sys.modules so that a
            # monkey-patched module object is picked up.
            self._module = sys.modules['warnings']
        else:
            self._module = module
        self._entered = False

    def __enter__(self):
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save the live filter list and swap in a copy, so that any
        # filterwarnings() calls inside the block are undone on exit.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            log = []

            def showwarning(*args, **kwargs):
                log.append(WarningMessage(*args, **kwargs))

            self._module.showwarning = showwarning
            return log
        else:
            return None

    def __exit__(self, *exc_info):
        # BUG FIX: Python invokes __exit__ as __exit__(exc_type, exc_value,
        # traceback); the previous zero-argument signature raised TypeError
        # whenever this class was actually used in a ``with`` statement.
        # ``*exc_info`` keeps direct zero-argument calls working too.
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
def assert_warns(warning_class, func, *args, **kw):
    """
    Fail unless the given callable throws the specified warning.

    A warning of class warning_class should be thrown by the callable when
    invoked with arguments args and keyword arguments kwargs.
    If a different type of warning is thrown, it will not be caught, and the
    test case will be deemed to have suffered an error.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    warning_class : class
        The class defining the warning that `func` is expected to throw.
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    The value returned by `func`.
    """
    # Record every warning emitted while func runs, then inspect the log.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        result = func(*args, **kw)
        if len(caught) == 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)
        if caught[0].category is not warning_class:
            raise AssertionError("First warning for %s is not a " \
                    "%s( is %s)" % (func.__name__, warning_class, caught[0]))
    return result
def assert_no_warnings(func, *args, **kw):
    """
    Fail if the given callable produces any warnings.

    .. versionadded:: 1.7.0

    Parameters
    ----------
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    The value returned by `func`.
    """
    # Capture every warning; a non-empty log means the callable warned.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        result = func(*args, **kw)
        if caught:
            raise AssertionError("Got warnings when calling %s: %s"
                                 % (func.__name__, caught))
    return result
def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
"""
generator producing data with different alignment and offsets
to test simd vectorization
Parameters
----------
dtype : dtype
data type to produce
type : string
'unary': create data for unary operations, creates one input
and output array
'binary': create data for unary operations, creates two input
and output array
max_size : integer
maximum size of data to produce
Returns
-------
if type is 'unary' yields one output, one input array and a message
containing information on the data
if type is 'binary' yields one output array, two input array and a message
containing information on the data
"""
ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
for o in range(3):
for s in range(o + 2, max(o + 3, max_size)):
if type == 'unary':
inp = lambda : arange(s, dtype=dtype)[o:]
out = empty((s,), dtype=dtype)[o:]
yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
yield inp(), inp(), ufmt % (o, o, s, dtype, 'in place')
yield out[1:], inp()[:-1], ufmt % \
(o + 1, o, s - 1, dtype, 'out of place')
yield out[:-1], inp()[1:], ufmt % \
(o, o + 1, s - 1, dtype, 'out of place')
yield inp()[:-1], inp()[1:], ufmt % \
(o, o + 1, s - 1, dtype, 'aliased')
yield inp()[1:], inp()[:-1], ufmt % \
(o + 1, o, s - 1, dtype, 'aliased')
if type == 'binary':
inp1 = lambda :arange(s, dtype=dtype)[o:]
inp2 = lambda :arange(s, dtype=dtype)[o:]
out = empty((s,), dtype=dtype)[o:]
yield out, inp1(), inp2(), bfmt % \
(o, o, o, s, dtype, 'out of place')
yield inp1(), inp1(), inp2(), bfmt % \
(o, o, o, s, dtype, 'in place1')
yield inp2(), inp1(), inp2(), bfmt % \
(o, o, o, s, dtype, 'in place2')
yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
(o + 1, o, o, s - 1, dtype, 'out of place')
yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
(o, o + 1, o, s - 1, dtype, 'out of place')
yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
(o, o, o + 1, s - 1, dtype, 'out of place')
yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
(o + 1, o, o, s - 1, dtype, 'aliased')
yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
(o, o + 1, o, s - 1, dtype, 'aliased')
yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
(o, o, o + 1, s - 1, dtype, 'aliased')
class IgnoreException(Exception):
    """Ignoring this exception due to disabled feature"""
@contextlib.contextmanager
def tempdir(*args, **kwargs):
    """Context manager to provide a temporary test folder.

    All arguments are passed as this to the underlying tempfile.mkdtemp
    function.

    Yields
    ------
    str
        Path of the newly created temporary directory; it is removed
        (recursively) when the context exits.
    """
    tmpdir = mkdtemp(*args, **kwargs)
    try:
        yield tmpdir
    finally:
        # BUG FIX: clean up even when the managed block raises; without the
        # try/finally the exception propagated past the yield and the
        # directory was leaked.
        shutil.rmtree(tmpdir)
| bsd-3-clause |
dmlc/mxnet | tests/python/unittest/test_base.py | 9 | 1775 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet.base import data_dir
from nose.tools import *
import os
import unittest
import logging
import os.path as op
import platform
class MXNetDataDirTest(unittest.TestCase):
def setUp(self):
self.mxnet_data_dir = os.environ.get('MXNET_HOME')
if 'MXNET_HOME' in os.environ:
del os.environ['MXNET_HOME']
def tearDown(self):
if self.mxnet_data_dir:
os.environ['MXNET_HOME'] = self.mxnet_data_dir
else:
if 'MXNET_HOME' in os.environ:
del os.environ['MXNET_HOME']
def test_data_dir(self,):
prev_data_dir = data_dir()
system = platform.system()
if system != 'Windows':
self.assertEqual(data_dir(), op.join(op.expanduser('~'), '.mxnet'))
os.environ['MXNET_HOME'] = '/tmp/mxnet_data'
self.assertEqual(data_dir(), '/tmp/mxnet_data')
del os.environ['MXNET_HOME']
self.assertEqual(data_dir(), prev_data_dir)
| apache-2.0 |
zsdonghao/tensorlayer | examples/quantized_net/tutorial_ternaryweight_cifar10_tfrecord.py | 1 | 11034 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
- 1. This model has 1,068,298 parameters and uses the TWN compression strategy (weights: 1, 0, -1; outputs: float32);
after 500 epochs of training on a GPU, an accuracy of 80.6% was observed.
- 2. For simplified CNN layers see "Convolutional layer (Simplified)"
in read the docs website.
- 3. Data augmentation without TFRecord see `tutorial_image_preprocess.py` !!
Links
-------
.. https://arxiv.org/abs/1605.04711
.. https://github.com/XJTUWYD/TWN
Note
------
The optimizers between official code and this code are different.
Description
-----------
The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness.
.. Randomly distort the image contrast.
Speed Up
--------
Reading images from disk and distorting them can use a non-trivial amount
of processing time. To prevent these operations from slowing down training,
we run them inside 16 separate threads which continuously fill a TensorFlow queue.
"""
import os
import time
import tensorflow as tf
import tensorlayer as tl
tf.logging.set_verbosity(tf.logging.DEBUG)
tl.logging.set_verbosity(tl.logging.DEBUG)
# Checkpoint path and resume flag for this tutorial run.
model_file_name = "./model_cifar10_tfrecord.ckpt"
resume = False  # load model, resume from previous checkpoint?

# Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)

# Sanity-print the dataset shapes/dtypes the rest of the script relies on.
print('X_train.shape', X_train.shape)  # (50000, 32, 32, 3)
print('y_train.shape', y_train.shape)  # (50000,)
print('X_test.shape', X_test.shape)  # (10000, 32, 32, 3)
print('y_test.shape', y_test.shape)  # (10000,)
print('X %s y %s' % (X_test.dtype, y_test.dtype))
def data_to_tfrecord(images, labels, filename):
    """Save data into TFRecord.

    Serialises each (image, label) pair into one ``tf.train.Example`` and
    writes them all to *filename*. Skips the conversion entirely when the
    file already exists, so repeated runs do not rewrite the dataset.
    """
    if os.path.isfile(filename):
        print("%s exists" % filename)
        return
    print("Converting data into %s ..." % filename)
    # cwd = os.getcwd()
    writer = tf.python_io.TFRecordWriter(filename)
    for img, lab in zip(images, labels):
        # One Example per image: the raw pixel bytes plus its integer label.
        feature = {
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[int(lab)])),
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()])),
        }
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        writer.write(example.SerializeToString())  # Serialize To String
    writer.close()
def read_and_decode(filename, is_train=None):
    """Return tensor to read from TFRecord.

    Parameters
    ----------
    filename : str
        Path of a TFRecord file produced by ``data_to_tfrecord``.
    is_train : bool or None
        True  -> training-time augmentation (random crop/flip/brightness/
                 contrast, then per-image standardization).
        False -> evaluation preprocessing (central crop + standardization).
        None  -> return the raw 32x32x3 image unchanged.

    Returns
    -------
    (img, label) : a float32 image tensor and an int32 label tensor.
    """
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example, features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        }
    )
    # The raw bytes were written from a float32 array, so decode as float32.
    img = tf.decode_raw(features['img_raw'], tf.float32)
    img = tf.reshape(img, [32, 32, 3])
    # img = tf.cast(img, tf.float32) #* (1. / 255) - 0.5
    # IDIOM FIX: use identity checks instead of ``== True`` / ``== False`` /
    # ``== None`` (PEP 8 / flake8 E711, E712); behaviour is unchanged for
    # the three documented argument values.
    if is_train is True:
        # 1. Randomly crop a [height, width] section of the image.
        img = tf.random_crop(img, [24, 24, 3])
        # 2. Randomly flip the image horizontally.
        img = tf.image.random_flip_left_right(img)
        # 3. Randomly change brightness.
        img = tf.image.random_brightness(img, max_delta=63)
        # 4. Randomly change contrast.
        img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
        # 5. Subtract off the mean and divide by the variance of the pixels.
        img = tf.image.per_image_standardization(img)
    elif is_train is False:
        # 1. Crop the central [height, width] of the image.
        img = tf.image.resize_image_with_crop_or_pad(img, 24, 24)
        # 2. Subtract off the mean and divide by the variance of the pixels.
        img = tf.image.per_image_standardization(img)
    # is_train is None: keep the raw image unchanged (the old ``img = img``
    # branch was a no-op and has been dropped).
    label = tf.cast(features['label'], tf.int32)
    return img, label
# Save data into TFRecord files
data_to_tfrecord(images=X_train, labels=y_train, filename="train.cifar10")
data_to_tfrecord(images=X_test, labels=y_test, filename="test.cifar10")

batch_size = 128
# NOTE(review): model_file_name/resume are re-assigned here, shadowing the
# values set near the top of the script — presumably the later values win.
model_file_name = "./model_cifar10_advanced.ckpt"
resume = False  # load model, resume from previous checkpoint?

# Build the input pipeline on the CPU; the queue runners started later feed
# these batch tensors to the training/evaluation graphs.
with tf.device('/cpu:0'):
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    # prepare data in cpu
    x_train_, y_train_ = read_and_decode("train.cifar10", True)
    x_test_, y_test_ = read_and_decode("test.cifar10", False)
    # set the number of threads here
    x_train_batch, y_train_batch = tf.train.shuffle_batch(
        [x_train_, y_train_], batch_size=batch_size, capacity=2000, min_after_dequeue=1000, num_threads=32
    )
    # for testing, uses batch instead of shuffle_batch
    x_test_batch, y_test_batch = tf.train.batch(
        [x_test_, y_test_], batch_size=batch_size, capacity=50000, num_threads=32
    )
def model(x_crop, y_, reuse):
    """For more simplified CNN APIs, check tensorlayer.org.

    Builds the ternary-weight CNN and returns ``(network, cost, accuracy)``.

    Parameters
    ----------
    x_crop : 24x24x3 image batch tensor (from the crop in read_and_decode).
    y_ : int32 label batch tensor.
    reuse : pass False for the first (training) graph and True for the
        evaluation graph so the two share the same variables.
    """
    with tf.variable_scope("model", reuse=reuse):
        net = tl.layers.InputLayer(x_crop, name='input')
        # First conv block: full-precision conv + max-pool + local response norm.
        net = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn1')
        net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')
        net = tl.layers.LocalResponseNormLayer(net, 4, 1.0, 0.001 / 9.0, 0.75, name='norm1')
        # Second conv block uses ternary (-1/0/+1) weights (TWN compression).
        net = tl.layers.TernaryConv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn2')
        net = tl.layers.LocalResponseNormLayer(net, 4, 1.0, 0.001 / 9.0, 0.75, name='norm2')
        net = tl.layers.MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')
        # Ternary fully-connected head; only the final 10-way layer is float.
        net = tl.layers.FlattenLayer(net, name='flatten')
        net = tl.layers.TernaryDenseLayer(net, 384, act=tf.nn.relu, name='d1relu')
        net = tl.layers.TernaryDenseLayer(net, 192, act=tf.nn.relu, name='d2relu')
        net = tl.layers.DenseLayer(net, 10, act=None, name='output')
        y = net.outputs

        ce = tl.cost.cross_entropy(y, y_, name='cost')
        # L2 for the MLP, without this, the accuracy will be reduced by 15%.
        L2 = 0
        for p in tl.layers.get_variables_with_name('relu/W', True, True):
            L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
        cost = ce + L2

        # correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(y), 1), y_)
        correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return net, cost, acc
# You can also use placeholder to feed_dict in data after using
# val, l = sess.run([x_train_batch, y_train_batch]) to get the data
# x_crop = tf.placeholder(tf.float32, shape=[batch_size, 24, 24, 3])
# y_ = tf.placeholder(tf.int32, shape=[batch_size,])
# cost, acc, network = model(x_crop, y_, None)

with tf.device('/gpu:0'):  # <-- remove it if you don't have GPU
    # Training graph plus a variable-sharing evaluation graph (reuse=True).
    network, cost, acc, = model(x_train_batch, y_train_batch, False)
    _, cost_test, acc_test = model(x_test_batch, y_test_batch, True)

# train
n_epoch = 50000
learning_rate = 0.0001
print_freq = 1
n_step_epoch = int(len(y_train) / batch_size)
n_step = n_epoch * n_step_epoch

with tf.device('/gpu:0'):  # <-- remove it if you don't have GPU
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)

tl.layers.initialize_global_variables(sess)
if resume:
    print("Load existing model " + "!" * 10)
    saver = tf.train.Saver()
    saver.restore(sess, model_file_name)

network.print_params(False)
network.print_layers()

print(' learning_rate: %f' % learning_rate)
print(' batch_size: %d' % batch_size)
print(' n_epoch: %d, step in an epoch: %d, total n_step: %d' % (n_epoch, n_step_epoch, n_step))

# Start the queue-runner threads that feed the shuffle/batch pipelines built
# on the CPU above; the Coordinator is used to stop them cleanly afterwards.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

step = 0
for epoch in range(n_epoch):
    start_time = time.time()
    train_loss, train_acc, n_batch = 0, 0, 0
    for s in range(n_step_epoch):
        # You can also use placeholder to feed_dict in data after using
        # val, l = sess.run([x_train_batch, y_train_batch])
        # tl.visualize.images2d(val, second=3, saveable=False, name='batch', dtype=np.uint8, fig_idx=2020121)
        # err, ac, _ = sess.run([cost, acc, train_op], feed_dict={x_crop: val, y_: l})
        err, ac, _ = sess.run([cost, acc, train_op])
        step += 1
        train_loss += err
        train_acc += ac
        n_batch += 1

    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
        print(
            "Epoch %d : Step %d-%d of %d took %fs" %
            (epoch, step, step + n_step_epoch, n_step, time.time() - start_time)
        )
        print(" train loss: %f" % (train_loss / n_batch))
        print(" train acc: %f" % (train_acc / n_batch))

        # Evaluate on the full test set with the shared-variable test graph.
        test_loss, test_acc, n_batch = 0, 0, 0
        for _ in range(int(len(y_test) / batch_size)):
            err, ac = sess.run([cost_test, acc_test])
            test_loss += err
            test_acc += ac
            n_batch += 1
        print(" test loss: %f" % (test_loss / n_batch))
        print(" test acc: %f" % (test_acc / n_batch))

    # Periodically checkpoint (every print_freq * 50 epochs).
    if (epoch + 1) % (print_freq * 50) == 0:
        print("Save model " + "!" * 10)
        saver = tf.train.Saver()
        save_path = saver.save(sess, model_file_name)
        # you can also save model into npz
        tl.files.save_npz(network.all_params, name='model.npz', sess=sess)
        # and restore it as follow:
        # tl.files.load_and_assign_npz(sess=sess, name='model.npz', network=network)

# Shut the input threads down before closing the session.
coord.request_stop()
coord.join(threads)
sess.close()
| apache-2.0 |
IntersectAustralia/hcsvlab_robochef | hcsvlab_robochef/paradisec/ingest.py | 1 | 8584 | from hcsvlab_robochef.annotations import *
from hcsvlab_robochef.ingest_base import IngestBase
from hcsvlab_robochef.rdf.map import *
from hcsvlab_robochef.utils.serialiser import *
from hcsvlab_robochef.utils.statistics import *
from rdf import paradisecMap
from xml.etree import ElementTree as ET
import codecs
import mimetypes
import urllib
import re
from collections import Counter
from rdflib.term import Literal
class ParadisecIngest(IngestBase):
    """Ingest driver for the PARADISEC corpus: walks the source XML
    catalogue and emits normalised metadata (RDF/N3 for collections,
    serialised metadata for items) into the output folder."""

    # Maps PARADISEC role names onto the corresponding OLAC RDF properties.
    olac_role_map = {'annotator': OLAC.annotator, 'author': OLAC.author, 'compiler': OLAC.compiler,
                     'consultant': OLAC.consultant, 'data_inputter': OLAC.data_inputter,
                     'depositor': OLAC.depositor, 'developer': OLAC.developer, 'editor': OLAC.editor,
                     'illustrator': OLAC.illustrator, 'interpreter': OLAC.interpreter,
                     'interviewer': OLAC.interviewer, 'participant': OLAC.participant,
                     'performer': OLAC.performer, 'photographer': OLAC.photographer,
                     'recorder': OLAC.recorder, 'researcher': OLAC.researcher,
                     'research_participant': OLAC.research_participant, 'responder': OLAC.responder,
                     'signer': OLAC.signer, 'singer': OLAC.singer, 'speaker': OLAC.speaker,
                     'sponsor': OLAC.sponsor, 'transcriber': OLAC.transcriber, 'translator': OLAC.translator}

    def ingestCorpus(self, srcdir, outdir):
        ''' This function will initiate the ingest process for the PARADISEC corpus '''
        print " converting corpus in", srcdir, "into normalised data in", outdir
        print " clearing and creating output location"
        self.clear_output_dir(outdir)
        print " processing files..."
        files_to_process = self.__get_files(srcdir)
        sofar = 0
        for f in files_to_process:
            # Collection-level files are named "paradisec-<suffix>.xml";
            # everything else is an individual item document.
            if "paradisec-" in os.path.basename(f):
                meta_dict = self.ingestCollection(srcdir, f)
                # Build a small RDF graph describing the collection and
                # serialise it as Turtle/N3 into the output directory.
                uri_ref = URIRef(meta_dict['uri'])
                metadata_graph = Graph(identifier=uri_ref)
                metadata_graph.bind('paradisec', uri_ref)
                bind_graph(metadata_graph)
                metadata_graph.add((uri_ref, RDF.type, DCMITYPE.Collection))
                metadata_graph.add((uri_ref, DC.title, Literal(meta_dict['namePart'][0])))
                metadata_graph.add((uri_ref, DC.description, Literal(meta_dict.get('brief', [""])[0])))
                metadata_graph.add((uri_ref, DC.bibliographicCitation, Literal(meta_dict['fullCitation'][0])))
                # The citation starts with the creator name, up to " (".
                metadata_graph.add((uri_ref, DC.creator, Literal(meta_dict['fullCitation'][0].split(" (", 1)[0])))
                metadata_graph.add((uri_ref, DC.rights, Literal(meta_dict['accessRights'][0])))
                serializer = plugin.get('turtle', Serializer)(metadata_graph)
                outfile = open(os.path.abspath(os.path.join(outdir, "paradisec-" + meta_dict['corpus_suffix'].lower() + ".n3")), 'w')
                serializer.serialize(outfile, encoding='utf-8')
                outfile.close()
            else:
                meta_dict = self.ingestDocument(srcdir, f)
                corpus_suffix = meta_dict.pop('corpus_suffix')
                subdir = os.path.join(outdir, corpus_suffix)
                try:
                    os.makedirs(subdir)
                except:
                    # NOTE(review): broad except silently ignores makedirs
                    # failures — presumably only "directory already exists"
                    # is expected here.
                    pass
                sampleid = corpus_suffix + "-" + meta_dict['identifier']
                serialiser = MetaSerialiser()
                serialiser.serialise(subdir, sampleid, paradisecMap, meta_dict, self.identify_documents, True)
            sofar = sofar + 1
            # Progress line rewritten in place via ANSI escape codes.
            print "\033[2K ", sofar, " ", f, "\033[A"
        print "\033[2K ", sofar, "files processed"

    def setMetaData(self, srcdir):
        ''' Loads the meta data for use during ingest '''
        # Intentionally a no-op: PARADISEC carries its metadata inside the
        # per-item XML documents, so no external table needs pre-loading.
        pass

    def ingestCollection(self, srcdir, sourcepath):
        """ Read and process a corpus document """
        # Parse the XML and flatten it into a metadata dictionary.
        xml_tree = self.__load_xml_tree(sourcepath)
        meta_dict = metadata.xml2paradisecdict(xml_tree, ignorelist=['olac', 'metadata'])
        # Pick the catalogue URI out of the candidate list and derive the
        # collection suffix from its last path component.
        for candidate in meta_dict['uri']:
            if "http://catalog.paradisec.org.au/collections" in candidate:
                uri = candidate
                corpus_suffix = uri.split("/")[-1]
                meta_dict['corpus_suffix'] = corpus_suffix
                meta_dict['uri'] = candidate
        return meta_dict

    def ingestDocument(self, srcdir, sourcepath):
        """ Read and process a corpus document """
        xml_tree = self.__load_xml_tree(sourcepath)
        meta_dict = metadata.xml2paradisecdict(xml_tree, ignorelist=['olac', 'metadata'])
        # Expand table-of-contents entries and OLAC roles into nested tables.
        self.__get_documents(meta_dict)
        self.__get_people(meta_dict)
        # Identifiers look like "<collection>-<item>"; split them so the
        # collection suffix can name the output subdirectory.
        for identifier in meta_dict['identifier']:
            if re.match("^\w*-.*$",identifier):
                corpus_suffix, short_uri = identifier.split("-", 1)
                meta_dict['identifier'] = short_uri
                meta_dict['corpus_suffix'] = corpus_suffix
                paradisecMap.corpusID = "PARADISEC-" + corpus_suffix
        return meta_dict

    def identify_documents(self, documents):
        # Choose which document to display and which to index, preferring
        # Video over Audio over Text — but only when exactly one document of
        # that type exists. Only a Text display is also indexable.
        cnt = Counter()
        display = None
        indexable = None
        for doc in documents:
            cnt[doc['filetype']] += 1
        if cnt['Video'] == 1:
            display_type = "Video"
        elif cnt['Audio'] == 1:
            display_type = "Audio"
        elif cnt['Text'] == 1:
            display_type = "Text"
        else:
            display_type = None
        if display_type:
            for doc in documents:
                if doc['filetype'] == display_type:
                    display = doc['uri']
                    if display_type == "Text":
                        indexable = doc['uri']
                    break
        return (indexable, display)

    def __get_documents(self, meta_dict):
        # Turn each tableOfContents entry into a per-document sub-table
        # keyed by 'table_document_<basename>'.
        docs = meta_dict.pop('tableOfContents', None)
        if docs is not None:
            for v in docs:
                file = os.path.basename(v)
                meta_dict['table_document_' + file] = {'id': file, 'filename': file, 'filetype': self.__get_type(file), 'documenttitle': file}

    def __get_people(self, meta_dict):
        # Move each OLAC role entry into a 'table_person_<role>' sub-table.
        # NOTE(review): multiple people in the same role overwrite each other
        # because the table key only contains the role — confirm intended.
        roles = self.olac_role_map.keys()
        for role in roles:
            popped = meta_dict.pop(role, None)
            if popped is not None:
                for v in popped:
                    person = {'role': self.olac_role_map[role], 'id': re.sub(' ', '_', v), 'name': v}
                    meta_dict['table_person_' + role] = person

    # TODO: this could be moved to somewhere like ../utils where other modules could use it
    def __get_type(self, filepath):
        # Classify a file as Video/Audio/Text/... from its MIME type's major
        # part; unknown or 'application/*' types are bucketed as 'Other'.
        url = urllib.pathname2url(filepath)
        mime_type, _ = mimetypes.guess_type(url)
        filetype = None
        if mime_type:
            filetype = mime_type.split('/')[0].title()
        if not filetype or filetype == 'Application':
            filetype = 'Other'
        return filetype

    def __get_files(self, srcdir):
        # Yield every *.xml file under srcdir (recursive walk).
        item_pattern = "^.+\.xml"
        for root, dirnames, filenames in os.walk(srcdir):
            for filename in filenames:
                if re.match(item_pattern, filename):
                    yield os.path.join(root, filename)

    def __tuplelist2dict__(self, tuplelist):
        # Build a dict from (key, value) pairs, skipping falsy keys/values.
        result = dict()
        for (k, v) in tuplelist:
            if k and v:
                result[k] = v
        return result

    def __load_xml_tree(self, sourcepath):
        '''
        This function reads in a XML docment as a text file and converts it into
        an XML tree for further processing
        '''
        fhandle = codecs.open(sourcepath, "r", "utf-8")
        text = fhandle.read()
        fhandle.close()
        # Replace HTML character entities (which ElementTree does not know)
        # with their literal Unicode equivalents before parsing.
        text = text.replace('&ndash;', u"\u2013")
        text = text.replace('&mdash;', u"\u2014")
        text = text.replace('&copy;', u"\u00A9")
        text = text.replace('&ldquo;', u"\u201C")
        text = text.replace('&rdquo;', u"\u201D")
        text = text.replace('&emsp;', u"\u2003")
        text = text.replace('&eacute;', u"\u00E9")
        text = text.replace('&lsquo;', u"\u2018")
        text = text.replace('&rsquo;', u"\u2019")
        text = text.replace('&ecirc;', u"\u00EA")
        text = text.replace('&agrave;', u"\u00E0")
        text = text.replace('&egrave;', u"\u00E8")
        text = text.replace('&oelig;', u"\u0153")
        text = text.replace('&aelig;', u"\u00E6")
        text = text.replace('&hellip;', u"\u2026")
        return ET.fromstring(text.encode("utf-8"))
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.