repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
paulmathews/nova | nova/api/openstack/compute/versions.py | 9 | 7801 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from nova.api.openstack.compute.views import versions as views_versions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.openstack.common import timeutils
# External documentation links, keyed by API version; referenced by the
# VERSIONS metadata below when building "describedby" links.
LINKS = {
    'v2.0': {
        'pdf': 'http://docs.openstack.org/'
               'api/openstack-compute/1.1/os-compute-devguide-1.1.pdf',
        'wadl': 'http://docs.openstack.org/'
                'api/openstack-compute/1.1/wadl/os-compute-1.1.wadl',
    },
}
# Static description of every API version this endpoint advertises:
# status, last-updated timestamp, documentation links (from LINKS above)
# and the media types the version supports.
VERSIONS = {
    "v2.0": {
        "id": "v2.0",
        "status": "CURRENT",
        "updated": "2011-01-21T11:33:21Z",
        "links": [
            {
                "rel": "describedby",
                "type": "application/pdf",
                "href": LINKS['v2.0']['pdf'],
            },
            {
                "rel": "describedby",
                "type": "application/vnd.sun.wadl+xml",
                "href": LINKS['v2.0']['wadl'],
            },
        ],
        "media-types": [
            {
                "base": "application/xml",
                "type": "application/vnd.openstack.compute+xml;version=2",
            },
            {
                "base": "application/json",
                "type": "application/vnd.openstack.compute+json;version=2",
            }
        ],
    }
}
class MediaTypesTemplateElement(xmlutil.TemplateElement):
    """Template element that only renders when media-type data is present."""

    def will_render(self, datum):
        # Skip rendering entirely for data that carries no media types.
        has_media_types = 'media-types' in datum
        return has_media_types
def make_version(elem):
    """Attach the standard version attributes, media-type sub-elements and
    atom links to a version template element.
    """
    for attr_name in ('id', 'status', 'updated'):
        elem.set(attr_name)

    media_types = MediaTypesTemplateElement('media-types')
    elem.append(media_types)
    media_type = xmlutil.SubTemplateElement(media_types, 'media-type',
                                            selector='media-types')
    media_type.set('base')
    media_type.set('type')

    xmlutil.make_links(elem, 'links')
# Namespace map shared by all version templates: common v1.0 as the default
# namespace plus the atom namespace for link elements.
version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class VersionTemplate(xmlutil.TemplateBuilder):
    """Builds the XML template for a single version document."""

    def construct(self):
        version_elem = xmlutil.TemplateElement('version', selector='version')
        make_version(version_elem)
        return xmlutil.MasterTemplate(version_elem, 1, nsmap=version_nsmap)
class VersionsTemplate(xmlutil.TemplateBuilder):
    """Builds the XML template for the full list of versions."""

    def construct(self):
        versions_root = xmlutil.TemplateElement('versions')
        version_elem = xmlutil.SubTemplateElement(versions_root, 'version',
                                                  selector='versions')
        make_version(version_elem)
        return xmlutil.MasterTemplate(versions_root, 1, nsmap=version_nsmap)
class ChoicesTemplate(xmlutil.TemplateBuilder):
    """Builds the XML template for a multiple-choices (300) response."""

    def construct(self):
        choices_root = xmlutil.TemplateElement('choices')
        version_elem = xmlutil.SubTemplateElement(choices_root, 'version',
                                                  selector='choices')
        make_version(version_elem)
        return xmlutil.MasterTemplate(choices_root, 1, nsmap=version_nsmap)
class AtomSerializer(wsgi.XMLDictSerializer):
    """Base serializer rendering version data as an Atom feed."""

    # Default XML namespace map: plain Atom with no prefix.
    NSMAP = {None: xmlutil.XMLNS_ATOM}

    def __init__(self, metadata=None, xmlns=None):
        """:param metadata: optional serializer metadata dict
        :param xmlns: XML namespace; defaults to the Atom namespace
        """
        self.metadata = metadata or {}
        if not xmlns:
            self.xmlns = wsgi.XMLNS_ATOM
        else:
            self.xmlns = xmlns

    def _get_most_recent_update(self, versions):
        """Return the latest 'updated' timestamp among ``versions``,
        formatted as an ISO-8601 string.

        NOTE(review): assumes ``versions`` is non-empty -- an empty list
        would leave ``recent`` as None and raise AttributeError.
        """
        recent = None
        for version in versions:
            updated = timeutils.parse_strtime(version['updated'],
                                              '%Y-%m-%dT%H:%M:%SZ')
            if not recent:
                recent = updated
            elif updated > recent:
                recent = updated

        return recent.strftime('%Y-%m-%dT%H:%M:%SZ')

    def _get_base_url(self, link_href):
        """Strip the last path component from ``link_href``, returning the
        parent URL with a trailing slash.
        """
        # Make sure no trailing /
        link_href = link_href.rstrip('/')
        return link_href.rsplit('/', 1)[0] + '/'

    def _create_feed(self, versions, feed_title, feed_id):
        """Build the Atom <feed> element with one <entry> per version."""
        feed = etree.Element('feed', nsmap=self.NSMAP)
        title = etree.SubElement(feed, 'title')
        title.set('type', 'text')
        title.text = feed_title

        # Set this updated to the most recently updated version
        recent = self._get_most_recent_update(versions)
        etree.SubElement(feed, 'updated').text = recent
        etree.SubElement(feed, 'id').text = feed_id

        # The feed id doubles as the self link.
        link = etree.SubElement(feed, 'link')
        link.set('rel', 'self')
        link.set('href', feed_id)

        author = etree.SubElement(feed, 'author')
        etree.SubElement(author, 'name').text = 'Rackspace'
        etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/'

        for version in versions:
            feed.append(self._create_version_entry(version))

        return feed

    def _create_version_entry(self, version):
        """Build one Atom <entry> element describing ``version``."""
        entry = etree.Element('entry')
        # The first link's href doubles as the entry id.
        etree.SubElement(entry, 'id').text = version['links'][0]['href']
        title = etree.SubElement(entry, 'title')
        title.set('type', 'text')
        title.text = 'Version %s' % version['id']
        etree.SubElement(entry, 'updated').text = version['updated']

        for link in version['links']:
            link_elem = etree.SubElement(entry, 'link')
            link_elem.set('rel', link['rel'])
            link_elem.set('href', link['href'])
            if 'type' in link:
                link_elem.set('type', link['type'])

        content = etree.SubElement(entry, 'content')
        content.set('type', 'text')
        content.text = 'Version %s %s (%s)' % (version['id'],
                                               version['status'],
                                               version['updated'])
        return entry
class VersionsAtomSerializer(AtomSerializer):
    """Serializes the full version list as an Atom feed."""

    def default(self, data):
        version_list = data['versions']
        first_href = version_list[0]['links'][0]['href']
        feed_id = self._get_base_url(first_href)
        feed = self._create_feed(version_list, 'Available API Versions',
                                 feed_id)
        return self._to_xml(feed)
class VersionAtomSerializer(AtomSerializer):
    """Serializes a single version document as an Atom feed."""

    def default(self, data):
        version_data = data['version']
        feed_id = version_data['links'][0]['href']
        feed = self._create_feed([version_data], 'About This Version',
                                 feed_id)
        return self._to_xml(feed)
class Versions(wsgi.Resource):
    """Top-level resource that lists the available API versions."""

    def __init__(self):
        super(Versions, self).__init__(None)

    @wsgi.serializers(xml=VersionsTemplate,
                      atom=VersionsAtomSerializer)
    def index(self, req):
        """Return all versions."""
        view = views_versions.get_view_builder(req)
        return view.build_versions(VERSIONS)

    @wsgi.serializers(xml=ChoicesTemplate)
    @wsgi.response(300)
    def multi(self, req):
        """Return multiple choices."""
        view = views_versions.get_view_builder(req)
        return view.build_choices(VERSIONS, req)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        # The bare root path lists versions; anything else yields choices.
        at_root = request_environment['PATH_INFO'] == '/'
        return {'action': 'index' if at_root else 'multi'}
class VersionV2(object):
    """Controller describing the v2.0 API version."""

    @wsgi.serializers(xml=VersionTemplate,
                      atom=VersionAtomSerializer)
    def show(self, req):
        """Return the description document for the v2.0 API."""
        view = views_versions.get_view_builder(req)
        return view.build_version(VERSIONS['v2.0'])
def create_resource():
    """Wrap the VersionV2 controller in a WSGI resource."""
    controller = VersionV2()
    return wsgi.Resource(controller)
| apache-2.0 |
skg-net/ansible | test/units/modules/system/interfaces_file/test_interfaces_file.py | 20 | 9479 | # (c) 2017, Roman Belyakovsky <ihryamzik () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.compat.tests import unittest
from ansible.modules.system import interfaces_file
import os
import json
import io
import inspect
from shutil import copyfile, move
import difflib
class AnsibleFailJson(Exception):
    """Raised by the mocked module's fail_json() to abort a test path."""
class ModuleMocked():
    """Minimal stand-in for AnsibleModule offering the file helpers that
    interfaces_file expects (move, backup, failure reporting)."""

    def atomic_move(self, src, dst):
        # A plain rename is sufficient for tests.
        move(src, dst)

    def backup_local(self, path):
        # Backups always land in /tmp under "<basename>.bak".
        backup_path = os.path.join("/tmp", os.path.basename(path) + ".bak")
        copyfile(path, backup_path)
        return backup_path

    def fail_json(self, msg):
        raise AnsibleFailJson(msg)
# Shared mock module instance plus the locations of the input fixtures and
# the expected ("golden") outputs they are compared against.
module = ModuleMocked()
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'input')
golden_output_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'golden_output')
class TestInterfacesFileModule(unittest.TestCase):
    """Exercises the interfaces_file module against every fixture file,
    comparing results with pre-recorded golden outputs.
    """

    def getTestFiles(self):
        # All regular files in the fixtures/input directory.
        return next(os.walk(fixture_path))[2]

    def compareFileToBackup(self, path, backup):
        """Assert that *path* and *backup* are identical, then restore the
        backup over the original path.
        """
        with open(path) as f1:
            with open(backup) as f2:
                diffs = difflib.context_diff(f1.readlines(),
                                             f2.readlines(),
                                             fromfile=os.path.basename(path),
                                             tofile=os.path.basename(backup))
        # Restore backup
        move(backup, path)
        deltas = [d for d in diffs]
        self.assertTrue(len(deltas) == 0)

    def compareInterfacesLinesToFile(self, interfaces_lines, path, testname=None):
        """Compare the raw interface lines with the golden file for the
        calling test (derived from the caller's name unless given).
        """
        if not testname:
            # Default golden-file name: "<fixture>.<calling test method>".
            testname = "%s.%s" % (path, inspect.stack()[1][3])
        self.compareStringWithFile("".join([d['line'] for d in interfaces_lines if 'line' in d]), testname)

    def compareInterfacesToFile(self, ifaces, path, testname=None):
        """Compare the parsed interface dict (as pretty JSON) with the
        golden file for the calling test.
        """
        if not testname:
            testname = "%s.%s.json" % (path, inspect.stack()[1][3])
        self.compareStringWithFile(json.dumps(ifaces, sort_keys=True, indent=4, separators=(',', ': ')), testname)

    def compareStringWithFile(self, string, path):
        """Assert *string* equals the golden file *path*; on first run the
        golden file is created from *string* instead.
        """
        # self.assertEqual("","_",msg=path)
        testfilepath = os.path.join(golden_output_path, path)
        goldenstring = string
        if not os.path.isfile(testfilepath):
            # First run: record the string as the new golden output.
            # NOTE(review): opened binary but written a str -- presumably
            # Python 2 only; would raise TypeError on Python 3. Confirm.
            f = io.open(testfilepath, 'wb')
            f.write(string)
            f.close()
        else:
            with open(testfilepath, 'r') as goldenfile:
                goldenstring = goldenfile.read()
                goldenfile.close()
        self.assertEqual(string, goldenstring)

    def test_no_changes(self):
        # Reading a fixture and writing it back must be a no-op.
        for testfile in self.getTestFiles():
            path = os.path.join(fixture_path, testfile)
            lines, ifaces = interfaces_file.read_interfaces_file(module, path)
            self.compareInterfacesLinesToFile(lines, testfile)
            self.compareInterfacesToFile(ifaces, testfile)

    def test_add_up_aoption_to_aggi(self):
        # Apply sequences of option changes and compare against goldens;
        # expected fail_json messages are recorded in *.exceptions.txt.
        testcases = {
            "add_aggi_up": [
                {
                    'iface': 'aggi',
                    'option': 'up',
                    'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi',
                    'state': 'present',
                }
            ],
            "add_and_delete_aggi_up": [
                {
                    'iface': 'aggi',
                    'option': 'up',
                    'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi',
                    'state': 'present',
                },
                {
                    'iface': 'aggi',
                    'option': 'up',
                    'value': None,
                    'state': 'absent',
                },
            ],
            "set_aggi_slaves": [
                {
                    'iface': 'aggi',
                    'option': 'slaves',
                    'value': 'int1  int3',
                    'state': 'present',
                },
            ],
            "set_aggi_and_eth0_mtu": [
                {
                    'iface': 'aggi',
                    'option': 'mtu',
                    'value': '1350',
                    'state': 'present',
                },
                {
                    'iface': 'eth0',
                    'option': 'mtu',
                    'value': '1350',
                    'state': 'present',
                },
            ],
        }
        for testname, options_list in testcases.items():
            for testfile in self.getTestFiles():
                path = os.path.join(fixture_path, testfile)
                lines, ifaces = interfaces_file.read_interfaces_file(module, path)
                fail_json_iterations = []
                for i, options in enumerate(options_list):
                    try:
                        _, lines = interfaces_file.setInterfaceOption(module, lines, options['iface'], options['option'], options['value'], options['state'])
                    except AnsibleFailJson as e:
                        # Record the failure instead of aborting so every
                        # option set is exercised and compared.
                        fail_json_iterations.append("[%d] fail_json message: %s\noptions:\n%s" %
                                                    (i, str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))))
                self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname))

                self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname))
                self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname))

    def test_revert(self):
        # Add then remove an option; the file must match its backup again.
        testcases = {
            "revert": [
                {
                    'iface': 'eth0',
                    'option': 'mtu',
                    'value': '1350',
                }
            ],
        }
        for testname, options_list in testcases.items():
            for testfile in self.getTestFiles():
                path = os.path.join(fixture_path, testfile)
                lines, ifaces = interfaces_file.read_interfaces_file(module, path)
                backupp = module.backup_local(path)
                options = options_list[0]
                for state in ['present', 'absent']:
                    fail_json_iterations = []
                    options['state'] = state
                    try:
                        _, lines = interfaces_file.setInterfaceOption(module, lines, options['iface'], options['option'], options['value'], options['state'])
                    except AnsibleFailJson as e:
                        fail_json_iterations.append("fail_json message: %s\noptions:\n%s" %
                                                    (str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))))
                    interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path)

                    self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname))

                    self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname))
                    self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname))
                # File must be byte-identical to the pre-change backup.
                self.compareFileToBackup(path, backupp)

    def test_change_method(self):
        # Change an interface's method and compare the written file.
        testcases = {
            "change_method": [
                {
                    'iface': 'eth0',
                    'option': 'method',
                    'value': 'manual',
                    'state': 'present',
                }
            ],
        }
        for testname, options_list in testcases.items():
            for testfile in self.getTestFiles():
                path = os.path.join(fixture_path, testfile)
                lines, ifaces = interfaces_file.read_interfaces_file(module, path)
                backupp = module.backup_local(path)
                options = options_list[0]
                fail_json_iterations = []
                try:
                    _, lines = interfaces_file.setInterfaceOption(module, lines, options['iface'], options['option'], options['value'], options['state'])
                except AnsibleFailJson as e:
                    fail_json_iterations.append("fail_json message: %s\noptions:\n%s" %
                                                (str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))))
                interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path)

                self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname))

                self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname))
                self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname))
                # Restore backup
                move(backupp, path)
| gpl-3.0 |
Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/operations/_domain_registration_provider_operations.py | 1 | 5142 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DomainRegistrationProviderOperations(object):
    """DomainRegistrationProviderOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.web.v2020_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_operations(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.CsmOperationCollection"]
        """Implements Csm operations Api to exposes the list of available Csm Apis under the resource provider.

        Description for Implements Csm operations Api to exposes the list of available Csm Apis under
        the resource provider.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CsmOperationCollection or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.CsmOperationCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CsmOperationCollection"]
        # Callers may extend the default status-code -> exception mapping.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request for either the first page (templated
            # URL + api-version) or a continuation page (opaque next_link).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_operations.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string, so no
                # additional parameters are attached.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, item iterator).
            deserialized = self._deserialize('CsmOperationCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, raising a mapped/ARM-formatted error on
            # any non-200 response.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_operations.metadata = {'url': '/providers/Microsoft.DomainRegistration/operations'}  # type: ignore
| mit |
gauravbose/digital-menu | tests/forms_tests/tests/test_widgets.py | 13 | 74529 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import datetime
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.forms import (
BooleanField, CheckboxInput, CheckboxSelectMultiple, ChoiceField,
ClearableFileInput, DateInput, DateTimeField, DateTimeInput, FileInput,
Form, HiddenInput, MultipleHiddenInput, MultiWidget, NullBooleanSelect,
PasswordInput, RadioSelect, Select, SelectMultiple, SplitDateTimeWidget,
Textarea, TextInput, TimeInput,
)
from django.forms.widgets import (
ChoiceFieldRenderer, ChoiceInput, RadioFieldRenderer,
)
from django.test import TestCase, ignore_warnings, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import SafeData, mark_safe
from django.utils.translation import activate, deactivate, override
from ..models import Article
class FormsWidgetTestCase(TestCase):
# Each Widget class corresponds to an HTML form widget. A Widget knows how to
# render itself, given a field name and some data. Widgets don't perform
# validation.
def test_textinput(self):
w = TextInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="text" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="text" name="email" />')
self.assertHTMLEqual(w.render('email', 'test@example.com'), '<input type="text" name="email" value="test@example.com" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="text" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', 'test@example.com', attrs={'class': 'fun'}), '<input type="text" name="email" value="test@example.com" class="fun" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="text" name="email" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = TextInput(attrs={'class': 'fun', 'type': 'email'})
self.assertHTMLEqual(w.render('email', ''), '<input type="email" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', 'foo@example.com'), '<input type="email" class="fun" value="foo@example.com" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = TextInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', '', attrs={'class': 'special'}), '<input type="text" class="special" name="email" />')
# 'attrs' can be safe-strings if needed)
w = TextInput(attrs={'onBlur': mark_safe("function('foo')")})
self.assertHTMLEqual(w.render('email', ''), '<input onBlur="function(\'foo\')" type="text" name="email" />')
def test_passwordinput(self):
w = PasswordInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', 'secret'), '<input type="password" name="email" />')
# The render_value argument lets you specify whether the widget should render
# its value. For security reasons, this is off by default.
w = PasswordInput(render_value=True)
self.assertHTMLEqual(w.render('email', ''), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', 'test@example.com'), '<input type="password" name="email" value="test@example.com" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="password" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', 'test@example.com', attrs={'class': 'fun'}), '<input type="password" name="email" value="test@example.com" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = PasswordInput(attrs={'class': 'fun'}, render_value=True)
self.assertHTMLEqual(w.render('email', ''), '<input type="password" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', 'foo@example.com'), '<input type="password" class="fun" value="foo@example.com" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = PasswordInput(attrs={'class': 'pretty'}, render_value=True)
self.assertHTMLEqual(w.render('email', '', attrs={'class': 'special'}), '<input type="password" class="special" name="email" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="password" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />')
def test_hiddeninput(self):
w = HiddenInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="hidden" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="hidden" name="email" />')
self.assertHTMLEqual(w.render('email', 'test@example.com'), '<input type="hidden" name="email" value="test@example.com" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="hidden" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', 'test@example.com', attrs={'class': 'fun'}), '<input type="hidden" name="email" value="test@example.com" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = HiddenInput(attrs={'class': 'fun'})
self.assertHTMLEqual(w.render('email', ''), '<input type="hidden" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', 'foo@example.com'), '<input type="hidden" class="fun" value="foo@example.com" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = HiddenInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', '', attrs={'class': 'special'}), '<input type="hidden" class="special" name="email" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="hidden" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = HiddenInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', '', attrs={'class': 'special'}), '<input type="hidden" class="special" name="email" />')
# Boolean values are rendered to their string forms ("True" and "False").
w = HiddenInput()
self.assertHTMLEqual(w.render('get_spam', False), '<input type="hidden" name="get_spam" value="False" />')
self.assertHTMLEqual(w.render('get_spam', True), '<input type="hidden" name="get_spam" value="True" />')
def test_multiplehiddeninput(self):
w = MultipleHiddenInput()
self.assertHTMLEqual(w.render('email', []), '')
self.assertHTMLEqual(w.render('email', None), '')
self.assertHTMLEqual(w.render('email', ['test@example.com']), '<input type="hidden" name="email" value="test@example.com" />')
self.assertHTMLEqual(w.render('email', ['some "quoted" & ampersanded value']), '<input type="hidden" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', ['test@example.com', 'foo@example.com']), '<input type="hidden" name="email" value="test@example.com" />\n<input type="hidden" name="email" value="foo@example.com" />')
self.assertHTMLEqual(w.render('email', ['test@example.com'], attrs={'class': 'fun'}), '<input type="hidden" name="email" value="test@example.com" class="fun" />')
self.assertHTMLEqual(w.render('email', ['test@example.com', 'foo@example.com'], attrs={'class': 'fun'}), '<input type="hidden" name="email" value="test@example.com" class="fun" />\n<input type="hidden" name="email" value="foo@example.com" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = MultipleHiddenInput(attrs={'class': 'fun'})
self.assertHTMLEqual(w.render('email', []), '')
self.assertHTMLEqual(w.render('email', ['foo@example.com']), '<input type="hidden" class="fun" value="foo@example.com" name="email" />')
self.assertHTMLEqual(w.render('email', ['foo@example.com', 'test@example.com']), '<input type="hidden" class="fun" value="foo@example.com" name="email" />\n<input type="hidden" class="fun" value="test@example.com" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = MultipleHiddenInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', ['foo@example.com'], attrs={'class': 'special'}), '<input type="hidden" class="special" value="foo@example.com" name="email" />')
self.assertHTMLEqual(w.render('email', ['ŠĐĆŽćžšđ'], attrs={'class': 'fun'}), '<input type="hidden" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = MultipleHiddenInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', ['foo@example.com'], attrs={'class': 'special'}), '<input type="hidden" class="special" value="foo@example.com" name="email" />')
# Each input gets a separate ID.
w = MultipleHiddenInput()
self.assertHTMLEqual(w.render('letters', list('abc'), attrs={'id': 'hideme'}), '<input type="hidden" name="letters" value="a" id="hideme_0" />\n<input type="hidden" name="letters" value="b" id="hideme_1" />\n<input type="hidden" name="letters" value="c" id="hideme_2" />')
def test_fileinput(self):
# FileInput widgets don't ever show the value, because the old value is of no use
# if you are updating the form or if the provided file generated an error.
w = FileInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', 'test@example.com'), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', 'test@example.com', attrs={'class': 'fun'}), '<input type="file" name="email" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = FileInput(attrs={'class': 'fun'})
self.assertHTMLEqual(w.render('email', ''), '<input type="file" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', 'foo@example.com'), '<input type="file" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="file" class="fun" name="email" />')
def test_textarea(self):
w = Textarea()
self.assertHTMLEqual(w.render('msg', ''), '<textarea rows="10" cols="40" name="msg"></textarea>')
self.assertHTMLEqual(w.render('msg', None), '<textarea rows="10" cols="40" name="msg"></textarea>')
self.assertHTMLEqual(w.render('msg', 'value'), '<textarea rows="10" cols="40" name="msg">value</textarea>')
self.assertHTMLEqual(w.render('msg', 'some "quoted" & ampersanded value'), '<textarea rows="10" cols="40" name="msg">some "quoted" & ampersanded value</textarea>')
self.assertHTMLEqual(w.render('msg', mark_safe('pre "quoted" value')), '<textarea rows="10" cols="40" name="msg">pre "quoted" value</textarea>')
self.assertHTMLEqual(w.render('msg', 'value', attrs={'class': 'pretty', 'rows': 20}), '<textarea class="pretty" rows="20" cols="40" name="msg">value</textarea>')
# You can also pass 'attrs' to the constructor:
w = Textarea(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('msg', ''), '<textarea rows="10" cols="40" name="msg" class="pretty"></textarea>')
self.assertHTMLEqual(w.render('msg', 'example'), '<textarea rows="10" cols="40" name="msg" class="pretty">example</textarea>')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = Textarea(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('msg', '', attrs={'class': 'special'}), '<textarea rows="10" cols="40" name="msg" class="special"></textarea>')
self.assertHTMLEqual(w.render('msg', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<textarea rows="10" cols="40" name="msg" class="fun">\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111</textarea>')
def test_checkboxinput(self):
w = CheckboxInput()
self.assertHTMLEqual(w.render('is_cool', ''), '<input type="checkbox" name="is_cool" />')
self.assertHTMLEqual(w.render('is_cool', None), '<input type="checkbox" name="is_cool" />')
self.assertHTMLEqual(w.render('is_cool', False), '<input type="checkbox" name="is_cool" />')
self.assertHTMLEqual(w.render('is_cool', True), '<input checked="checked" type="checkbox" name="is_cool" />')
# Using any value that's not in ('', None, False, True) will check the checkbox
# and set the 'value' attribute.
self.assertHTMLEqual(w.render('is_cool', 'foo'), '<input checked="checked" type="checkbox" name="is_cool" value="foo" />')
self.assertHTMLEqual(w.render('is_cool', False, attrs={'class': 'pretty'}), '<input type="checkbox" name="is_cool" class="pretty" />')
# regression for #17114
self.assertHTMLEqual(w.render('is_cool', 0), '<input checked="checked" type="checkbox" name="is_cool" value="0" />')
self.assertHTMLEqual(w.render('is_cool', 1), '<input checked="checked" type="checkbox" name="is_cool" value="1" />')
# You can also pass 'attrs' to the constructor:
w = CheckboxInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('is_cool', ''), '<input type="checkbox" class="pretty" name="is_cool" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = CheckboxInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('is_cool', '', attrs={'class': 'special'}), '<input type="checkbox" class="special" name="is_cool" />')
# You can pass 'check_test' to the constructor. This is a callable that takes the
# value and returns True if the box should be checked.
w = CheckboxInput(check_test=lambda value: value.startswith('hello'))
self.assertHTMLEqual(w.render('greeting', ''), '<input type="checkbox" name="greeting" />')
self.assertHTMLEqual(w.render('greeting', 'hello'), '<input checked="checked" type="checkbox" name="greeting" value="hello" />')
self.assertHTMLEqual(w.render('greeting', 'hello there'), '<input checked="checked" type="checkbox" name="greeting" value="hello there" />')
self.assertHTMLEqual(w.render('greeting', 'hello & goodbye'), '<input checked="checked" type="checkbox" name="greeting" value="hello & goodbye" />')
# Ticket #17888: calling check_test shouldn't swallow exceptions
with self.assertRaises(AttributeError):
w.render('greeting', True)
# The CheckboxInput widget will return False if the key is not found in the data
# dictionary (because HTML form submission doesn't send any result for unchecked
# checkboxes).
self.assertFalse(w.value_from_datadict({}, {}, 'testing'))
value = w.value_from_datadict({'testing': '0'}, {}, 'testing')
self.assertIsInstance(value, bool)
self.assertTrue(value)
    def test_select(self):
        """
        Select renders a <select> whose <option> matching str(value) gets
        selected="selected"; also covers None/unknown values, duplicate
        option values (#8103), iterable/generator choices, constructor vs.
        render-time choices, escaping, Unicode, and one-level optgroups.
        """
        w = Select()
        self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select name="beatle">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        # If the value is None, none of the options are selected:
        self.assertHTMLEqual(w.render('beatle', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        # If the value corresponds to a label (but not to an option value), none of the options are selected:
        self.assertHTMLEqual(w.render('beatle', 'John', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        # Only one option can be selected, see #8103:
        self.assertHTMLEqual(w.render('choices', '0', choices=(('0', '0'), ('1', '1'), ('2', '2'), ('3', '3'), ('0', 'extra'))), """<select name="choices">
<option value="0" selected="selected">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="0">extra</option>
</select>""")
        # The value is compared to its str():
        self.assertHTMLEqual(w.render('num', 2, choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        self.assertHTMLEqual(w.render('num', '2', choices=[(1, 1), (2, 2), (3, 3)]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        self.assertHTMLEqual(w.render('num', 2, choices=[(1, 1), (2, 2), (3, 3)]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        # The 'choices' argument can be any iterable:
        from itertools import chain
        def get_choices():
            for i in range(5):
                yield (i, i)
        self.assertHTMLEqual(w.render('num', 2, choices=get_choices()), """<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>""")
        things = ({'id': 1, 'name': 'And Boom'}, {'id': 2, 'name': 'One More Thing!'})
        class SomeForm(Form):
            somechoice = ChoiceField(choices=chain((('', '-' * 9),), [(thing['id'], thing['name']) for thing in things]))
        f = SomeForm()
        # Rendering twice must give the same output (the chained iterable
        # is not consumed by the first render):
        self.assertHTMLEqual(f.as_table(), '<tr><th><label for="id_somechoice">Somechoice:</label></th><td><select name="somechoice" id="id_somechoice">\n<option value="" selected="selected">---------</option>\n<option value="1">And Boom</option>\n<option value="2">One More Thing!</option>\n</select></td></tr>')
        self.assertHTMLEqual(f.as_table(), '<tr><th><label for="id_somechoice">Somechoice:</label></th><td><select name="somechoice" id="id_somechoice">\n<option value="" selected="selected">---------</option>\n<option value="1">And Boom</option>\n<option value="2">One More Thing!</option>\n</select></td></tr>')
        f = SomeForm({'somechoice': 2})
        self.assertHTMLEqual(f.as_table(), '<tr><th><label for="id_somechoice">Somechoice:</label></th><td><select name="somechoice" id="id_somechoice">\n<option value="">---------</option>\n<option value="1">And Boom</option>\n<option value="2" selected="selected">One More Thing!</option>\n</select></td></tr>')
        # You can also pass 'choices' to the constructor:
        w = Select(choices=[(1, 1), (2, 2), (3, 3)])
        self.assertHTMLEqual(w.render('num', 2), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        # If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
        self.assertHTMLEqual(w.render('num', 2, choices=[(4, 4), (5, 5)]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>""")
        # Choices are escaped correctly
        self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you > me')))), """<select name="escape">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="bad">you & me</option>
<option value="good">you > me</option>
</select>""")
        # Unicode choices are correctly rendered as HTML
        self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')]), '<select name="email">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" selected="selected">\u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</option>\n<option value="\u0107\u017e\u0161\u0111">abc\u0107\u017e\u0161\u0111</option>\n</select>')
        # If choices is passed to the constructor and is a generator, it can be iterated
        # over multiple times without getting consumed:
        w = Select(choices=get_choices())
        self.assertHTMLEqual(w.render('num', 2), """<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>""")
        self.assertHTMLEqual(w.render('num', 3), """<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3" selected="selected">3</option>
<option value="4">4</option>
</select>""")
        # Choices can be nested one level in order to create HTML optgroups:
        w.choices = (
            ('outer1', 'Outer 1'),
            ('Group "1"', (('inner1', 'Inner 1'), ('inner2', 'Inner 2'))),
        )
        self.assertHTMLEqual(w.render('nestchoice', None), """<select name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
        self.assertHTMLEqual(w.render('nestchoice', 'outer1'), """<select name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
        self.assertHTMLEqual(w.render('nestchoice', 'inner1'), """<select name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1" selected="selected">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
def test_nullbooleanselect(self):
w = NullBooleanSelect()
self.assertTrue(w.render('is_cool', True), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', False), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', None), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', '2'), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', '3'), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
    def test_selectmultiple(self):
        """
        SelectMultiple renders <select multiple="multiple"> and marks every
        <option> whose value appears in the given list as selected; covers
        None/unknown values, duplicate option values (#8103), partially
        invalid selections, str() comparison, generator choices, escaping,
        Unicode, and one-level optgroups.
        """
        w = SelectMultiple()
        self.assertHTMLEqual(w.render('beatles', ['J'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        self.assertHTMLEqual(w.render('beatles', ['J', 'P'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        self.assertHTMLEqual(w.render('beatles', ['J', 'P', 'R'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R" selected="selected">Ringo</option>
</select>""")
        # If the value is None, none of the options are selected:
        self.assertHTMLEqual(w.render('beatles', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        # If the value corresponds to a label (but not to an option value), none of the options are selected:
        self.assertHTMLEqual(w.render('beatles', ['John'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
        # Multiple options (with the same value) can be selected, see #8103:
        self.assertHTMLEqual(w.render('choices', ['0'], choices=(('0', '0'), ('1', '1'), ('2', '2'), ('3', '3'), ('0', 'extra'))), """<select multiple="multiple" name="choices">
<option value="0" selected="selected">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="0" selected="selected">extra</option>
</select>""")
        # If multiple values are given, but some of them are not valid, the valid ones are selected:
        self.assertHTMLEqual(w.render('beatles', ['J', 'G', 'foo'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G" selected="selected">George</option>
<option value="R">Ringo</option>
</select>""")
        # The value is compared to its str():
        self.assertHTMLEqual(w.render('nums', [2], choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        self.assertHTMLEqual(w.render('nums', ['2'], choices=[(1, 1), (2, 2), (3, 3)]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        self.assertHTMLEqual(w.render('nums', [2], choices=[(1, 1), (2, 2), (3, 3)]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        # The 'choices' argument can be any iterable:
        def get_choices():
            for i in range(5):
                yield (i, i)
        self.assertHTMLEqual(w.render('nums', [2], choices=get_choices()), """<select multiple="multiple" name="nums">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>""")
        # You can also pass 'choices' to the constructor:
        w = SelectMultiple(choices=[(1, 1), (2, 2), (3, 3)])
        self.assertHTMLEqual(w.render('nums', [2]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
        # If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
        self.assertHTMLEqual(w.render('nums', [2], choices=[(4, 4), (5, 5)]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>""")
        # Choices are escaped correctly
        self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you > me')))), """<select multiple="multiple" name="escape">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="bad">you & me</option>
<option value="good">you > me</option>
</select>""")
        # Unicode choices are correctly rendered as HTML
        self.assertHTMLEqual(w.render('nums', ['ŠĐĆŽćžšđ'], choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')]), '<select multiple="multiple" name="nums">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" selected="selected">\u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</option>\n<option value="\u0107\u017e\u0161\u0111">abc\u0107\u017e\u0161\u0111</option>\n</select>')
        # Choices can be nested one level in order to create HTML optgroups:
        w.choices = (('outer1', 'Outer 1'), ('Group "1"', (('inner1', 'Inner 1'), ('inner2', 'Inner 2'))))
        self.assertHTMLEqual(w.render('nestchoice', None), """<select multiple="multiple" name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
        self.assertHTMLEqual(w.render('nestchoice', ['outer1']), """<select multiple="multiple" name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
        self.assertHTMLEqual(w.render('nestchoice', ['inner1']), """<select multiple="multiple" name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1" selected="selected">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
        self.assertHTMLEqual(w.render('nestchoice', ['outer1', 'inner2']), """<select multiple="multiple" name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2" selected="selected">Inner 2</option>
</optgroup>
</select>""")
    def test_radioselect(self):
        """
        RadioSelect renders a <ul> of <li><label><input type="radio">
        entries, checking the input whose value equals str(value); covers
        None/unknown values, str() comparison, generator choices,
        constructor vs. render-time choices, escaping, Unicode, and id
        attributes supplied at construction or render time.
        """
        w = RadioSelect()
        self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>""")
        # If the value is None, none of the options are checked:
        self.assertHTMLEqual(w.render('beatle', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>""")
        # If the value corresponds to a label (but not to an option value), none of the options are checked:
        self.assertHTMLEqual(w.render('beatle', 'John', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>""")
        # The value is compared to its str():
        self.assertHTMLEqual(w.render('num', 2, choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
        self.assertHTMLEqual(w.render('num', '2', choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
        self.assertHTMLEqual(w.render('num', 2, choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
        # The 'choices' argument can be any iterable:
        def get_choices():
            for i in range(5):
                yield (i, i)
        self.assertHTMLEqual(w.render('num', 2, choices=get_choices()), """<ul>
<li><label><input type="radio" name="num" value="0" /> 0</label></li>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
<li><label><input type="radio" name="num" value="4" /> 4</label></li>
</ul>""")
        # You can also pass 'choices' to the constructor:
        w = RadioSelect(choices=[(1, 1), (2, 2), (3, 3)])
        self.assertHTMLEqual(w.render('num', 2), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
        # If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
        self.assertHTMLEqual(w.render('num', 2, choices=[(4, 4), (5, 5)]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
<li><label><input type="radio" name="num" value="4" /> 4</label></li>
<li><label><input type="radio" name="num" value="5" /> 5</label></li>
</ul>""")
        # Choices are escaped correctly
        w = RadioSelect()
        self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you > me')))), """<ul>
<li><label><input type="radio" name="escape" value="bad" /> you & me</label></li>
<li><label><input type="radio" name="escape" value="good" /> you > me</label></li>
</ul>""")
        # Unicode choices are correctly rendered as HTML
        w = RadioSelect()
        self.assertHTMLEqual(six.text_type(w.render('email', 'ŠĐĆŽćžšđ', choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')])), '<ul>\n<li><label><input checked="checked" type="radio" name="email" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" /> \u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</label></li>\n<li><label><input type="radio" name="email" value="\u0107\u017e\u0161\u0111" /> abc\u0107\u017e\u0161\u0111</label></li>\n</ul>')
        # Attributes provided at instantiation are passed to the constituent inputs
        w = RadioSelect(attrs={'id': 'foo'})
        self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul id="foo">
<li><label for="foo_0"><input checked="checked" type="radio" id="foo_0" value="J" name="beatle" /> John</label></li>
<li><label for="foo_1"><input type="radio" id="foo_1" value="P" name="beatle" /> Paul</label></li>
<li><label for="foo_2"><input type="radio" id="foo_2" value="G" name="beatle" /> George</label></li>
<li><label for="foo_3"><input type="radio" id="foo_3" value="R" name="beatle" /> Ringo</label></li>
</ul>""")
        # Attributes provided at render-time are passed to the constituent inputs
        w = RadioSelect()
        self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')), attrs={'id': 'bar'}), """<ul id="bar">
<li><label for="bar_0"><input checked="checked" type="radio" id="bar_0" value="J" name="beatle" /> John</label></li>
<li><label for="bar_1"><input type="radio" id="bar_1" value="P" name="beatle" /> Paul</label></li>
<li><label for="bar_2"><input type="radio" id="bar_2" value="G" name="beatle" /> George</label></li>
<li><label for="bar_3"><input type="radio" id="bar_3" value="R" name="beatle" /> Ringo</label></li>
</ul>""")
    def test_radiofieldrenderer(self):
        """
        Exercises the RadioFieldRenderer protocol: iterating the renderer
        yields per-choice inputs exposing tag()/name/value/choice_value/
        choice_label/is_checked(), index access works and raises IndexError
        out of range, individual inputs accept extra attrs when rendered
        manually, and custom renderers can be plugged in via the
        ``renderer`` argument/attribute or the outer_html/inner_html
        template variables (#22950).
        """
        # RadioSelect uses a RadioFieldRenderer to render the individual radio inputs.
        # You can manipulate that object directly to customize the way the RadioSelect
        # is rendered.
        w = RadioSelect()
        r = w.get_renderer('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
        inp_set1 = []
        inp_set2 = []
        inp_set3 = []
        inp_set4 = []
        for inp in r:
            inp_set1.append(str(inp))
            inp_set2.append('%s<br />' % inp)
            inp_set3.append('<p>%s %s</p>' % (inp.tag(), inp.choice_label))
            inp_set4.append('%s %s %s %s %s' % (inp.name, inp.value, inp.choice_value, inp.choice_label, inp.is_checked()))
        self.assertHTMLEqual('\n'.join(inp_set1), """<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label>
<label><input type="radio" name="beatle" value="P" /> Paul</label>
<label><input type="radio" name="beatle" value="G" /> George</label>
<label><input type="radio" name="beatle" value="R" /> Ringo</label>""")
        self.assertHTMLEqual('\n'.join(inp_set2), """<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label><br />""")
        self.assertHTMLEqual('\n'.join(inp_set3), """<p><input checked="checked" type="radio" name="beatle" value="J" /> John</p>
<p><input type="radio" name="beatle" value="P" /> Paul</p>
<p><input type="radio" name="beatle" value="G" /> George</p>
<p><input type="radio" name="beatle" value="R" /> Ringo</p>""")
        self.assertHTMLEqual('\n'.join(inp_set4), """beatle J J John True
beatle J P Paul False
beatle J G George False
beatle J R Ringo False""")
        # A RadioFieldRenderer object also allows index access to individual RadioChoiceInput
        w = RadioSelect()
        r = w.get_renderer('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
        self.assertHTMLEqual(str(r[1]), '<label><input type="radio" name="beatle" value="P" /> Paul</label>')
        self.assertHTMLEqual(str(r[0]), '<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label>')
        self.assertTrue(r[0].is_checked())
        self.assertFalse(r[1].is_checked())
        self.assertEqual((r[1].name, r[1].value, r[1].choice_value, r[1].choice_label), ('beatle', 'J', 'P', 'Paul'))
        # These individual widgets can accept extra attributes if manually rendered.
        self.assertHTMLEqual(
            r[1].render(attrs={'extra': 'value'}),
            '<label><input type="radio" extra="value" name="beatle" value="P" /> Paul</label>'
        )
        with self.assertRaises(IndexError):
            r[10]
        # You can create your own custom renderers for RadioSelect to use.
        class MyRenderer(RadioFieldRenderer):
            def render(self):
                return '<br />\n'.join(six.text_type(choice) for choice in self)
        w = RadioSelect(renderer=MyRenderer)
        self.assertHTMLEqual(w.render('beatle', 'G', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<label><input type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input checked="checked" type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label>""")
        # Or you can use custom RadioSelect fields that use your custom renderer.
        class CustomRadioSelect(RadioSelect):
            renderer = MyRenderer
        w = CustomRadioSelect()
        self.assertHTMLEqual(w.render('beatle', 'G', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<label><input type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input checked="checked" type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label>""")
        # You can customize rendering with outer_html/inner_html renderer variables (#22950)
        class MyRenderer(RadioFieldRenderer):
            outer_html = str('<div{id_attr}>{content}</div>') # str is just to test some Python 2 issue with bytestrings
            inner_html = '<p>{choice_value}{sub_widgets}</p>'
        w = RadioSelect(renderer=MyRenderer)
        output = w.render('beatle', 'J',
            choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')),
            attrs={'id': 'bar'})
        self.assertIsInstance(output, SafeData)
        self.assertHTMLEqual(output, """<div id="bar">
<p><label for="bar_0"><input checked="checked" type="radio" id="bar_0" value="J" name="beatle" /> John</label></p>
<p><label for="bar_1"><input type="radio" id="bar_1" value="P" name="beatle" /> Paul</label></p>
<p><label for="bar_2"><input type="radio" id="bar_2" value="G" name="beatle" /> George</label></p>
<p><label for="bar_3"><input type="radio" id="bar_3" value="R" name="beatle" /> Ringo</label></p>
</div>""")
    def test_nested_choices(self):
        """
        One-level nested choices render as sub-<ul>s (labeled with the
        group name) for both RadioSelect and CheckboxSelectMultiple, with
        hierarchical ids like media_1_0 derived from the supplied id attr.
        """
        # Choices can be nested for radio buttons:
        w = RadioSelect()
        w.choices = (
            ('unknown', 'Unknown'),
            ('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
            ('Video', (('vhs', 'VHS'), ('dvd', 'DVD'))),
        )
        self.assertHTMLEqual(w.render('nestchoice', 'dvd', attrs={'id': 'media'}), """<ul id="media">
<li><label for="media_0"><input id="media_0" name="nestchoice" type="radio" value="unknown" /> Unknown</label></li>
<li>Audio<ul id="media_1">
<li><label for="media_1_0"><input id="media_1_0" name="nestchoice" type="radio" value="vinyl" /> Vinyl</label></li>
<li><label for="media_1_1"><input id="media_1_1" name="nestchoice" type="radio" value="cd" /> CD</label></li>
</ul></li>
<li>Video<ul id="media_2">
<li><label for="media_2_0"><input id="media_2_0" name="nestchoice" type="radio" value="vhs" /> VHS</label></li>
<li><label for="media_2_1"><input checked="checked" id="media_2_1" name="nestchoice" type="radio" value="dvd" /> DVD</label></li>
</ul></li>
</ul>""")
        # Choices can be nested for checkboxes:
        w = CheckboxSelectMultiple()
        w.choices = (
            ('unknown', 'Unknown'),
            ('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
            ('Video', (('vhs', 'VHS'), ('dvd', 'DVD'))),
        )
        self.assertHTMLEqual(w.render('nestchoice', ('vinyl', 'dvd'), attrs={'id': 'media'}), """<ul id="media">
<li><label for="media_0"><input id="media_0" name="nestchoice" type="checkbox" value="unknown" /> Unknown</label></li>
<li>Audio<ul id="media_1">
<li><label for="media_1_0"><input checked="checked" id="media_1_0" name="nestchoice" type="checkbox" value="vinyl" /> Vinyl</label></li>
<li><label for="media_1_1"><input id="media_1_1" name="nestchoice" type="checkbox" value="cd" /> CD</label></li>
</ul></li>
<li>Video<ul id="media_2">
<li><label for="media_2_0"><input id="media_2_0" name="nestchoice" type="checkbox" value="vhs" /> VHS</label></li>
<li><label for="media_2_1"><input checked="checked" id="media_2_1" name="nestchoice" type="checkbox" value="dvd" /> DVD</label></li>
</ul></li>
</ul>""")
def test_checkboxselectmultiple(self):
w = CheckboxSelectMultiple()
self.assertHTMLEqual(w.render('beatles', ['J'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
self.assertHTMLEqual(w.render('beatles', ['J', 'P'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
self.assertHTMLEqual(w.render('beatles', ['J', 'P', 'R'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# If the value is None, none of the options are selected:
self.assertHTMLEqual(w.render('beatles', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# If the value corresponds to a label (but not to an option value), none of the options are selected:
self.assertHTMLEqual(w.render('beatles', ['John'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# If multiple values are given, but some of them are not valid, the valid ones are selected:
self.assertHTMLEqual(w.render('beatles', ['J', 'G', 'foo'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# The value is compared to its str():
self.assertHTMLEqual(w.render('nums', [2], choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
self.assertHTMLEqual(w.render('nums', ['2'], choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
self.assertHTMLEqual(w.render('nums', [2], choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
# The 'choices' argument can be any iterable:
def get_choices():
for i in range(5):
yield (i, i)
self.assertHTMLEqual(w.render('nums', [2], choices=get_choices()), """<ul>
<li><label><input type="checkbox" name="nums" value="0" /> 0</label></li>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="nums" value="4" /> 4</label></li>
</ul>""")
# You can also pass 'choices' to the constructor:
w = CheckboxSelectMultiple(choices=[(1, 1), (2, 2), (3, 3)])
self.assertHTMLEqual(w.render('nums', [2]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
# If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
self.assertHTMLEqual(w.render('nums', [2], choices=[(4, 4), (5, 5)]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="nums" value="4" /> 4</label></li>
<li><label><input type="checkbox" name="nums" value="5" /> 5</label></li>
</ul>""")
# Choices are escaped correctly
self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you > me')))), """<ul>
<li><label><input type="checkbox" name="escape" value="1" /> 1</label></li>
<li><label><input type="checkbox" name="escape" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="escape" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="escape" value="bad" /> you & me</label></li>
<li><label><input type="checkbox" name="escape" value="good" /> you > me</label></li>
</ul>""")
# Unicode choices are correctly rendered as HTML
self.assertHTMLEqual(w.render('nums', ['ŠĐĆŽćžšđ'], choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')]), '<ul>\n<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>\n<li><label><input type="checkbox" name="nums" value="2" /> 2</label></li>\n<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>\n<li><label><input checked="checked" type="checkbox" name="nums" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" /> \u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</label></li>\n<li><label><input type="checkbox" name="nums" value="\u0107\u017e\u0161\u0111" /> abc\u0107\u017e\u0161\u0111</label></li>\n</ul>')
# Each input gets a separate ID
self.assertHTMLEqual(CheckboxSelectMultiple().render('letters', list('ac'), choices=zip(list('abc'), list('ABC')), attrs={'id': 'abc'}), """<ul id="abc">
<li><label for="abc_0"><input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" /> A</label></li>
<li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1" /> B</label></li>
<li><label for="abc_2"><input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" /> C</label></li>
</ul>""")
# Each input gets a separate ID when the ID is passed to the constructor
self.assertHTMLEqual(CheckboxSelectMultiple(attrs={'id': 'abc'}).render('letters', list('ac'), choices=zip(list('abc'), list('ABC'))), """<ul id="abc">
<li><label for="abc_0"><input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" /> A</label></li>
<li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1" /> B</label></li>
<li><label for="abc_2"><input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" /> C</label></li>
</ul>""")
w = CheckboxSelectMultiple()
r = w.get_renderer('abc', 'b', choices=[(c, c.upper()) for c in 'abc'])
# You can iterate over the CheckboxFieldRenderer to get individual elements
expected = [
'<label><input type="checkbox" name="abc" value="a" /> A</label>',
'<label><input checked="checked" type="checkbox" name="abc" value="b" /> B</label>',
'<label><input type="checkbox" name="abc" value="c" /> C</label>',
]
for output, expected in zip(r, expected):
self.assertHTMLEqual(force_text(output), expected)
# You can access individual elements
self.assertHTMLEqual(force_text(r[1]),
'<label><input checked="checked" type="checkbox" name="abc" value="b" /> B</label>')
# Out-of-range errors are propagated
with self.assertRaises(IndexError):
r[42]
    def test_subwidget(self):
        """Subwidgets of CheckboxSelectMultiple expose per-choice tags with IDs."""
        # Each subwidget tag gets a separate ID when the widget has an ID specified
        self.assertHTMLEqual("\n".join(c.tag() for c in CheckboxSelectMultiple(attrs={'id': 'abc'}).subwidgets('letters', list('ac'), choices=zip(list('abc'), list('ABC')))), """<input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" />
<input type="checkbox" name="letters" value="b" id="abc_1" />
<input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" />""")
        # Each subwidget tag does not get an ID if the widget does not have an ID specified
        self.assertHTMLEqual("\n".join(c.tag() for c in CheckboxSelectMultiple().subwidgets('letters', list('ac'), choices=zip(list('abc'), list('ABC')))), """<input checked="checked" type="checkbox" name="letters" value="a" />
<input type="checkbox" name="letters" value="b" />
<input checked="checked" type="checkbox" name="letters" value="c" />""")
        # The id_for_label property of the subwidget should return the ID that is used on the subwidget's tag
        self.assertHTMLEqual("\n".join('<input type="checkbox" name="letters" value="%s" id="%s" />' % (c.choice_value, c.id_for_label) for c in CheckboxSelectMultiple(attrs={'id': 'abc'}).subwidgets('letters', [], choices=zip(list('abc'), list('ABC')))), """<input type="checkbox" name="letters" value="a" id="abc_0" />
<input type="checkbox" name="letters" value="b" id="abc_1" />
<input type="checkbox" name="letters" value="c" id="abc_2" />""")
    def test_multi(self):
        """MultiWidget renders its sub-widgets and decompresses compressed values."""
        class MyMultiWidget(MultiWidget):
            def decompress(self, value):
                # Split a compressed "a__b" value back into per-widget values.
                if value:
                    return value.split('__')
                return ['', '']
            def format_output(self, rendered_widgets):
                # Join the rendered sub-widgets with a line break.
                return '<br />'.join(rendered_widgets)
        w = MyMultiWidget(widgets=(TextInput(attrs={'class': 'big'}), TextInput(attrs={'class': 'small'})))
        self.assertHTMLEqual(w.render('name', ['john', 'lennon']), '<input type="text" class="big" value="john" name="name_0" /><br /><input type="text" class="small" value="lennon" name="name_1" />')
        # A compressed (string) value goes through decompress() before rendering.
        self.assertHTMLEqual(w.render('name', 'john__lennon'), '<input type="text" class="big" value="john" name="name_0" /><br /><input type="text" class="small" value="lennon" name="name_1" />')
        # An 'id' attr passed at render time is suffixed _0, _1 per sub-widget.
        self.assertHTMLEqual(w.render('name', 'john__lennon', attrs={'id': 'foo'}), '<input id="foo_0" type="text" class="big" value="john" name="name_0" /><br /><input id="foo_1" type="text" class="small" value="lennon" name="name_1" />')
        w = MyMultiWidget(widgets=(TextInput(attrs={'class': 'big'}), TextInput(attrs={'class': 'small'})), attrs={'id': 'bar'})
        self.assertHTMLEqual(w.render('name', ['john', 'lennon']), '<input id="bar_0" type="text" class="big" value="john" name="name_0" /><br /><input id="bar_1" type="text" class="small" value="lennon" name="name_1" />')
        # Test needs_multipart_form=True if any widget needs it
        w = MyMultiWidget(widgets=(TextInput(), FileInput()))
        self.assertTrue(w.needs_multipart_form)
        # Test needs_multipart_form=False if no widget needs it
        w = MyMultiWidget(widgets=(TextInput(), TextInput()))
        self.assertFalse(w.needs_multipart_form)
    def test_splitdatetime(self):
        """SplitDateTimeWidget renders a pair of text inputs for date and time."""
        w = SplitDateTimeWidget()
        self.assertHTMLEqual(w.render('date', ''), '<input type="text" name="date_0" /><input type="text" name="date_1" />')
        self.assertHTMLEqual(w.render('date', None), '<input type="text" name="date_0" /><input type="text" name="date_1" />')
        self.assertHTMLEqual(w.render('date', datetime.datetime(2006, 1, 10, 7, 30)), '<input type="text" name="date_0" value="2006-01-10" /><input type="text" name="date_1" value="07:30:00" />')
        # A [date, time] pair renders the same as a combined datetime.
        self.assertHTMLEqual(w.render('date', [datetime.date(2006, 1, 10), datetime.time(7, 30)]), '<input type="text" name="date_0" value="2006-01-10" /><input type="text" name="date_1" value="07:30:00" />')
        # You can also pass 'attrs' to the constructor. In this case, the attrs will be
        # applied to both the date and the time sub-widgets.
        w = SplitDateTimeWidget(attrs={'class': 'pretty'})
        self.assertHTMLEqual(w.render('date', datetime.datetime(2006, 1, 10, 7, 30)), '<input type="text" class="pretty" value="2006-01-10" name="date_0" /><input type="text" class="pretty" value="07:30:00" name="date_1" />')
        # Use 'date_format' and 'time_format' to change the way a value is displayed.
        w = SplitDateTimeWidget(date_format='%d/%m/%Y', time_format='%H:%M')
        self.assertHTMLEqual(w.render('date', datetime.datetime(2006, 1, 10, 7, 30)), '<input type="text" name="date_0" value="10/01/2006" /><input type="text" name="date_1" value="07:30" />')
    def test_datetimeinput(self):
        """DateTimeInput renders datetimes, trimming microseconds by default."""
        w = DateTimeInput()
        self.assertHTMLEqual(w.render('date', None), '<input type="text" name="date" />')
        d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
        self.assertEqual(str(d), '2007-09-17 12:51:34.482548')
        # The microseconds are trimmed on display, by default.
        self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="2007-09-17 12:51:34" />')
        self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51, 34)), '<input type="text" name="date" value="2007-09-17 12:51:34" />')
        self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51)), '<input type="text" name="date" value="2007-09-17 12:51:00" />')
        # Use 'format' to change the way a value is displayed.
        w = DateTimeInput(format='%d/%m/%Y %H:%M', attrs={'type': 'datetime'})
        self.assertHTMLEqual(w.render('date', d), '<input type="datetime" name="date" value="17/09/2007 12:51" />')
    def test_dateinput(self):
        """DateInput renders dates in ISO format by default."""
        w = DateInput()
        self.assertHTMLEqual(w.render('date', None), '<input type="text" name="date" />')
        d = datetime.date(2007, 9, 17)
        self.assertEqual(str(d), '2007-09-17')
        self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="2007-09-17" />')
        self.assertHTMLEqual(w.render('date', datetime.date(2007, 9, 17)), '<input type="text" name="date" value="2007-09-17" />')
        # We should be able to initialize from a unicode value.
        self.assertHTMLEqual(w.render('date', '2007-09-17'), '<input type="text" name="date" value="2007-09-17" />')
        # Use 'format' to change the way a value is displayed.
        w = DateInput(format='%d/%m/%Y', attrs={'type': 'date'})
        self.assertHTMLEqual(w.render('date', d), '<input type="date" name="date" value="17/09/2007" />')
    def test_timeinput(self):
        """TimeInput renders times, trimming microseconds by default."""
        w = TimeInput()
        self.assertHTMLEqual(w.render('time', None), '<input type="text" name="time" />')
        t = datetime.time(12, 51, 34, 482548)
        self.assertEqual(str(t), '12:51:34.482548')
        # The microseconds are trimmed on display, by default.
        self.assertHTMLEqual(w.render('time', t), '<input type="text" name="time" value="12:51:34" />')
        self.assertHTMLEqual(w.render('time', datetime.time(12, 51, 34)), '<input type="text" name="time" value="12:51:34" />')
        self.assertHTMLEqual(w.render('time', datetime.time(12, 51)), '<input type="text" name="time" value="12:51:00" />')
        # We should be able to initialize from a unicode value.
        self.assertHTMLEqual(w.render('time', '13:12:11'), '<input type="text" name="time" value="13:12:11" />')
        # Use 'format' to change the way a value is displayed.
        w = TimeInput(format='%H:%M', attrs={'type': 'time'})
        self.assertHTMLEqual(w.render('time', t), '<input type="time" name="time" value="12:51" />')
def test_splithiddendatetime(self):
from django.forms.widgets import SplitHiddenDateTimeWidget
w = SplitHiddenDateTimeWidget()
self.assertHTMLEqual(w.render('date', ''), '<input type="hidden" name="date_0" /><input type="hidden" name="date_1" />')
d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
self.assertHTMLEqual(str(d), '2007-09-17 12:51:34.482548')
self.assertHTMLEqual(w.render('date', d), '<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:34" />')
self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51, 34)), '<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:34" />')
self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51)), '<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:00" />')
    def test_sub_widget_html_safe(self):
        """Subwidgets implement __html__ so template engines treat them as safe."""
        widget = TextInput()
        subwidget = next(widget.subwidgets('username', 'John Doe'))
        self.assertTrue(hasattr(subwidget, '__html__'))
        self.assertEqual(force_text(subwidget), subwidget.__html__())
    def test_choice_input_html_safe(self):
        """ChoiceInput implements __html__ for safe template interpolation."""
        widget = ChoiceInput('choices', 'CHOICE1', {}, ('CHOICE1', 'first choice'), 0)
        self.assertTrue(hasattr(ChoiceInput, '__html__'))
        self.assertEqual(force_text(widget), widget.__html__())
    def test_choice_field_renderer_html_safe(self):
        """ChoiceFieldRenderer implements __html__ for safe template interpolation."""
        renderer = ChoiceFieldRenderer('choices', 'CHOICE1', {}, [('CHOICE1', 'first_choice')])
        # Stub out the input class; only the renderer's HTML-safety is under test.
        renderer.choice_input_class = lambda *args: args
        self.assertTrue(hasattr(ChoiceFieldRenderer, '__html__'))
        self.assertEqual(force_text(renderer), renderer.__html__())
class NullBooleanSelectLazyForm(Form):
    """Form to test for lazy evaluation. Refs #17190"""
    # The field is deliberately named ``bool``; it shadows the builtin only
    # inside this class body, which is harmless here.
    bool = BooleanField(widget=NullBooleanSelect())
@override_settings(USE_L10N=True)
class FormsI18NWidgetsTestCase(TestCase):
    """Date/time widgets format values according to the active locale."""
    def setUp(self):
        super(FormsI18NWidgetsTestCase, self).setUp()
        # 'de-at' (Austrian German) formats dates as DD.MM.YYYY.
        activate('de-at')
    def tearDown(self):
        deactivate()
        super(FormsI18NWidgetsTestCase, self).tearDown()
    def test_datetimeinput(self):
        w = DateTimeInput()
        d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
        self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="17.09.2007 12:51:34" />')
    def test_dateinput(self):
        w = DateInput()
        d = datetime.date(2007, 9, 17)
        self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="17.09.2007" />')
    def test_timeinput(self):
        w = TimeInput()
        t = datetime.time(12, 51, 34, 482548)
        self.assertHTMLEqual(w.render('time', t), '<input type="text" name="time" value="12:51:34" />')
    def test_datetime_locale_aware(self):
        w = DateTimeInput()
        d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
        # With localization disabled the ISO format is used regardless of locale.
        with self.settings(USE_L10N=False):
            self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="2007-09-17 12:51:34" />')
        # With it enabled, the overridden locale's format applies.
        with override('es'):
            self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="17/09/2007 12:51:34" />')
    def test_splithiddendatetime(self):
        from django.forms.widgets import SplitHiddenDateTimeWidget
        w = SplitHiddenDateTimeWidget()
        self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51)), '<input type="hidden" name="date_0" value="17.09.2007" /><input type="hidden" name="date_1" value="12:51:00" />')
    def test_nullbooleanselect(self):
        """
        Ensure that the NullBooleanSelect widget's options are lazily
        localized.
        Refs #17190
        """
        f = NullBooleanSelectLazyForm()
        self.assertHTMLEqual(f.fields['bool'].widget.render('id_bool', True), '<select name="id_bool">\n<option value="1">Unbekannt</option>\n<option value="2" selected="selected">Ja</option>\n<option value="3">Nein</option>\n</select>')
class SelectAndTextWidget(MultiWidget):
    """
    MultiWidget subclass combining a RadioSelect and a TextInput.

    The ``choices`` property proxies straight through to the embedded
    RadioSelect, so the pair can be configured like a single select widget.
    """
    def __init__(self, choices=()):
        # An immutable empty tuple avoids the shared-mutable-default pitfall
        # of ``choices=[]``; it is accepted anywhere an empty list was.
        widgets = [
            RadioSelect(choices=choices),
            TextInput
        ]
        super(SelectAndTextWidget, self).__init__(widgets)
    def _set_choices(self, choices):
        """
        When choices are set for this widget, we want to pass those along to the Select widget
        """
        self.widgets[0].choices = choices
    def _get_choices(self):
        """
        The choices for this widget are the Select widget's choices
        """
        return self.widgets[0].choices
    choices = property(_get_choices, _set_choices)
class WidgetTests(TestCase):
    """Regression tests for specific widget tickets."""
    def test_12048(self):
        # See ticket #12048: deepcopying a MultiWidget must not share state.
        w1 = SelectAndTextWidget(choices=[1, 2, 3])
        w2 = copy.deepcopy(w1)
        w2.choices = [4, 5, 6]
        # w2 ought to be independent of w1, since MultiWidget ought
        # to make a copy of its sub-widgets when it is copied.
        self.assertEqual(w1.choices, [1, 2, 3])
    @ignore_warnings(category=RemovedInDjango19Warning)
    def test_13390(self):
        # See ticket #13390: SplitDateTimeWidget handles empty values in both
        # the single-string and two-element-list forms.
        class SplitDateForm(Form):
            field = DateTimeField(widget=SplitDateTimeWidget, required=False)
        form = SplitDateForm({'field': ''})
        self.assertTrue(form.is_valid())
        form = SplitDateForm({'field': ['', '']})
        self.assertTrue(form.is_valid())
        class SplitDateRequiredForm(Form):
            field = DateTimeField(widget=SplitDateTimeWidget, required=True)
        form = SplitDateRequiredForm({'field': ''})
        self.assertFalse(form.is_valid())
        form = SplitDateRequiredForm({'field': ['', '']})
        self.assertFalse(form.is_valid())
@override_settings(ROOT_URLCONF='forms_tests.urls')
class LiveWidgetTests(AdminSeleniumWebDriverTestCase):
    """Browser-driven (Selenium) widget round-trip tests."""
    available_apps = ['forms_tests'] + AdminSeleniumWebDriverTestCase.available_apps
    def test_textarea_trailing_newlines(self):
        """
        Test that a roundtrip on a ModelForm doesn't alter the TextField value
        """
        article = Article.objects.create(content="\nTst\n")
        self.selenium.get('%s%s' % (self.live_server_url,
            reverse('article_form', args=[article.pk])))
        self.selenium.find_element_by_id('submit').submit()
        article = Article.objects.get(pk=article.pk)
        # Should be "\nTst\n" after #19251 is fixed
        self.assertEqual(article.content, "\r\nTst\r\n")
@python_2_unicode_compatible
class FakeFieldFile(object):
    """
    Quacks like a FieldFile (has a .url and unicode representation), but
    doesn't require us to care about storages etc.
    """
    # Fixed URL consumed by the ClearableFileInput tests below.
    url = 'something'
    def __str__(self):
        return self.url
class ClearableFileInputTests(TestCase):
    """Rendering and value-extraction behaviour of ClearableFileInput."""
    def test_clear_input_renders(self):
        """
        A ClearableFileInput with is_required False and rendered with
        an initial value that is a file renders a clear checkbox.
        """
        widget = ClearableFileInput()
        widget.is_required = False
        self.assertHTMLEqual(widget.render('myfile', FakeFieldFile()),
            'Currently: <a href="something">something</a> <input type="checkbox" name="myfile-clear" id="myfile-clear_id" /> <label for="myfile-clear_id">Clear</label><br />Change: <input type="file" name="myfile" />')
    def test_html_escaped(self):
        """
        A ClearableFileInput should escape name, filename and URL when
        rendering HTML. Refs #15182.
        """
        @python_2_unicode_compatible
        class StrangeFieldFile(object):
            # Raw (unescaped) URL and filename containing HTML-special chars.
            url = "something?chapter=1&sect=2&copy=3&lang=en"
            def __str__(self):
                return '''something<div onclick="alert('oops')">.jpg'''
        widget = ClearableFileInput()
        field = StrangeFieldFile()
        output = widget.render('my<div>file', field)
        # The raw URL must never appear; ampersands are escaped to &amp;.
        self.assertNotIn(field.url, output)
        self.assertIn('href="something?chapter=1&amp;sect=2&amp;copy=3&amp;lang=en"', output)
        # The raw filename must never appear; < > " ' are entity-escaped.
        self.assertNotIn(six.text_type(field), output)
        self.assertIn('something&lt;div onclick=&quot;alert(&#39;oops&#39;)&quot;&gt;.jpg', output)
        # The field name is escaped too: only the escaped form is present.
        self.assertIn('my&lt;div&gt;file', output)
        self.assertNotIn('my<div>file', output)
    def test_clear_input_renders_only_if_not_required(self):
        """
        A ClearableFileInput with is_required=True does not render a clear
        checkbox.
        """
        widget = ClearableFileInput()
        widget.is_required = True
        self.assertHTMLEqual(widget.render('myfile', FakeFieldFile()),
            'Currently: <a href="something">something</a> <br />Change: <input type="file" name="myfile" />')
    def test_clear_input_renders_only_if_initial(self):
        """
        A ClearableFileInput instantiated with no initial value does not render
        a clear checkbox.
        """
        widget = ClearableFileInput()
        widget.is_required = False
        self.assertHTMLEqual(widget.render('myfile', None),
            '<input type="file" name="myfile" />')
    def test_clear_input_checked_returns_false(self):
        """
        ClearableFileInput.value_from_datadict returns False if the clear
        checkbox is checked, if not required.
        """
        widget = ClearableFileInput()
        widget.is_required = False
        self.assertEqual(widget.value_from_datadict(
            data={'myfile-clear': True},
            files={},
            name='myfile'), False)
    def test_clear_input_checked_returns_false_only_if_not_required(self):
        """
        ClearableFileInput.value_from_datadict never returns False if the field
        is required.
        """
        widget = ClearableFileInput()
        widget.is_required = True
        f = SimpleUploadedFile('something.txt', b'content')
        # The uploaded file wins over the (ignored) clear request.
        self.assertEqual(widget.value_from_datadict(
            data={'myfile-clear': True},
            files={'myfile': f},
            name='myfile'), f)
    def test_render_custom_template(self):
        """A custom template_with_initial is honoured when rendering."""
        widget = ClearableFileInput()
        widget.template_with_initial = (
            '%(initial_text)s: <img src="%(initial_url)s" alt="%(initial)s" /> '
            '%(clear_template)s<br />%(input_text)s: %(input)s'
        )
        self.assertHTMLEqual(
            widget.render('myfile', FakeFieldFile()),
            'Currently: <img src="something" alt="something" /> '
            '<input type="checkbox" name="myfile-clear" id="myfile-clear_id" /> '
            '<label for="myfile-clear_id">Clear</label><br />Change: <input type="file" name="myfile" />'
        )
| bsd-3-clause |
peak6/st2 | st2common/st2common/validators/api/misc.py | 13 | 1503 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.constants.pack import SYSTEM_PACK_NAME
from st2common.exceptions.apivalidation import ValueValidationException
__all__ = [
'validate_not_part_of_system_pack'
]
def validate_not_part_of_system_pack(resource_db):
    """
    Ensure the provided resource database object does not belong to a
    system level pack.

    :param resource_db: Resource database object to check.
    :type resource_db: ``object``

    :raises ValueValidationException: If the resource is part of the
        system pack.
    :return: The resource database object, unchanged.
    """
    if getattr(resource_db, 'pack', None) == SYSTEM_PACK_NAME:
        raise ValueValidationException(
            'Resources belonging to system level packs can\'t be manipulated')
    return resource_db
| apache-2.0 |
wilvk/ansible | lib/ansible/utils/module_docs_fragments/ec2.py | 196 | 1119 | # (c) 2015, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    # EC2 only documentation fragment
    # Consumed by Ansible's doc-fragment machinery: modules that declare
    # ``extends_documentation_fragment: ec2`` have this YAML merged into
    # their own DOCUMENTATION block.
    DOCUMENTATION = """
options:
  region:
    description:
      - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
        See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
    required: false
    aliases: [ 'aws_region', 'ec2_region' ]
"""
| gpl-3.0 |
GregoryVigoTorres/scrapy | scrapy/spiders/init.py | 95 | 1354 | from scrapy.spiders import Spider
from scrapy.utils.spider import iterate_spider_output
class InitSpider(Spider):
    """Base Spider with initialization facilities"""
    def start_requests(self):
        # Defer the regular start requests until initialization finishes:
        # they are stashed here and released by self.initialized() at the
        # end of the init-request chain.
        self._postinit_reqs = super(InitSpider, self).start_requests()
        return iterate_spider_output(self.init_request())
    def initialized(self, response=None):
        """This method must be set as the callback of your last initialization
        request. See self.init_request() docstring for more info.
        """
        # pop() both returns the stashed requests and drops the reference,
        # so the spider no longer holds on to the generator.
        return self.__dict__.pop('_postinit_reqs')
    def init_request(self):
        """This function should return one initialization request, with the
        self.initialized method as callback. When the self.initialized method
        is called this spider is considered initialized. If you need to perform
        several requests for initializing your spider, you can do so by using
        different callbacks. The only requirement is that the final callback
        (of the last initialization request) must be self.initialized.
        The default implementation calls self.initialized immediately, and
        means that no initialization is needed. This method should be
        overridden only when you need to perform requests to initialize your
        spider
        """
        return self.initialized()
| bsd-3-clause |
lum4chi/mygensim | models/qlmodel.py | 1 | 1822 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Francesco Lumachi <francesco.lumachi@gmail.com>
from __future__ import division
from gensim import models, utils
import math
class QLModel(models.TfidfModel):
    """ Use of models.TfidfModel as base to build Query Likelihood Model (12.9) appeared in
        "An introduction to Information Retrieval" by Manning, Raghavan and Schütze
    """
    def __init__(self, *args, **kwargs):
        # Weights are raw -log probabilities, so tf-idf vector normalization
        # must stay disabled.
        super(QLModel, self).__init__(*args, normalize=False, **kwargs)
    def __str__(self):
        return "QueryLikelihoodModel(num_docs=%s, num_nnz=%s)" % (self.num_docs, self.num_nnz)
    def __getitem__(self, bog, eps=1e-12):
        """ Overwrite weight calculus with estimation of a Model of d, based on its own "gram"
            (we can see bag-of-word as bag-of-gram based upon what tokenize policy to adopt):
            P(q|d) ≈ prod( P(g|d) for g in q ) # product of only the gram present in query
            P(g|d) ≈ tf(g,d) / len(d)          # len(d) = total gram occurrences in d
        """
        # if the input vector is in fact a corpus, return a transformed corpus as a result
        is_corpus, bog = utils.is_corpus(bog)
        if is_corpus:
            return self._apply(bog)
        # BUGFIX: the document length len(d) is the *sum* of the gram
        # frequencies, not len(bog) (the number of distinct grams). With
        # len(bog), tf/len could exceed 1, yielding P(g|d) > 1 and negative
        # -log weights, contradicting the invariant 0 < P(g|d) <= 1 below.
        doc_len = sum(tf for _, tf in bog)
        if doc_len == 0:
            return []
        # unknown (new) terms will be given zero weight
        # 0 < P(g|d) <= 1, then -1 * log() to avoid negative
        vector = [(gramid, -math.log(tf / doc_len))
                  for gramid, tf in bog if self.idfs.get(gramid, 0.0) != 0.0]
        # --- no need to normalize ---
        # make sure there are no explicit zeroes in the vector (must be sparse)
        vector = [(termid, weight) for termid, weight in vector if abs(weight) > eps]
        return vector
dragly/conan | conans/client/rest/auth_manager.py | 2 | 8110 | '''
Collaborate with RestApiClient to make remote anonymous and authenticated calls.
Uses user_io to request user's login and password and obtain a token for calling authenticated
methods if receives AuthenticationException from RestApiClient.
Flow:
Directly invoke a REST method in RestApiClient, example: get_conan.
if receives AuthenticationException (not open method) will ask user for login and password
and will invoke RestApiClient.get_token() (with LOGIN_RETRIES retries) and retry to call
get_conan with the new token.
'''
from conans.errors import AuthenticationException, ForbiddenException,\
ConanException
from uuid import getnode as get_mac
import hashlib
from conans.util.log import logger
def input_credentials_if_unauthorized(func):
    """Decorator. Handles AuthenticationException and request user
    to input a user and a password"""
    # Maximum number of interactive login attempts before giving up.
    LOGIN_RETRIES = 3
    def wrapper(self, *args, **kwargs):
        # First try the call with whatever credentials/token are in place;
        # only on an auth failure do we fall into the interactive flow.
        try:
            # Set custom headers of mac_digest and username
            self.set_custom_headers(self.user)
            ret = func(self, *args, **kwargs)
            return ret
        except ForbiddenException:
            raise ForbiddenException("Permission denied for user: '%s'" % self.user)
        except AuthenticationException:
            # User valid but not enough permissions
            if self.user is None or self._rest_client.token is None:
                # token is None when you change user with user command
                # Anonymous is not enough, ask for a user
                remote = self.remote
                self._user_io.out.info('Please log in to "%s" to perform this action. '
                                       'Execute "conan user" command.' % remote.name)
                if remote.name == "conan.io":
                    self._user_io.out.info('If you don\'t have an account sign up here: '
                                           'http://www.conan.io')
                return retry_with_new_token(self, *args, **kwargs)
            else:
                # Token expired or not valid, so clean the token and repeat the call
                # (will be anonymous call but exporting who is calling)
                self._store_login((self.user, None))
                self._rest_client.token = None
                # Set custom headers of mac_digest and username
                self.set_custom_headers(self.user)
                return wrapper(self, *args, **kwargs)
    def retry_with_new_token(self, *args, **kwargs):
        """Try LOGIN_RETRIES to obtain a password from user input for which
        we can get a valid token from api_client. If a token is returned,
        credentials are stored in localdb and rest method is called"""
        for _ in range(LOGIN_RETRIES):
            user, password = self._user_io.request_login(self._remote.name, self.user)
            token = None
            try:
                token = self.authenticate(user, password)
            except AuthenticationException:
                if self.user is None:
                    self._user_io.out.error('Wrong user or password')
                else:
                    self._user_io.out.error(
                        'Wrong password for user "%s"' % self.user)
                    self._user_io.out.info(
                        'You can change username with "conan user <username>"')
            if token:
                logger.debug("Got token: %s" % str(token))
                self._rest_client.token = token
                self.user = user
                self._store_login((user, token))
                # Set custom headers of mac_digest and username
                self.set_custom_headers(user)
                # Retry the original REST call with the fresh token.
                return wrapper(self, *args, **kwargs)
        raise AuthenticationException("Too many failed login attempts, bye!")
    return wrapper
class ConanApiAuthManager(object):
    """Wraps a RestApiClient with credential management.

    Keeps the active remote, the logged user and its token in sync with the
    local credentials database (localdb), and exposes the REST API methods
    decorated so authentication is requested interactively on demand.
    """
    def __init__(self, rest_client, user_io, localdb):
        self._user_io = user_io
        self._rest_client = rest_client
        self._localdb = localdb
        self._remote = None
    @property
    def remote(self):
        return self._remote
    @remote.setter
    def remote(self, remote):
        # Switching remote re-targets the REST client and reloads the stored
        # credentials for that remote's URL.
        self._remote = remote
        self._rest_client.remote_url = remote.url
        self.user, self._rest_client.token = self._localdb.get_login(remote.url)
    def _store_login(self, login):
        """Persist a (user, token) pair in localdb; failures are reported, not raised."""
        try:
            self._localdb.set_login(login, self._remote.url)
        except Exception as e:
            self._user_io.out.error(
                'Your credentials could not be stored in local cache\n')
            self._user_io.out.debug(str(e) + '\n')
    @staticmethod
    def get_mac_digest():
        """Return an anonymized machine id: hex SHA1 of the MAC address."""
        sha1 = hashlib.sha1()
        sha1.update(str(get_mac()).encode())
        return str(sha1.hexdigest())
    def set_custom_headers(self, username):
        # First identifies our machine, second the username even if it was not
        # authenticated
        custom_headers = self._rest_client.custom_headers
        custom_headers['X-Client-Anonymous-Id'] = self.get_mac_digest()
        custom_headers['X-Client-Id'] = str(username or "")
    # ######### CONAN API METHODS ##########
    @input_credentials_if_unauthorized
    def upload_conan(self, conan_reference, the_files):
        return self._rest_client.upload_conan(conan_reference, the_files)
    @input_credentials_if_unauthorized
    def upload_package(self, package_reference, the_files):
        return self._rest_client.upload_package(package_reference, the_files)
    @input_credentials_if_unauthorized
    def get_conan_digest(self, conan_reference):
        return self._rest_client.get_conan_digest(conan_reference)
    @input_credentials_if_unauthorized
    def get_package_digest(self, package_reference):
        return self._rest_client.get_package_digest(package_reference)
    @input_credentials_if_unauthorized
    def get_recipe(self, conan_reference, dest_folder):
        return self._rest_client.get_recipe(conan_reference, dest_folder)
    @input_credentials_if_unauthorized
    def get_package(self, package_reference, dest_folder):
        return self._rest_client.get_package(package_reference, dest_folder)
    @input_credentials_if_unauthorized
    def search(self, pattern, ignorecase):
        return self._rest_client.search(pattern, ignorecase)
    @input_credentials_if_unauthorized
    def search_packages(self, reference, query):
        return self._rest_client.search_packages(reference, query)
    @input_credentials_if_unauthorized
    def remove(self, conan_refernce):
        # (sic: parameter name kept for backward compatibility)
        return self._rest_client.remove_conanfile(conan_refernce)
    @input_credentials_if_unauthorized
    def remove_packages(self, conan_reference, package_ids):
        return self._rest_client.remove_packages(conan_reference, package_ids)
    def authenticate(self, user, password):
        """Log `user` in against the current remote and update localdb.

        With a falsy `user` it only reports the currently stored user.
        Returns the server token, or None when no authentication happened.
        """
        # BUGFIX: initialize token so the final `return token` cannot raise
        # UnboundLocalError when `user` is falsy (report-only branch).
        token = None
        remote_url = self._remote.url
        prev_user = self._localdb.get_username(remote_url)
        prev_username = prev_user or "None (anonymous)"
        if not user:
            self._user_io.out.info("Current '%s' user: %s" % (self._remote.name, prev_username))
        else:
            # The literal string 'none' (any case) means "log out".
            user = None if user.lower() == 'none' else user
            if user and password is not None:
                token = self._remote_auth(user, password)
            else:
                token = None
            if prev_user == user:
                self._user_io.out.info("Current '%s' user already: %s"
                                       % (self._remote.name, prev_username))
            else:
                username = user or "None (anonymous)"
                self._user_io.out.info("Change '%s' user from %s to %s"
                                       % (self._remote.name, prev_username, username))
            self._localdb.set_login((user, token), remote_url)
        return token
    def _remote_auth(self, user, password):
        try:
            return self._rest_client.authenticate(user, password)
        except UnicodeDecodeError:
            # Surface a clear error instead of an encoding traceback when the
            # password contains characters the transport cannot encode.
            raise ConanException("Password contains not allowed symbols")
| mit |
#python
import k3d
import testing
# Regression test: chain two CGAL boolean operations on simple primitives
# and verify the resulting mesh is valid and matches a stored reference.
document = k3d.new_document()
# Tall, thin cylinder used to drill through the big cylinder.
small_cylinder = k3d.plugin.create("PolyCylinder", document)
small_cylinder.radius = 2.0
small_cylinder.zmax = 7.5
small_cylinder.zmin = -7.5
small_cylinder.u_segments = 8
big_cylinder = k3d.plugin.create("PolyCylinder", document)
big_cylinder.u_segments = 8
torus = k3d.plugin.create("PolyTorus", document)
torus.u_segments = 8
torus.v_segments = 4
# First boolean: big cylinder minus small cylinder ("difference").
first_boolean = k3d.plugin.create("CGALBoolean", document)
first_boolean.type = "difference"
k3d.property.create(first_boolean, "k3d::mesh*", "input_1", "Input 1", "")
k3d.property.create(first_boolean, "k3d::mesh*", "input_2", "Input 2", "")
# Second boolean: subtract the first result from the torus
# (reverse_difference swaps the operand roles).
second_boolean = k3d.plugin.create("CGALBoolean", document)
second_boolean.type = "reverse_difference"
k3d.property.create(second_boolean, "k3d::mesh*", "input_1", "Input 1", "")
k3d.property.create(second_boolean, "k3d::mesh*", "input_2", "Input 2", "")
# Wire the pipeline: primitives -> first boolean -> second boolean.
k3d.property.connect(document, big_cylinder.get_property("output_mesh"), first_boolean.get_property("input_1"))
k3d.property.connect(document, small_cylinder.get_property("output_mesh"), first_boolean.get_property("input_2"))
k3d.property.connect(document, torus.get_property("output_mesh"), second_boolean.get_property("input_1"))
k3d.property.connect(document, first_boolean.get_property("output_mesh"), second_boolean.get_property("input_2"))
testing.require_valid_mesh(document, second_boolean.get_property("output_mesh"))
testing.require_similar_mesh(document, second_boolean.get_property("output_mesh"), "mesh.modifier.CGALBoolean.cylinders", 1)
| gpl-2.0 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from functools import partial
import logging
from telemetry.core import exceptions
from telemetry.internal.browser import web_contents
import py_utils
class Oobe(web_contents.WebContents):
  """Driver for the Chrome OS OOBE/login screen.

  Wraps the OOBE WebContents and performs logins (guest, fake, and GAIA via
  either the legacy iframe or the webview flow) through the page's
  JavaScript ``Oobe`` test API.
  """
  def __init__(self, inspector_backend):
    super(Oobe, self).__init__(inspector_backend)
  def _GaiaIFrameContext(self):
    """Return the JS context id hosting the GAIA iframe, or None.

    The iframe is recognized by a fully loaded document that contains an
    'Email' element.
    """
    max_context_id = self.EnableAllContexts()
    logging.debug('%d contexts in Gaia page' % max_context_id)
    for gaia_iframe_context in range(max_context_id + 1):
      try:
        if self.EvaluateJavaScript(
            "document.readyState == 'complete' && "
            "document.getElementById('Email') != null",
            context_id=gaia_iframe_context):
          return gaia_iframe_context
      except exceptions.EvaluateException:
        # Not every context is scriptable; skip it and keep searching.
        pass
    return None
  def _GaiaWebviewContext(self):
    """Return the first webview context (the GAIA webview), or None."""
    webview_contexts = self.GetWebviewContexts()
    if webview_contexts:
      return webview_contexts[0]
    return None
  def _ExecuteOobeApi(self, api, *args):
    """Invoke an Oobe JS test-API function with the given arguments.

    Raises:
      exceptions.LoginException: if ``api`` is not defined in the page.
    """
    logging.info('Invoking %s' % api)
    self.WaitForJavaScriptCondition("typeof Oobe == 'function'", timeout=120)
    if self.EvaluateJavaScript(
        "typeof {{ @api }} == 'undefined'", api=api):
      raise exceptions.LoginException('%s js api missing' % api)
    # Example values:
    #   |api|: 'doLogin'
    #   |args|: ['username', 'pass', True]
    # Executes: 'doLogin("username", "pass", true)'
    self.ExecuteJavaScript('{{ @f }}({{ *args }})', f=api, args=args)
  def NavigateGuestLogin(self):
    """Logs in as guest."""
    self._ExecuteOobeApi('Oobe.guestLoginForTesting')
  def NavigateFakeLogin(self, username, password, gaia_id,
                        enterprise_enroll=False):
    """Fake user login."""
    self._ExecuteOobeApi('Oobe.loginForTesting', username, password, gaia_id,
                         enterprise_enroll)
  def NavigateGaiaLogin(self, username, password,
                        enterprise_enroll=False,
                        for_user_triggered_enrollment=False):
    """Logs in using the GAIA webview or IFrame, whichever is
    present. |enterprise_enroll| allows for enterprise enrollment.
    |for_user_triggered_enrollment| should be False for remora enrollment."""
    self._ExecuteOobeApi('Oobe.skipToLoginForTesting')
    if for_user_triggered_enrollment:
      self._ExecuteOobeApi('Oobe.switchToEnterpriseEnrollmentForTesting')
    self._NavigateGaiaLogin(username, password, enterprise_enroll)
    if enterprise_enroll:
      self.WaitForJavaScriptCondition(
          'Oobe.isEnrollmentSuccessfulForTest()', timeout=30)
      self._ExecuteOobeApi('Oobe.enterpriseEnrollmentDone')
  def _NavigateGaiaLogin(self, username, password, enterprise_enroll):
    """Invokes NavigateIFrameLogin or NavigateWebViewLogin as appropriate."""
    def _GetGaiaFunction():
      # Prefer the webview-based flow when a webview context exists; fall
      # back to the legacy iframe flow. Returns None to keep WaitFor polling.
      if self._GaiaWebviewContext() is not None:
        return partial(Oobe._NavigateWebViewLogin,
                       wait_for_close=not enterprise_enroll)
      elif self._GaiaIFrameContext() is not None:
        return partial(Oobe._NavigateIFrameLogin,
                       add_user_for_testing=not enterprise_enroll)
      return None
    py_utils.WaitFor(_GetGaiaFunction, 20)(self, username, password)
  def _NavigateIFrameLogin(self, username, password, add_user_for_testing):
    """Logs into the IFrame-based GAIA screen"""
    gaia_iframe_context = py_utils.WaitFor(self._GaiaIFrameContext, timeout=30)
    if add_user_for_testing:
      self._ExecuteOobeApi('Oobe.showAddUserForTesting')
    self.ExecuteJavaScript("""
        document.getElementById('Email').value= {{ username }};
        document.getElementById('Passwd').value= {{ password }};
        document.getElementById('signIn').click();""",
        username=username, password=password,
        context_id=gaia_iframe_context)
  def _NavigateWebViewLogin(self, username, password, wait_for_close):
    """Logs into the webview-based GAIA screen"""
    self._NavigateWebViewEntry('identifierId', username, 'identifierNext')
    self._NavigateWebViewEntry('password', password, 'passwordNext')
    if wait_for_close:
      # The GAIA webview disappears once login completes.
      py_utils.WaitFor(lambda: not self._GaiaWebviewContext(), 60)
  def _NavigateWebViewEntry(self, field, value, next_field):
    """Fills one webview form field and clicks through to the next screen."""
    self._WaitForField(field)
    self._WaitForField(next_field)
    gaia_webview_context = self._GaiaWebviewContext()
    gaia_webview_context.EvaluateJavaScript("""
        document.getElementById({{ field }}).value= {{ value }};
        document.getElementById({{ next_field }}).click()""",
        field=field, value=value, next_field=next_field)
  def _WaitForField(self, field):
    """Blocks until ``field`` exists in the quiescent GAIA webview."""
    gaia_webview_context = py_utils.WaitFor(self._GaiaWebviewContext, 5)
    py_utils.WaitFor(gaia_webview_context.HasReachedQuiescence, 20)
    gaia_webview_context.WaitForJavaScriptCondition(
        "document.getElementById({{ field }}) != null",
        field=field, timeout=20)
| mit |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
import six
import logging
import shutil
import fnmatch
import calendar
from codecs import open
from collections import defaultdict
from functools import partial
from itertools import chain, groupby
from operator import attrgetter
from jinja2 import (Environment, FileSystemLoader, PrefixLoader, ChoiceLoader,
BaseLoader, TemplateNotFound)
from pelican.cache import FileStampDataCacher
from pelican.contents import Article, Draft, Page, Static, is_valid_content
from pelican.readers import Readers
from pelican.utils import (copy, process_translations, mkdir_p, DateFormatter,
python_2_unicode_compatible, posixize_path)
from pelican import signals
logger = logging.getLogger(__name__)
class PelicanTemplateNotFound(Exception):
    """Raised when a generator cannot locate a theme template by name."""
@python_2_unicode_compatible
class Generator(object):
    """Baseclass generator.

    Sets up the Jinja environment and template lookup shared by all
    concrete generators, and provides file discovery helpers.
    """

    def __init__(self, context, settings, path, theme, output_path,
                 readers_cache_name='', **kwargs):
        self.context = context
        self.settings = settings
        self.path = path
        self.theme = theme
        self.output_path = output_path

        for arg, value in kwargs.items():
            setattr(self, arg, value)

        self.readers = Readers(self.settings, readers_cache_name)

        # templates cache
        self._templates = {}
        self._templates_path = []
        self._templates_path.append(os.path.expanduser(
            os.path.join(self.theme, 'templates')))
        self._templates_path += self.settings['EXTRA_TEMPLATES_PATHS']

        theme_path = os.path.dirname(os.path.abspath(__file__))

        simple_loader = FileSystemLoader(os.path.join(theme_path,
                                         "themes", "simple", "templates"))
        self.env = Environment(
            trim_blocks=True,
            lstrip_blocks=True,
            loader=ChoiceLoader([
                FileSystemLoader(self._templates_path),
                simple_loader,  # implicit inheritance
                PrefixLoader({'!simple': simple_loader})  # explicit one
            ]),
            extensions=self.settings['JINJA_EXTENSIONS'],
        )
        logger.debug('Template list: %s', self.env.list_templates())

        # provide utils.strftime as a jinja filter
        self.env.filters.update({'strftime': DateFormatter()})

        # get custom Jinja filters from user settings
        custom_filters = self.settings['JINJA_FILTERS']
        self.env.filters.update(custom_filters)

        signals.generator_init.send(self)

    def get_template(self, name):
        """Return the template by name.

        Use self.theme to get the templates to use, and return a list of
        templates ready to use with Jinja2.
        """
        if name not in self._templates:
            try:
                self._templates[name] = self.env.get_template(name + '.html')
            except TemplateNotFound:
                raise PelicanTemplateNotFound('[templates] unable to load %s.html from %s'
                                              % (name, self._templates_path))
        return self._templates[name]

    def _include_path(self, path, extensions=None):
        """Inclusion logic for .get_files(), returns True/False

        :param path: the path which might be including
        :param extensions: the list of allowed extensions (if False, all
            extensions are allowed)
        """
        if extensions is None:
            extensions = tuple(self.readers.extensions)
        basename = os.path.basename(path)

        # check IGNORE_FILES
        ignores = self.settings['IGNORE_FILES']
        if any(fnmatch.fnmatch(basename, ignore) for ignore in ignores):
            return False

        if extensions is False or basename.endswith(extensions):
            return True
        return False

    def get_files(self, paths, exclude=(), extensions=None):
        """Return a list of files to use, based on rules

        :param paths: the list pf paths to search (relative to self.path)
        :param exclude: iterable of paths to exclude. FIX: the default was
            previously a shared mutable list (``[]``); it is now an
            immutable tuple, which is behavior-compatible since the value
            is only iterated.
        :param extensions: the list of allowed extensions (if False, all
            extensions are allowed)
        """
        if isinstance(paths, six.string_types):
            paths = [paths]  # backward compatibility for older generators

        # group the exclude dir names by parent path, for use with os.walk()
        exclusions_by_dirpath = {}
        for e in exclude:
            parent_path, subdir = os.path.split(os.path.join(self.path, e))
            exclusions_by_dirpath.setdefault(parent_path, set()).add(subdir)

        files = []
        ignores = self.settings['IGNORE_FILES']
        for path in paths:
            # careful: os.path.join() will add a slash when path == ''.
            root = os.path.join(self.path, path) if path else self.path

            if os.path.isdir(root):
                for dirpath, dirs, temp_files in os.walk(root, followlinks=True):
                    drop = []
                    excl = exclusions_by_dirpath.get(dirpath, ())
                    for d in dirs:
                        if (d in excl or
                            any(fnmatch.fnmatch(d, ignore)
                                for ignore in ignores)):
                            drop.append(d)
                    for d in drop:
                        dirs.remove(d)
                    reldir = os.path.relpath(dirpath, self.path)
                    for f in temp_files:
                        fp = os.path.join(reldir, f)
                        if self._include_path(fp, extensions):
                            files.append(fp)
            elif os.path.exists(root) and self._include_path(path, extensions):
                files.append(path)  # can't walk non-directories
        return files

    def add_source_path(self, content):
        """Record a source file path that a Generator found and processed.

        Store a reference to its Content object, for url lookups later.
        """
        location = content.get_relative_source_path()
        self.context['filenames'][location] = content

    def _add_failed_source_path(self, path):
        """Record a source file path that a Generator failed to process.

        (For example, one that was missing mandatory metadata.)
        The path argument is expected to be relative to self.path.
        """
        self.context['filenames'][posixize_path(os.path.normpath(path))] = None

    def _is_potential_source_path(self, path):
        """Return True if path was supposed to be used as a source file.

        (This includes all source files that have been found by generators
        before this method is called, even if they failed to process.)
        The path argument is expected to be relative to self.path.
        """
        return posixize_path(os.path.normpath(path)) in self.context['filenames']

    def _update_context(self, items):
        """Update the context with the given items from the currrent
        processor.
        """
        for item in items:
            value = getattr(self, item)
            if hasattr(value, 'items'):
                value = list(value.items())  # py3k safeguard for iterators
            self.context[item] = value

    def __str__(self):
        # return the name of the class for logging purposes
        return self.__class__.__name__
class CachingGenerator(Generator, FileStampDataCacher):
    '''Subclass of Generator and FileStampDataCacher classes

    enables content caching, either at the generator or reader level
    '''
    def __init__(self, *args, **kwargs):
        '''Initialize the generator, then set up caching

        note the multiple inheritance structure
        '''
        cls_name = self.__class__.__name__
        Generator.__init__(self, *args,
                           readers_cache_name=(cls_name + '-Readers'),
                           **kwargs)
        # Generator-level caching is active only when the user chose the
        # 'generator' caching layer; otherwise content is cached by Readers.
        cache_this_level = self.settings['CONTENT_CACHING_LAYER'] == 'generator'
        caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
        load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
        FileStampDataCacher.__init__(self, self.settings, cls_name,
                                     caching_policy, load_policy
                                     )
    def _get_file_stamp(self, filename):
        '''Get filestamp for path relative to generator.path'''
        filename = os.path.join(self.path, filename)
        return super(CachingGenerator, self)._get_file_stamp(filename)
class _FileLoader(BaseLoader):
    """Jinja2 loader that serves exactly one template file from disk."""

    def __init__(self, path, basedir):
        self.path = path
        self.fullpath = os.path.join(basedir, path)

    def get_source(self, environment, template):
        """Return (source, filename, uptodate) for the single known path."""
        if template != self.path:
            raise TemplateNotFound(template)
        if not os.path.exists(self.fullpath):
            raise TemplateNotFound(template)
        mtime = os.path.getmtime(self.fullpath)
        with open(self.fullpath, 'r', encoding='utf-8') as template_file:
            contents = template_file.read()

        def uptodate():
            # Template is current as long as the file's mtime is unchanged.
            return mtime == os.path.getmtime(self.fullpath)
        return (contents, self.fullpath, uptodate)
class TemplatePagesGenerator(Generator):
    """Render the TEMPLATE_PAGES mapping: each source template is written
    directly to its destination path using the shared context."""
    def generate_output(self, writer):
        for source, dest in self.settings['TEMPLATE_PAGES'].items():
            # Temporarily give this source file top loader priority so that
            # env.get_template() can resolve it by its relative path.
            self.env.loader.loaders.insert(0, _FileLoader(source, self.path))
            try:
                template = self.env.get_template(source)
                rurls = self.settings['RELATIVE_URLS']
                writer.write_file(dest, template, self.context, rurls,
                                  override_output=True)
            finally:
                # Always restore the loader chain, even on render errors.
                del self.env.loader.loaders[0]
class ArticlesGenerator(CachingGenerator):
    """Generate blog articles"""
    def __init__(self, *args, **kwargs):
        """initialize properties"""
        self.articles = []  # only articles in default language
        self.translations = []
        self.dates = {}
        self.tags = defaultdict(list)
        self.categories = defaultdict(list)
        self.related_posts = []
        self.authors = defaultdict(list)
        self.drafts = []  # only drafts in default language
        self.drafts_translations = []
        super(ArticlesGenerator, self).__init__(*args, **kwargs)
        signals.article_generator_init.send(self)
    def generate_feeds(self, writer):
        """Generate the feeds from the current context, and output files."""
        # Main (default-language) feeds.
        if self.settings.get('FEED_ATOM'):
            writer.write_feed(self.articles, self.context,
                              self.settings['FEED_ATOM'])
        if self.settings.get('FEED_RSS'):
            writer.write_feed(self.articles, self.context,
                              self.settings['FEED_RSS'], feed_type='rss')
        # "All" feeds include translations, sorted newest-first.
        if (self.settings.get('FEED_ALL_ATOM')
                or self.settings.get('FEED_ALL_RSS')):
            all_articles = list(self.articles)
            for article in self.articles:
                all_articles.extend(article.translations)
            all_articles.sort(key=attrgetter('date'), reverse=True)
            if self.settings.get('FEED_ALL_ATOM'):
                writer.write_feed(all_articles, self.context,
                                  self.settings['FEED_ALL_ATOM'])
            if self.settings.get('FEED_ALL_RSS'):
                writer.write_feed(all_articles, self.context,
                                  self.settings['FEED_ALL_RSS'],
                                  feed_type='rss')
        # Per-category feeds. NOTE: by the time feeds are generated,
        # generate_context() has turned self.categories into a list of
        # (category, articles) pairs.
        for cat, arts in self.categories:
            arts.sort(key=attrgetter('date'), reverse=True)
            if self.settings.get('CATEGORY_FEED_ATOM'):
                writer.write_feed(arts, self.context,
                                  self.settings['CATEGORY_FEED_ATOM']
                                  % cat.slug)
            if self.settings.get('CATEGORY_FEED_RSS'):
                writer.write_feed(arts, self.context,
                                  self.settings['CATEGORY_FEED_RSS']
                                  % cat.slug, feed_type='rss')
        # Per-author feeds (self.authors is also a list of pairs here).
        for auth, arts in self.authors:
            arts.sort(key=attrgetter('date'), reverse=True)
            if self.settings.get('AUTHOR_FEED_ATOM'):
                writer.write_feed(arts, self.context,
                                  self.settings['AUTHOR_FEED_ATOM']
                                  % auth.slug)
            if self.settings.get('AUTHOR_FEED_RSS'):
                writer.write_feed(arts, self.context,
                                  self.settings['AUTHOR_FEED_RSS']
                                  % auth.slug, feed_type='rss')
        # Per-tag feeds (self.tags stays a dict).
        if (self.settings.get('TAG_FEED_ATOM')
                or self.settings.get('TAG_FEED_RSS')):
            for tag, arts in self.tags.items():
                arts.sort(key=attrgetter('date'), reverse=True)
                if self.settings.get('TAG_FEED_ATOM'):
                    writer.write_feed(arts, self.context,
                                      self.settings['TAG_FEED_ATOM']
                                      % tag.slug)
                if self.settings.get('TAG_FEED_RSS'):
                    writer.write_feed(arts, self.context,
                                      self.settings['TAG_FEED_RSS'] % tag.slug,
                                      feed_type='rss')
        # Per-language feeds, grouping articles and translations by lang.
        if (self.settings.get('TRANSLATION_FEED_ATOM')
                or self.settings.get('TRANSLATION_FEED_RSS')):
            translations_feeds = defaultdict(list)
            for article in chain(self.articles, self.translations):
                translations_feeds[article.lang].append(article)
            for lang, items in translations_feeds.items():
                items.sort(key=attrgetter('date'), reverse=True)
                if self.settings.get('TRANSLATION_FEED_ATOM'):
                    writer.write_feed(
                        items, self.context,
                        self.settings['TRANSLATION_FEED_ATOM'] % lang)
                if self.settings.get('TRANSLATION_FEED_RSS'):
                    writer.write_feed(
                        items, self.context,
                        self.settings['TRANSLATION_FEED_RSS'] % lang,
                        feed_type='rss')
    def generate_articles(self, write):
        """Generate the articles."""
        for article in chain(self.translations, self.articles):
            signals.article_generator_write_article.send(self, content=article)
            write(article.save_as, self.get_template(article.template),
                  self.context, article=article, category=article.category,
                  override_output=hasattr(article, 'override_save_as'),
                  blog=True)
    def generate_period_archives(self, write):
        """Generate per-year, per-month, and per-day archives."""
        try:
            template = self.get_template('period_archives')
        except PelicanTemplateNotFound:
            # Fall back to the plain archives template.
            template = self.get_template('archives')
        period_save_as = {
            'year': self.settings['YEAR_ARCHIVE_SAVE_AS'],
            'month': self.settings['MONTH_ARCHIVE_SAVE_AS'],
            'day': self.settings['DAY_ARCHIVE_SAVE_AS'],
        }
        period_date_key = {
            'year': attrgetter('date.year'),
            'month': attrgetter('date.year', 'date.month'),
            'day': attrgetter('date.year', 'date.month', 'date.day')
        }
        def _generate_period_archives(dates, key, save_as_fmt):
            """Generate period archives from `dates`, grouped by
            `key` and written to `save_as`.
            """
            # `dates` is already sorted by date
            for _period, group in groupby(dates, key=key):
                archive = list(group)
                # arbitrarily grab the first date so that the usual
                # format string syntax can be used for specifying the
                # period archive dates
                date = archive[0].date
                save_as = save_as_fmt.format(date=date)
                context = self.context.copy()
                if key == period_date_key['year']:
                    context["period"] = (_period,)
                else:
                    month_name = calendar.month_name[_period[1]]
                    if not six.PY3:
                        month_name = month_name.decode('utf-8')
                    if key == period_date_key['month']:
                        context["period"] = (_period[0],
                                             month_name)
                    else:
                        context["period"] = (_period[0],
                                             month_name,
                                             _period[2])
                write(save_as, template, context,
                      dates=archive, blog=True)
        for period in 'year', 'month', 'day':
            save_as = period_save_as[period]
            if save_as:
                key = period_date_key[period]
                _generate_period_archives(self.dates, key, save_as)
    def generate_direct_templates(self, write):
        """Generate direct templates pages"""
        PAGINATED_TEMPLATES = self.settings['PAGINATED_DIRECT_TEMPLATES']
        for template in self.settings['DIRECT_TEMPLATES']:
            paginated = {}
            if template in PAGINATED_TEMPLATES:
                paginated = {'articles': self.articles, 'dates': self.dates}
            save_as = self.settings.get("%s_SAVE_AS" % template.upper(),
                                        '%s.html' % template)
            if not save_as:
                # Empty SAVE_AS setting disables this template.
                continue
            write(save_as, self.get_template(template),
                  self.context, blog=True, paginated=paginated,
                  page_name=os.path.splitext(save_as)[0])
    def generate_tags(self, write):
        """Generate Tags pages."""
        tag_template = self.get_template('tag')
        for tag, articles in self.tags.items():
            articles.sort(key=attrgetter('date'), reverse=True)
            dates = [article for article in self.dates if article in articles]
            write(tag.save_as, tag_template, self.context, tag=tag,
                  articles=articles, dates=dates,
                  paginated={'articles': articles, 'dates': dates}, blog=True,
                  page_name=tag.page_name, all_articles=self.articles)
    def generate_categories(self, write):
        """Generate category pages."""
        category_template = self.get_template('category')
        for cat, articles in self.categories:
            articles.sort(key=attrgetter('date'), reverse=True)
            dates = [article for article in self.dates if article in articles]
            write(cat.save_as, category_template, self.context,
                  category=cat, articles=articles, dates=dates,
                  paginated={'articles': articles, 'dates': dates}, blog=True,
                  page_name=cat.page_name, all_articles=self.articles)
    def generate_authors(self, write):
        """Generate Author pages."""
        author_template = self.get_template('author')
        for aut, articles in self.authors:
            articles.sort(key=attrgetter('date'), reverse=True)
            dates = [article for article in self.dates if article in articles]
            write(aut.save_as, author_template, self.context,
                  author=aut, articles=articles, dates=dates,
                  paginated={'articles': articles, 'dates': dates}, blog=True,
                  page_name=aut.page_name, all_articles=self.articles)
    def generate_drafts(self, write):
        """Generate drafts pages."""
        for draft in chain(self.drafts_translations, self.drafts):
            write(draft.save_as, self.get_template(draft.template),
                  self.context, article=draft, category=draft.category,
                  override_output=hasattr(draft, 'override_save_as'),
                  blog=True, all_articles=self.articles)
    def generate_pages(self, writer):
        """Generate the pages on the disk"""
        write = partial(writer.write_file,
                        relative_urls=self.settings['RELATIVE_URLS'])
        # to minimize the number of relative path stuff modification
        # in writer, articles pass first
        self.generate_articles(write)
        self.generate_period_archives(write)
        self.generate_direct_templates(write)
        # and subfolders after that
        self.generate_tags(write)
        self.generate_categories(write)
        self.generate_authors(write)
        self.generate_drafts(write)
    def generate_context(self):
        """Add the articles into the shared context"""
        all_articles = []
        all_drafts = []
        for f in self.get_files(
                self.settings['ARTICLE_PATHS'],
                exclude=self.settings['ARTICLE_EXCLUDES']):
            article_or_draft = self.get_cached_data(f, None)
            if article_or_draft is None:
                # TODO: needs overhaul, maybe nomad for read_file solution,
                # unified behaviour
                try:
                    article_or_draft = self.readers.read_file(
                        base_path=self.path, path=f, content_class=Article,
                        context=self.context,
                        preread_signal=signals.article_generator_preread,
                        preread_sender=self,
                        context_signal=signals.article_generator_context,
                        context_sender=self)
                except Exception as e:
                    logger.error('Could not process %s\n%s', f, e,
                                 exc_info=self.settings.get('DEBUG', False))
                    self._add_failed_source_path(f)
                    continue
                if not is_valid_content(article_or_draft, f):
                    self._add_failed_source_path(f)
                    continue
                if article_or_draft.status.lower() == "published":
                    all_articles.append(article_or_draft)
                elif article_or_draft.status.lower() == "draft":
                    # Drafts are re-read with the Draft content class.
                    article_or_draft = self.readers.read_file(
                        base_path=self.path, path=f, content_class=Draft,
                        context=self.context,
                        preread_signal=signals.article_generator_preread,
                        preread_sender=self,
                        context_signal=signals.article_generator_context,
                        context_sender=self)
                    self.add_source_path(article_or_draft)
                    all_drafts.append(article_or_draft)
                else:
                    logger.error("Unknown status '%s' for file %s, skipping it.",
                                 article_or_draft.status, f)
                    self._add_failed_source_path(f)
                    continue
                self.cache_data(f, article_or_draft)
            self.add_source_path(article_or_draft)
        self.articles, self.translations = process_translations(all_articles,
            order_by=self.settings['ARTICLE_ORDER_BY'])
        self.drafts, self.drafts_translations = \
            process_translations(all_drafts)
        signals.article_generator_pretaxonomy.send(self)
        for article in self.articles:
            # only main articles are listed in categories and tags
            # not translations
            self.categories[article.category].append(article)
            if hasattr(article, 'tags'):
                for tag in article.tags:
                    self.tags[tag].append(article)
            for author in getattr(article, 'authors', []):
                self.authors[author].append(article)
        self.dates = list(self.articles)
        self.dates.sort(key=attrgetter('date'),
                        reverse=self.context['NEWEST_FIRST_ARCHIVES'])
        # and generate the output :)
        # order the categories per name
        self.categories = list(self.categories.items())
        self.categories.sort(
            reverse=self.settings['REVERSE_CATEGORY_ORDER'])
        self.authors = list(self.authors.items())
        self.authors.sort()
        self._update_context(('articles', 'dates', 'tags', 'categories',
                              'authors', 'related_posts', 'drafts'))
        self.save_cache()
        self.readers.save_cache()
        signals.article_generator_finalized.send(self)
    def generate_output(self, writer):
        self.generate_feeds(writer)
        self.generate_pages(writer)
        signals.article_writer_finalized.send(self, writer=writer)
class PagesGenerator(CachingGenerator):
    """Generate pages"""
    def __init__(self, *args, **kwargs):
        self.pages = []
        self.hidden_pages = []
        self.hidden_translations = []
        super(PagesGenerator, self).__init__(*args, **kwargs)
        signals.page_generator_init.send(self)
    def generate_context(self):
        """Read page sources, split them by status and fill the context."""
        all_pages = []
        hidden_pages = []
        for f in self.get_files(
                self.settings['PAGE_PATHS'],
                exclude=self.settings['PAGE_EXCLUDES']):
            page = self.get_cached_data(f, None)
            if page is None:
                try:
                    page = self.readers.read_file(
                        base_path=self.path, path=f, content_class=Page,
                        context=self.context,
                        preread_signal=signals.page_generator_preread,
                        preread_sender=self,
                        context_signal=signals.page_generator_context,
                        context_sender=self)
                except Exception as e:
                    logger.error('Could not process %s\n%s', f, e,
                                 exc_info=self.settings.get('DEBUG', False))
                    self._add_failed_source_path(f)
                    continue
                if not is_valid_content(page, f):
                    self._add_failed_source_path(f)
                    continue
                if page.status.lower() == "published":
                    all_pages.append(page)
                elif page.status.lower() == "hidden":
                    hidden_pages.append(page)
                else:
                    logger.error("Unknown status '%s' for file %s, skipping it.",
                                 page.status, f)
                    self._add_failed_source_path(f)
                    continue
                self.cache_data(f, page)
            self.add_source_path(page)
        self.pages, self.translations = process_translations(all_pages,
            order_by=self.settings['PAGE_ORDER_BY'])
        self.hidden_pages, self.hidden_translations = (
            process_translations(hidden_pages))
        self._update_context(('pages', 'hidden_pages'))
        self.save_cache()
        self.readers.save_cache()
        signals.page_generator_finalized.send(self)
    def generate_output(self, writer):
        for page in chain(self.translations, self.pages,
                          self.hidden_translations, self.hidden_pages):
            writer.write_file(
                page.save_as, self.get_template(page.template),
                self.context, page=page,
                relative_urls=self.settings['RELATIVE_URLS'],
                override_output=hasattr(page, 'override_save_as'))
        signals.page_writer_finalized.send(self, writer=writer)
class StaticGenerator(Generator):
    """copy static paths (what you want to copy, like images, medias etc.
    to output"""
    def __init__(self, *args, **kwargs):
        super(StaticGenerator, self).__init__(*args, **kwargs)
        signals.static_generator_init.send(self)
    def _copy_paths(self, paths, source, destination, output_path,
                    final_path=None):
        """Copy all the paths from source to destination"""
        for path in paths:
            if final_path:
                # Override the destination leaf name (used for theme assets).
                copy(os.path.join(source, path),
                     os.path.join(output_path, destination, final_path),
                     self.settings['IGNORE_FILES'])
            else:
                copy(os.path.join(source, path),
                     os.path.join(output_path, destination, path),
                     self.settings['IGNORE_FILES'])
    def generate_context(self):
        """Collect all static files declared in STATIC_PATHS."""
        self.staticfiles = []
        for f in self.get_files(self.settings['STATIC_PATHS'],
                                exclude=self.settings['STATIC_EXCLUDES'],
                                extensions=False):
            # skip content source files unless the user explicitly wants them
            if self.settings['STATIC_EXCLUDE_SOURCES']:
                if self._is_potential_source_path(f):
                    continue
            static = self.readers.read_file(
                base_path=self.path, path=f, content_class=Static,
                fmt='static', context=self.context,
                preread_signal=signals.static_generator_preread,
                preread_sender=self,
                context_signal=signals.static_generator_context,
                context_sender=self)
            self.staticfiles.append(static)
            self.add_source_path(static)
        self._update_context(('staticfiles',))
        signals.static_generator_finalized.send(self)
    def generate_output(self, writer):
        # Theme assets first, then user static files.
        self._copy_paths(self.settings['THEME_STATIC_PATHS'], self.theme,
                         self.settings['THEME_STATIC_DIR'], self.output_path,
                         os.curdir)
        # copy all Static files
        for sc in self.context['staticfiles']:
            source_path = os.path.join(self.path, sc.source_path)
            save_as = os.path.join(self.output_path, sc.save_as)
            mkdir_p(os.path.dirname(save_as))
            shutil.copy2(source_path, save_as)
            logger.info('Copying %s to %s', sc.source_path, sc.save_as)
class SourceFileGenerator(Generator):
    """Copy the raw source files of articles and pages next to their
    rendered output, using OUTPUT_SOURCES_EXTENSION as the suffix."""

    def generate_context(self):
        self.output_extension = self.settings['OUTPUT_SOURCES_EXTENSION']

    def _create_source(self, obj):
        """Copy one content object's source file beside its rendered page."""
        stem, _ = os.path.splitext(obj.save_as)
        destination = os.path.join(self.output_path,
                                   stem + self.output_extension)
        copy(obj.source_path, destination)

    def generate_output(self, writer=None):
        logger.info('Generating source files...')
        for content in chain(self.context['articles'], self.context['pages']):
            self._create_source(content)
            for translation in content.translations:
                self._create_source(translation)
| agpl-3.0 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import glob
import os
import shutil
import tempfile
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def no_rewrite_session_config():
  """Return a `ConfigProto` with grappler graph rewriting disabled.

  Model pruning and the arithmetic/dependency optimizers are turned off so
  that the graph the debugger observes matches the graph as constructed.
  """
  rewrites_off = rewriter_config_pb2.RewriterConfig(
      disable_model_pruning=True,
      arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
      dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
  return config_pb2.ConfigProto(
      graph_options=config_pb2.GraphOptions(rewrite_options=rewrites_off))
class _RNNCellForTest(rnn_cell_impl.RNNCell):
  """Minimal RNN cell used for testing.

  Scales the input by a single trainable scalar weight and passes the state
  through unchanged.
  """

  def __init__(self, input_output_size, state_size):
    self._size_in_out = input_output_size
    self._size_state = state_size
    # A single trainable scalar, so the optimizer has something to update.
    self._w = variables.VariableV1(1.0, dtype=dtypes.float32, name="w")

  @property
  def output_size(self):
    return self._size_in_out

  @property
  def state_size(self):
    return self._size_state

  def __call__(self, input_, state, scope=None):
    scaled = math_ops.multiply(self._w, input_)
    return scaled, state
@test_util.run_v1_only("b/120545219")
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
  @classmethod
  def tearDownClass(cls):
    # No class-level resources to release; per-test cleanup happens in
    # tearDown().
    pass
  def setUp(self):
    # Fresh temporary directory to receive this test's debug dumps.
    self._dump_root = tempfile.mkdtemp()
  def tearDown(self):
    # Start the next test with a clean default graph.
    ops.reset_default_graph()

    # Tear down temporary dump directory.
    if os.path.isdir(self._dump_root):
      shutil.rmtree(self._dump_root)
  def _debug_urls(self, run_number=None):
    """Abstract: subclasses return the debug URL(s) to dump to for a run."""
    raise NotImplementedError(
        "_debug_urls() method is not implemented in the base test class.")
  def _debug_dump_dir(self, run_number=None):
    """Abstract: subclasses return the dump directory for a run."""
    raise NotImplementedError(
        "_debug_dump_dir() method is not implemented in the base test class.")
  def _debug_run_and_get_dump(self,
                              sess,
                              fetches,
                              feed_dict=None,
                              debug_ops="DebugIdentity",
                              tolerate_debug_op_creation_failures=False,
                              global_step=-1,
                              validate=True,
                              expected_partition_graph_count=None):
    """Run fetches with debugging and obtain DebugDumpDir.

    Args:
      sess: the tf.Session to be used.
      fetches: fetches of the Session.run().
      feed_dict: feed dict for the Session.run().
      debug_ops: name(s) of the debug ops to be used.
      tolerate_debug_op_creation_failures: whether to tolerate debug op
        creation failures.
      global_step: Optional global step.
      validate: whether to validate dumped tensors against graph.
      expected_partition_graph_count: optional count of partition graphs to
        assert on.

    Returns:
      1. Return values of the Session.run().
      2. The DebugDumpDir object from the debugged run().
    """
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    # Instrument the whole graph with the requested debug op(s), dumping to
    # this test's debug URL(s).
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=debug_ops,
        debug_urls=self._debug_urls(),
        tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
        global_step=global_step)
    run_metadata = config_pb2.RunMetadata()
    run_output = sess.run(fetches,
                          feed_dict=feed_dict,
                          options=run_options,
                          run_metadata=run_metadata)
    if expected_partition_graph_count is not None:
      self.assertEqual(expected_partition_graph_count,
                       len(run_metadata.partition_graphs))
    # Load the dump written during the run, using the partition graphs
    # captured in run_metadata.
    return run_output, debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs,
        validate=validate)
  def _generate_dump_from_simple_addition_graph(self):
    """Run a w = matmul(u, v) graph with watches on u/read and v/read.

    Returns:
      A namedtuple bundling the initial values, the ops, the node names and
      the resulting DebugDumpDir.
    """
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      v_init_val = np.array([[2.0], [-1.0]])

      # Use node names with overlapping namespace (i.e., parent directory) to
      # test concurrent, non-racing directory creation.
      u_name = "u"
      v_name = "v"
      w_name = "w"

      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.VariableV1(u_init, name=u_name)
      v_init = constant_op.constant(v_init_val, shape=[2, 1])
      v = variables.VariableV1(v_init, name=v_name)

      w = math_ops.matmul(u, v, name=w_name)

      u.initializer.run()
      v.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = "file://%s" % self._dump_root

      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()

      # Invoke Session.run().
      sess.run(w, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

    simple_add_results = collections.namedtuple("SimpleAddResults", [
        "u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
        "dump"
    ])
    return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
                              w_name, dump)
  def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):
    """The inserted __copy_* nodes carry correct debug_ops_spec attributes."""
    with session.Session() as sess:
      u = variables.VariableV1(2.1, name="u")
      v = variables.VariableV1(20.0, name="v")
      w = math_ops.multiply(u, v, name="w")

      sess.run(variables.global_variables_initializer())

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()
      # u gets two debug ops (one of them gated for grpc); v gets one.
      debug_utils.add_debug_tensor_watch(
          run_options,
          "u",
          0, ["DebugNumericSummary(gated_grpc=True)", "DebugIdentity"],
          debug_urls=debug_urls)
      debug_utils.add_debug_tensor_watch(
          run_options, "v", 0, ["DebugNumericSummary"], debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()
      r = sess.run(w, options=run_options, run_metadata=run_metadata)
      self.assertAllClose(42.0, r)

      # Locate the copy nodes the debugger inserted for u and v.
      u_copy_node_def = None
      v_copy_node_def = None
      for partition_graph in run_metadata.partition_graphs:
        for node_def in partition_graph.node:
          if debug_graphs.is_copy_node(node_def.name):
            if node_def.name == "__copy_u_0":
              u_copy_node_def = node_def
            elif node_def.name == "__copy_v_0":
              v_copy_node_def = node_def

      self.assertIsNotNone(u_copy_node_def)
      debug_ops_spec = u_copy_node_def.attr["debug_ops_spec"].list.s
      self.assertEqual(2, len(debug_ops_spec))
      # Each spec is "<debug_op>;<debug_url>;<gated_grpc as 0/1>".
      self.assertEqual("DebugNumericSummary;%s;1" % debug_urls[0],
                       debug_ops_spec[0].decode("utf-8"))
      self.assertEqual("DebugIdentity;%s;0" % debug_urls[0],
                       debug_ops_spec[1].decode("utf-8"))

      self.assertIsNotNone(v_copy_node_def)
      debug_ops_spec = v_copy_node_def.attr["debug_ops_spec"].list.s
      self.assertEqual(1, len(debug_ops_spec))
      self.assertEqual("DebugNumericSummary;%s;0" % debug_urls[0],
                       debug_ops_spec[0].decode("utf-8"))
  def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
    """Dumping nodes whose dump paths share parent directories works."""
    results = self._generate_dump_from_simple_addition_graph()
    self.assertTrue(results.dump.loaded_partition_graphs())

    # Since global_step is not explicitly specified, it should take its default
    # value: -1.
    self.assertEqual(-1, results.dump.core_metadata.global_step)
    self.assertGreaterEqual(results.dump.core_metadata.session_run_index, 0)
    self.assertGreaterEqual(results.dump.core_metadata.executor_step_index, 0)
    self.assertEqual([], results.dump.core_metadata.input_names)
    self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
    self.assertEqual([], results.dump.core_metadata.target_nodes)

    # Verify the dumped tensor values for u and v.
    self.assertEqual(2, results.dump.size)

    self.assertAllClose([results.u_init_val],
                        results.dump.get_tensors("%s/read" % results.u_name, 0,
                                                 "DebugIdentity"))
    self.assertAllClose([results.v_init_val],
                        results.dump.get_tensors("%s/read" % results.v_name, 0,
                                                 "DebugIdentity"))

    # Relative timestamps are non-negative and dump files are non-empty.
    self.assertGreaterEqual(
        results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
                                        "DebugIdentity")[0], 0)
    self.assertGreaterEqual(
        results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
                                        "DebugIdentity")[0], 0)

    self.assertGreater(
        results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
                                          "DebugIdentity")[0], 0)
    self.assertGreater(
        results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
                                          "DebugIdentity")[0], 0)
def testGetOpTypeWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertEqual(results.u.op.type,
results.dump.node_op_type(results.u_name))
self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
results.dump.node_op_type("foo_bar")
  def testDumpStringTensorsWorks(self):
    """String (non-numeric) tensors can be dumped and read back."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      str1_init_val = np.array(b"abc")
      str2_init_val = np.array(b"def")

      str1_init = constant_op.constant(str1_init_val)
      str2_init = constant_op.constant(str2_init_val)

      str1_name = "str1"
      str2_name = "str2"
      str1 = variables.VariableV1(str1_init, name=str1_name)
      str2 = variables.VariableV1(str2_init, name=str2_name)
      # Concatenate str1 and str2
      str_concat = math_ops.add(str1, str2, name="str_concat")

      str1.initializer.run()
      str2.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Add debug tensor watch for str1.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for str2.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()
      sess.run(str_concat, options=run_options, run_metadata=run_metadata)

      # String ops are located on CPU.
      self.assertEqual(1, len(run_metadata.partition_graphs))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      self.assertIn(str1_name, dump.nodes())
      self.assertIn(str2_name, dump.nodes())

      self.assertEqual(2, dump.size)

      self.assertEqual([str1_init_val],
                       dump.get_tensors("%s/read" % str1_name, 0,
                                        "DebugIdentity"))
      self.assertEqual([str2_init_val],
                       dump.get_tensors("%s/read" % str2_name, 0,
                                        "DebugIdentity"))

      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
          0)
      self.assertGreaterEqual(
          dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
          0)

      self.assertGreater(
          dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
                                    "DebugIdentity")[0], 0)
      self.assertGreater(
          dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
                                    "DebugIdentity")[0], 0)
  def testDumpUninitializedVariable(self):
    """Watching uninitialized variables yields inconvertible tensor protos."""
    op_namespace = "testDumpUninitializedVariable"
    with session.Session() as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      s_init_val = b"str1"

      u_name = "%s/u" % op_namespace
      s_name = "%s/s" % op_namespace

      u_init = constant_op.constant(u_init_val, shape=[2, 2])
      u = variables.VariableV1(u_init, name=u_name)
      s_init = constant_op.constant(s_init_val)
      s = variables.VariableV1(s_init, name=s_name)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=debug_urls)
      debug_utils.add_debug_tensor_watch(
          run_options, s_name, 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()

      # Initialize u and s.
      sess.run(variables.global_variables_initializer(),
               options=run_options,
               run_metadata=run_metadata)

      # Verify the dump file for the uninitialized value of u.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      self.assertEqual(2, dump.size)
      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      # Verify that the variable is properly initialized by the run() call.
      u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
      s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
      self.assertEqual(1, len(u_vals))
      # The dumped values are recorded as uninitialized (inconvertible)
      # tensor protos, as asserted below.
      self.assertIsInstance(u_vals[0], debug_data.InconvertibleTensorProto)
      self.assertFalse(u_vals[0].initialized)
      self.assertEqual(1, len(s_vals))
      self.assertIsInstance(s_vals[0], debug_data.InconvertibleTensorProto)
      self.assertFalse(s_vals[0].initialized)

      # Call run() again, to check that u is initialized properly.
      self.assertAllClose(u_init_val, sess.run(u))
      self.assertEqual(s_init_val, sess.run(s))
  def testDebugWhileLoopGeneratesMultipleDumps(self):
    """Each while-loop iteration produces its own dump of watched tensors."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      num_iter = 10

      # "u" is the Variable being updated in the loop.
      u_name = "testDumpToFileWhileLoop/u"
      u_namespace = u_name.split("/")[0]

      u_init_val = np.array(11.0)
      u_init = constant_op.constant(u_init_val)
      u = variables.VariableV1(u_init, name=u_name)

      # "v" is the increment.
      v_name = "testDumpToFileWhileLoop/v"
      v_namespace = v_name.split("/")[0]

      v_init_val = np.array(2.0)
      v_init = constant_op.constant(v_init_val)
      v = variables.VariableV1(v_init, name=v_name)

      u.initializer.run()
      v.initializer.run()

      i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")

      def cond(i):
        # Loop while i < num_iter.
        return math_ops.less(i, num_iter)

      def body(i):
        # Add v into u as a side effect, then increment the loop counter,
        # ordered after the assign via a control dependency.
        new_u = state_ops.assign_add(u, v)
        new_i = math_ops.add(i, 1)
        op = control_flow_ops.group(new_u)
        new_i = control_flow_ops.with_dependencies([op], new_i)
        return [new_i]

      loop = control_flow_ops.while_loop(
          cond, body, [i], parallel_iterations=10)

      # Create RunOptions for debug-watching tensors
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_urls = self._debug_urls()

      # Add debug tensor watch for u.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for v.
      debug_utils.add_debug_tensor_watch(
          run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Identity.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Identity", 0, debug_urls=debug_urls)
      # Add debug tensor watch for while/Add/y.
      debug_utils.add_debug_tensor_watch(
          run_options, "while/Add/y", 0, debug_urls=debug_urls)

      run_metadata = config_pb2.RunMetadata()
      r = sess.run(loop, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      self.assertEqual(num_iter, r)
      u_val_final = sess.run(u)
      self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)

      # Verify dump files
      self.assertTrue(os.path.isdir(self._dump_root))

      u_glob_out = glob.glob(os.path.join(self._dump_root, "*", u_namespace))
      v_glob_out = glob.glob(os.path.join(
          self._dump_root, "*", v_namespace, "v"))
      self.assertTrue(os.path.isdir(u_glob_out[0]))
      self.assertTrue(os.path.isdir(v_glob_out[0]))

      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      # Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
      # and 10 iterations of while/Add/y.
      self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)

      # Verify tensor values.
      self.assertAllClose([u_init_val],
                          dump.get_tensors(u_name, 0, "DebugIdentity"))
      self.assertAllClose([v_init_val],
                          dump.get_tensors("%s/read" % v_name, 0,
                                           "DebugIdentity"))

      while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
      self.assertEqual(10, len(while_id_tensors))
      for k in xrange(len(while_id_tensors)):
        self.assertAllClose(np.array(k), while_id_tensors[k])

      # Verify ascending timestamps from the while loops.
      while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
                                                        "DebugIdentity")
      while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
                                                            "DebugIdentity")
      self.assertEqual(10, len(while_id_rel_timestamps))
      prev_rel_time = 0
      prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
      for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
                                           while_id_dump_sizes_bytes):
        self.assertGreaterEqual(rel_time, prev_rel_time)
        # Every iteration dumps the same scalar, so sizes stay constant.
        self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
        prev_rel_time = rel_time
        prev_dump_size_bytes = dump_size_bytes

      # Test querying debug watch keys from node name.
      watch_keys = dump.debug_watch_keys("while/Identity")
      self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)

      # Test querying debug datum instances from debug watch key.
      self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
      self.assertEqual([], dump.watch_key_to_data("foo"))
def testDebugWhileLoopWatchingWholeGraphWorks(self):
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
loop_result, dump = self._debug_run_and_get_dump(sess, loop)
self.assertEqual(16, loop_result)
self.assertEqual(
[[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
self.assertEqual(
[[12], [14], [16]],
dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
  def testDebugTrainingDynamicRNNWorks(self):
    """Debug-dumping a dynamic-RNN training step does not break the run."""
    with session.Session() as sess:
      input_size = 3
      state_size = 2
      time_steps = 4
      batch_size = 2

      input_values = np.random.randn(time_steps, batch_size, input_size)
      sequence_length = np.random.randint(0, time_steps, size=batch_size)
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))

      outputs_dynamic, _ = rnn.dynamic_rnn(
          _RNNCellForTest(input_size, state_size),
          inputs=concat_inputs,
          sequence_length=sequence_length,
          time_major=True,
          dtype=dtypes.float32)
      toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(toy_loss, name="train_op")

      sess.run(variables.global_variables_initializer())

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph_with_blacklists(
          run_options,
          sess.graph,
          node_name_regex_blacklist="(.*rnn/while/.*|.*TensorArray.*)",
          debug_urls=self._debug_urls())
      # b/36870549: Nodes with these name patterns need to be excluded from
      # tfdbg in order to prevent MSAN warnings of uninitialized Tensors
      # under both file:// and grpc:// debug URL schemes.

      run_metadata = config_pb2.RunMetadata()
      sess.run(train_op, feed_dict={concat_inputs: input_values},
               options=run_options, run_metadata=run_metadata)

      # Loading the dump must succeed; no value assertions are made here.
      debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testDebugCondWatchingWholeGraphWorks(self):
with session.Session() as sess:
x = variables.VariableV1(10.0, name="x")
y = variables.VariableV1(20.0, name="y")
cond = control_flow_ops.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
sess.run(variables.global_variables_initializer())
cond_result, dump = self._debug_run_and_get_dump(sess, cond)
self.assertEqual(21, cond_result)
self.assertAllClose(
[21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
  def testFindNodesWithBadTensorValues(self):
    """dump.find() locates tensors matching a caller-supplied predicate."""
    with session.Session() as sess:
      u_name = "testFindNodesWithBadTensorValues/u"
      v_name = "testFindNodesWithBadTensorValues/v"
      w_name = "testFindNodesWithBadTensorValues/w"
      x_name = "testFindNodesWithBadTensorValues/x"
      y_name = "testFindNodesWithBadTensorValues/y"
      z_name = "testFindNodesWithBadTensorValues/z"

      u_init = constant_op.constant([2.0, 4.0])
      u = variables.VariableV1(u_init, name=u_name)
      v_init = constant_op.constant([2.0, 1.0])
      v = variables.VariableV1(v_init, name=v_name)

      # Expected output: [0.0, 3.0]
      w = math_ops.subtract(u, v, name=w_name)

      # Expected output: [inf, 1.3333]
      x = math_ops.div(u, w, name=x_name)

      # Expected output: [nan, 4.0]
      y = math_ops.multiply(w, x, name=y_name)

      z = math_ops.multiply(y, y, name=z_name)

      u.initializer.run()
      v.initializer.run()

      _, dump = self._debug_run_and_get_dump(
          sess, z,
          expected_partition_graph_count=self._expected_partition_graph_count)

      def has_bad_value(_, tensor):
        # Predicate: tensor contains at least one nan or inf entry.
        return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))

      # Find all "offending tensors".
      bad_data = dump.find(has_bad_value)

      # Verify that the nodes with bad values are caught through running find
      # on the debug dump.
      self.assertEqual(3, len(bad_data))
      self.assertEqual(x_name, bad_data[0].node_name)
      self.assertEqual(y_name, bad_data[1].node_name)
      self.assertEqual(z_name, bad_data[2].node_name)

      # Test first_n kwarg of find(): Find the first offending tensor.
      first_bad_datum = dump.find(has_bad_value, first_n=1)
      self.assertEqual(1, len(first_bad_datum))
      self.assertEqual(x_name, first_bad_datum[0].node_name)
  def testFindInfOrNanWithOpNameExclusion(self):
    """find() honors exclude_node_names when scanning for inf/nan."""
    with session.Session() as sess:
      u_name = "testFindInfOrNanWithOpNameExclusion/u"
      v_name = "testFindInfOrNanWithOpNameExclusion/v"
      w_name = "testFindInfOrNanWithOpNameExclusion/w"
      x_name = "testFindInfOrNanWithOpNameExclusion/x"
      y_name = "testFindInfOrNanWithOpNameExclusion/y"
      z_name = "testFindInfOrNanWithOpNameExclusion/z"

      u_init = constant_op.constant([2.0, 4.0])
      u = variables.VariableV1(u_init, name=u_name)
      v_init = constant_op.constant([2.0, 1.0])
      v = variables.VariableV1(v_init, name=v_name)

      # Expected output: [0.0, 3.0]
      w = math_ops.subtract(u, v, name=w_name)

      # Expected output: [inf, 1.3333]
      x = math_ops.div(u, w, name=x_name)

      # Expected output: [nan, 4.0]
      y = math_ops.multiply(w, x, name=y_name)

      z = math_ops.multiply(y, y, name=z_name)

      u.initializer.run()
      v.initializer.run()

      _, dump = self._debug_run_and_get_dump(
          sess, z,
          expected_partition_graph_count=self._expected_partition_graph_count)

      # Find all "offending tensors", excluding nodes matching ".*/x$".
      bad_data = dump.find(debug_data.has_inf_or_nan,
                           exclude_node_names=".*/x$")

      # Verify that the nodes with bad values are caught through running find
      # on the debug dump.
      self.assertEqual(2, len(bad_data))
      # Assert that the node `x` should have been excluded.
      self.assertEqual(y_name, bad_data[0].node_name)
      self.assertEqual(z_name, bad_data[1].node_name)

      first_bad_datum = dump.find(
          debug_data.has_inf_or_nan, first_n=1, exclude_node_names=".*/x$")
      self.assertEqual(1, len(first_bad_datum))
      self.assertEqual(y_name, first_bad_datum[0].node_name)
def _session_run_for_graph_structure_lookup(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpGraphStructureLookup/u"
v_name = "testDumpGraphStructureLookup/v"
w_name = "testDumpGraphStructureLookup/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, w,
expected_partition_graph_count=self._expected_partition_graph_count)
return u_name, v_name, w_name, dump
  def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
    """Device count, per-node device lookup and node-existence checks."""
    u_name, _, _, dump = self._session_run_for_graph_structure_lookup()

    # Test num_devices().
    self.assertEqual(self._expected_num_devices, len(dump.devices()))

    # Test node_device().
    self.assertEqual(self._main_device, dump.node_device(u_name))

    with self.assertRaisesRegexp(ValueError,
                                 "does not exist in partition graphs"):
      dump.node_device(u_name + "foo")

    # Test node_exists().
    self.assertTrue(dump.node_exists(u_name))
    self.assertTrue(dump.node_exists(u_name + "/read"))
    self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
  def testGraphStructureLookupGivesNodesAndAttributes(self):
    """Node listing and node_attributes() lookups on the dump."""
    u_name, _, _, dump = self._session_run_for_graph_structure_lookup()

    u_read_name = u_name + "/read"

    # Test node name list lookup of the DebugDumpDir object.
    if test_util.gpu_device_name():
      node_names = dump.nodes(
          device_name="/job:localhost/replica:0/task:0/device:GPU:0")
    else:
      node_names = dump.nodes()
    self.assertTrue(u_name in node_names)
    self.assertTrue(u_read_name in node_names)

    # Test querying node attributes.
    u_attr = dump.node_attributes(u_name)
    self.assertEqual(dtypes.float32, u_attr["dtype"].type)
    self.assertEqual(1, len(u_attr["shape"].shape.dim))
    self.assertEqual(2, u_attr["shape"].shape.dim[0].size)

    with self.assertRaisesRegexp(
        ValueError, r"None of the .* device\(s\) has a node named "):
      dump.node_attributes("foo")
  def testGraphStructureLookupGivesDebugWatchKeys(self):
    """debug_watch_keys() and watch_key_to_data() lookups on the dump."""
    u_name, v_name, w_name, dump = (
        self._session_run_for_graph_structure_lookup())

    # Test querying the debug watch keys with node names.
    self.assertEqual(["%s:0:DebugIdentity" % u_name],
                     dump.debug_watch_keys(u_name))
    self.assertEqual(["%s:0:DebugIdentity" % v_name],
                     dump.debug_watch_keys(v_name))
    self.assertEqual(["%s:0:DebugIdentity" % w_name],
                     dump.debug_watch_keys(w_name))
    self.assertEqual([], dump.debug_watch_keys("foo"))

    # Test querying debug datum instances from debug watch.
    u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
    self.assertEqual(1, len(u_data))
    self.assertEqual(u_name, u_data[0].node_name)
    self.assertEqual(0, u_data[0].output_slot)
    self.assertEqual("DebugIdentity", u_data[0].debug_op)
    self.assertGreaterEqual(u_data[0].timestamp, 0)

    self.assertEqual([], dump.watch_key_to_data("foo"))
  def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
    """node_inputs(), node_recipients() and transitive_inputs() lookups."""
    u_name, v_name, w_name, dump = (
        self._session_run_for_graph_structure_lookup())

    u_read_name = u_name + "/read"

    # Test the inputs lookup of the DebugDumpDir object.
    self.assertEqual([], dump.node_inputs(u_name))
    self.assertEqual([u_name], dump.node_inputs(u_read_name))
    # v = u + u and w = v + v, so each consumes its input twice.
    self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
    self.assertEqual([v_name] * 2, dump.node_inputs(w_name))

    self.assertEqual([], dump.node_inputs(u_name, is_control=True))
    self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
    self.assertEqual([], dump.node_inputs(v_name, is_control=True))
    self.assertEqual([], dump.node_inputs(w_name, is_control=True))

    # Test the outputs recipient lookup of the DebugDumpDir object.
    self.assertTrue(u_read_name in dump.node_recipients(u_name))
    self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
    self.assertEqual(2, dump.node_recipients(v_name).count(w_name))

    self.assertEqual([], dump.node_recipients(u_name, is_control=True))
    self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
    self.assertEqual([], dump.node_recipients(v_name, is_control=True))
    self.assertEqual([], dump.node_recipients(w_name, is_control=True))

    # Test errors raised on invalid node names.
    with self.assertRaisesRegexp(
        ValueError, r"None of the .* device\(s\) has a node named "):
      dump.node_inputs(u_name + "foo")
    with self.assertRaisesRegexp(
        ValueError, r"None of the .* device\(s\) has a node named "):
      dump.node_recipients(u_name + "foo")

    # Test transitive_inputs().
    self.assertEqual([], dump.transitive_inputs(u_name))
    self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
    self.assertEqual(
        set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
    self.assertEqual(
        set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))

    with self.assertRaisesRegexp(
        ValueError, r"None of the .* device\(s\) has a node named "):
      dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
_, _, _, dump = self._session_run_for_graph_structure_lookup()
# Now load the dump again, without the partition graphs, so we can check
# errors are not raised because the partition graphs are loaded from the
# dump directory.
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertTrue(dump.loaded_partition_graphs())
  def testGraphPathFindingOnControlEdgesWorks(self):
    """find_some_path() can traverse (or ignore) control edges."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      v1 = variables.VariableV1(1.0, name="v1")
      v2 = variables.VariableV1(2.0, name="v2")
      v3 = variables.VariableV1(3.0, name="v3")
      a = math_ops.add(v1, v2, name="a")
      with ops.control_dependencies([a]):
        # "c" depends on "a" only through a control edge.
        c = math_ops.subtract(v3, v3, name="c")

      sess.run(variables.global_variables_initializer())
      _, dump = self._debug_run_and_get_dump(sess, c)

      self.assertEqual(["v1", "v1/read", "a", "c"],
                       dump.find_some_path("v1", "c"))
      self.assertIsNone(dump.find_some_path("v1", "c", include_control=False))
  def testGraphPathFindingReverseRefEdgeWorks(self):
    """find_some_path() can follow a ref edge backwards when asked to."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      v = variables.VariableV1(10.0, name="v")
      delta = variables.VariableV1(1.0, name="delta")
      inc_v = state_ops.assign_add(v, delta, name="inc_v")

      sess.run(variables.global_variables_initializer())
      _, dump = self._debug_run_and_get_dump(sess, inc_v)

      # delta reaches v only via the inc_v ref edge traversed in reverse.
      self.assertEqual(
          ["delta", "delta/read", "inc_v", "v"],
          dump.find_some_path("delta", "v", include_reversed_ref=True))
      self.assertIsNone(dump.find_some_path("delta", "v"))
  def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
    """Dump validation flags timestamps that violate graph causality."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_name = "testDumpCausalityCheck/u"
      v_name = "testDumpCausalityCheck/v"
      w_name = "testDumpCausalityCheck/w"

      u_init = constant_op.constant([2.0, 4.0])
      u = variables.VariableV1(u_init, name=u_name)
      v = math_ops.add(u, u, name=v_name)
      w = math_ops.add(v, v, name=w_name)

      u.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())

      run_metadata = config_pb2.RunMetadata()
      sess.run(w, options=run_options, run_metadata=run_metadata)

      self.assertEqual(self._expected_partition_graph_count,
                       len(run_metadata.partition_graphs))

      # First, loading the original dump without supplying the
      # partition_graphs should not cause a LookupError, validation occurs
      # only with partition_graphs loaded.
      debug_data.DebugDumpDir(self._dump_root)

      # Now, loading the original dump with partition graphs supplied should
      # succeed. The validation should pass quietly.
      dump = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)

      # Get the dump file names and compute their timestamps.
      self.assertEqual(
          1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
      v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]

      self.assertEqual(
          1, len(dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")))
      w_file_path = dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")[0]

      # The timestamp is the integer suffix after the last "_" in the dump
      # file name.
      v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
      w_timestamp = int(w_file_path[w_file_path.rindex("_") + 1:])

      # Swap and slightly shift the time stamps of the last two dumped tensors,
      # to simulate "causality violation", which can happen if the dump
      # directory contains incomplete data and/or mixes data from different
      # Session.run() calls.
      v_file_path_1 = v_file_path[:v_file_path.rindex(
          "_")] + "_%d" % w_timestamp
      w_file_path_1 = w_file_path[:w_file_path.rindex("_")] + "_%d" % (
          v_timestamp - 1)

      os.rename(v_file_path, v_file_path_1)
      os.rename(w_file_path, w_file_path_1)

      # Load the dump directory again. Now a ValueError is expected to be
      # raised due to the timestamp swap.
      with self.assertRaisesRegexp(ValueError, "Causality violated"):
        dump = debug_data.DebugDumpDir(
            self._dump_root, partition_graphs=run_metadata.partition_graphs)

      # Loading the dump directory with kwarg "validate" set explicitly to
      # False should get rid of the error.
      dump = debug_data.DebugDumpDir(
          self._dump_root,
          partition_graphs=run_metadata.partition_graphs,
          validate=False)

      # Next, set the two times stamps to be the same, which should be fine.
      v_file_path_2 = v_file_path[:v_file_path.rindex(
          "_")] + "_%d" % w_timestamp
      w_file_path_2 = w_file_path[:w_file_path.rindex(
          "_")] + "_%d" % w_timestamp

      os.rename(v_file_path_1, v_file_path_2)
      os.rename(w_file_path_1, w_file_path_2)

      debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
  def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
    """Watching only one of a node's two output slots must not trip validation."""
    with session.Session() as sess:
      x_name = "oneOfTwoSlots/x"
      u_name = "oneOfTwoSlots/u"
      v_name = "oneOfTwoSlots/v"
      w_name = "oneOfTwoSlots/w"
      y_name = "oneOfTwoSlots/y"
      x = variables.VariableV1([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
      sess.run(x.initializer)
      # unique_with_counts gives u two consumed outputs: unique values and
      # per-element indices.
      unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)
      v = math_ops.add(unique_x, unique_x, name=v_name)
      w = math_ops.add(indices, indices, name=w_name)
      y = math_ops.add(w, w, name=y_name)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      # Watch only the first output slot of u, even though it has two output
      # slots.
      debug_utils.add_debug_tensor_watch(
          run_options, u_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, w_name, 0, debug_urls=self._debug_urls())
      debug_utils.add_debug_tensor_watch(
          run_options, y_name, 0, debug_urls=self._debug_urls())
      run_metadata = config_pb2.RunMetadata()
      sess.run([v, y], options=run_options, run_metadata=run_metadata)
      # validate=True: loading must pass the causality check even though
      # slot 1 of u was never watched.
      dump = debug_data.DebugDumpDir(
          self._dump_root,
          partition_graphs=run_metadata.partition_graphs,
          validate=True)
      self.assertAllClose([1, 3, 7],
                          dump.get_tensors(u_name, 0, "DebugIdentity")[0])
  def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
    """Test watching output slots not attached to any outgoing edges."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
      u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
      # Create a control edge from a node with an output: From u to z.
      # Node u will get executed only because of the control edge. The output
      # tensor u:0 is not attached to any outgoing edge in the graph. This test
      # checks that the debugger can watch such a tensor.
      with ops.control_dependencies([u]):
        z = control_flow_ops.no_op(name="z")
      # Run z; the debugger should still dump u:0 despite u:0 having no
      # consumers in the graph.
      _, dump = self._debug_run_and_get_dump(sess, z)
      # Assert that the DebugIdentity watch on u works properly.
      self.assertEqual(1, len(dump.dumped_tensor_data))
      datum = dump.dumped_tensor_data[0]
      self.assertEqual("u", datum.node_name)
      self.assertEqual(0, datum.output_slot)
      self.assertEqual("DebugIdentity", datum.debug_op)
      self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
  def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
    """Watch output slots on Variable-updating ops, with no emitted edges."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      u_init = constant_op.constant(10.0)
      u = variables.VariableV1(u_init, name="gdo/u")
      v_init = constant_op.constant(20.0)
      v = variables.VariableV1(v_init, name="gdo/v")
      w = math_ops.multiply(u, v, name="gdo/w")
      # gdo stands for GradientDescentOptimizer.
      train_op = gradient_descent.GradientDescentOptimizer(
          learning_rate=0.1).minimize(
              w, name="gdo/train")
      u.initializer.run()
      v.initializer.run()
      _, dump = self._debug_run_and_get_dump(sess, train_op)
      # Watch keys follow the "<node>:<slot>:<debug op>" format.
      update_u_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_u_data))
      # Gradient descent on u: w = u * v, so dw / du = v.
      # Updated value of u should be:
      #   10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
      self.assertAllClose(8.0, update_u_data[0].get_tensor())
      update_v_data = dump.watch_key_to_data(
          "gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
      self.assertEqual(1, len(update_v_data))
      # Gradient descent on v: w = u * v, so dw / dv = u.
      # Updated value of v should be:
      #   20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
      self.assertAllClose(19.0, update_v_data[0].get_tensor())
      # Verify that the Variables u and v are updated properly.
      self.assertAllClose(8.0, sess.run(u))
      self.assertAllClose(19.0, sess.run(v))
  def testAllowsWatchingUnconnectedOutputTensor(self):
    """Watch an output slot not emitting any edges.

    (Not even control edges from the node.)
    """
    with session.Session() as sess:
      x_init = constant_op.constant([2, 2, 3, 5, 5])
      x = variables.VariableV1(x_init, name="unconnected/x")
      # The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
      # graph. Let the debugger watch the unused slot 1.
      unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
      y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")
      x.initializer.run()
      # Verify that only slot 0 of unique_x has recipients, while slot 1 of the
      # same node does not have recipients.
      unique_x_slot_0_recipients = []
      unique_x_slot_1_recipients = []
      for op in sess.graph.get_operations():
        for inp in op.inputs:
          if inp.name == "unconnected/unique_x:0":
            unique_x_slot_0_recipients.append(op.name)
          elif inp.name == "unconnected/unique_x:1":
            unique_x_slot_1_recipients.append(op.name)
      self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
      self.assertEqual([], unique_x_slot_1_recipients)
      y_result, dump = self._debug_run_and_get_dump(sess, y)
      self.assertAllClose([2, 4, 7], y_result)
      # Assert that the connected slot (slot 0) is dumped properly.
      unique_x_slot_0_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:0:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_0_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_0_dumps[0].node_name)
      self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
      self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
      # Assert that the unconnected slot (slot 1) is dumped properly.
      # Slot 1 holds the per-element indices into the unique values.
      unique_x_slot_1_dumps = dump.watch_key_to_data(
          "unconnected/unique_x:1:DebugIdentity")
      self.assertEqual(1, len(unique_x_slot_1_dumps))
      self.assertEqual("unconnected/unique_x",
                       unique_x_slot_1_dumps[0].node_name)
      self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
      self.assertAllClose([0, 0, 1, 2, 2],
                          unique_x_slot_1_dumps[0].get_tensor())
  def testSuccessiveDebuggingRunsIncreasesCounters(self):
    """Test repeated Session.run() calls with debugger increments counters."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
      # NOTE(review): the "mismatch/" node-name prefix below looks
      # copy-pasted from testDebuggingDuringOpError; "successive/" would be
      # more consistent. Renaming would alter graph node names, so it is
      # only flagged here.
      x = array_ops.transpose(ph, name="mismatch/x")
      y = array_ops.squeeze(ph, name="mismatch/y")
      _, dump1 = self._debug_run_and_get_dump(
          sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=1)
      self.assertEqual(1, dump1.core_metadata.global_step)
      self.assertGreaterEqual(dump1.core_metadata.session_run_index, 0)
      self.assertEqual(0, dump1.core_metadata.executor_step_index)
      self.assertEqual([ph.name], dump1.core_metadata.input_names)
      self.assertEqual([x.name], dump1.core_metadata.output_names)
      self.assertEqual([], dump1.core_metadata.target_nodes)
      # Clear the dump directory between runs so each dump is read fresh.
      shutil.rmtree(self._dump_root)
      # Calling run() with the same feed, same output and same debug watch
      # options should increment both session_run_index and
      # executor_step_index.
      _, dump2 = self._debug_run_and_get_dump(
          sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=2)
      self.assertEqual(2, dump2.core_metadata.global_step)
      self.assertEqual(dump1.core_metadata.session_run_index + 1,
                       dump2.core_metadata.session_run_index)
      self.assertEqual(dump1.core_metadata.executor_step_index + 1,
                       dump2.core_metadata.executor_step_index)
      self.assertEqual([ph.name], dump2.core_metadata.input_names)
      self.assertEqual([x.name], dump2.core_metadata.output_names)
      self.assertEqual([], dump2.core_metadata.target_nodes)
      shutil.rmtree(self._dump_root)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)
      # Calling run() with a different output should increment
      # session_run_index, but not executor_step_index.
      _, dump3 = self._debug_run_and_get_dump(
          sess, y, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=3)
      self.assertEqual(3, dump3.core_metadata.global_step)
      self.assertEqual(dump2.core_metadata.session_run_index + 1,
                       dump3.core_metadata.session_run_index)
      self.assertEqual(0, dump3.core_metadata.executor_step_index)
      self.assertEqual([ph.name], dump3.core_metadata.input_names)
      self.assertEqual([y.name], dump3.core_metadata.output_names)
      self.assertEqual([], dump3.core_metadata.target_nodes)
  def testDebuggingDuringOpError(self):
    """Test the debug tensor dumping when error occurs in graph runtime."""
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
      x = array_ops.transpose(ph, name="mismatch/x")
      m = constant_op.constant(
          np.array(
              [[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
      y = math_ops.matmul(m, x, name="mismatch/y")
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls=self._debug_urls())
      # The feed has shape (2, 1), so x (its transpose) is (1, 2); matmul of
      # m (1x2) with x (1x2) has mismatched inner dimensions and fails at
      # graph-execution time rather than graph-construction time.
      with self.assertRaises(errors.OpError):
        sess.run(y,
                 options=run_options,
                 feed_dict={ph: np.array([[-3.0], [0.0]])})
      dump = debug_data.DebugDumpDir(self._dump_root)
      self.assertGreaterEqual(dump.core_metadata.session_run_index, 0)
      self.assertGreaterEqual(dump.core_metadata.executor_step_index, 0)
      self.assertEqual([ph.name], dump.core_metadata.input_names)
      self.assertEqual([y.name], dump.core_metadata.output_names)
      self.assertEqual([], dump.core_metadata.target_nodes)
      # Despite the fact that the run() call errored out and partition_graphs
      # are not available via run_metadata, the partition graphs should still
      # have been loaded from the dump directory.
      self.assertTrue(dump.loaded_partition_graphs())
      m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
      self.assertEqual(1, len(m_dumps))
      self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
      x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
      self.assertEqual(1, len(x_dumps))
      self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
  def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary on an initialized tensor yields the expected stats."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      a = variables.VariableV1(
          [
              np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
              -np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
          ],
          dtype=np.float32,
          name="numeric_summary/a")
      b = variables.VariableV1(
          [0.0] * 18, dtype=np.float32, name="numeric_summary/b")
      c = math_ops.add(a, b, name="numeric_summary/c")
      sess.run(variables.global_variables_initializer())
      _, dump = self._debug_run_and_get_dump(
          sess, c, debug_ops=["DebugNumericSummary"])
      self.assertTrue(dump.loaded_partition_graphs())
      # The expected vector below encodes the summary fields of a's 18
      # elements; indices 12-14 are dtype enum, ndims and dimension size
      # (see the uninitialized-tensor test below). NOTE(review): confirm the
      # full field layout against the DebugNumericSummary op documentation.
      self.assertAllClose([[
          1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
          8.97959184, 1.0, 1.0, 18.0
      ]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
  def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
    """DebugNumericSummary on an uninitialized variable gives sentinel stats."""
    with session.Session() as sess:
      a = variables.VariableV1(
          [42], dtype=np.float32, name="numeric_summary_uninit/a")
      # Watch the variable while running only its initializer, so the watched
      # value is the pre-initialization state.
      _, dump = self._debug_run_and_get_dump(
          sess, a.initializer, debug_ops=["DebugNumericSummary"])
      self.assertTrue(dump.loaded_partition_graphs())
      # DebugNumericSummary output should reflect the uninitialized state of
      # the watched tensor.
      numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
                                         "DebugNumericSummary")[0]
      self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                          numeric_summary[0:8])
      # Check dtype (index 12), ndims (index 13) and dimension sizes (index
      # 14+).
      self.assertAllClose([1.0, 1.0, 1.0], numeric_summary[12:])
      # Indices 8-11 are +inf, -inf, NaN, NaN respectively for the
      # uninitialized tensor.
      self.assertTrue(np.isinf(numeric_summary[8]))
      self.assertGreater(numeric_summary[8], 0.0)
      self.assertTrue(np.isinf(numeric_summary[9]))
      self.assertLess(numeric_summary[9], 0.0)
      self.assertTrue(np.isnan(numeric_summary[10]))
      self.assertTrue(np.isnan(numeric_summary[11]))
  def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
    """Debug-op creation failures on string tensors can be tolerated."""
    with session.Session() as sess:
      a = variables.VariableV1("1", name="a")
      b = variables.VariableV1("3", name="b")
      c = variables.VariableV1("2", name="c")
      d = math_ops.add(a, b, name="d")
      e = math_ops.add(d, c, name="e")
      n = parsing_ops.string_to_number(e, name="n")
      m = math_ops.add(n, n, name="m")
      sess.run(variables.global_variables_initializer())
      # Using DebugNumericSummary on sess.run(m) with the default
      # tolerate_debug_op_creation_failures=False should error out due to the
      # presence of string-dtype Tensors in the graph.
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary"],
          debug_urls=self._debug_urls())
      with self.assertRaises(errors.FailedPreconditionError):
        sess.run(m, options=run_options, run_metadata=run_metadata)
      # Using tolerate_debug_op_creation_failures=True should get rid of the
      # error.
      m_result, dump = self._debug_run_and_get_dump(
          sess, m, debug_ops=["DebugNumericSummary"],
          tolerate_debug_op_creation_failures=True)
      # The string adds concatenate "1" + "3" + "2" into "132", so
      # m = 132 + 132 = 264 after string_to_number.
      self.assertEqual(264, m_result)
      # The integer-dtype Tensors in the graph should have been dumped
      # properly.
      self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
      self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
  def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):
    """Invalid attribute keys in a debug-op spec raise FailedPreconditionError."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      a = variables.VariableV1(10.0, name="a")
      b = variables.VariableV1(0.0, name="b")
      c = variables.VariableV1(0.0, name="c")
      x = math_ops.divide(a, b, name="x")
      y = math_ops.multiply(x, c, name="y")
      sess.run(variables.global_variables_initializer())
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0)"],
          debug_urls=self._debug_urls())
      # A single invalid attribute key ("foo") is reported by name.
      with self.assertRaisesRegexp(
          errors.FailedPreconditionError,
          r"1 attribute key\(s\) were not valid for debug node "
          r"__dbg_.:0_0_DebugNumericSummary: foo"):
        sess.run(y, options=run_options, run_metadata=run_metadata)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0; bar=false)"],
          debug_urls=self._debug_urls())
      # Two invalid keys ("foo" and "bar") are both counted.
      with self.assertRaisesRegexp(
          errors.FailedPreconditionError,
          r"2 attribute key\(s\) were not valid for debug node "
          r"__dbg_.:0_0_DebugNumericSummary:"):
        sess.run(y, options=run_options, run_metadata=run_metadata)
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugNumericSummary(foo=1.0; mute_if_healthy=true)"],
          debug_urls=self._debug_urls())
      # Mixing a valid key (mute_if_healthy) with an invalid one (foo)
      # reports only the invalid key.
      with self.assertRaisesRegexp(
          errors.FailedPreconditionError,
          r"1 attribute key\(s\) were not valid for debug node "
          r"__dbg_.:0_0_DebugNumericSummary: foo"):
        sess.run(y, options=run_options, run_metadata=run_metadata)
  def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):
    """mute_if_healthy=true suppresses dumps for healthy tensors only."""
    with session.Session(config=no_rewrite_session_config()) as sess:
      a = variables.VariableV1(10.0, name="a")
      b = variables.VariableV1(0.0, name="b")
      c = variables.VariableV1(0.0, name="c")
      x = math_ops.divide(a, b, name="x")
      y = math_ops.multiply(x, c, name="y")
      sess.run(variables.global_variables_initializer())
      # Here, validate=False is necessary to avoid causality check error.
      # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
      # debug ops with mute_if_healthy=false attribute during validation.
      _, dump = self._debug_run_and_get_dump(
          sess, y, debug_ops=["DebugNumericSummary(mute_if_healthy=true)"],
          validate=False)
      # Only x (10.0 / 0.0 = inf) and y (inf * 0.0 = nan) are unhealthy,
      # so only those two tensors are dumped.
      self.assertEqual(2, dump.size)
      self.assertAllClose([[
          1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,
          np.nan, 1.0, 0.0
      ]], dump.get_tensors("x", 0, "DebugNumericSummary"))
      self.assertAllClose([[
          1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,
          np.nan, 1.0, 0.0
      ]], dump.get_tensors("y", 0, "DebugNumericSummary"))
      # Another run with the default mute_if_healthy (false) value should
      # dump all the tensors.
      shutil.rmtree(self._dump_root)
      _, dump = self._debug_run_and_get_dump(
          sess, y, debug_ops=["DebugNumericSummary()"])
      self.assertEqual(8, dump.size)
  def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):
    """A custom upper_bound makes finite out-of-range tensors count as unhealthy."""
    with session.Session() as sess:
      a = variables.VariableV1([10.0, 10.0], name="a")
      b = variables.VariableV1([10.0, 2.0], name="b")
      x = math_ops.add(a, b, name="x")  # [20.0, 12.0]
      y = math_ops.divide(x, b, name="y")  # [2.0, 6.0]
      sess.run(variables.global_variables_initializer())
      # Here, validate=False is necessary to avoid causality check error.
      # TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
      # debug ops with mute_if_healthy=false attribute during validation.
      _, dump = self._debug_run_and_get_dump(
          sess, y, debug_ops=[
              "DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
          validate=False)
      # Only x ([20.0, 12.0]) exceeds upper_bound=11.0; y ([2.0, 6.0]) is
      # healthy, so exactly one tensor is dumped.
      self.assertEqual(1, dump.size)
      self.assertAllClose([[
          1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0, 1.0,
          1.0, 2.0]], dump.get_tensors("x", 0, "DebugNumericSummary"))
  def testDebugQueueOpsDoesNotoErrorOut(self):
    """Watching FIFOQueue ops works; the handle dumps as an inconvertible proto.

    NOTE(review): "DoesNoto" in the method name looks like a typo for
    "DoesNot"; renaming would change the externally visible test ID, so the
    typo is only flagged here.
    """
    with session.Session() as sess:
      q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
      q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")
      _, dump = self._debug_run_and_get_dump(sess, q_init)
      self.assertTrue(dump.loaded_partition_graphs())
      # The queue handle cannot be converted to a numpy array, but it is
      # still dumped and reported as initialized.
      fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
      self.assertIsInstance(fifo_queue_tensor,
                            debug_data.InconvertibleTensorProto)
      self.assertTrue(fifo_queue_tensor.initialized)
      self.assertAllClose(
          [101.0, 202.0, 303.0],
          dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
  def testLookUpNodePythonTracebackWorks(self):
    """node_traceback() works after set_python_graph(), by node or tensor name."""
    with session.Session() as sess:
      u_init = constant_op.constant(10.0)
      u = variables.VariableV1(u_init, name="traceback/u")
      v_init = constant_op.constant(20.0)
      v = variables.VariableV1(v_init, name="traceback/v")
      w = math_ops.multiply(u, v, name="traceback/w")
      sess.run(variables.global_variables_initializer())
      _, dump = self._debug_run_and_get_dump(sess, w)
      # Prior to setting the Python graph, attempts to do traceback lookup
      # should lead to exceptions.
      with self.assertRaisesRegexp(
          LookupError, "Python graph is not available for traceback lookup"):
        dump.node_traceback("traceback/w")
      dump.set_python_graph(sess.graph)
      # After setting the Python graph, attempts to look up nonexistent nodes
      # should lead to exceptions.
      with self.assertRaisesRegexp(KeyError,
                                   r"Cannot find node \"foo\" in Python graph"):
        dump.node_traceback("foo")
      # Lookup should work with node name input.
      traceback = dump.node_traceback("traceback/w")
      self.assertIsInstance(traceback, list)
      self.assertGreater(len(traceback), 0)
      for trace in traceback:
        self.assertIsInstance(trace, tuple)
      # Lookup should also work with tensor name input; the ":0" suffix is
      # stripped to resolve the owning node.
      traceback = dump.node_traceback("traceback/w:0")
      self.assertIsInstance(traceback, list)
      self.assertGreater(len(traceback), 0)
      for trace in traceback:
        self.assertIsInstance(trace, tuple)
class DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):
  """Test for debugging concurrent Session.run() calls.

  Subclasses must provide `_num_concurrent_runs`, `_dump_roots` and an
  implementation of `_get_concurrent_debug_urls()`.
  """

  def _get_concurrent_debug_urls(self):
    """Abstract method to generate debug URLs for concurrent debugged runs."""
    raise NotImplementedError(
        "_get_concurrent_debug_urls is not implemented in the base test class")

  def testDebugConcurrentVariableUpdates(self):
    """Concurrent debugged runs produce consistent, unique run indices."""
    if test.is_gpu_available():
      self.skipTest("No testing concurrent runs on a single GPU.")

    with session.Session() as sess:
      v = variables.VariableV1(30.0, name="v")
      constants = []
      for i in xrange(self._num_concurrent_runs):
        constants.append(constant_op.constant(1.0, name="c%d" % i))
      # One locking assign_add op per concurrent runner.
      incs = [
          state_ops.assign_add(
              v, c, use_locking=True, name=("inc%d" % i))
          for (i, c) in enumerate(constants)
      ]
      sess.run(v.initializer)

      concurrent_debug_urls = self._get_concurrent_debug_urls()

      def inc_job(index):
        # Each thread repeatedly runs its own increment op with its own
        # debug URL so the dumps land in separate directories.
        run_options = config_pb2.RunOptions(output_partition_graphs=True)
        debug_utils.watch_graph(
            run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
        for _ in xrange(100):
          sess.run(incs[index], options=run_options)

      inc_threads = []
      for index in xrange(self._num_concurrent_runs):
        inc_thread = threading.Thread(target=functools.partial(inc_job, index))
        inc_thread.start()
        inc_threads.append(inc_thread)
      for inc_thread in inc_threads:
        inc_thread.join()

      # Every increment must have been applied exactly once.
      self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
                          sess.run(v))

      all_session_run_indices = []
      for index in xrange(self._num_concurrent_runs):
        dump = debug_data.DebugDumpDir(self._dump_roots[index])
        self.assertTrue(dump.loaded_partition_graphs())

        v_data = dump.get_tensors("v", 0, "DebugIdentity")
        self.assertEqual(100, len(v_data))

        # Examine all the core metadata files
        core_metadata_files = glob.glob(
            os.path.join(self._dump_roots[index], "_tfdbg_core*"))

        timestamps = []
        session_run_indices = []
        executor_step_indices = []
        for core_metadata_file in core_metadata_files:
          with open(core_metadata_file, "rb") as f:
            event = event_pb2.Event()
            event.ParseFromString(f.read())
            core_metadata = (
                debug_data.extract_core_metadata_from_event_proto(event))
            timestamps.append(event.wall_time)
            session_run_indices.append(core_metadata.session_run_index)
            executor_step_indices.append(core_metadata.executor_step_index)

        all_session_run_indices.extend(session_run_indices)

        # Assert that executor_step_index increases by one at a time.
        executor_step_indices = zip(timestamps, executor_step_indices)
        executor_step_indices = sorted(
            executor_step_indices, key=lambda x: x[0])
        for i in xrange(len(executor_step_indices) - 1):
          # assertEqual replaces the deprecated assertEquals alias.
          self.assertEqual(executor_step_indices[i][1] + 1,
                           executor_step_indices[i + 1][1])

        # Assert that session_run_index increase monotonically.
        session_run_indices = zip(timestamps, session_run_indices)
        session_run_indices = sorted(session_run_indices, key=lambda x: x[0])
        for i in xrange(len(session_run_indices) - 1):
          self.assertGreater(session_run_indices[i + 1][1],
                             session_run_indices[i][1])

      # Assert that the session_run_indices from the concurrent run() calls are
      # all unique.
      self.assertEqual(len(all_session_run_indices),
                       len(set(all_session_run_indices)))
# Standard test-module entry point.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
sloanyang/android_external_webkit | Tools/TestResultServer/model/testfile.py | 15 | 4323 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
import logging
from google.appengine.ext import db
from model.datastorefile import DataStoreFile
class TestFile(DataStoreFile):
    """A stored test-result file keyed by master/builder/test type/name."""

    master = db.StringProperty()
    builder = db.StringProperty()
    test_type = db.StringProperty()

    @classmethod
    def delete_file(cls, key, master, builder, test_type, name, limit):
        """Delete the file with the given key, or all files matching the fields.

        Returns True if at least one matching file was found and deleted.
        """
        if key:
            file = db.get(key)
            if not file:
                logging.warning("File not found, key: %s.", key)
                return False
            file._delete_all()
        else:
            # Pass limit by keyword: the positional parameter after `name` in
            # get_files is `load_data`, not `limit`, so a positional call
            # silently capped the query at the default limit of 1.
            files = cls.get_files(master, builder, test_type, name, limit=limit)
            if not files:
                # The format string has four placeholders; `master` must be
                # supplied or logging raises a formatting error internally.
                logging.warning(
                    "File not found, master: %s, builder: %s, test_type:%s, name: %s.",
                    master, builder, test_type, name)
                return False
            for file in files:
                file._delete_all()
        return True

    @classmethod
    def get_files(cls, master, builder, test_type, name, load_data=True, limit=1):
        """Query up to `limit` matching files, newest first.

        Empty filter values are skipped. When load_data is True, each
        returned file has its data blocks loaded.
        """
        query = TestFile.all()
        if master:
            query = query.filter("master =", master)
        if builder:
            query = query.filter("builder =", builder)
        if test_type:
            query = query.filter("test_type =", test_type)
        if name:
            query = query.filter("name =", name)
        files = query.order("-date").fetch(limit)
        if load_data:
            for file in files:
                file.load_data()
        return files

    @classmethod
    def add_file(cls, master, builder, test_type, name, data):
        """Create and persist a new file record; returns it, or None on failure."""
        file = TestFile()
        file.master = master
        file.builder = builder
        file.test_type = test_type
        file.name = name
        if not file.save(data):
            return None
        logging.info(
            "File saved, master: %s, builder: %s, test_type: %s, name: %s, key: %s.",
            master, builder, test_type, file.name, str(file.data_keys))
        return file

    @classmethod
    def update(cls, master, builder, test_type, name, data):
        """Replace the newest matching file's data, or add a new file if none."""
        files = cls.get_files(master, builder, test_type, name)
        if not files:
            return cls.add_file(master, builder, test_type, name, data)
        file = files[0]
        if not file.save(data):
            return None
        logging.info(
            "File replaced, master: %s, builder: %s, test_type: %s, name: %s, data key: %s.",
            master, builder, test_type, file.name, str(file.data_keys))
        return file

    def save(self, data):
        """Persist `data` and refresh this record's timestamp."""
        if not self.save_data(data):
            return False
        self.date = datetime.now()
        self.put()
        return True

    def _delete_all(self):
        # Remove both the stored data blocks and the datastore record itself.
        self.delete_data()
        self.delete()
| gpl-2.0 |
ibrahimkarahan/Flexget | tests/test_discover.py | 11 | 5577 | from __future__ import unicode_literals, division, absolute_import
from datetime import datetime, timedelta
from flexget.entry import Entry
from flexget import plugin
from tests import FlexGetBase
class SearchPlugin(object):
    """Fake search plugin used by the discover tests.

    The configured value drives the result:
    - 'fail': raise a PluginError
    - any other falsy value: return no results
    - anything else: echo back the entry that was searched for
    """
    schema = {}

    def search(self, task, entry, config=None):
        if config == 'fail':
            raise plugin.PluginError('search plugin failure')
        if not config:
            return []
        return [Entry(entry)]
# Register the fake search plugin so task configs can reference 'test_search'.
plugin.register(SearchPlugin, 'test_search', groups=['search'], api_ver=2)
class EstRelease(object):
    """Fake release-estimate plugin: echoes the entry's 'est_release' field."""

    def estimate(self, entry):
        """Return the pre-seeded estimated release date, or None if unset."""
        estimated = entry.get('est_release')
        return estimated
# Register the fake estimator so discover's release-date gating can be tested.
plugin.register(EstRelease, 'test_release', groups=['estimate_release'], api_ver=2)
class TestDiscover(FlexGetBase):
    """Tests for the discover plugin: sorting, search interval, release
    estimates and emit_series integration."""

    __yaml__ = """
        tasks:
          test_sort:
            discover:
              ignore_estimations: yes
              what:
                - mock:
                    - title: Foo
                      search_sort: 1
                    - title: Bar
                      search_sort: 3
                    - title: Baz
                      search_sort: 2
              from:
                - test_search: yes
          test_interval:
            discover:
              ignore_estimations: yes
              what:
                - mock:
                    - title: Foo
              from:
                - test_search: yes
          test_estimates:
            discover:
              interval: 0 seconds
              what:
                - mock:
                    - title: Foo
              from:
                - test_search: yes
          test_emit_series:
            discover:
              ignore_estimations: yes
              what:
                - emit_series:
                    from_start: yes
              from:
                - test_search: yes
            series:
              - My Show:
                  identified_by: ep
            rerun: 0
          test_emit_series_with_bad_search:
            discover:
              ignore_estimations: yes
              what:
                - emit_series:
                    from_start: yes
              from:
                - test_search: fail
                - test_search: no
                - test_search: yes
            series:
              - My Show:
                  identified_by: ep
            mock_output: yes
            rerun: 3
    """

    def test_sort(self):
        """Discover output should be ordered by search_sort, descending."""
        self.execute_task('test_sort')
        assert len(self.task.entries) == 3
        # Entries should be ordered by search_sort
        order = list(e.get('search_sort') for e in self.task.entries)
        assert order == sorted(order, reverse=True)

    def test_interval(self):
        """Recently searched entries wait out the discover interval."""
        self.execute_task('test_interval')
        assert len(self.task.entries) == 1
        # Insert a new entry into the search input
        self.manager.config['tasks']['test_interval']['discover']['what'][0]['mock'].append({'title': 'Bar'})
        self.execute_task('test_interval')
        # First entry should be waiting for interval
        assert len(self.task.entries) == 1
        assert self.task.entries[0]['title'] == 'Bar'
        # Now they should both be waiting
        self.execute_task('test_interval')
        assert len(self.task.entries) == 0

    def test_estimates(self):
        """Entries are only searched once their estimated release has passed."""
        mock_config = self.manager.config['tasks']['test_estimates']['discover']['what'][0]['mock']
        # It should not be searched before the release date
        mock_config[0]['est_release'] = datetime.now() + timedelta(days=7)
        self.execute_task('test_estimates')
        assert len(self.task.entries) == 0
        # It should be searched after the release date
        mock_config[0]['est_release'] = datetime.now()
        self.execute_task('test_estimates')
        assert len(self.task.entries) == 1

    def test_emit_series(self):
        """emit_series feeds the first episode search through discover."""
        self.execute_task('test_emit_series')
        assert self.task.find_entry(title='My Show S01E01')

    def test_emit_series_with_bad_search(self):
        """Failing/empty search plugins are tolerated; reruns advance episodes."""
        self.execute_task('test_emit_series_with_bad_search')
        for epnum in xrange(1, 5):
            title = 'My Show S01E0%d' % epnum
            assert any(e['title'] == title for e in self.task.mock_output), '%s not accepted' % title
        assert len(self.task.mock_output) == 4, \
            '4 episodes should have been accepted, not %s' % len(self.task.mock_output)
class TestEmitSeriesInDiscover(FlexGetBase):
    """Tests emit_series backfill behavior when used inside discover."""

    __yaml__ = """
        tasks:
          inject_series:
            series:
              - My Show 2
          test_emit_series_backfill:
            discover:
              ignore_estimations: yes
              what:
                - emit_series:
                    backfill: yes
              from:
                - test_search: yes
            series:
              - My Show 2:
                  tracking: backfill
                  identified_by: ep
            rerun: 0
    """

    def inject_series(self, release_name):
        """Seed series history by injecting an already-seen release."""
        self.execute_task('inject_series', options = {'inject': [Entry(title=release_name, url='')]})

    def test_emit_series_backfill(self):
        """With backfill tracking, discover emits older seasons and newer episodes."""
        self.inject_series('My Show 2 S02E01')
        self.execute_task('test_emit_series_backfill')
        assert self.task.find_entry(title='My Show 2 S01E01')
        assert self.task.find_entry(title='My Show 2 S02E02')
| mit |
lightbase/LBConverter | lbconverter/config.py | 1 | 4423 |
def set_config():
    """Populate module-level configuration globals from development.ini.

    Reads the [LBConverter] and [Daemon] sections and defines, as globals
    of this module: the REST endpoint, output path, OpenOffice port,
    daemon pid/log paths, the supported file extensions and the
    OpenOffice import/export filter maps.
    """
    # Python 2 module name; this would be `configparser` on Python 3.
    import ConfigParser
    config = ConfigParser.ConfigParser()
    config.read('development.ini')
    global REST_URL
    global OUTPATH
    global DEFAULT_OPENOFFICE_PORT
    global PIDFILE_PATH
    global LOGFILE_PATH
    global SUPPORTED_FILES
    #---------------------#
    # Configuration Start #
    #---------------------#
    REST_URL = config.get('LBConverter', 'rest_url')
    OUTPATH = config.get('LBConverter', 'outpath')
    DEFAULT_OPENOFFICE_PORT = int(config.get('LBConverter', 'default_openoffice_port'))
    PIDFILE_PATH = config.get('Daemon', 'pidfile_path')
    LOGFILE_PATH = config.get('Daemon', 'logfile_path')
    # Extensions accepted for conversion; commented entries are known
    # formats that are currently disabled.
    SUPPORTED_FILES = [
        'doc',
        'docx',
        'odt',
        'rtf',
        'txt',
        'html',
        'pdf',
        'xml',
        #'ods',
        #'xls',
        #'xlsx',
        #'ppt',
        #'pptx',
        #'pps',
        #'ppsx',
        #'odp'
    ]
    #-------------------#
    # Configuration End #
    #-------------------#
    # OpenOffice document family names used as keys in the filter maps.
    global FAMILY_TEXT
    global FAMILY_WEB
    global FAMILY_SPREADSHEET
    global FAMILY_PRESENTATION
    global FAMILY_DRAWING
    FAMILY_TEXT = "Text"
    FAMILY_WEB = "Web"
    FAMILY_SPREADSHEET = "Spreadsheet"
    FAMILY_PRESENTATION = "Presentation"
    FAMILY_DRAWING = "Drawing"
    # see http://wiki.services.openoffice.org/wiki/Framework/Article/Filter
    # most formats are auto-detected; only those requiring options are defined here
    global IMPORT_FILTER_MAP
    IMPORT_FILTER_MAP = {
        "txt": {
            "FilterName": "Text (encoded)",
            "FilterOptions": "utf8"
        },
        "csv": {
            "FilterName": "Text - txt - csv (StarCalc)",
            "FilterOptions": "44,34,0"
        },
        # Options applied to every load: open hidden, silently, repairing
        # damaged packages when possible.
        'default':{
            'Hidden': True,
            'RepairPackage': True,
            'Silent': True,
        }
    }
    # Export filters keyed by target extension, then by document family.
    global EXPORT_FILTER_MAP
    EXPORT_FILTER_MAP = {
        "pdf": {
            FAMILY_TEXT: { "FilterName": "writer_pdf_Export" },
            FAMILY_WEB: { "FilterName": "writer_web_pdf_Export" },
            FAMILY_SPREADSHEET: { "FilterName": "calc_pdf_Export" },
            FAMILY_PRESENTATION: { "FilterName": "impress_pdf_Export" },
            FAMILY_DRAWING: { "FilterName": "draw_pdf_Export" }
        },
        "html": {
            FAMILY_TEXT: { "FilterName": "HTML (StarWriter)" },
            FAMILY_SPREADSHEET: { "FilterName": "HTML (StarCalc)" },
            FAMILY_PRESENTATION: { "FilterName": "impress_html_Export" }
        },
        "odt": {
            FAMILY_TEXT: { "FilterName": "writer8" },
            FAMILY_WEB: { "FilterName": "writerweb8_writer" }
        },
        "doc": {
            FAMILY_TEXT: { "FilterName": "MS Word 97" }
        },
        "docx": {
            FAMILY_TEXT: { "FilterName": "MS Word 2007 XML" }
        },
        "rtf": {
            FAMILY_TEXT: { "FilterName": "Rich Text Format" }
        },
        "txt": {
            FAMILY_TEXT: {
                "FilterName": "Text",
                "FilterOptions": "utf8"
            }
        },
        "ods": {
            FAMILY_SPREADSHEET: { "FilterName": "calc8" }
        },
        "xls": {
            FAMILY_SPREADSHEET: { "FilterName": "MS Excel 97" }
        },
        "csv": {
            FAMILY_SPREADSHEET: {
                "FilterName": "Text - txt - csv (StarCalc)",
                "FilterOptions": "44,34,0"
            }
        },
        "odp": {
            FAMILY_PRESENTATION: { "FilterName": "impress8" }
        },
        "ppt": {
            FAMILY_PRESENTATION: { "FilterName": "MS PowerPoint 97" }
        },
        "swf": {
            FAMILY_DRAWING: { "FilterName": "draw_flash_Export" },
            FAMILY_PRESENTATION: { "FilterName": "impress_flash_Export" }
        }
    }
    # Page-style tweaks applied per family before export.
    global PAGE_STYLE_OVERRIDE_PROPERTIES
    PAGE_STYLE_OVERRIDE_PROPERTIES = {
        FAMILY_SPREADSHEET: {
            #--- Scale options: uncomment 1 of the 3 ---
            # a) 'Reduce / enlarge printout': 'Scaling factor'
            "PageScale": 100,
            # b) 'Fit print range(s) to width / height': 'Width in pages' and 'Height in pages'
            #"ScaleToPagesX": 1, "ScaleToPagesY": 1000,
            # c) 'Fit print range(s) on number of pages': 'Fit print range(s) on number of pages'
            #"ScaleToPages": 1,
            "PrintGrid": False
        }
    }
| gpl-2.0 |
Deren-Liao/google-cloud-visualstudio | tools/find_unused_strings.py | 4 | 2843 | #! /usr/bin/env python
"""Script that lists all files that match the given extension
This script takes a directory and will list all of the files
recursively filtering by the given extension.
"""
import argparse
import os
import sys
import xml.etree.ElementTree as ET
# Command-line interface for the script.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--directory',
                    help='The root directory',
                    required=True)
parser.add_argument('-s', '--strings',
                    help='The strings (.resx) file to check.',
                    required=True)
# All string references are of the form: Resources.<string name>
resource_string_prefix='Resources.'
def list_all_files(dir):
    """Return all source files (.cs and .xaml) under the given directory.

    Args:
        dir: Root directory to walk recursively.

    Returns:
        A list of full paths to every .cs and .xaml file found.
    """
    result = []
    for root, _, filenames in os.walk(dir):
        for name in filenames:
            # Only the extension matters; the base name is irrelevant.
            _, ext = os.path.splitext(name)
            if ext in ('.cs', '.xaml'):
                result.append(os.path.join(root, name))
    return result
def is_valid_char(src):
    """Determine if the given char is valid inside a resource identifier.

    C# identifiers (and therefore .resx string names) may contain letters,
    digits and underscores.  The previous alnum-only test truncated names
    such as ``Some_String`` at the underscore, which made every string
    whose name contains an underscore look unused.
    """
    return src.isalnum() or src == '_'
def is_valid_string_name(src):
    """A valid string name begins with an uppercase character."""
    first_char = src[0]
    return first_char.isupper()
def extract_string(line, idx, result):
    """Extract the first string reference at or after idx in line.

    Adds the referenced string name to the *result* set and returns the
    index just past the reference, or -1 if no reference was found.
    """
    begin = line.find(resource_string_prefix, idx)
    if begin == -1:
        return -1
    begin = begin + len(resource_string_prefix)
    # Default to the end of the line: if every remaining character is a
    # valid identifier character, the reference runs to the line's end.
    # (The old code left end == -1 here, so line[begin:-1] silently
    # dropped the final character of the name.)
    end = len(line)
    for i in range(begin, len(line)):
        if not is_valid_char(line[i]):
            end = i
            break
    result.add(line[begin:end])
    return end
def find_strings(line, result):
    """Collect every string reference found in *line* into *result*."""
    pos = 0
    while True:
        pos = extract_string(line, pos, result)
        if pos == -1:
            break
def get_used_strings(file):
    """Return the set of string names referenced by the given file."""
    used = set()
    with open(file, 'r') as source:
        for current_line in source:
            find_strings(current_line, used)
    return used
def load_strings(src):
    """Parse the .resx file *src* and return the set of defined string names."""
    root = ET.parse(src).getroot()
    return {node.attrib['name'] for node in root.findall('data')}
def main(params):
    """Report every string in the .resx file that no source file references."""
    unused = load_strings(params.strings)
    for source_file in list_all_files(params.directory):
        unused -= get_used_strings(source_file)
    if unused:
        print('Strings that are not used:')
        for name in unused:
            print(name)
# Entrypoint into the script: parse command-line flags, then run the check.
if __name__ == '__main__':
    main(parser.parse_args())
| apache-2.0 |
etherkit/OpenBeacon2 | client/macos/venv/lib/python3.8/site-packages/pip/_internal/operations/build/wheel_legacy.py | 5 | 3356 | import logging
import os.path
from pip._internal.cli.spinners import open_spinner
from pip._internal.utils.setuptools_build import (
make_setuptools_bdist_wheel_args,
)
from pip._internal.utils.subprocess import (
LOG_DIVIDER,
call_subprocess,
format_command_args,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional, Text
logger = logging.getLogger(__name__)
def format_command_result(
    command_args,  # type: List[str]
    command_output,  # type: Text
):
    # type: (...) -> str
    """Format command information for logging."""
    pieces = ['Command arguments: {}\n'.format(format_command_args(command_args))]
    if not command_output:
        pieces.append('Command output: None')
    elif logger.getEffectiveLevel() > logging.DEBUG:
        # Full output is only shown at DEBUG verbosity.
        pieces.append('Command output: [use --verbose to show]')
    else:
        output = command_output
        if not output.endswith('\n'):
            output += '\n'
        pieces.append('Command output:\n{}{}'.format(output, LOG_DIVIDER))
    return ''.join(pieces)
def get_legacy_build_wheel_path(
    names,  # type: List[str]
    temp_dir,  # type: str
    name,  # type: str
    command_args,  # type: List[str]
    command_output,  # type: Text
):
    # type: (...) -> Optional[str]
    """Return the path to the wheel in the temporary build directory."""
    # Sort for determinism.
    ordered = sorted(names)
    if not ordered:
        warning = 'Legacy build of wheel for {!r} created no files.\n'.format(name)
        warning += format_command_result(command_args, command_output)
        logger.warning(warning)
        return None
    if len(ordered) > 1:
        warning = (
            'Legacy build of wheel for {!r} created more than one file.\n'
            'Filenames (choosing first): {}\n'
        ).format(name, ordered)
        warning += format_command_result(command_args, command_output)
        logger.warning(warning)
    return os.path.join(temp_dir, ordered[0])
def build_wheel_legacy(
    name,  # type: str
    setup_py_path,  # type: str
    source_dir,  # type: str
    global_options,  # type: List[str]
    build_options,  # type: List[str]
    tempd,  # type: str
):
    # type: (...) -> Optional[str]
    """Build one unpacked package using the "legacy" (setup.py) build process.

    Runs ``setup.py bdist_wheel`` for *name* in *source_dir*, placing the
    built wheel in *tempd*.  Returns the path to the wheel if successfully
    built; otherwise, returns None.
    """
    wheel_args = make_setuptools_bdist_wheel_args(
        setup_py_path,
        global_options=global_options,
        build_options=build_options,
        destination_dir=tempd,
    )
    spin_message = 'Building wheel for {} (setup.py)'.format(name)
    with open_spinner(spin_message) as spinner:
        logger.debug('Destination directory: %s', tempd)
        try:
            output = call_subprocess(
                wheel_args,
                cwd=source_dir,
                spinner=spinner,
            )
        except Exception:
            # Deliberately broad: any build failure is reported and turned
            # into a None result rather than propagating to the caller.
            spinner.finish("error")
            logger.error('Failed building wheel for %s', name)
            return None
        # The build succeeded; locate the single wheel it should have
        # produced inside the destination directory.
        names = os.listdir(tempd)
        wheel_path = get_legacy_build_wheel_path(
            names=names,
            temp_dir=tempd,
            name=name,
            command_args=wheel_args,
            command_output=output,
        )
        return wheel_path
| gpl-3.0 |
CSC-ORG/Dynamic-Dashboard-2015 | engine/lib/python2.7/site-packages/django/contrib/redirects/tests.py | 112 | 3358 | from django import http
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, modify_settings, override_settings
from django.utils import six
from .middleware import RedirectFallbackMiddleware
from .models import Redirect
@modify_settings(MIDDLEWARE_CLASSES={'append':
    'django.contrib.redirects.middleware.RedirectFallbackMiddleware'})
@override_settings(APPEND_SLASH=False, SITE_ID=1)
class RedirectTests(TestCase):
    """Tests for the redirects app with the fallback middleware enabled."""
    def setUp(self):
        self.site = Site.objects.get(pk=settings.SITE_ID)
    def test_model(self):
        """The str() of a Redirect shows old and new paths."""
        r1 = Redirect.objects.create(
            site=self.site, old_path='/initial', new_path='/new_target')
        self.assertEqual(six.text_type(r1), "/initial ---> /new_target")
    def test_redirect(self):
        """A stored redirect answers with a 301 to the new path."""
        Redirect.objects.create(
            site=self.site, old_path='/initial', new_path='/new_target')
        response = self.client.get('/initial')
        self.assertRedirects(response,
            '/new_target', status_code=301, target_status_code=404)
    @override_settings(APPEND_SLASH=True)
    def test_redirect_with_append_slash(self):
        """With APPEND_SLASH, a slash-less request still matches '/initial/'."""
        Redirect.objects.create(
            site=self.site, old_path='/initial/', new_path='/new_target/')
        response = self.client.get('/initial')
        self.assertRedirects(response,
            '/new_target/', status_code=301, target_status_code=404)
    @override_settings(APPEND_SLASH=True)
    def test_redirect_with_append_slash_and_query_string(self):
        """The query string is part of the matched old path."""
        Redirect.objects.create(
            site=self.site, old_path='/initial/?foo', new_path='/new_target/')
        response = self.client.get('/initial?foo')
        self.assertRedirects(response,
            '/new_target/', status_code=301, target_status_code=404)
    def test_response_gone(self):
        """When the redirect target is '', return a 410"""
        Redirect.objects.create(
            site=self.site, old_path='/initial', new_path='')
        response = self.client.get('/initial')
        self.assertEqual(response.status_code, 410)
    @modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
    def test_sites_not_installed(self):
        """The middleware requires django.contrib.sites to be installed."""
        with self.assertRaises(ImproperlyConfigured):
            RedirectFallbackMiddleware()
class OverriddenRedirectFallbackMiddleware(RedirectFallbackMiddleware):
    # Use HTTP responses different from the defaults: 403 instead of 410
    # for gone redirects, and 302 instead of 301 for normal ones.
    response_gone_class = http.HttpResponseForbidden
    response_redirect_class = http.HttpResponseRedirect
@modify_settings(MIDDLEWARE_CLASSES={'append':
    'django.contrib.redirects.tests.OverriddenRedirectFallbackMiddleware'})
@override_settings(SITE_ID=1)
class OverriddenRedirectMiddlewareTests(TestCase):
    """The response classes of RedirectFallbackMiddleware can be overridden."""
    def setUp(self):
        self.site = Site.objects.get(pk=settings.SITE_ID)
    def test_response_gone_class(self):
        # Overridden middleware answers 403 (not the default 410) for ''.
        Redirect.objects.create(
            site=self.site, old_path='/initial/', new_path='')
        response = self.client.get('/initial/')
        self.assertEqual(response.status_code, 403)
    def test_response_redirect_class(self):
        # Overridden middleware answers 302 (not the default 301).
        Redirect.objects.create(
            site=self.site, old_path='/initial/', new_path='/new_target/')
        response = self.client.get('/initial/')
        self.assertEqual(response.status_code, 302)
| mit |
jpopelka/fabric8-analytics-worker | alembic/versions/3fd95b3a69f5_bye_reviews.py | 3 | 1476 | """Bye reviews.
Revision ID: 3fd95b3a69f5
Revises: 49c27b67936f
Create Date: 2016-06-17 06:56:52.333454
"""
# revision identifiers, used by Alembic to order migrations.
revision = '3fd95b3a69f5'
down_revision = '49c27b67936f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Upgrade the database to a newer revision: drop the obsolete 'review' table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('review')
    # ### end Alembic commands ###
def downgrade():
    """Downgrade the database to an older revision: recreate the 'review' table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('review',
                    sa.Column('id', sa.INTEGER(), nullable=False),
                    sa.Column('approved', sa.BOOLEAN(), autoincrement=False, nullable=True),
                    sa.Column('user', sa.INTEGER(), autoincrement=False, nullable=True),
                    sa.Column('timestamp', postgresql.TIMESTAMP(), autoincrement=False,
                              nullable=True),
                    sa.Column('comment', sa.TEXT(), autoincrement=False, nullable=True),
                    sa.Column('epv', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
                    sa.ForeignKeyConstraint(['user'], ['user.id'], name='review_user_fkey'),
                    sa.PrimaryKeyConstraint('id', name='review_pkey'))
    # ### end Alembic commands ###
| gpl-3.0 |
ajohnson23/depot_tools | tests/checkout_test.py | 25 | 10136 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for checkout.py."""
import logging
import os
import shutil
import sys
import unittest
from xml.etree import ElementTree
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(ROOT_DIR))
from testing_support import fake_repos
from testing_support.patches_data import GIT, RAW
import checkout
import patch
import subprocess2
# Verbose subprocess output; pass -v to enable it.
DEBUGGING = False
# A patch that will fail to apply: GIT.PATCH with its context line 'e' removed.
BAD_PATCH = ''.join(
    [l for l in GIT.PATCH.splitlines(True) if l.strip() != 'e'])
class FakeRepos(fake_repos.FakeReposBase):
  """Fake git repository layout shared by the checkout tests."""
  TEST_GIT_REPO = 'repo_1'
  def populateGit(self):
    """Creates a few revisions of changes files."""
    self._commit_git(self.TEST_GIT_REPO, self._git_tree())
    # Fix for the remote rejected error. For more details see:
    # http://stackoverflow.com/questions/2816369/git-push-error-remote
    subprocess2.check_output(
        ['git', '--git-dir',
         os.path.join(self.git_root, self.TEST_GIT_REPO, '.git'),
         'config', '--bool', 'core.bare', 'true'])
    assert os.path.isdir(
        os.path.join(self.git_root, self.TEST_GIT_REPO, '.git'))
  @staticmethod
  def _git_tree():
    # Maps relative file path -> file content for the initial commit.
    fs = {}
    fs['origin'] = 'git@1'
    fs['extra'] = 'dummy\n' # new
    fs['codereview.settings'] = (
        '# Test data\n'
        'bar: pouet\n')
    fs['chrome/file.cc'] = (
        'a\n'
        'bb\n'
        'ccc\n'
        'dd\n'
        'e\n'
        'ff\n'
        'ggg\n'
        'hh\n'
        'i\n'
        'jj\n'
        'kkk\n'
        'll\n'
        'm\n'
        'nn\n'
        'ooo\n'
        'pp\n'
        'q\n')
    fs['chromeos/views/DOMui_menu_widget.h'] = (
        '// Copyright (c) 2010\n'
        '// Use of this source code\n'
        '// found in the LICENSE file.\n'
        '\n'
        '#ifndef DOM\n'
        '#define DOM\n'
        '#pragma once\n'
        '\n'
        '#include <string>\n'
        '#endif\n')
    return fs
# pylint: disable=R0201
class BaseTest(fake_repos.FakeReposTestBase):
  """Shared checkout-test logic; VCS-specific subclasses fill in the hooks."""
  name = 'foo'
  FAKE_REPOS_CLASS = FakeRepos
  is_read_only = False
  def setUp(self):
    super(BaseTest, self).setUp()
    # Wrap subprocess2.call so child output is swallowed unless DEBUGGING.
    self._old_call = subprocess2.call
    def redirect_call(args, **kwargs):
      if not DEBUGGING:
        kwargs.setdefault('stdout', subprocess2.PIPE)
        kwargs.setdefault('stderr', subprocess2.STDOUT)
      return self._old_call(args, **kwargs)
    subprocess2.call = redirect_call
    self.usr, self.pwd = self.FAKE_REPOS.USERS[0]
    self.previous_log = None
  def tearDown(self):
    # Restore the real subprocess2.call wrapped in setUp().
    subprocess2.call = self._old_call
    super(BaseTest, self).tearDown()
  def get_patches(self):
    """Returns the standard patchset exercised by the tests."""
    return patch.PatchSet([
        patch.FilePatchDiff('new_dir/subdir/new_file', GIT.NEW_SUBDIR, []),
        patch.FilePatchDiff('chrome/file.cc', GIT.PATCH, []),
        # TODO(maruel): Test with is_new == False.
        patch.FilePatchBinary('bin_file', '\x00', [], is_new=True),
        patch.FilePatchDelete('extra', False),
    ])
  def get_trunk(self, modified):
    # Hook: subclasses return the expected tree, optionally post-patch.
    raise NotImplementedError()
  def _check_base(self, co, root, expected):
    # Hook: subclasses verify the full prepare/patch/commit cycle.
    raise NotImplementedError()
  def _check_exception(self, co, err_msg):
    """Applying BAD_PATCH must fail with the expected error message."""
    co.prepare(None)
    try:
      co.apply_patch([patch.FilePatchDiff('chrome/file.cc', BAD_PATCH, [])])
      self.fail()
    except checkout.PatchApplicationFailed, e:
      self.assertEquals(e.filename, 'chrome/file.cc')
      self.assertEquals(e.status, err_msg)
  def _log(self):
    # Hook: subclasses return metadata about the tip-of-tree commit.
    raise NotImplementedError()
  def _test_process(self, co_lambda):
    """Makes sure the process lambda is called correctly."""
    post_processors = [lambda *args: results.append(args)]
    co = co_lambda(post_processors)
    self.assertEquals(post_processors, co.post_processors)
    co.prepare(None)
    ps = self.get_patches()
    results = []
    co.apply_patch(ps)
    expected_co = getattr(co, 'checkout', co)
    # Because of ReadOnlyCheckout.
    expected = [(expected_co, p) for p in ps.patches]
    self.assertEquals(len(expected), len(results))
    self.assertEquals(expected, results)
  def _check_move(self, co):
    """Makes sure file moves are handled correctly."""
    co.prepare(None)
    patchset = patch.PatchSet([
        patch.FilePatchDelete('chromeos/views/DOMui_menu_widget.h', False),
        patch.FilePatchDiff(
            'chromeos/views/webui_menu_widget.h', GIT.RENAME_PARTIAL, []),
    ])
    co.apply_patch(patchset)
    # Make sure chromeos/views/DOMui_menu_widget.h is deleted and
    # chromeos/views/webui_menu_widget.h is correctly created.
    root = os.path.join(self.root_dir, self.name)
    tree = self.get_trunk(False)
    del tree['chromeos/views/DOMui_menu_widget.h']
    tree['chromeos/views/webui_menu_widget.h'] = (
        '// Copyright (c) 2011\n'
        '// Use of this source code\n'
        '// found in the LICENSE file.\n'
        '\n'
        '#ifndef WEB\n'
        '#define WEB\n'
        '#pragma once\n'
        '\n'
        '#include <string>\n'
        '#endif\n')
    #print patchset[0].get()
    #print fake_repos.read_tree(root)
    self.assertTree(tree, root)
class GitBaseTest(BaseTest):
  """Implements the BaseTest hooks on top of the fake git repository."""
  def setUp(self):
    super(GitBaseTest, self).setUp()
    self.enabled = self.FAKE_REPOS.set_up_git()
    self.assertTrue(self.enabled)
    self.previous_log = self._log()
  # pylint: disable=W0221
  def _log(self, log_from_local_repo=False):
    """Returns {'revision', 'author', 'msg'} of the tip commit."""
    if log_from_local_repo:
      repo_root = os.path.join(self.root_dir, self.name)
    else:
      repo_root = os.path.join(self.FAKE_REPOS.git_root,
                               self.FAKE_REPOS.TEST_GIT_REPO)
    out = subprocess2.check_output(
        ['git',
         '--git-dir',
         os.path.join(repo_root, '.git'),
         'log', '--pretty=format:"%H%x09%ae%x09%ad%x09%s"',
         '--max-count=1']).strip('"')
    if out and len(out.split()) != 0:
      revision = out.split()[0]
    else:
      # Empty repository: no commit yet.
      return {'revision': 0}
    return {
        'revision': revision,
        'author': out.split()[1],
        'msg': out.split()[-1],
    }
  def _check_base(self, co, root, expected):
    """Full cycle: prepare, apply patches, commit, and verify the tree."""
    read_only = isinstance(co, checkout.ReadOnlyCheckout)
    self.assertEquals(read_only, self.is_read_only)
    if not read_only:
      self.FAKE_REPOS.git_dirty = True
    self.assertEquals(root, co.project_path)
    git_rev = co.prepare(None)
    self.assertEquals(unicode, type(git_rev))
    self.assertEquals(self.previous_log['revision'], git_rev)
    self.assertEquals('pouet', co.get_settings('bar'))
    self.assertTree(self.get_trunk(False), root)
    patches = self.get_patches()
    co.apply_patch(patches)
    self.assertEquals(
        ['bin_file', 'chrome/file.cc', 'new_dir/subdir/new_file', 'extra'],
        patches.filenames)
    # Hackish to verify _branches() internal function.
    # pylint: disable=W0212
    self.assertEquals(
        (['master', 'working_branch'], 'working_branch'),
        co._branches())
    # Verify that the patch is applied even for read only checkout.
    self.assertTree(self.get_trunk(True), root)
    fake_author = self.FAKE_REPOS.USERS[1][0]
    revision = co.commit(u'msg', fake_author)
    # Nothing changed.
    self.assertTree(self.get_trunk(True), root)
    if read_only:
      self.assertEquals('FAKE', revision)
      self.assertEquals(self.previous_log['revision'], co.prepare(None))
      # Changes should be reverted now.
      self.assertTree(self.get_trunk(False), root)
      expected = self.previous_log
    else:
      self.assertEquals(self._log()['revision'], revision)
      self.assertEquals(self._log()['revision'], co.prepare(None))
      self.assertTree(self.get_trunk(True), root)
      expected = self._log()
    actual = self._log(log_from_local_repo=True)
    self.assertEquals(expected, actual)
  def get_trunk(self, modified):
    """Returns the expected tree, optionally with the patches applied."""
    tree = {}
    for k, v in self.FAKE_REPOS.git_hashes[
        self.FAKE_REPOS.TEST_GIT_REPO][1][1].iteritems():
      assert k not in tree
      tree[k] = v
    if modified:
      content_lines = tree['chrome/file.cc'].splitlines(True)
      tree['chrome/file.cc'] = ''.join(
          content_lines[0:5] + ['FOO!\n'] + content_lines[5:])
      tree['bin_file'] = '\x00'
      del tree['extra']
      tree['new_dir/subdir/new_file'] = 'A new file\nshould exist.\n'
    return tree
  def _test_prepare(self, co):
    print co.prepare(None)
class GitCheckout(GitBaseTest):
  """Tests checkout.GitCheckout against the fake git repository."""
  def _get_co(self, post_processors):
    self.assertNotEqual(False, post_processors)
    return checkout.GitCheckout(
        root_dir=self.root_dir,
        project_name=self.name,
        remote_branch='master',
        git_url=os.path.join(self.FAKE_REPOS.git_root,
                             self.FAKE_REPOS.TEST_GIT_REPO),
        commit_user=self.usr,
        post_processors=post_processors)
  def testAll(self):
    root = os.path.join(self.root_dir, self.name)
    self._check_base(self._get_co(None), root, None)
  @unittest.skip('flaky')
  def testException(self):
    self._check_exception(
        self._get_co(None),
        'While running git apply --index -3 -p1;\n fatal: corrupt patch at '
        'line 12\n')
  def testProcess(self):
    self._test_process(self._get_co)
  def _testPrepare(self):
    self._test_prepare(self._get_co(None))
  def testMove(self):
    # Apply a rename patch and verify git staged the move correctly.
    co = self._get_co(None)
    self._check_move(co)
    out = subprocess2.check_output(
        ['git', 'diff', '--staged', '--name-status'], cwd=co.project_path)
    out = sorted(out.splitlines())
    expected = sorted(
        [
          'A\tchromeos/views/webui_menu_widget.h',
          'D\tchromeos/views/DOMui_menu_widget.h',
        ])
    self.assertEquals(expected, out)
if __name__ == '__main__':
  # -v turns on child process output and DEBUG-level logging.
  if '-v' in sys.argv:
    DEBUGGING = True
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
  else:
    logging.basicConfig(
        level=logging.ERROR,
        format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
  unittest.main()
| bsd-3-clause |
blippy/pyRenamer | src/pyrenamer_pattern_editor.py | 2 | 9950 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2006-2008 Adolfo González Blázquez <code@infinicode.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
If you find any bugs or have any suggestions email: code@infinicode.org
"""
try:
import pygtk
pygtk.require('2.0')
except:
print "PyGtk 2.0 or later required for this app to run"
raise SystemExit
try:
import gtk
import gtk.glade
import gobject
except:
raise SystemExit
import pyrenamer_globals as pyrenamerglob
from gettext import gettext as _
import os
class PyrenamerPatternEditor:
    """GTK dialog to create, edit, reorder and persist rename patterns.

    Patterns are stored one per line in per-selector files under
    ``<config_dir>/patterns`` (selectors: main/images/music, ori/dest).
    """
    def __init__(self, main):
        self.main = main
        # Check if config directory exists; create it on first run.
        self.config_dir = os.path.join(pyrenamerglob.config_dir, 'patterns')
        if not os.path.isdir(self.config_dir):
            os.makedirs(self.config_dir)
    def get_patterns(self, selector):
        """Returns the list of saved patterns for *selector*, seeding defaults."""
        patterns = []
        default_patterns = {
            "main_ori": "{X}\n",
            "main_dest": "{1}\n",
            "images_ori": "{X}\n",
            "images_dest": "{1}\n" \
                "{imageyear}{imagemonth}{imageday}_{imagetime}_{1}\n" \
                "{imagewidth}x{imageheight}_{1}\n",
            "music_ori": "{X}\n",
            "music_dest": "{1}\n" \
                "{track} - {artist} ({album}) - {title}\n" \
                "{track} - {artist}\n",
        }
        config_file = os.path.join(self.config_dir, selector)
        # If the config file doesn't exist, create it with default options
        if not os.path.isfile(config_file):
            f = open(config_file, 'w')
            try:
                f.write(default_patterns[selector])
            finally:
                f.close()
        # Read patterns from file, one per line.
        f = open(config_file, 'r')
        try:
            for line in f:
                line = line.rstrip("\r\n")
                patterns.append(line)
        finally:
            f.close()
        # Return found patterns; fall back to a minimal sane default.
        if patterns == []:
            if "ori" in selector: patterns.append('{X}')
            else: patterns.append('{1}')
        return patterns
    def save_patterns(self, selector):
        """Writes the treeview model's rows back to the selector's file."""
        config_file = os.path.join(self.config_dir, selector)
        f = open(config_file, 'w')
        iter = self.model.get_iter_first()
        try:
            while iter != None:
                val = self.model.get_value(iter, 0)
                f.write(val + "\n")
                iter = self.model.iter_next(iter)
        finally:
            f.close()
    def add_pattern(self, selector, pattern):
        """Appends a single pattern to the selector's file."""
        config_file = os.path.join(self.config_dir, selector)
        f = open(config_file, 'a')
        try:
            f.write(pattern + "\n")
        finally:
            f.close()
    def create_window(self, selector):
        """ Create pattern editor dialog and connect signals """
        # Save the selector for the future (used when the window is destroyed)
        self.selector = selector
        # Create the window
        self.pattern_edit_tree = gtk.glade.XML(pyrenamerglob.gladefile, "pattern_edit_window")
        # Get widgets
        self.pattern_edit_window = self.pattern_edit_tree.get_widget('pattern_edit_window')
        self.pattern_edit_treeview = self.pattern_edit_tree.get_widget('pattern_edit_treeview')
        # Signals
        signals = {
            "on_pattern_edit_add_clicked": self.on_pattern_edit_add_clicked,
            "on_pattern_edit_del_clicked": self.on_pattern_edit_del_clicked,
            "on_pattern_edit_edit_clicked": self.on_pattern_edit_edit_clicked,
            "on_pattern_edit_up_clicked": self.on_pattern_edit_up_clicked,
            "on_pattern_edit_down_clicked": self.on_pattern_edit_down_clicked,
            "on_pattern_edit_close_clicked": self.on_prefs_close_clicked,
            "on_pattern_edit_treeview_button_press_event": self.on_pattern_edit_treeview_button_press_event,
            "on_pattern_edit_window_destroy": self.on_pattern_edit_destroy,
        }
        self.pattern_edit_tree.signal_autoconnect(signals)
        # Set prefs window icon
        self.pattern_edit_window.set_icon_from_file(pyrenamerglob.icon)
        # Set window name
        if 'main' in selector:
            self.pattern_edit_window.set_title(_('Patterns editor'))
        elif 'images' in selector:
            self.pattern_edit_window.set_title(_('Image patterns editor'))
        elif 'music' in selector:
            self.pattern_edit_window.set_title(_('Music patterns editor'))
        self.populate_treeview(selector)
    def populate_treeview(self, selector):
        """Fills the treeview with the patterns for *selector*."""
        # Get data from main variable
        if selector == "main_ori":
            data = self.main.patterns["main_ori"]
        elif selector == "main_dest":
            data = self.main.patterns["main_dest"]
        elif selector == "images_ori":
            data = self.main.patterns["images_ori"]
        elif selector == "images_dest":
            data = self.main.patterns["images_dest"]
        elif selector == "music_ori":
            data = self.main.patterns["music_ori"]
        elif selector == "music_dest":
            data = self.main.patterns["music_dest"]
        # Create model
        self.model = gtk.ListStore(gobject.TYPE_STRING)
        for p in data:
            self.model.append([p])
        self.pattern_edit_treeview.set_model(self.model)
        # Draw treeview with a single in-place editable text column.
        renderer = gtk.CellRendererText()
        renderer.set_property('editable', True)
        renderer.connect('edited', self.on_cell_edited, self.model)
        column = gtk.TreeViewColumn("Pattern",renderer, text=0)
        self.pattern_edit_treeview.append_column(column)
        self.pattern_edit_treeview.set_headers_visible(False)
    def on_cell_edited(self, cell, path, new_text, model):
        # Commit the in-place cell edit back into the model.
        iter = model.get_iter(path)
        model.set_value(iter, 0, new_text)
    def on_pattern_edit_add_clicked(self, widget):
        # Run add dialog and get data
        dialog, entry = self.create_add_dialog()
        output = dialog.run()
        if output == 1:
            data = entry.get_text()
            dialog.destroy()
        else:
            dialog.destroy()
            return False
        if data == None or data == '':
            return False
        # Add the data after the current selection, or append at the end.
        selection = self.pattern_edit_treeview.get_selection()
        model, iter = selection.get_selected()
        if iter != None:
            iter = model.insert_after(iter, [data])
        else:
            iter = model.append([data])
        selection.select_iter(iter)
    def on_pattern_edit_del_clicked(self, widget):
        # Remove the currently selected pattern, if any.
        selection = self.pattern_edit_treeview.get_selection()
        model, iter = selection.get_selected()
        if iter != None:
            iter = model.remove(iter)
        else:
            return
    def on_pattern_edit_edit_clicked(self, widget):
        # Get data from treeview
        selection = self.pattern_edit_treeview.get_selection()
        model, iter = selection.get_selected()
        if iter != None:
            data = model.get_value(iter, 0)
        else:
            return False
        # Run edit dialog pre-filled with the current value
        dialog, entry = self.create_add_dialog()
        entry.set_text(data)
        output = dialog.run()
        if output == 1:
            data = entry.get_text()
            dialog.destroy()
        else:
            dialog.destroy()
            return False
        if data == None or data == '':
            return False
        # Edit the data
        model.set_value(iter, 0, data)
    def on_pattern_edit_up_clicked(self, widget):
        # Swap the selected row with the one above it.
        model, iter = self.pattern_edit_treeview.get_selection().get_selected()
        iter_prev = self.iter_prev(model, iter)
        if iter_prev != None:
            model.swap(iter, iter_prev)
    def on_pattern_edit_down_clicked(self, widget):
        # Swap the selected row with the one below it.
        model, iter = self.pattern_edit_treeview.get_selection().get_selected()
        iter_next = model.iter_next(iter)
        if iter_next != None:
            model.swap(iter, iter_next)
    def on_prefs_close_clicked(self, widget):
        self.pattern_edit_window.destroy()
    def on_pattern_edit_destroy(self, widget):
        # Persist the edited patterns and refresh the main window's combos.
        self.save_patterns(self.selector)
        self.main.populate_pattern_combos()
    def on_pattern_edit_treeview_button_press_event(self, widget, event):
        """ If clicked in a clean part of the listview, unselect all rows """
        x, y = event.get_coords()
        if self.pattern_edit_treeview.get_path_at_pos(int(x), int(y)) == None:
            self.pattern_edit_treeview.get_selection().unselect_all()
    def iter_prev(self, model, iter):
        """Returns the iter before *iter*, or None when already at row 0."""
        path = model.get_string_from_iter(iter)
        if path == "0":
            return None
        prev_path = int(path) -1
        return model.get_iter_from_string(str(prev_path))
    def create_add_dialog(self,):
        """ Create pattern add dialog and connect signals """
        # Create the dialog
        tree = gtk.glade.XML(pyrenamerglob.gladefile, "add_pattern_dialog")
        # Get widgets
        dialog = tree.get_widget('add_pattern_dialog')
        entry = tree.get_widget('add_pattern_entry')
        return dialog, entry
| gpl-2.0 |
ftrain/django-ftrain | kcal/migrations/0002_to_energy_model.py | 1 | 3214 |
from south.db import db
from django.db import models
from ftrain.ohlih.models import *
class Migration:
    """South migration: introduce the Energy and Consumption models."""
    def forwards(self, orm):
        """Apply the migration: create the new tables."""
        # Adding model 'Energy'
        db.create_table('ohlih_energy', (
            ('kcal_is_est', orm['ohlih.energy:kcal_is_est']),
            ('kcal', orm['ohlih.energy:kcal']),
            ('id', orm['ohlih.energy:id']),
            ('name', orm['ohlih.energy:name']),
        ))
        db.send_create_signal('ohlih', ['Energy'])
        # Adding model 'Consumption'
        db.create_table('ohlih_consumption', (
            ('in_event', orm['ohlih.consumption:in_event']),
            ('order', orm['ohlih.consumption:order']),
            ('id', orm['ohlih.consumption:id']),
            ('quantity', orm['ohlih.consumption:quantity']),
            ('of_energy', orm['ohlih.consumption:of_energy']),
        ))
        db.send_create_signal('ohlih', ['Consumption'])
    def backwards(self, orm):
        """Revert the migration: drop the tables created by forwards()."""
        # Deleting model 'Energy'
        db.delete_table('ohlih_energy')
        # Deleting model 'Consumption'
        db.delete_table('ohlih_consumption')
    # Frozen ORM definitions used by South to build the orm argument.
    models = {
        'ohlih.event': {
            'commentary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'event_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'time': ('django.db.models.fields.DateTimeField', [], {})
        },
        'ohlih.energy': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kcal': ('django.db.models.fields.IntegerField', [], {}),
            'kcal_is_est': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'ohlih.food': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ohlih.Event']"}),
            'kcal': ('django.db.models.fields.IntegerField', [], {}),
            'kcal_is_est': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'quantity': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        'ohlih.consumption': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ohlih.Event']"}),
            'of_energy': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ohlih.Energy']"}),
            'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'quantity': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        }
    }
    complete_apps = ['ohlih']
| bsd-3-clause |
georgemarshall/django | tests/gis_tests/test_ptr.py | 74 | 2398 | import ctypes
from unittest import mock
from django.contrib.gis.ptr import CPointerBase
from django.test import SimpleTestCase
class CPointerBaseTests(SimpleTestCase):
    """Tests for CPointerBase's ptr property and destructor handling."""
    def test(self):
        destructor_mock = mock.Mock()
        class NullPointerException(Exception):
            pass
        class FakeGeom1(CPointerBase):
            null_ptr_exception_class = NullPointerException
        class FakeGeom2(FakeGeom1):
            ptr_type = ctypes.POINTER(ctypes.c_float)
            destructor = destructor_mock
        fg1 = FakeGeom1()
        fg2 = FakeGeom2()
        # These assignments are OK. None is allowed because it's equivalent
        # to the NULL pointer.
        fg1.ptr = fg1.ptr_type()
        fg1.ptr = None
        fg2.ptr = fg2.ptr_type(ctypes.c_float(5.23))
        fg2.ptr = None
        # Because pointers have been set to NULL, an exception is raised on
        # access. Raising an exception is preferable to a segmentation fault
        # that commonly occurs when a C method is given a NULL reference.
        for fg in (fg1, fg2):
            with self.assertRaises(NullPointerException):
                fg.ptr
        # Anything that's either not None or the acceptable pointer type
        # results in a TypeError when trying to assign it to the `ptr` property.
        # Thus, memory addresses (integers) and pointers of the incorrect type
        # (in `bad_ptrs`) aren't allowed.
        bad_ptrs = (5, ctypes.c_char_p(b'foobar'))
        for bad_ptr in bad_ptrs:
            for fg in (fg1, fg2):
                with self.assertRaisesMessage(TypeError, 'Incompatible pointer type'):
                    fg.ptr = bad_ptr
        # Object can be deleted without a destructor set.
        fg = FakeGeom1()
        fg.ptr = fg.ptr_type(1)
        del fg
        # A NULL pointer isn't passed to the destructor.
        fg = FakeGeom2()
        fg.ptr = None
        del fg
        self.assertFalse(destructor_mock.called)
        # The destructor is called if set.
        fg = FakeGeom2()
        ptr = fg.ptr_type(ctypes.c_float(1.))
        fg.ptr = ptr
        del fg
        destructor_mock.assert_called_with(ptr)
    def test_destructor_catches_importerror(self):
        # A destructor raising ImportError during interpreter shutdown must
        # not propagate out of __del__.
        class FakeGeom(CPointerBase):
            destructor = mock.Mock(side_effect=ImportError)
        fg = FakeGeom()
        fg.ptr = fg.ptr_type(1)
        del fg
| bsd-3-clause |
google/vroom | vroom/__main__.py | 4 | 1977 | """The vroom test runner."""
import os
import signal
import subprocess
import sys
import vroom.args
import vroom.color
import vroom.output
import vroom.runner
import vroom.vim
def main(argv=None):
  """Run vroom: parse arguments, optionally murder stray vrooms, run tests.

  Args:
    argv: Command-line argument list (defaults to sys.argv).
  Returns:
    Integer exit status: 0 on success, 1 on bad arguments or when the
    process list can't be read, 2 when the vim server quits unexpectedly,
    3 when at least one test file failed.
  """
  if argv is None:
    argv = sys.argv
  try:
    args = vroom.args.Parse(argv[1:])
  except ValueError as e:
    sys.stderr.write('%s\n' % ', '.join(e.args))
    return 1

  if args.murder:
    # Kill a stray vroom process (by SIGKILL), then ask the vim server to
    # shut down gracefully via --remote-expr.
    try:
      output = subprocess.check_output(['ps', '-A']).decode('utf-8')
    except subprocess.CalledProcessError:
      sys.stdout.write("Can't find running processes.\n")
      return 1
    for line in output.splitlines():
      if line.endswith('vroom'):
        pid = int(line.split(None, 1)[0])
        # ARE YOU SUICIDAL?!
        if pid != os.getpid():
          sys.stdout.write('Killing a vroom: %s\n' % line)
          os.kill(pid, signal.SIGKILL)
          break
    else:
      # Loop completed without break: nothing (other than us) to kill.
      sys.stdout.write('No running vrooms found.\n')
      return 0
    end = 'VroomEnd()'
    kill = ['vim', '--servername', args.servername, '--remote-expr', end]
    sys.stdout.write("I hope you're happy.\n")
    return subprocess.call(kill)

  dirty = False
  writers = []
  try:
    for filename in args.filenames:
      with open(filename) as f:
        runner = vroom.runner.Vroom(filename, args)
        writers.append(runner(f))
        if runner.dirty:
          dirty = True
  except vroom.vim.ServerQuit as e:
    # If the vim server process fails, the details are probably on stderr, so hope
    # for the best and exit without shell reset.
    sys.stderr.write('Exception: {}\n'.format(e))
    return 2
  if dirty:
    # Running vim in a process can screw with shell line endings. Reset terminal.
    subprocess.call(['reset'])
  for writer in writers:
    writer.Write()
  vroom.output.WriteBackmatter(writers, args)
  failed_tests = [w for w in writers if w.Status() != vroom.output.STATUS.PASS]
  if failed_tests:
    return 3
  # Explicit success status: every other exit path returns a code, and the
  # implicit None also exited 0 via sys.exit, so this is backward-compatible.
  return 0
# Script entry point: propagate main()'s status code to the shell.
if __name__ == '__main__':
  sys.exit(main())
| apache-2.0 |
Drooids/odoo | openerp/addons/base/ir/ir_actions.py | 174 | 60020 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from functools import partial
import logging
import operator
import os
import time
import datetime
import dateutil
import openerp
from openerp import SUPERUSER_ID
from openerp import tools
from openerp import workflow
import openerp.api
from openerp.osv import fields, osv
from openerp.osv.orm import browse_record
import openerp.report.interface
from openerp.report.report_sxw import report_sxw, report_rml
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import openerp.workflow
_logger = logging.getLogger(__name__)


class actions(osv.osv):
    # Base table shared by every action type (ir_actions).
    _name = 'ir.actions.actions'
    _table = 'ir_actions'
    _order = 'name'
    _columns = {
        'name': fields.char('Name', required=True),
        'type': fields.char('Action Type', required=True),
        'usage': fields.char('Action Usage'),
        'help': fields.text('Action description',
                            help='Optional help text for the users with a description of the target view, such as its usage and purpose.',
                            translate=True),
    }
    _defaults = {
        'usage': lambda *a: False,
    }

    def unlink(self, cr, uid, ids, context=None):
        """Delete the given actions, first removing the ir.actions.todo
        records pointing at them.

        There is no DB-level ondelete cascade from ir_actions_todo to
        ir_actions, so the dependent todos must be cleaned up manually.
        """
        todo_model = self.pool.get('ir.actions.todo')
        if not ids:
            return True
        if isinstance(ids, (int, long)):
            ids = [ids]
        dangling_todo_ids = todo_model.search(cr, uid, [('action_id', 'in', ids)], context=context)
        todo_model.unlink(cr, uid, dangling_todo_ids, context=context)
        return super(actions, self).unlink(cr, uid, ids, context=context)
class ir_actions_report_xml(osv.osv):
    # Report actions: modern QWeb reports plus the deprecated RML/SXW engine.
    def _report_content(self, cursor, user, ids, name, arg, context=None):
        """Getter for the report_*_content function fields.

        Prefer the blob stored in <name>_data; if empty, read the file whose
        path is held in the companion column (``name`` minus the 8-character
        '_content' suffix, e.g. 'report_rml_content' -> 'report_rml').
        """
        res = {}
        for report in self.browse(cursor, user, ids, context=context):
            data = report[name + '_data']
            if not data and report[name[:-8]]:
                fp = None
                try:
                    fp = tools.file_open(report[name[:-8]], mode='rb')
                    data = fp.read()
                except:
                    # Best effort: a missing/unreadable file yields False.
                    data = False
                finally:
                    if fp:
                        fp.close()
            res[report.id] = data
        return res
    def _report_content_inv(self, cursor, user, id, name, value, arg, context=None):
        # Inverse of _report_content: persist the value into <name>_data.
        self.write(cursor, user, id, {name+'_data': value}, context=context)
    def _report_sxw(self, cursor, user, ids, name, arg, context=None):
        # Derive the .sxw path from the .rml path (same basename convention).
        res = {}
        for report in self.browse(cursor, user, ids, context=context):
            if report.report_rml:
                res[report.id] = report.report_rml.replace('.rml', '.sxw')
            else:
                res[report.id] = False
        return res
    def _lookup_report(self, cr, name):
        """
        Look up a report definition.

        Returns either the report_name string (QWeb reports, rendered by the
        'report' model) or an instantiated legacy report object.
        """
        opj = os.path.join
        # First lookup in the deprecated place, because if the report definition
        # has not been updated, it is more likely the correct definition is there.
        # Only reports with custom parser specified in Python are still there.
        if 'report.' + name in openerp.report.interface.report_int._reports:
            new_report = openerp.report.interface.report_int._reports['report.' + name]
        else:
            cr.execute("SELECT * FROM ir_act_report_xml WHERE report_name=%s", (name,))
            r = cr.dictfetchone()
            if r:
                if r['report_type'] in ['qweb-pdf', 'qweb-html']:
                    # QWeb reports are identified by name only; rendering is
                    # delegated to the 'report' model (see render_report).
                    return r['report_name']
                elif r['report_rml'] or r['report_rml_content_data']:
                    if r['parser']:
                        kwargs = { 'parser': operator.attrgetter(r['parser'])(openerp.addons) }
                    else:
                        kwargs = {}
                    new_report = report_sxw('report.'+r['report_name'], r['model'],
                            opj('addons',r['report_rml'] or '/'), header=r['header'], register=False, **kwargs)
                elif r['report_xsl'] and r['report_xml']:
                    new_report = report_rml('report.'+r['report_name'], r['model'],
                            opj('addons',r['report_xml']),
                            r['report_xsl'] and opj('addons',r['report_xsl']), register=False)
                else:
                    raise Exception, "Unhandled report type: %s" % r
            else:
                raise Exception, "Required report does not exist: %s" % name
        return new_report
    def render_report(self, cr, uid, res_ids, name, data, context=None):
        """
        Look up a report definition and render the report for the provided IDs.

        Returns a (content, format) pair, where format is 'html' or 'pdf' for
        QWeb reports; legacy reports return whatever their create() returns.
        """
        new_report = self._lookup_report(cr, name)
        if isinstance(new_report, (str, unicode)): # Qweb report
            # The only case where a QWeb report is rendered with this method occurs when running
            # yml tests originally written for RML reports.
            if openerp.tools.config['test_enable'] and not tools.config['test_report_directory']:
                # Only generate the pdf when a destination folder has been provided.
                return self.pool['report'].get_html(cr, uid, res_ids, new_report, data=data, context=context), 'html'
            else:
                return self.pool['report'].get_pdf(cr, uid, res_ids, new_report, data=data, context=context), 'pdf'
        else:
            return new_report.create(cr, uid, res_ids, data, context)
    _name = 'ir.actions.report.xml'
    _inherit = 'ir.actions.actions'
    _table = 'ir_act_report_xml'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'
    _columns = {
        'type': fields.char('Action Type', required=True),
        'name': fields.char('Name', required=True, translate=True),
        'model': fields.char('Model', required=True),
        'report_type': fields.selection([('qweb-pdf', 'PDF'),
                    ('qweb-html', 'HTML'),
                    ('controller', 'Controller'),
                    ('pdf', 'RML pdf (deprecated)'),
                    ('sxw', 'RML sxw (deprecated)'),
                    ('webkit', 'Webkit (deprecated)'),
                    ], 'Report Type', required=True, help="HTML will open the report directly in your browser, PDF will use wkhtmltopdf to render the HTML into a PDF file and let you download it, Controller allows you to define the url of a custom controller outputting any kind of report."),
        'report_name': fields.char('Template Name', required=True, help="For QWeb reports, name of the template used in the rendering. The method 'render_html' of the model 'report.template_name' will be called (if any) to give the html. For RML reports, this is the LocalService name."),
        'groups_id': fields.many2many('res.groups', 'res_groups_report_rel', 'uid', 'gid', 'Groups'),
        # options
        'multi': fields.boolean('On Multiple Doc.', help="If set to true, the action will not be displayed on the right toolbar of a form view."),
        'attachment_use': fields.boolean('Reload from Attachment', help='If you check this, then the second time the user prints with same attachment name, it returns the previous report.'),
        'attachment': fields.char('Save as Attachment Prefix', help='This is the filename of the attachment used to store the printing result. Keep empty to not save the printed reports. You can use a python expression with the object and time variables.'),
        # Deprecated rml stuff
        'usage': fields.char('Action Usage'),
        'header': fields.boolean('Add RML Header', help="Add or not the corporate RML header"),
        'parser': fields.char('Parser Class'),
        'auto': fields.boolean('Custom Python Parser'),
        'report_xsl': fields.char('XSL Path'),
        'report_xml': fields.char('XML Path'),
        'report_rml': fields.char('Main Report File Path/controller', help="The path to the main report file/controller (depending on Report Type) or NULL if the content is in another data field"),
        'report_file': fields.related('report_rml', type="char", required=False, readonly=False, string='Report File', help="The path to the main report file (depending on Report Type) or NULL if the content is in another field", store=True),
        'report_sxw': fields.function(_report_sxw, type='char', string='SXW Path'),
        'report_sxw_content_data': fields.binary('SXW Content'),
        'report_rml_content_data': fields.binary('RML Content'),
        'report_sxw_content': fields.function(_report_content, fnct_inv=_report_content_inv, type='binary', string='SXW Content',),
        'report_rml_content': fields.function(_report_content, fnct_inv=_report_content_inv, type='binary', string='RML Content'),
    }
    _defaults = {
        'type': 'ir.actions.report.xml',
        'multi': False,
        'auto': True,
        'header': True,
        'report_sxw_content': False,
        'report_type': 'pdf',
        'attachment': False,
    }
class ir_actions_act_window(osv.osv):
    # Window actions: open a model in one or more views (tree, form, ...).
    _name = 'ir.actions.act_window'
    _table = 'ir_act_window'
    _inherit = 'ir.actions.actions'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'
    def _check_model(self, cr, uid, ids, context=None):
        # Constraint helper: the destination model (and the optional source
        # model) must be registered in the pool.
        for action in self.browse(cr, uid, ids, context):
            if action.res_model not in self.pool:
                return False
            if action.src_model and action.src_model not in self.pool:
                return False
        return True
    def _invalid_model_msg(self, cr, uid, ids, context=None):
        # Translated message used by the constraint below.
        return _('Invalid model name in the action definition.')
    _constraints = [
        (_check_model, _invalid_model_msg, ['res_model','src_model'])
    ]
    def _views_get_fnc(self, cr, uid, ids, name, arg, context=None):
        """Returns an ordered list of the specific view modes that should be
        enabled when displaying the result of this action, along with the
        ID of the specific view to use for each mode, if any were required.
        This function hides the logic of determining the precedence between
        the view_modes string, the view_ids o2m, and the view_id m2o that can
        be set on the action.
        :rtype: dict in the form { action_id: list of pairs (tuples) }
        :return: { action_id: [(view_id, view_mode), ...], ... }, where view_mode
                 is one of the possible values for ir.ui.view.type and view_id
                 is the ID of a specific view to use for this mode, or False for
                 the default one.
        """
        res = {}
        for act in self.browse(cr, uid, ids):
            res[act.id] = [(view.view_id.id, view.view_mode) for view in act.view_ids]
            view_ids_modes = [view.view_mode for view in act.view_ids]
            modes = act.view_mode.split(',')
            missing_modes = [mode for mode in modes if mode not in view_ids_modes]
            if missing_modes:
                if act.view_id and act.view_id.type in missing_modes:
                    # reorder missing modes to put view_id first if present
                    missing_modes.remove(act.view_id.type)
                    res[act.id].append((act.view_id.id, act.view_id.type))
                res[act.id].extend([(False, mode) for mode in missing_modes])
        return res
    def _search_view(self, cr, uid, ids, name, arg, context=None):
        # Serialized fields_view_get() of the action's search view (or the
        # model's default search view when none is set).
        res = {}
        for act in self.browse(cr, uid, ids, context=context):
            field_get = self.pool[act.res_model].fields_view_get(cr, uid,
                act.search_view_id and act.search_view_id.id or False,
                'search', context=context)
            res[act.id] = str(field_get)
        return res
    _columns = {
        'name': fields.char('Action Name', required=True, translate=True),
        'type': fields.char('Action Type', required=True),
        'view_id': fields.many2one('ir.ui.view', 'View Ref.', ondelete='set null'),
        'domain': fields.char('Domain Value',
            help="Optional domain filtering of the destination data, as a Python expression"),
        'context': fields.char('Context Value', required=True,
            help="Context dictionary as Python expression, empty by default (Default: {})"),
        'res_id': fields.integer('Record ID', help="Database ID of record to open in form view, when ``view_mode`` is set to 'form' only"),
        'res_model': fields.char('Destination Model', required=True,
            help="Model name of the object to open in the view window"),
        'src_model': fields.char('Source Model',
            help="Optional model name of the objects on which this action should be visible"),
        'target': fields.selection([('current','Current Window'),('new','New Window'),('inline','Inline Edit'),('inlineview','Inline View')], 'Target Window'),
        'view_mode': fields.char('View Mode', required=True,
            help="Comma-separated list of allowed view modes, such as 'form', 'tree', 'calendar', etc. (Default: tree,form)"),
        'view_type': fields.selection((('tree','Tree'),('form','Form')), string='View Type', required=True,
            help="View type: Tree type to use for the tree view, set to 'tree' for a hierarchical tree view, or 'form' for a regular list view"),
        'usage': fields.char('Action Usage',
            help="Used to filter menu and home actions from the user form."),
        'view_ids': fields.one2many('ir.actions.act_window.view', 'act_window_id', 'Views'),
        'views': fields.function(_views_get_fnc, type='binary', string='Views',
            help="This function field computes the ordered list of views that should be enabled " \
                "when displaying the result of an action, federating view mode, views and " \
                "reference view. The result is returned as an ordered list of pairs (view_id,view_mode)."),
        'limit': fields.integer('Limit', help='Default limit for the list view'),
        'auto_refresh': fields.integer('Auto-Refresh',
            help='Add an auto-refresh on the view'),
        'groups_id': fields.many2many('res.groups', 'ir_act_window_group_rel',
            'act_id', 'gid', 'Groups'),
        'search_view_id': fields.many2one('ir.ui.view', 'Search View Ref.'),
        'filter': fields.boolean('Filter'),
        'auto_search':fields.boolean('Auto Search'),
        'search_view' : fields.function(_search_view, type='text', string='Search View'),
        'multi': fields.boolean('Restrict to lists', help="If checked and the action is bound to a model, it will only appear in the More menu on list views"),
    }
    _defaults = {
        'type': 'ir.actions.act_window',
        'view_type': 'form',
        'view_mode': 'tree,form',
        'context': '{}',
        'limit': 80,
        'target': 'current',
        'auto_refresh': 0,
        'auto_search':True,
        'multi': False,
    }
    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        """ call the method get_empty_list_help of the model and set the window action help message
        """
        ids_int = isinstance(ids, (int, long))
        if ids_int:
            ids = [ids]
        results = super(ir_actions_act_window, self).read(cr, uid, ids, fields=fields, context=context, load=load)
        if not fields or 'help' in fields:
            for res in results:
                model = res.get('res_model')
                if model and self.pool.get(model):
                    # let the target model enrich/replace the raw help text
                    ctx = dict(context or {})
                    res['help'] = self.pool[model].get_empty_list_help(cr, uid, res.get('help', ""), context=ctx)
        if ids_int:
            # preserve scalar-in/scalar-out calling convention
            return results[0]
        return results
    def for_xml_id(self, cr, uid, module, xml_id, context=None):
        """ Returns the act_window object created for the provided xml_id
        :param module: the module the act_window originates in
        :param xml_id: the namespace-less id of the action (the @id
                       attribute from the XML file)
        :return: A read() view of the ir.actions.act_window
        """
        dataobj = self.pool.get('ir.model.data')
        data_id = dataobj._get_id (cr, SUPERUSER_ID, module, xml_id)
        res_id = dataobj.browse(cr, uid, data_id, context).res_id
        return self.read(cr, uid, [res_id], [], context)[0]
# (technical value, label) pairs for the view-mode selection fields below.
VIEW_TYPES = [
    ('tree', 'Tree'),
    ('form', 'Form'),
    ('graph', 'Graph'),
    ('calendar', 'Calendar'),
    ('gantt', 'Gantt'),
    ('kanban', 'Kanban')]
class ir_actions_act_window_view(osv.osv):
    # Ordered mapping between a window action and its specific views.
    _name = 'ir.actions.act_window.view'
    _table = 'ir_act_window_view'
    _rec_name = 'view_id'
    _order = 'sequence'
    _columns = {
        'sequence': fields.integer('Sequence'),
        'view_id': fields.many2one('ir.ui.view', 'View'),
        'view_mode': fields.selection(VIEW_TYPES, string='View Type', required=True),
        'act_window_id': fields.many2one('ir.actions.act_window', 'Action', ondelete='cascade'),
        'multi': fields.boolean('On Multiple Doc.',
            help="If set to true, the action will not be displayed on the right toolbar of a form view."),
    }
    _defaults = {
        'multi': False,
    }
    def _auto_init(self, cr, context=None):
        # Enforce at most one view per (action, mode) with a unique index,
        # created manually because the ORM has no declarative unique index.
        super(ir_actions_act_window_view, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'act_window_view_unique_mode_per_action\'')
        if not cr.fetchone():
            cr.execute('CREATE UNIQUE INDEX act_window_view_unique_mode_per_action ON ir_act_window_view (act_window_id, view_mode)')
class ir_actions_act_window_close(osv.osv):
    # Marker action: instructs the client to close the current window.
    # Shares the base ir_actions table; only the type default differs.
    _name = 'ir.actions.act_window_close'
    _inherit = 'ir.actions.actions'
    _table = 'ir_actions'
    _defaults = {
        'type': 'ir.actions.act_window_close',
    }
class ir_actions_act_url(osv.osv):
    # URL actions: open an arbitrary URL in this window or a new one.
    _name = 'ir.actions.act_url'
    _table = 'ir_act_url'
    _inherit = 'ir.actions.actions'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'
    _columns = {
        'name': fields.char('Action Name', required=True, translate=True),
        'type': fields.char('Action Type', required=True),
        'url': fields.text('Action URL',required=True),
        'target': fields.selection((
            ('new', 'New Window'),
            ('self', 'This Window')),
            'Action Target', required=True
        )
    }
    _defaults = {
        'type': 'ir.actions.act_url',
        'target': 'new'
    }
class ir_actions_server(osv.osv):
    """ Server actions model. Server actions work on a base model and offer
    various types of actions that can be executed automatically, for example
    using base action rules, or manually, by adding the action in the 'More'
    contextual menu.
    Since OpenERP 8.0 a 'Create Menu Action' button is available on the
    action form view. It creates an entry in the More menu of the base model.
    This allows to create server actions and run them in mass mode easily through
    the interface.
    The available actions are :
    - 'Execute Python Code': a block of python code that will be executed
    - 'Trigger a Workflow Signal': send a signal to a workflow
    - 'Run a Client Action': choose a client action to launch
    - 'Create or Copy a new Record': create a new record with new values, or
      copy an existing record in your database
    - 'Write on a Record': update the values of a record
    - 'Execute several actions': define an action that triggers several other
      server actions
    """
    _name = 'ir.actions.server'
    _table = 'ir_act_server'
    _inherit = 'ir.actions.actions'
    _sequence = 'ir_actions_id_seq'
    # Actions execute in sequence order (lower sequence runs first).
    _order = 'sequence,name'
def _select_objects(self, cr, uid, context=None):
model_pool = self.pool.get('ir.model')
ids = model_pool.search(cr, uid, [], limit=None)
res = model_pool.read(cr, uid, ids, ['model', 'name'])
return [(r['model'], r['name']) for r in res] + [('', '')]
def _get_states(self, cr, uid, context=None):
""" Override me in order to add new states in the server action. Please
note that the added key length should not be higher than already-existing
ones. """
return [('code', 'Execute Python Code'),
('trigger', 'Trigger a Workflow Signal'),
('client_action', 'Run a Client Action'),
('object_create', 'Create or Copy a new Record'),
('object_write', 'Write on a Record'),
('multi', 'Execute several actions')]
    def _get_states_wrapper(self, cr, uid, context=None):
        # Indirection so subclasses overriding _get_states are honored by the
        # 'state' selection field declared below.
        return self._get_states(cr, uid, context)
_columns = {
'name': fields.char('Action Name', required=True, translate=True),
'condition': fields.char('Condition',
help="Condition verified before executing the server action. If it "
"is not verified, the action will not be executed. The condition is "
"a Python expression, like 'object.list_price > 5000'. A void "
"condition is considered as always True. Help about python expression "
"is given in the help tab."),
'state': fields.selection(_get_states_wrapper, 'Action To Do', required=True,
help="Type of server action. The following values are available:\n"
"- 'Execute Python Code': a block of python code that will be executed\n"
"- 'Trigger a Workflow Signal': send a signal to a workflow\n"
"- 'Run a Client Action': choose a client action to launch\n"
"- 'Create or Copy a new Record': create a new record with new values, or copy an existing record in your database\n"
"- 'Write on a Record': update the values of a record\n"
"- 'Execute several actions': define an action that triggers several other server actions\n"
"- 'Send Email': automatically send an email (available in email_template)"),
'usage': fields.char('Action Usage'),
'type': fields.char('Action Type', required=True),
# Generic
'sequence': fields.integer('Sequence',
help="When dealing with multiple actions, the execution order is "
"based on the sequence. Low number means high priority."),
'model_id': fields.many2one('ir.model', 'Base Model', required=True, ondelete='cascade',
help="Base model on which the server action runs."),
'model_name': fields.related('model_id', 'model', type='char',
string='Model Name', readonly=True),
'menu_ir_values_id': fields.many2one('ir.values', 'More Menu entry', readonly=True,
help='More menu entry.', copy=False),
# Client Action
'action_id': fields.many2one('ir.actions.actions', 'Client Action',
help="Select the client action that has to be executed."),
# Python code
'code': fields.text('Python Code',
help="Write Python code that the action will execute. Some variables are "
"available for use; help about pyhon expression is given in the help tab."),
# Workflow signal
'use_relational_model': fields.selection([('base', 'Use the base model of the action'),
('relational', 'Use a relation field on the base model')],
string='Target Model', required=True),
'wkf_transition_id': fields.many2one('workflow.transition', string='Signal to Trigger',
help="Select the workflow signal to trigger."),
'wkf_model_id': fields.many2one('ir.model', 'Target Model',
help="The model that will receive the workflow signal. Note that it should have a workflow associated with it."),
'wkf_model_name': fields.related('wkf_model_id', 'model', type='char', string='Target Model Name', store=True, readonly=True),
'wkf_field_id': fields.many2one('ir.model.fields', string='Relation Field',
oldname='trigger_obj_id',
help="The field on the current object that links to the target object record (must be a many2one, or an integer field with the record ID)"),
# Multi
'child_ids': fields.many2many('ir.actions.server', 'rel_server_actions',
'server_id', 'action_id',
string='Child Actions',
help='Child server actions that will be executed. Note that the last return returned action value will be used as global return value.'),
# Create/Copy/Write
'use_create': fields.selection([('new', 'Create a new record in the Base Model'),
('new_other', 'Create a new record in another model'),
('copy_current', 'Copy the current record'),
('copy_other', 'Choose and copy a record in the database')],
string="Creation Policy", required=True,
help=""),
'crud_model_id': fields.many2one('ir.model', 'Target Model',
oldname='srcmodel_id',
help="Model for record creation / update. Set this field only to specify a different model than the base model."),
'crud_model_name': fields.related('crud_model_id', 'model', type='char',
string='Create/Write Target Model Name',
store=True, readonly=True),
'ref_object': fields.reference('Reference record', selection=_select_objects, size=128,
oldname='copy_object'),
'link_new_record': fields.boolean('Attach the new record',
help="Check this if you want to link the newly-created record "
"to the current record on which the server action runs."),
'link_field_id': fields.many2one('ir.model.fields', 'Link using field',
oldname='record_id',
help="Provide the field where the record id is stored after the operations."),
'use_write': fields.selection([('current', 'Update the current record'),
('expression', 'Update a record linked to the current record using python'),
('other', 'Choose and Update a record in the database')],
string='Update Policy', required=True,
help=""),
'write_expression': fields.char('Expression',
oldname='write_id',
help="Provide an expression that, applied on the current record, gives the field to update."),
'fields_lines': fields.one2many('ir.server.object.lines', 'server_id',
string='Value Mapping',
copy=True),
# Fake fields used to implement the placeholder assistant
'model_object_field': fields.many2one('ir.model.fields', string="Field",
help="Select target field from the related document model.\n"
"If it is a relationship field you will be able to select "
"a target field at the destination of the relationship."),
'sub_object': fields.many2one('ir.model', 'Sub-model', readonly=True,
help="When a relationship field is selected as first field, "
"this field shows the document model the relationship goes to."),
'sub_model_object_field': fields.many2one('ir.model.fields', 'Sub-field',
help="When a relationship field is selected as first field, "
"this field lets you select the target field within the "
"destination document model (sub-model)."),
'copyvalue': fields.char('Placeholder Expression', help="Final placeholder expression, to be copy-pasted in the desired template field."),
# Fake fields used to implement the ID finding assistant
'id_object': fields.reference('Record', selection=_select_objects, size=128),
'id_value': fields.char('Record ID'),
}
_defaults = {
'state': 'code',
'condition': 'True',
'type': 'ir.actions.server',
'sequence': 5,
'code': """# Available locals:
# - time, datetime, dateutil: Python libraries
# - env: Odoo Environement
# - model: Model of the record on which the action is triggered
# - object: Record on which the action is triggered if there is one, otherwise None
# - workflow: Workflow engine
# - Warning: Warning Exception to use with raise
# To return an action, assign: action = {...}""",
'use_relational_model': 'base',
'use_create': 'new',
'use_write': 'current',
}
    def _check_expression(self, cr, uid, expression, model_id, context):
        """ Check python expression (condition, write_expression). Each step of
        the path must be a valid many2one field, or an integer field for the last
        step.
        :param str expression: a python expression, beginning by 'obj' or 'object'
        :param int model_id: the base model of the server action
        :returns tuple: (is_valid, target_model_name, error_msg)
        """
        if not model_id:
            return (False, None, 'Your expression cannot be validated because the Base Model is not set.')
        # fetch current model
        current_model_name = self.pool.get('ir.model').browse(cr, uid, model_id, context).model
        # transform expression into a path that should look like 'object.many2onefield.many2onefield'
        path = expression.split('.')
        initial = path.pop(0)
        if initial not in ['obj', 'object']:
            return (False, None, 'Your expression should begin with obj or object.\nAn expression builder is available in the help tab.')
        # analyze path: walk each dotted step through the model's fields
        while path:
            step = path.pop(0)
            field = self.pool[current_model_name]._fields.get(step)
            if not field:
                return (False, None, 'Part of the expression (%s) is not recognized as a column in the model %s.' % (step, current_model_name))
            ftype = field.type
            if ftype not in ['many2one', 'int']:
                return (False, None, 'Part of the expression (%s) is not a valid column type (is %s, should be a many2one or an int)' % (step, ftype))
            if ftype == 'int' and path:
                # an integer (record id) field may only terminate the path
                return (False, None, 'Part of the expression (%s) is an integer field that is only allowed at the end of an expression' % (step))
            if ftype == 'many2one':
                # follow the relation into the next model
                current_model_name = field.comodel_name
        return (True, current_model_name, None)
def _check_write_expression(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.write_expression and record.model_id:
correct, model_name, message = self._check_expression(cr, uid, record.write_expression, record.model_id.id, context=context)
if not correct:
_logger.warning('Invalid expression: %s' % message)
return False
return True
_constraints = [
(_check_write_expression,
'Incorrect Write Record Expression',
['write_expression']),
(partial(osv.Model._check_m2m_recursion, field_name='child_ids'),
'Recursion found in child server actions',
['child_ids']),
]
def on_change_model_id(self, cr, uid, ids, model_id, wkf_model_id, crud_model_id, context=None):
""" When changing the action base model, reset workflow and crud config
to ease value coherence. """
values = {
'use_create': 'new',
'use_write': 'current',
'use_relational_model': 'base',
'wkf_model_id': model_id,
'wkf_field_id': False,
'crud_model_id': model_id,
}
if model_id:
values['model_name'] = self.pool.get('ir.model').browse(cr, uid, model_id, context).model
return {'value': values}
def on_change_wkf_wonfig(self, cr, uid, ids, use_relational_model, wkf_field_id, wkf_model_id, model_id, context=None):
""" Update workflow type configuration
- update the workflow model (for base (model_id) /relational (field.relation))
- update wkf_transition_id to False if workflow model changes, to force
the user to choose a new one
"""
values = {}
if use_relational_model == 'relational' and wkf_field_id:
field = self.pool['ir.model.fields'].browse(cr, uid, wkf_field_id, context=context)
new_wkf_model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', field.relation)], context=context)[0]
values['wkf_model_id'] = new_wkf_model_id
else:
values['wkf_model_id'] = model_id
return {'value': values}
def on_change_wkf_model_id(self, cr, uid, ids, wkf_model_id, context=None):
""" When changing the workflow model, update its stored name also """
wkf_model_name = False
if wkf_model_id:
wkf_model_name = self.pool.get('ir.model').browse(cr, uid, wkf_model_id, context).model
values = {'wkf_transition_id': False, 'wkf_model_name': wkf_model_name}
return {'value': values}
def on_change_crud_config(self, cr, uid, ids, state, use_create, use_write, ref_object, crud_model_id, model_id, context=None):
""" Wrapper on CRUD-type (create or write) on_change """
if state == 'object_create':
return self.on_change_create_config(cr, uid, ids, use_create, ref_object, crud_model_id, model_id, context=context)
elif state == 'object_write':
return self.on_change_write_config(cr, uid, ids, use_write, ref_object, crud_model_id, model_id, context=context)
else:
return {}
def on_change_create_config(self, cr, uid, ids, use_create, ref_object, crud_model_id, model_id, context=None):
""" When changing the object_create type configuration:
- `new` and `copy_current`: crud_model_id is the same as base model
- `new_other`: user choose crud_model_id
- `copy_other`: disassemble the reference object to have its model
- if the target model has changed, then reset the link field that is
probably not correct anymore
"""
values = {}
if use_create == 'new':
values['crud_model_id'] = model_id
elif use_create == 'new_other':
pass
elif use_create == 'copy_current':
values['crud_model_id'] = model_id
elif use_create == 'copy_other' and ref_object:
ref_model, ref_id = ref_object.split(',')
ref_model_id = self.pool['ir.model'].search(cr, uid, [('model', '=', ref_model)], context=context)[0]
values['crud_model_id'] = ref_model_id
if values.get('crud_model_id') != crud_model_id:
values['link_field_id'] = False
return {'value': values}
def on_change_write_config(self, cr, uid, ids, use_write, ref_object, crud_model_id, model_id, context=None):
""" When changing the object_write type configuration:
- `current`: crud_model_id is the same as base model
- `other`: disassemble the reference object to have its model
- `expression`: has its own on_change, nothing special here
"""
values = {}
if use_write == 'current':
values['crud_model_id'] = model_id
elif use_write == 'other' and ref_object:
ref_model, ref_id = ref_object.split(',')
ref_model_id = self.pool['ir.model'].search(cr, uid, [('model', '=', ref_model)], context=context)[0]
values['crud_model_id'] = ref_model_id
elif use_write == 'expression':
pass
if values.get('crud_model_id') != crud_model_id:
values['link_field_id'] = False
return {'value': values}
def on_change_write_expression(self, cr, uid, ids, write_expression, model_id, context=None):
""" Check the write_expression and update crud_model_id accordingly """
values = {}
if write_expression:
valid, model_name, message = self._check_expression(cr, uid, write_expression, model_id, context=context)
else:
valid, model_name, message = True, None, False
if model_id:
model_name = self.pool['ir.model'].browse(cr, uid, model_id, context).model
if not valid:
return {
'warning': {
'title': 'Incorrect expression',
'message': message or 'Invalid expression',
}
}
if model_name:
ref_model_id = self.pool['ir.model'].search(cr, uid, [('model', '=', model_name)], context=context)[0]
values['crud_model_id'] = ref_model_id
return {'value': values}
return {'value': {}}
def on_change_crud_model_id(self, cr, uid, ids, crud_model_id, context=None):
""" When changing the CRUD model, update its stored name also """
crud_model_name = False
if crud_model_id:
crud_model_name = self.pool.get('ir.model').browse(cr, uid, crud_model_id, context).model
values = {'link_field_id': False, 'crud_model_name': crud_model_name}
return {'value': values}
def _build_expression(self, field_name, sub_field_name):
""" Returns a placeholder expression for use in a template field,
based on the values provided in the placeholder assistant.
:param field_name: main field name
:param sub_field_name: sub field name (M2O)
:return: final placeholder expression
"""
expression = ''
if field_name:
expression = "object." + field_name
if sub_field_name:
expression += "." + sub_field_name
return expression
    def onchange_sub_model_object_value_field(self, cr, uid, ids, model_object_field, sub_model_object_field=False, context=None):
        """Update the placeholder assistant fields when the main field changes.

        For relational fields (m2o/o2m/m2m) the co-model is looked up and
        exposed as ``sub_object`` so a sub-field can be picked; for simple
        fields only ``copyvalue`` (the placeholder expression) is computed.

        :param model_object_field: id of the selected ir.model.fields record
        :param sub_model_object_field: id of the selected sub-field, if any
        :return: on_change dict with sub_object / copyvalue /
                 sub_model_object_field values
        """
        result = {
            'sub_object': False,
            'copyvalue': False,
            'sub_model_object_field': False,
        }
        if model_object_field:
            fields_obj = self.pool.get('ir.model.fields')
            field_value = fields_obj.browse(cr, uid, model_object_field, context)
            if field_value.ttype in ['many2one', 'one2many', 'many2many']:
                # relational field: find the co-model to browse sub-fields on
                res_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', field_value.relation)], context=context)
                sub_field_value = False
                if sub_model_object_field:
                    sub_field_value = fields_obj.browse(cr, uid, sub_model_object_field, context)
                if res_ids:
                    result.update({
                        'sub_object': res_ids[0],
                        'copyvalue': self._build_expression(field_value.name, sub_field_value and sub_field_value.name or False),
                        'sub_model_object_field': sub_model_object_field or False,
                    })
            else:
                # simple field: expression is just 'object.<field>'
                result.update({
                    'copyvalue': self._build_expression(field_value.name, False),
                })
        return {'value': result}
def onchange_id_object(self, cr, uid, ids, id_object, context=None):
if id_object:
ref_model, ref_id = id_object.split(',')
return {'value': {'id_value': ref_id}}
return {'value': {'id_value': False}}
    def create_action(self, cr, uid, ids, context=None):
        """ Create a contextual action for each of the server actions.

        An ir.values entry with key2 'client_action_multi' makes the server
        action appear in the "More" menu of its model's views.  The entry is
        created as SUPERUSER_ID (presumably because ir.values writes are
        restricted for regular users -- confirm) and its id is stored on the
        action so :meth:`unlink_action` can remove it later.
        """
        for action in self.browse(cr, uid, ids, context=context):
            ir_values_id = self.pool.get('ir.values').create(cr, SUPERUSER_ID, {
                'name': _('Run %s') % action.name,
                'model': action.model_id.model,
                'key2': 'client_action_multi',
                'value': "ir.actions.server,%s" % action.id,
            }, context)
            # remember the ir.values entry for later deletion
            action.write({
                'menu_ir_values_id': ir_values_id,
            })
        return True
    def unlink_action(self, cr, uid, ids, context=None):
        """ Remove the contextual actions created for the server actions.

        Deletes the ir.values entry created by :meth:`create_action`, if any.
        Any failure is re-raised as a user-facing warning; note the original
        exception details are discarded here.
        """
        for action in self.browse(cr, uid, ids, context=context):
            if action.menu_ir_values_id:
                try:
                    self.pool.get('ir.values').unlink(cr, SUPERUSER_ID, action.menu_ir_values_id.id, context)
                except Exception:
                    raise osv.except_osv(_('Warning'), _('Deletion of the action record failed.'))
        return True
    def run_action_client_action(self, cr, uid, action, eval_context=None, context=None):
        """Return the linked client action's definition so the web client
        executes it; raises when no action is configured."""
        if not action.action_id:
            raise osv.except_osv(_('Error'), _("Please specify an action to launch!"))
        # read() through the pool of the action's own type (act_window, ...)
        return self.pool[action.action_id.type].read(cr, uid, [action.action_id.id], context=context)[0]
    def run_action_code_multi(self, cr, uid, action, eval_context=None, context=None):
        """Execute the action's python code inside ``eval_context``.

        NOTE(review): ``eval`` here is presumably openerp.tools.safe_eval
        imported under that name at the top of the file -- the ``mode`` and
        ``nocopy`` keywords are safe_eval's, not the builtin's; confirm.
        If the executed code binds an ``action`` variable, that value is
        returned to the client as the next action to run.
        """
        eval(action.code.strip(), eval_context, mode="exec", nocopy=True) # nocopy allows to return 'action'
        if 'action' in eval_context:
            return eval_context['action']
    def run_action_trigger(self, cr, uid, action, eval_context=None, context=None):
        """ Trigger a workflow signal, depending on the use_relational_model:

         - `base`: base_model_pool.signal_workflow(cr, uid, context.get('active_id'), <TRIGGER_NAME>)
         - `relational`: find the related model and object, using the relational
           field, then target_model_pool.signal_workflow(cr, uid, target_id, <TRIGGER_NAME>)
        """
        # weird signature and calling -> no self.env, use action param's
        record = action.env[action.model_id.model].browse(context['active_id'])
        if action.use_relational_model == 'relational':
            # follow the relational field to reach the record to signal
            record = getattr(record, action.wkf_field_id.name)
            if not isinstance(record, openerp.models.BaseModel):
                # field returned a raw id rather than a recordset; re-browse it
                record = action.env[action.wkf_model_id.model].browse(record)

        record.signal_workflow(action.wkf_transition_id.signal)
def run_action_multi(self, cr, uid, action, eval_context=None, context=None):
res = False
for act in action.child_ids:
result = self.run(cr, uid, [act.id], context=context)
if result:
res = result
return res
    def run_action_object_write(self, cr, uid, action, eval_context=None, context=None):
        """ Write server action.

         - 1. evaluate the value mapping
         - 2. depending on the write configuration:

          - `current`: id = active_id
          - `other`: id = from reference object
          - `expression`: id = from expression evaluation

        NOTE(review): if ``use_write`` ever held a value outside these three
        options, ``model``/``ref_id`` would be unbound and this would raise
        NameError; the selection field presumably prevents that -- confirm.
        """
        res = {}
        for exp in action.fields_lines:
            # eval_value returns {line_id: value}
            res[exp.col1.name] = exp.eval_value(eval_context=eval_context)[exp.id]

        if action.use_write == 'current':
            model = action.model_id.model
            ref_id = context.get('active_id')
        elif action.use_write == 'other':
            model = action.crud_model_id.model
            ref_id = action.ref_object.id
        elif action.use_write == 'expression':
            model = action.crud_model_id.model
            # eval is presumably safe_eval (see run_action_code_multi)
            ref = eval(action.write_expression, eval_context)
            if isinstance(ref, browse_record):
                ref_id = getattr(ref, 'id')
            else:
                ref_id = int(ref)

        obj_pool = self.pool[model]
        obj_pool.write(cr, uid, [ref_id], res, context=context)
    def run_action_object_create(self, cr, uid, action, eval_context=None, context=None):
        """ Create and Copy server action.

         - 1. evaluate the value mapping
         - 2. depending on the write configuration:

          - `new`: new record in the base model
          - `copy_current`: copy the current record (id = active_id) + gives custom values
          - `new_other`: new record in target model
          - `copy_other`: copy the current record (id from reference object)
            + gives custom values

        NOTE(review): an unexpected ``use_create`` value would leave ``model``
        unbound (NameError); the selection field presumably prevents that.
        """
        res = {}
        for exp in action.fields_lines:
            # eval_value returns {line_id: value}
            res[exp.col1.name] = exp.eval_value(eval_context=eval_context)[exp.id]

        if action.use_create in ['new', 'copy_current']:
            model = action.model_id.model
        elif action.use_create in ['new_other', 'copy_other']:
            model = action.crud_model_id.model

        obj_pool = self.pool[model]
        if action.use_create == 'copy_current':
            ref_id = context.get('active_id')
            res_id = obj_pool.copy(cr, uid, ref_id, res, context=context)
        elif action.use_create == 'copy_other':
            ref_id = action.ref_object.id
            res_id = obj_pool.copy(cr, uid, ref_id, res, context=context)
        else:
            res_id = obj_pool.create(cr, uid, res, context=context)

        if action.link_new_record and action.link_field_id:
            # link the new record back onto the current (active_id) record
            self.pool[action.model_id.model].write(cr, uid, [context.get('active_id')], {action.link_field_id.name: res_id})
    def _get_eval_context(self, cr, uid, action, context=None):
        """ Prepare the context used when evaluating python code, like the
        condition or code server actions.

        ``object``/``obj`` hold the active record only when the caller's
        active_model matches the action's model; otherwise they are None.

        :param action: the current server action
        :type action: browse record
        :returns: dict -- evaluation context given to (safe_)eval """
        obj_pool = self.pool[action.model_id.model]
        env = openerp.api.Environment(cr, uid, context)
        model = env[action.model_id.model]
        obj = None
        if context.get('active_model') == action.model_id.model and context.get('active_id'):
            obj = model.browse(context['active_id'])
        return {
            # python libs
            'time': time,
            'datetime': datetime,
            'dateutil': dateutil,
            # orm
            'env': env,
            'model': model,
            'workflow': workflow,
            # Exceptions
            'Warning': openerp.exceptions.Warning,
            # record
            # TODO: When porting to master move badly named obj and object to
            # deprecated and define record (active_id) and records (active_ids)
            'object': obj,
            'obj': obj,
            # Deprecated use env or model instead
            'self': obj_pool,
            'pool': self.pool,
            'cr': cr,
            'uid': uid,
            'context': context,
            'user': env.user,
        }
    def run(self, cr, uid, ids, context=None):
        """ Runs the server action. For each server action, the condition is
        checked. Note that a void (``False``) condition is considered as always
        valid. If it is verified, the run_action_<STATE> method is called. This
        allows easy overriding of the server actions.

        A ``run_action_<STATE>_multi`` method takes precedence over the
        per-record ``run_action_<STATE>`` variant; the latter is invoked once
        per active_id with a dedicated run context.

        :param dict context: context should contain following keys

                             - active_id: id of the current object (single mode)
                             - active_model: current model that should equal the action's model

                             The following keys are optional:

                             - active_ids: ids of the current records (mass mode). If active_ids
                               and active_id are present, active_ids is given precedence.

        :return: an action_id to be executed, or False is finished correctly without
                 return action
        """
        if context is None:
            context = {}
        res = False
        for action in self.browse(cr, uid, ids, context):
            eval_context = self._get_eval_context(cr, uid, action, context=context)
            condition = action.condition
            if condition is False:
                # Void (aka False) conditions are considered as True
                condition = True
            if hasattr(self, 'run_action_%s_multi' % action.state):
                run_context = eval_context['context']
                # eval is presumably safe_eval (see run_action_code_multi)
                expr = eval(str(condition), eval_context)
                if not expr:
                    continue
                # call the multi method
                func = getattr(self, 'run_action_%s_multi' % action.state)
                res = func(cr, uid, action, eval_context=eval_context, context=run_context)

            elif hasattr(self, 'run_action_%s' % action.state):
                func = getattr(self, 'run_action_%s' % action.state)
                active_id = context.get('active_id')
                active_ids = context.get('active_ids', [active_id] if active_id else [])
                for active_id in active_ids:
                    # run context dedicated to a particular active_id
                    run_context = dict(context, active_ids=[active_id], active_id=active_id)
                    eval_context["context"] = run_context
                    expr = eval(str(condition), eval_context)
                    if not expr:
                        continue
                    # call the single method related to the action: run_action_<STATE>
                    res = func(cr, uid, action, eval_context=eval_context, context=run_context)
        return res
class ir_server_object_lines(osv.osv):
    """One field/value mapping line of a create/write server action.

    Each line maps a target field (col1) to either a literal value or a
    python expression evaluated in the action's eval context.
    """
    _name = 'ir.server.object.lines'
    _description = 'Server Action value mapping'
    _sequence = 'ir_actions_id_seq'

    _columns = {
        'server_id': fields.many2one('ir.actions.server', 'Related Server Action', ondelete='cascade'),
        'col1': fields.many2one('ir.model.fields', 'Field', required=True),
        'value': fields.text('Value', required=True, help="Expression containing a value specification. \n"
                                                          "When Formula type is selected, this field may be a Python expression "
                                                          " that can use the same values as for the condition field on the server action.\n"
                                                          "If Value type is selected, the value will be used directly without evaluation."),
        'type': fields.selection([
            ('value', 'Value'),
            ('equation', 'Python expression')
        ], 'Evaluation Type', required=True, change_default=True),
    }

    _defaults = {
        'type': 'value',
    }

    def eval_value(self, cr, uid, ids, eval_context=None, context=None):
        """Return {line_id: evaluated value} for the given lines.

        'equation' lines are evaluated (eval is presumably safe_eval --
        confirm at file top); literal values targeting many2one/integer
        fields are best-effort cast to int, silently kept as text otherwise.
        """
        res = dict.fromkeys(ids, False)
        for line in self.browse(cr, uid, ids, context=context):
            expr = line.value
            if line.type == 'equation':
                expr = eval(line.value, eval_context)
            elif line.col1.ttype in ['many2one', 'integer']:
                try:
                    expr = int(line.value)
                except Exception:
                    pass
            res[line.id] = expr
        return res
# Selection values for ir.actions.todo state/type fields
TODO_STATES = [('open', 'To Do'),
               ('done', 'Done')]
TODO_TYPES = [('manual', 'Launch Manually'),('once', 'Launch Manually Once'),
              ('automatic', 'Launch Automatically')]

class ir_actions_todo(osv.osv):
    """
    Configuration Wizards

    A todo wraps an action (typically a configuration wizard) with a
    state and a launch policy (manual / once / automatic).
    """
    _name = 'ir.actions.todo'
    _description = "Configuration Wizards"
    _columns={
        'action_id': fields.many2one(
            'ir.actions.actions', 'Action', select=True, required=True),
        'sequence': fields.integer('Sequence'),
        'state': fields.selection(TODO_STATES, string='Status', required=True),
        'name': fields.char('Name'),
        'type': fields.selection(TODO_TYPES, 'Type', required=True,
                                 help="""Manual: Launched manually.
                                 Automatic: Runs whenever the system is reconfigured.
                                 Launch Manually Once: after having been launched manually, it sets automatically to Done."""),
        'groups_id': fields.many2many('res.groups', 'res_groups_action_rel', 'uid', 'gid', 'Groups'),
        'note': fields.text('Text', translate=True),
    }
    _defaults={
        'state': 'open',
        'sequence': 10,
        'type': 'manual',
    }
    _order="sequence,id"

    def name_get(self, cr, uid, ids, context=None):
        # display the wrapped action's name instead of the todo's own name
        return [(rec.id, rec.action_id.name) for rec in self.browse(cr, uid, ids, context=context)]

    def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
        # search on the related action's name, consistent with name_get
        if args is None:
            args = []
        if name:
            ids = self.search(cr, user, [('action_id', operator, name)] + args, limit=limit)
            return self.name_get(cr, user, ids, context=context)
        return super(ir_actions_todo, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)

    def action_launch(self, cr, uid, ids, context=None):
        """ Launch Action of Wizard

        Marks 'automatic'/'once' todos done, then returns the wrapped
        action's definition for the client to execute.  For act_window
        actions the context is evaluated (eval presumably safe_eval --
        confirm) so a res_id embedded there opens a specific record.
        """
        wizard_id = ids and ids[0] or False
        wizard = self.browse(cr, uid, wizard_id, context=context)
        if wizard.type in ('automatic', 'once'):
            wizard.write({'state': 'done'})

        # Load action
        act_type = wizard.action_id.type

        res = self.pool[act_type].read(cr, uid, [wizard.action_id.id], [], context=context)[0]
        if act_type != 'ir.actions.act_window':
            return res
        res.setdefault('context','{}')
        res['nodestroy'] = True

        # Open a specific record when res_id is provided in the context
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        ctx = eval(res['context'], {'user': user})
        if ctx.get('res_id'):
            res.update({'res_id': ctx.pop('res_id')})

        # disable log for automatic wizards
        if wizard.type == 'automatic':
            ctx.update({'disable_log': True})
        res.update({'context': ctx})

        return res

    def action_open(self, cr, uid, ids, context=None):
        """ Sets configuration wizard in TODO state"""
        return self.write(cr, uid, ids, {'state': 'open'}, context=context)

    def progress(self, cr, uid, context=None):
        """ Returns a dict with 3 keys {todo, done, total}.

        These keys all map to integers: 'done' counts todos not in the
        'open' state, 'total' counts all todos, and 'todo' is the
        difference (total - done).  Only todos whose groups intersect the
        current user's groups (or that have no groups) are counted.

        :rtype: dict
        """
        user_groups = set(map(
            lambda x: x.id,
            self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
        def groups_match(todo):
            """ Checks if the todo's groups match those of the current user
            """
            return not todo.groups_id \
                   or bool(user_groups.intersection((
                        group.id for group in todo.groups_id)))
        done = filter(
            groups_match,
            self.browse(cr, uid,
                self.search(cr, uid, [('state', '!=', 'open')], context=context),
                        context=context))
        total = filter(
            groups_match,
            self.browse(cr, uid,
                self.search(cr, uid, [], context=context),
                        context=context))

        return {
            'done': len(done),
            'total': len(total),
            'todo': len(total) - len(done)
        }
class ir_actions_act_client(osv.osv):
    """Client-side action: a tag interpreted by the web client, with
    optional parameters persisted as their repr() in params_store."""
    _name = 'ir.actions.client'
    _inherit = 'ir.actions.actions'
    _table = 'ir_act_client'
    _sequence = 'ir_actions_id_seq'
    _order = 'name'

    def _get_params(self, cr, uid, ids, field_name, arg, context):
        # function-field getter: deserialize params_store back to a dict
        result = {}
        # Need to remove bin_size from context, to obtains the binary and not the length.
        context = dict(context, bin_size_params_store=False)
        for record in self.browse(cr, uid, ids, context=context):
            # eval is presumably safe_eval (confirm at file top); only 'uid'
            # is exposed to the stored expression
            result[record.id] = record.params_store and eval(record.params_store, {'uid': uid}) or False
        return result

    def _set_params(self, cr, uid, id, field_name, field_value, arg, context):
        # function-field setter: dicts are stored as their repr(), anything
        # else is stored verbatim
        if isinstance(field_value, dict):
            self.write(cr, uid, id, {'params_store': repr(field_value)}, context=context)
        else:
            self.write(cr, uid, id, {'params_store': field_value}, context=context)

    _columns = {
        'name': fields.char('Action Name', required=True, translate=True),
        'tag': fields.char('Client action tag', required=True,
                           help="An arbitrary string, interpreted by the client"
                                " according to its own needs and wishes. There "
                                "is no central tag repository across clients."),
        'res_model': fields.char('Destination Model',
                                 help="Optional model, mostly used for needactions."),
        'context': fields.char('Context Value', required=True,
                               help="Context dictionary as Python expression, empty by default (Default: {})"),
        'params': fields.function(_get_params, fnct_inv=_set_params,
                                  type='binary',
                                  string="Supplementary arguments",
                                  help="Arguments sent to the client along with"
                                       "the view tag"),
        'params_store': fields.binary("Params storage", readonly=True)
    }
    _defaults = {
        'type': 'ir.actions.client',
        'context': '{}',
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RanadeepPolavarapu/kuma | vendor/packages/translate/convert/test_json2po.py | 25 | 2728 | #!/usr/bin/env python
from translate.convert import json2po, test_convert
from translate.misc import wStringIO
from translate.storage import jsonl10n
class TestJson2PO:
    """Unit tests for the json -> PO converter."""

    def json2po(self, jsonsource, template=None, filter=None):
        """helper that converts json source to po source without requiring files

        Note: *filter* (a list of keys to keep) shadows the builtin of the
        same name; kept for interface compatibility with jsonl10n.JsonFile.
        """
        inputfile = wStringIO.StringIO(jsonsource)
        inputjson = jsonl10n.JsonFile(inputfile, filter=filter)
        convertor = json2po.json2po()
        outputpo = convertor.convert_store(inputjson)
        return outputpo

    def singleelement(self, storage):
        """checks that the pofile contains a single non-header element, and returns it"""
        print(str(storage))
        assert len(storage.units) == 1
        return storage.units[0]

    def test_simple(self):
        """test the most basic json conversion"""
        jsonsource = '''{ "text": "A simple string"}'''
        poexpected = '''#: .text
msgid "A simple string"
msgstr ""
'''
        poresult = self.json2po(jsonsource)
        # units[0] is the PO header; the converted unit follows it
        assert str(poresult.units[1]) == poexpected

    def test_filter(self):
        """test basic json conversion with filter option"""
        jsonsource = '''{ "text": "A simple string", "number": 42 }'''
        poexpected = '''#: .text
msgid "A simple string"
msgstr ""
'''
        poresult = self.json2po(jsonsource, filter=["text"])
        assert str(poresult.units[1]) == poexpected

    # Fixed typo in method name (was: test_miltiple_units); the test_ prefix
    # is preserved so pytest discovery is unaffected.
    def test_multiple_units(self):
        """test that we can handle json with multiple units"""
        jsonsource = '''
{
    "name": "John",
    "surname": "Smith",
    "address":
    {
        "streetAddress": "Koeistraat 21",
        "city": "Pretoria",
        "country": "South Africa",
        "postalCode": "10021"
    },
    "phoneNumber":
    [
        {
          "type": "home",
          "number": "012 345-6789"
        },
        {
          "type": "fax",
          "number": "012 345-6788"
        }
    ]
}
'''
        poresult = self.json2po(jsonsource)
        assert poresult.units[0].isheader()
        print(len(poresult.units))
        # header + 10 converted leaf values
        assert len(poresult.units) == 11
class TestJson2POCommand(test_convert.TestConvertCommand, TestJson2PO):
    """Tests running actual json2po commands on files"""
    convertmodule = json2po
    defaultoptions = {"progress": "none"}

    def test_help(self):
        """tests getting help

        Checks that each expected command-line option appears in the
        converter's --help output.
        """
        options = test_convert.TestConvertCommand.test_help(self)
        options = self.help_check(options, "-P, --pot")
        options = self.help_check(options, "--duplicates")
        options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
        options = self.help_check(options, "--filter", last=True)
| mpl-2.0 |
gmarkall/numba | numba/roc/dispatch.py | 4 | 4683 | import numpy as np
from numba.np.ufunc.deviceufunc import (UFuncMechanism, GenerializedUFunc,
GUFuncCallSteps)
from numba.roc.hsadrv.driver import dgpu_present
import numba.roc.hsadrv.devicearray as devicearray
import numba.roc.api as api
class HsaUFuncDispatcher(object):
    """
    Invoke the HSA ufunc specialization for the given inputs.
    """

    def __init__(self, types_to_retty_kernels):
        # mapping of argument type signatures to (return type, kernel) pairs
        self.functions = types_to_retty_kernels

    def __call__(self, *args, **kws):
        """
        *args: numpy arrays
        **kws:
            stream -- hsa stream; when defined, asynchronous mode is used.
            out    -- output array. Can be a numpy array or DeviceArrayBase
                      depending on the input arguments.  Type must match
                      the input arguments.
        """
        # delegate argument handling/launch to the mechanism below
        return HsaUFuncMechanism.call(self.functions, args, kws)

    def reduce(self, arg, stream=0):
        # ufunc-style reduction is not implemented for HSA
        raise NotImplementedError
class HsaUFuncMechanism(UFuncMechanism):
    """
    Provide the HSA specialization of UFuncMechanism.

    Most methods branch on ``dgpu_present``: with a discrete GPU, data goes
    through the HSA device-array API; otherwise host numpy arrays are used
    directly.
    """
    DEFAULT_STREAM = 0
    ARRAY_ORDER = 'A'

    def is_device_array(self, obj):
        if dgpu_present:
            return devicearray.is_hsa_ndarray(obj)
        else:
            return isinstance(obj, np.ndarray)

    def is_host_array(self, obj):
        # with a dGPU, host arrays are never accepted directly
        if dgpu_present:
            return False
        else:
            return isinstance(obj, np.ndarray)

    def to_device(self, hostary, stream):
        if dgpu_present:
            return api.to_device(hostary)
        else:
            return hostary

    def launch(self, func, count, stream, args):
        # ILP must match vectorize kernel source
        ilp = 4
        # Use more wavefront to allow hiding latency
        tpb = 64 * 2
        # ceil-divide the element count by ILP, then by threads-per-block
        count = (count + (ilp - 1)) // ilp
        blockcount = (count + (tpb - 1)) // tpb
        func[blockcount, tpb](*args)

    def device_array(self, shape, dtype, stream):
        if dgpu_present:
            return api.device_array(shape=shape, dtype=dtype)
        else:
            return np.empty(shape=shape, dtype=dtype)

    def broadcast_device(self, ary, shape):
        if dgpu_present:
            raise NotImplementedError('device broadcast_device NIY')
        else:
            # host-side broadcast: zero out strides on axes that are missing
            # or whose extent differs from the target shape
            ax_differs = [ax for ax in range(len(shape))
                          if ax >= ary.ndim
                          or ary.shape[ax] != shape[ax]]

            missingdim = len(shape) - len(ary.shape)
            strides = [0] * missingdim + list(ary.strides)

            for ax in ax_differs:
                strides[ax] = 0

            return np.ndarray(shape=shape, strides=strides,
                              dtype=ary.dtype, buffer=ary)
class _HsaGUFuncCallSteps(GUFuncCallSteps):
    """HSA implementation of the generalized-ufunc call steps.

    As with HsaUFuncMechanism, each step branches on ``dgpu_present``.
    """
    __slots__ = ()

    def is_device_array(self, obj):
        if dgpu_present:
            return devicearray.is_hsa_ndarray(obj)
        else:
            # without a dGPU everything is treated as already "on device"
            return True

    def to_device(self, hostary):
        if dgpu_present:
            return api.to_device(hostary)
        else:
            return hostary

    def to_host(self, devary, hostary):
        if dgpu_present:
            out = devary.copy_to_host(hostary)
            return out
        else:
            # host mode: data is already in hostary; nothing to copy
            pass

    def device_array(self, shape, dtype):
        if dgpu_present:
            return api.device_array(shape=shape, dtype=dtype)
        else:
            return np.empty(shape=shape, dtype=dtype)

    def launch_kernel(self, kernel, nelem, args):
        # cap the workgroup size at 64 (one wavefront) for small launches
        kernel.configure(nelem, min(nelem, 64))(*args)
class HSAGenerializedUFunc(GenerializedUFunc):
    """HSA specialization of GenerializedUFunc: supplies the call steps and
    device-aware broadcasting helpers."""

    @property
    def _call_steps(self):
        return _HsaGUFuncCallSteps

    def _broadcast_scalar_input(self, ary, shape):
        # broadcast a scalar to `shape` via a zero-stride view (no copy)
        if dgpu_present:
            return devicearray.DeviceNDArray(shape=shape,
                                             strides=(0,),
                                             dtype=ary.dtype,
                                             dgpu_data=ary.dgpu_data)
        else:
            return np.lib.stride_tricks.as_strided(ary, shape=(shape,),
                                                   strides=(0,))

    def _broadcast_add_axis(self, ary, newshape):
        newax = len(newshape) - len(ary.shape)
        # Add 0 strides for missing dimension
        newstrides = (0,) * newax + ary.strides
        if dgpu_present:
            return devicearray.DeviceNDArray(shape=newshape,
                                             strides=newstrides,
                                             dtype=ary.dtype,
                                             dgpu_data=ary.dgpu_data)
        else:
            # host-side axis insertion is not needed/implemented
            raise NotImplementedError
| bsd-2-clause |
jlengrand/Ivolution | ivolution/util/Notifier.py | 1 | 2002 | """
.. module:: Notifier
:platform: Unix, Windows
   :synopsis: Implements a simple Observer/Observable pattern for communication between the Facemovie thread and the Ivolution GUI
.. moduleauthor:: Julien Lengrand-Lambert <jlengrand@gmail.com>
"""
class Observer():
    """
    Implements a simple Observer from the Observer pattern
    """
    def __init__(self, name="Observer"):
        """Create an observer identified by *name*."""
        self.name = name

    def update(self, message):
        """Hook called by an Observable with a notification payload.

        The base implementation does nothing; subclasses override it.
        """
        if message is not None:
            # debug print intentionally disabled
            pass

    def __str__(self):
        return self.name
class Observable():
    """
    Implements a simple Observable from the Observer pattern
    """
    def __init__(self):
        """Start with an empty subscriber collection."""
        self.val = 1  # kept for backward compatibility; not used internally
        self.obs_collection = []

    def subscribe(self, observer):
        """Register *observer* unless it is already subscribed."""
        try:
            already_subscribed = observer in self.obs_collection
            if not already_subscribed:
                self.obs_collection.append(observer)
        except TypeError:
            # objects that cannot be compared are silently ignored, as before
            pass

    def unsubscribe(self, observer):
        """Remove *observer* if it is currently subscribed."""
        try:
            if observer in self.obs_collection:
                self.obs_collection.remove(observer)
        except TypeError:
            # objects that cannot be compared are silently ignored, as before
            pass

    def notify(self, message):
        """Deliver ``message[1]`` to every subscriber named ``message[0]``."""
        for observer in self.obs_collection:
            target_name = message[0]
            if target_name == observer.name:
                observer.update(message[1])
| bsd-3-clause |
mpetyx/palmdrop | venv/lib/python2.7/site-packages/cms/plugins/flash/migrations/0001_initial.py | 11 | 3244 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration creating the cmsplugin_flash table for the Flash plugin."""

    def forwards(self, orm):
        # Adding model 'Flash'
        db.create_table('cmsplugin_flash', (
            ('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
            ('width', self.gf('django.db.models.fields.CharField')(max_length=6)),
            ('height', self.gf('django.db.models.fields.CharField')(max_length=6)),
        ))
        db.send_create_signal('flash', ['Flash'])

    def backwards(self, orm):
        # Deleting model 'Flash'
        db.delete_table('cmsplugin_flash')

    # Frozen ORM snapshot used by South; auto-generated, do not hand-edit.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'flash.flash': {
            'Meta': {'object_name': 'Flash', 'db_table': "'cmsplugin_flash'", '_ormbases': ['cms.CMSPlugin']},
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'height': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
            'width': ('django.db.models.fields.CharField', [], {'max_length': '6'})
        }
    }
complete_apps = ['flash'] | apache-2.0 |
jbking/demo-appengine-django-golang | myproject/django/contrib/gis/geos/mutable_list.py | 217 | 10972 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://www.aryehleib.com/MutableLists.html
Author: Aryeh Leib Taurog.
"""
from django.utils.functional import total_ordering
from django.utils import six
from django.utils.six.moves import xrange
@total_ordering
class ListMixin(object):
"""
A base class which provides complete list interface.
Derived classes must call ListMixin's __init__() function
and implement the following:
function _get_single_external(self, i):
Return single item with index i for general use.
The index i will always satisfy 0 <= i < len(self).
function _get_single_internal(self, i):
Same as above, but for use within the class [Optional]
Note that if _get_single_internal and _get_single_internal return
different types of objects, _set_list must distinguish
between the two and handle each appropriately.
function _set_list(self, length, items):
Recreate the entire object.
NOTE: items may be a generator which calls _get_single_internal.
Therefore, it is necessary to cache the values in a temporary:
temp = list(items)
before clobbering the original storage.
function _set_single(self, i, value):
Set the single item at index i to value [Optional]
If left undefined, all mutations will result in rebuilding
the object using _set_list.
function __len__(self):
Return the length
int _minlength:
The minimum legal length [Optional]
int _maxlength:
The maximum legal length [Optional]
type or tuple _allowed:
A type or tuple of allowed item types [Optional]
class _IndexError:
The type of exception to be raise on invalid index [Optional]
"""
_minlength = 0
_maxlength = None
_IndexError = IndexError
### Python initialization and special list interface methods ###
    def __init__(self, *args, **kwargs):
        # Fall back on the public single-item getter when a subclass does not
        # provide an internal one.
        if not hasattr(self, '_get_single_internal'):
            self._get_single_internal = self._get_single_external

        # Without a per-item setter, every mutation (including extended-slice
        # assignment) goes through a full rebuild of the object.
        if not hasattr(self, '_set_single'):
            self._set_single = self._set_single_rebuild
            self._assign_extended_slice = self._assign_extended_slice_rebuild

        super(ListMixin, self).__init__(*args, **kwargs)
    def __getitem__(self, index):
        "Get the item(s) at the specified index/slice."
        if isinstance(index, slice):
            # slices return a plain list of items
            return [self._get_single_external(i) for i in xrange(*index.indices(len(self)))]
        else:
            index = self._checkindex(index)
            return self._get_single_external(index)

    def __delitem__(self, index):
        "Delete the item(s) at the specified index/slice."
        if not isinstance(index, six.integer_types + (slice,)):
            raise TypeError("%s is not a legal index" % index)

        # calculate new length and dimensions
        origLen = len(self)
        if isinstance(index, six.integer_types):
            index = self._checkindex(index)
            indexRange = [index]
        else:
            indexRange = range(*index.indices(origLen))

        newLen = origLen - len(indexRange)
        # lazy generator of the surviving items; _rebuild/_set_list must cache
        # it before clobbering the underlying storage (see class docstring)
        newItems = ( self._get_single_internal(i)
                     for i in xrange(origLen)
                     if i not in indexRange )

        self._rebuild(newLen, newItems)

    def __setitem__(self, index, val):
        "Set the item(s) at the specified index/slice."
        if isinstance(index, slice):
            self._set_slice(index, val)
        else:
            index = self._checkindex(index)
            self._check_allowed((val,))
            self._set_single(index, val)

    def __iter__(self):
        "Iterate over the items in the list"
        for i in xrange(len(self)):
            yield self[i]
### Special methods for arithmetic operations ###
    def __add__(self, other):
        'add another list-like object'
        return self.__class__(list(self) + list(other))

    def __radd__(self, other):
        'add to another list-like object'
        return other.__class__(list(other) + list(self))

    def __iadd__(self, other):
        'add another list-like object to self'
        self.extend(list(other))
        return self

    def __mul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __rmul__(self, n):
        'multiply'
        return self.__class__(list(self) * n)

    def __imul__(self, n):
        'multiply in place; n <= 0 empties the list'
        if n <= 0:
            del self[:]
        else:
            # snapshot current contents, then append n-1 copies
            cache = list(self)
            for i in range(n-1):
                self.extend(cache)
        return self
def __eq__(self, other):
olen = len(other)
for i in range(olen):
try:
c = self[i] == other[i]
except self._IndexError:
# self must be shorter
return False
if not c:
return False
return len(self) == olen
    def __lt__(self, other):
        # Standard lexicographic ordering: compare element-wise up to the
        # length of `other`; the first differing pair decides, and a tie
        # over the common prefix is broken by length.
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] < other[i]
            except self._IndexError:
                # self must be shorter
                return True
            if c:
                return c
            elif other[i] < self[i]:
                return False
        return len(self) < olen
### Public list interface Methods ###
## Non-mutating ##
def count(self, val):
"Standard list count method"
count = 0
for i in self:
if val == i: count += 1
return count
def index(self, val):
"Standard list index method"
for i in xrange(0, len(self)):
if self[i] == val: return i
raise ValueError('%s not found in object' % str(val))
## Mutating ##
def append(self, val):
"Standard list append method"
self[len(self):] = [val]
def extend(self, vals):
"Standard list extend method"
self[len(self):] = vals
def insert(self, index, val):
"Standard list insert method"
if not isinstance(index, six.integer_types):
raise TypeError("%s is not a legal index" % index)
self[index:index] = [val]
def pop(self, index=-1):
"Standard list pop method"
result = self[index]
del self[index]
return result
def remove(self, val):
"Standard list remove method"
del self[self.index(val)]
def reverse(self):
"Standard list reverse method"
self[:] = self[-1::-1]
def sort(self, cmp=None, key=None, reverse=False):
"Standard list sort method"
if key:
temp = [(key(v),v) for v in self]
temp.sort(key=lambda x: x[0], reverse=reverse)
self[:] = [v[1] for v in temp]
else:
temp = list(self)
if cmp is not None:
temp.sort(cmp=cmp, reverse=reverse)
else:
temp.sort(reverse=reverse)
self[:] = temp
### Private routines ###
def _rebuild(self, newLen, newItems):
if newLen < self._minlength:
raise ValueError('Must have at least %d items' % self._minlength)
if self._maxlength is not None and newLen > self._maxlength:
raise ValueError('Cannot have more than %d items' % self._maxlength)
self._set_list(newLen, newItems)
    def _set_single_rebuild(self, index, value):
        # Fallback single-item setter for subclasses that can only rebuild
        # the whole list: route the assignment through the slice machinery.
        self._set_slice(slice(index, index + 1, 1), [value])
def _checkindex(self, index, correct=True):
length = len(self)
if 0 <= index < length:
return index
if correct and -length <= index < 0:
return index + length
raise self._IndexError('invalid index: %s' % str(index))
def _check_allowed(self, items):
if hasattr(self, '_allowed'):
if False in [isinstance(val, self._allowed) for val in items]:
raise TypeError('Invalid type encountered in the arguments.')
    def _set_slice(self, index, values):
        "Assign values to a slice of the object"
        try:
            iter(values)
        except TypeError:
            raise TypeError('can only assign an iterable to a slice')
        self._check_allowed(values)
        origLen = len(self)
        # Materialise once: `values` may be a one-shot iterator.
        valueList = list(values)
        start, stop, step = index.indices(origLen)
        # CAREFUL: index.step and step are not the same!
        # step will never be None
        if index.step is None:
            # Step-less slice: the assignment may change the sequence length.
            self._assign_simple_slice(start, stop, valueList)
        else:
            # Explicit step (even 1): value list must match the slice size.
            self._assign_extended_slice(start, stop, step, valueList)
    def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
        'Assign an extended slice by rebuilding entire list'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))
        # we're not changing the length of the sequence
        newLen = len(self)
        newVals = dict(zip(indexList, valueList))
        def newItems():
            # Yield the post-assignment sequence: replaced positions come
            # from newVals, everything else is read back from storage.
            for i in xrange(newLen):
                if i in newVals:
                    yield newVals[i]
                else:
                    yield self._get_single_internal(i)
        self._rebuild(newLen, newItems())
def _assign_extended_slice(self, start, stop, step, valueList):
'Assign an extended slice by re-assigning individual items'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
for i, val in zip(indexList, valueList):
self._set_single(i, val)
    def _assign_simple_slice(self, start, stop, valueList):
        'Assign a simple slice; Can assign slice of any length'
        origLen = len(self)
        # Clamp so an inverted window behaves like a pure insertion at start.
        stop = max(start, stop)
        newLen = origLen - stop + start + len(valueList)
        def newItems():
            # Iterate one position past the end so an insertion at
            # i == start == origLen (e.g. lst[n:] = [...]) is still emitted.
            for i in xrange(origLen + 1):
                if i == start:
                    for val in valueList:
                        yield val
                if i < origLen:
                    # Keep everything outside the replaced [start, stop) window.
                    if i < start or i >= stop:
                        yield self._get_single_internal(i)
        self._rebuild(newLen, newItems())
| mit |
Nispand1492/appengine-try-python-flask | lib/rsa/asn1.py | 89 | 1147 | '''ASN.1 definitions.
Not all ASN.1-handling code use these definitions, but when it does, they should be here.
'''
from pyasn1.type import univ, namedtype, tag
class PubKeyHeader(univ.Sequence):
    """AlgorithmIdentifier header of an X.509 SubjectPublicKeyInfo:
    the algorithm OID followed by a NULL parameters field (as used by RSA).
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('oid', univ.ObjectIdentifier()),
        namedtype.NamedType('parameters', univ.Null()),
    )
class OpenSSLPubKey(univ.Sequence):
    """Outer SubjectPublicKeyInfo structure of an OpenSSL-style public key:
    the algorithm header plus the wrapped key material.
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('header', PubKeyHeader()),
        # This little hack (the implicit tag) allows us to get a Bit String as Octet String
        namedtype.NamedType('key', univ.OctetString().subtype(
            implicitTag=tag.Tag(tagClass=0, tagFormat=0, tagId=3))),
    )
class AsnPubKey(univ.Sequence):
    '''ASN.1 contents of a DER-encoded public key:

    RSAPublicKey ::= SEQUENCE {
        modulus         INTEGER,  -- n
        publicExponent  INTEGER,  -- e
    }
    '''
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('modulus', univ.Integer()),
        namedtype.NamedType('publicExponent', univ.Integer()),
    )
| apache-2.0 |
edx/edx-platform | lms/djangoapps/course_blocks/transformers/tests/test_user_partitions.py | 5 | 20048 | # pylint: disable=attribute-defined-outside-init, protected-access
"""
Tests for UserPartitionTransformer.
"""
import string
from collections import namedtuple
from datetime import datetime
from unittest.mock import patch
import ddt
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from common.djangoapps.student.tests.factories import CourseEnrollmentFactory
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort
from openedx.core.djangoapps.course_groups.partition_scheme import CohortPartitionScheme
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory, config_course_cohorts
from openedx.core.djangoapps.course_groups.views import link_cohort_to_partition_group
from openedx.features.content_type_gating.models import ContentTypeGatingConfig
from openedx.features.content_type_gating.partitions import create_content_gating_partition
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.partitions.partitions import Group, UserPartition
from ...api import get_course_blocks
from ..user_partitions import UserPartitionTransformer, _MergedGroupAccess
from .helpers import CourseStructureTestCase, update_block
class UserPartitionTestMixin:
    """
    Helper Mixin for testing user partitions.

    Provides factories for cohort-scheme partitions/groups and for wiring
    each group of each partition to its own course cohort.
    """
    TRANSFORMER_CLASS_TO_TEST = UserPartitionTransformer

    def setup_groups_partitions(self, num_user_partitions=1, num_groups=4, active=True):
        """
        Sets up groups and user partitions for testing.

        Populates ``self.groups`` (ids 1..num_groups) and
        ``self.user_partitions`` (ids 1..num_user_partitions); every
        partition shares the same group list.
        """
        # Set up groups
        self.groups = []
        for group_num in range(1, num_groups + 1):
            self.groups.append(Group(group_num, 'Group ' + str(group_num)))
        # Set up user partitions
        self.user_partitions = []
        for user_partition_num in range(1, num_user_partitions + 1):
            user_partition = UserPartition(
                id=user_partition_num,
                name='Partition ' + str(user_partition_num),
                description='This is partition ' + str(user_partition_num),
                groups=self.groups,
                scheme=CohortPartitionScheme,
                active=active,
            )
            # NOTE(review): mutates the shared scheme class attribute, not
            # just this instance -- presumably intentional for these tests.
            user_partition.scheme.name = "cohort"
            self.user_partitions.append(user_partition)

    def setup_cohorts(self, course):
        """
        Sets up a cohort for each previously created user partition.

        Fills ``self.partition_cohorts`` with one list of cohorts per
        partition, each cohort linked to the matching partition group.
        """
        config_course_cohorts(course, is_cohorted=True)
        self.partition_cohorts = []
        for user_partition in self.user_partitions:
            partition_cohorts = []
            for group in self.groups:
                cohort = CohortFactory(course_id=course.id)
                partition_cohorts.append(cohort)
                link_cohort_to_partition_group(
                    cohort,
                    user_partition.id,
                    group.id,
                )
            self.partition_cohorts.append(partition_cohorts)
@ddt.ddt
class UserPartitionTransformerTestCase(UserPartitionTestMixin, CourseStructureTestCase):
    """
    UserPartitionTransformer Test

    Exercises the transformer against a course whose blocks are restricted
    to various groups of a single cohort-scheme partition.
    """
    def setup_partitions_and_course(self, active=True):
        """
        Setup course structure and create user for user partition
        transformer test.

        Args:
            active: boolean representing if the user partitions are
                active or not
        """
        # Set up user partitions and groups.
        self.setup_groups_partitions(active=active)
        self.user_partition = self.user_partitions[0]
        # Build course.
        self.course_hierarchy = self.get_course_hierarchy()
        self.blocks = self.build_course(self.course_hierarchy)
        self.course = self.blocks['course']
        # Enroll user in course.
        CourseEnrollmentFactory.create(
            user=self.user, course_id=self.course.id, is_active=True
        )
        # Set up cohorts.
        self.setup_cohorts(self.course)

    def get_course_hierarchy(self):
        """
        Returns a course hierarchy to test with.

        Group restrictions are shown in brackets; unbracketed blocks are
        unrestricted and inherit access from their parents:

            course -> A[1,2,3], B
            A      -> C[1,2], D[2,3], E
            B      -> O
            C      -> F, G[1], H[2]
            D      -> I
            E      -> J, K[4,51]
            G      -> L[1,2], M[1,2,3]
            H      -> M
            K      -> N, O
        """
        return [
            {
                'org': 'UserPartitionTransformer',
                'course': 'UP101F',
                'run': 'test_run',
                'user_partitions': [self.user_partition],
                '#type': 'course',
                '#ref': 'course',
                '#children': [
                    {
                        '#type': 'vertical',
                        '#ref': 'A',
                        'metadata': {'group_access': {self.user_partition.id: [0, 1, 2, 3]}},
                    },
                    {'#type': 'vertical', '#ref': 'B'},
                ],
            },
            {
                '#type': 'vertical',
                '#ref': 'C',
                '#parents': ['A'],
                'metadata': {'group_access': {self.user_partition.id: [1, 2]}},
                '#children': [
                    {'#type': 'vertical', '#ref': 'F'},
                    {
                        '#type': 'vertical',
                        '#ref': 'G',
                        'metadata': {'group_access': {self.user_partition.id: [1]}},
                    },
                    {
                        '#type': 'vertical',
                        '#ref': 'H',
                        'metadata': {'group_access': {self.user_partition.id: [2]}},
                    },
                ],
            },
            {
                '#type': 'vertical',
                '#ref': 'D',
                '#parents': ['A'],
                'metadata': {'group_access': {self.user_partition.id: [2, 3]}},
                '#children': [{'#type': 'vertical', '#ref': 'I'}],
            },
            {
                '#type': 'vertical',
                '#ref': 'E',
                '#parents': ['A'],
                '#children': [{'#type': 'vertical', '#ref': 'J'}],
            },
            {
                '#type': 'vertical',
                '#ref': 'K',
                '#parents': ['E'],
                'metadata': {'group_access': {self.user_partition.id: [4, 51]}},
                '#children': [{'#type': 'vertical', '#ref': 'N'}],
            },
            {
                '#type': 'vertical',
                '#ref': 'L',
                '#parents': ['G'],
                'metadata': {'group_access': {self.user_partition.id: [1, 2]}},
            },
            {
                '#type': 'vertical',
                '#ref': 'M',
                '#parents': ['G', 'H'],
                'metadata': {'group_access': {self.user_partition.id: [1, 2, 3]}},
            },
            {
                '#type': 'vertical',
                '#ref': 'O',
                '#parents': ['K', 'B'],
            },
        ]

    @ddt.data(
        (None, ('course', 'B', 'O')),
        (1, ('course', 'A', 'B', 'C', 'E', 'F', 'G', 'J', 'L', 'M', 'O')),
        (2, ('course', 'A', 'B', 'C', 'D', 'E', 'F', 'H', 'I', 'J', 'M', 'O')),
        (3, ('course', 'A', 'B', 'D', 'E', 'I', 'J', 'O')),
        (4, ('course', 'B', 'O')),
    )
    @ddt.unpack
    def test_transform(self, group_id, expected_blocks):
        # Membership in the cohort linked to `group_id` determines which
        # restricted blocks survive the transform; None means no cohort.
        self.setup_partitions_and_course()
        if group_id:
            cohort = self.partition_cohorts[self.user_partition.id - 1][group_id - 1]
            add_user_to_cohort(cohort, self.user.username)
        trans_block_structure = get_course_blocks(
            self.user,
            self.course.location,
            self.transformers,
        )
        self.assertSetEqual(
            set(trans_block_structure.get_block_keys()),
            self.get_block_key_set(self.blocks, *expected_blocks)
        )

    def test_transform_with_content_gating_partition(self):
        # With a content-type-gating partition denying all groups, every
        # surviving block should carry the gating denial reason.
        self.setup_partitions_and_course()
        CourseModeFactory.create(course_id=self.course.id, mode_slug='audit')
        CourseModeFactory.create(course_id=self.course.id, mode_slug='verified')
        ContentTypeGatingConfig.objects.create(enabled=True, enabled_as_of=datetime(2018, 1, 1))
        partition = create_content_gating_partition(self.course)
        self.user_partitions.append(partition)
        cohort = self.partition_cohorts[0][1]
        add_user_to_cohort(cohort, self.user.username)
        with patch(
            'lms.djangoapps.course_blocks.transformers.user_partitions.get_partition_from_id',
            return_value=partition
        ), patch(
            'lms.djangoapps.course_blocks.transformers.user_partitions._MergedGroupAccess.get_allowed_groups',
            return_value={51: set()}
        ):
            trans_block_structure = get_course_blocks(
                self.user,
                self.course.location,
                self.transformers,
            )
            xblocks_denial_reason = [trans_block_structure.get_xblock_field(b, 'authorization_denial_reason')
                                     for b in trans_block_structure.get_block_keys()]
            self.assertSetEqual(set(xblocks_denial_reason), {'Feature-based Enrollments'})

    def test_transform_on_inactive_partition(self):
        """
        Tests UserPartitionTransformer for inactive UserPartition.
        """
        self.setup_partitions_and_course(active=False)
        # we expect to find all blocks because the UserPartitions are all
        # inactive
        expected_blocks = ('course',) + tuple(string.ascii_uppercase[:15])
        trans_block_structure = get_course_blocks(
            self.user,
            self.course.location,
            self.transformers,
        )
        self.assertSetEqual(
            set(trans_block_structure.get_block_keys()),
            self.get_block_key_set(self.blocks, *expected_blocks)
        )
@ddt.ddt
class MergedGroupAccessTestData(UserPartitionTestMixin, CourseStructureTestCase):
    """
    _MergedGroupAccess Test

    Drives _MergedGroupAccess directly with combinations of the user's
    partition-group memberships, the block's own group_access, and the
    merged access of its parents.
    """
    def setUp(self):
        """
        Setup course structure and create user for user partition
        transformer test.
        """
        super().setUp()
        # Set up multiple user partitions and groups.
        self.setup_groups_partitions(num_user_partitions=3)
        self.course = CourseFactory.create(user_partitions=self.user_partitions)
        CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id, is_active=True)
        # Set up cohorts.
        self.setup_cohorts(self.course)

    def get_course_hierarchy(self):
        """
        Returns a course hierarchy to test with.

        The bracketed numbers give each block's group_id restriction for
        the three partitions, in order:

                          A
                      /   |   \\
                     B    C    D
              [1][3][] [2][2][] [3][1][]
                      \\  /
                       E
        """
        return [
            {
                'org': 'MergedGroupAccess',
                'course': 'MGA101F',
                'run': 'test_run',
                'user_partitions': self.user_partitions,
                '#type': 'course',
                '#ref': 'A',
            },
            {
                '#type': 'vertical',
                '#ref': 'B',
                '#parents': ['A'],
                'metadata': {'group_access': {1: [1], 2:[3], 3:[]}},
            },
            {
                '#type': 'vertical',
                '#ref': 'C',
                '#parents': ['A'],
                'metadata': {'group_access': {1: [2], 2:[2], 3:[]}},
            },
            {
                '#type': 'vertical',
                '#ref': 'D',
                '#parents': ['A'],
                'metadata': {'group_access': {1: [3], 2:[1], 3:[]}},
            },
            {
                '#type': 'vertical',
                '#ref': 'E',
                '#parents': ['B', 'C'],
            },
        ]

    # Each datum: the user's group per partition, the block's own
    # group_access, the parents' merged access, and the expected verdict.
    AccessTestData = namedtuple(
        'AccessTestData',
        ['partition_groups', 'xblock_access', 'merged_parents_list', 'expected_access'],
    )
    AccessTestData.__new__.__defaults__ = ({}, None, [], False)

    @ddt.data(
        # universal access throughout
        AccessTestData(expected_access=True),
        AccessTestData(xblock_access={1: None}, expected_access=True),
        AccessTestData(xblock_access={1: []}, expected_access=True),
        # partition 1 requiring membership in group 1
        AccessTestData(xblock_access={1: [1]}),
        AccessTestData(partition_groups={2: 1, 3: 1}, xblock_access={1: [1]}),
        AccessTestData(partition_groups={1: 1, 2: 1, 3: 1}, xblock_access={1: [1]}, expected_access=True),
        AccessTestData(partition_groups={1: 1, 2: 1}, xblock_access={1: [1], 2: [], 3: []}, expected_access=True),
        # partitions 1 and 2 requiring membership in group 1
        AccessTestData(xblock_access={1: [1], 2: [1]}),
        AccessTestData(partition_groups={2: 1, 3: 1}, xblock_access={1: [1], 2: [1]}),
        AccessTestData(partition_groups={1: 1, 2: 1}, xblock_access={1: [1], 2: [1]}, expected_access=True),
        # partitions 1 and 2 requiring membership in different groups
        AccessTestData(xblock_access={1: [1], 2: [2]}),
        AccessTestData(partition_groups={2: 1, 3: 1}, xblock_access={1: [1], 2: [2]}),
        AccessTestData(partition_groups={1: 1, 2: 1, 3: 1}, xblock_access={1: [1], 2: [2]}),
        AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [1], 2: [2]}, expected_access=True),
        # partitions 1 and 2 requiring membership in list of groups
        AccessTestData(partition_groups={1: 3, 2: 3}, xblock_access={1: [1, 2], 2: [1, 2]}),
        AccessTestData(partition_groups={1: 1, 2: 1}, xblock_access={1: [1, 2], 2: [1, 2]}, expected_access=True),
        AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [1, 2], 2: [1, 2]}, expected_access=True),
        AccessTestData(partition_groups={1: 2, 2: 1}, xblock_access={1: [1, 2], 2: [1, 2]}, expected_access=True),
        AccessTestData(partition_groups={1: 2, 2: 2}, xblock_access={1: [1, 2], 2: [1, 2]}, expected_access=True),
        # parent inheritance
        # 1 parent allows
        AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {1}}], expected_access=True),
        # 2 parents allow
        AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {1}}, {1: {1}}], expected_access=True),
        AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {2}}, {1: {1}}], expected_access=True),
        AccessTestData(
            partition_groups={1: 1, 2: 2},
            merged_parents_list=[{1: {2}, 2: {2}}, {1: {1}, 2: {1}}],
            expected_access=True,
        ),
        # 1 parent denies
        AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {}}]),
        AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {3}}]),
        # 1 parent denies, 1 parent allows all
        AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {}}, {}], expected_access=True),
        AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {}}, {1: {}}, {}], expected_access=True),
        AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {}}, {}, {1: {}}], expected_access=True),
        # 1 parent denies, 1 parent allows
        AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {3}}, {1: {1}}], expected_access=True),
        # 2 parents deny
        AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {}}, {1: {}}]),
        AccessTestData(partition_groups={1: 1, 2: 2}, merged_parents_list=[{1: {3}}, {1: {3}, 2: {2}}]),
        # intersect with parent
        # child denies, 1 parent allows
        AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [3]}, merged_parents_list=[{1: {1}}]),
        AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [2]}, merged_parents_list=[{1: {1}}]),
        # child denies, 2 parents allow
        AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [3]}, merged_parents_list=[{1: {1}}, {2: {2}}]),
        AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={2: [3]}, merged_parents_list=[{1: {1}}, {2: {2}}]),
        # child allows, 1 parent denies
        AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={2: [2]}, merged_parents_list=[{1: {}}]),
        AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={1: [1]}, merged_parents_list=[{1: {2}}]),
        AccessTestData(partition_groups={1: 1, 2: 2}, xblock_access={2: [2]}, merged_parents_list=[{1: {2}}]),
        # child allows, 1 parent allows
        AccessTestData(
            partition_groups={1: 1, 2: 2},
            xblock_access={1: [1]},
            merged_parents_list=[{}],
            expected_access=True,
        ),
        AccessTestData(
            partition_groups={1: 1, 2: 2}, xblock_access={2: [2]}, merged_parents_list=[{1: {1}}], expected_access=True
        ),
        AccessTestData(
            partition_groups={1: 1, 2: 2},
            xblock_access={1: [1, 3], 2: [2, 3]},
            merged_parents_list=[{1: {1, 2, 3}}, {2: {1, 2, 3}}],
            expected_access=True,
        ),
        # child allows, 1 parent allows, 1 parent denies
        AccessTestData(
            partition_groups={1: 1, 2: 2},
            xblock_access={1: [1]},
            merged_parents_list=[{1: {3}}, {1: {1}}],
            expected_access=True,
        ),
    )
    @ddt.unpack
    def test_merged_group_access(self, user_partition_groups, xblock_access, merged_parents_list, expected_access):
        # use the course as the block to test
        block = self.course
        # update block access
        if xblock_access is not None:
            block.group_access = xblock_access
            update_block(self.course)
        # convert merged_parents_list to _MergedGroupAccess objects
        for ind, merged_parent in enumerate(merged_parents_list):
            converted_object = _MergedGroupAccess([], block, [])
            converted_object._access = merged_parent
            merged_parents_list[ind] = converted_object
        merged_group_access = _MergedGroupAccess(self.user_partitions, block, merged_parents_list)
        # convert group_id to groups in user_partition_groups parameter
        for partition_id, group_id in user_partition_groups.items():
            user_partition_groups[partition_id] = self.groups[group_id - 1]
        assert merged_group_access.check_group_access(user_partition_groups) == expected_access

    @ddt.data(
        ([None], None),
        ([{1}, None], {1}),
        ([None, {1}], {1}),
        ([None, {1}, {1, 2}], {1}),
        ([None, {1, 2}, {1, 2}], {1, 2}),
        ([{1, 2, 3}, {1, 2}, None], {1, 2}),
        ([{1, 2}, {1, 2, 3, 4}, None], {1, 2}),
        ([{1}, {2}, None], set()),
        ([None, {1}, {2}, None], set()),
    )
    @ddt.unpack
    def test_intersection_method(self, input_value, expected_result):
        # None acts as "no restriction" and is ignored by the intersection.
        assert _MergedGroupAccess._intersection(*input_value) == expected_result
| agpl-3.0 |
LoHChina/nova | tools/db/schema_diff.py | 36 | 8433 | #!/usr/bin/env python
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility for diff'ing two versions of the DB schema.
Each release cycle the plan is to compact all of the migrations from that
release into a single file. This is a manual and, unfortunately, error-prone
process. To ensure that the schema doesn't change, this tool can be used to
diff the compacted DB schema to the original, uncompacted form.
The database is specified by providing a SQLAlchemy connection URL WITHOUT the
database-name portion (that will be filled in automatically with a temporary
database name).
The schema versions are specified by providing a git ref (a branch name or
commit hash) and a SQLAlchemy-Migrate version number:
Run like:
MYSQL:
./tools/db/schema_diff.py mysql+pymysql://root@localhost \
master:latest my_branch:82
POSTGRESQL:
./tools/db/schema_diff.py postgresql://localhost \
master:latest my_branch:82
DB2:
./tools/db/schema_diff.py ibm_db_sa://localhost \
master:latest my_branch:82
"""
from __future__ import print_function
import datetime
import glob
import os
import subprocess
import sys
from nova.i18n import _
# Dump
def dump_db(db_driver, db_name, db_url, migration_version, dump_filename):
    """Create a scratch database, migrate it, dump its schema, then drop it."""
    url = db_url if db_url.endswith('/') else db_url + '/'
    url += db_name
    db_driver.create(db_name)
    try:
        _migrate(url, migration_version)
        db_driver.dump(db_name, dump_filename)
    finally:
        # The scratch database is always removed, even on failure.
        db_driver.drop(db_name)
# Diff
def diff_files(filename1, filename2):
    """Show a unified diff of two files, colorized and paged when possible."""
    stages = ['diff -U 3 %(filename1)s %(filename2)s'
              % {'filename1': filename1, 'filename2': filename2}]
    # Use colordiff if available
    if subprocess.call(['which', 'colordiff']) == 0:
        stages.append('colordiff')
    stages.append('less -R')
    subprocess.check_call(' | '.join(stages), shell=True)
# Database
class Mysql(object):
    """Driver that creates/drops/dumps databases via the mysql CLI tools."""
    def create(self, name):
        subprocess.check_call(['mysqladmin', '-u', 'root', 'create', name])
    def drop(self, name):
        subprocess.check_call(['mysqladmin', '-f', '-u', 'root', 'drop', name])
    def dump(self, name, dump_filename):
        # Shell redirection writes the schema dump to dump_filename.
        subprocess.check_call(
            'mysqldump -u root %(name)s > %(dump_filename)s'
            % {'name': name, 'dump_filename': dump_filename},
            shell=True)
class Postgresql(object):
    """Driver that creates/drops/dumps databases via the postgres CLI tools."""
    def create(self, name):
        subprocess.check_call(['createdb', name])
    def drop(self, name):
        subprocess.check_call(['dropdb', name])
    def dump(self, name, dump_filename):
        # Shell redirection writes the schema dump to dump_filename.
        subprocess.check_call(
            'pg_dump %(name)s > %(dump_filename)s'
            % {'name': name, 'dump_filename': dump_filename},
            shell=True)
class Ibm_db_sa(object):
    """Driver for DB2; all commands run as the DB2 instance user."""
    @classmethod
    def db2cmd(cls, cmd):
        """Wraps a command to be run under the DB2 instance user."""
        subprocess.check_call('su - $(db2ilist) -c "%s"' % cmd, shell=True)
    def create(self, name):
        self.db2cmd('db2 \'create database %s\'' % name)
    def drop(self, name):
        self.db2cmd('db2 \'drop database %s\'' % name)
    def dump(self, name, dump_filename):
        self.db2cmd('db2look -d %(name)s -e -o %(dump_filename)s' %
                    {'name': name, 'dump_filename': dump_filename})
        # The output file gets dumped to the db2 instance user's home directory
        # so we have to copy it back to our current working directory.
        subprocess.check_call('cp /home/$(db2ilist)/%s ./' % dump_filename,
                              shell=True)
def _get_db_driver_class(db_url):
    """Map a SQLAlchemy URL to one of the driver classes defined above.

    The URL scheme may carry a DBAPI driver suffix (e.g. 'mysql+pymysql');
    it must be stripped before matching the class name, otherwise the
    module docstring's own mysql+pymysql example would be rejected because
    'mysql+pymysql'.capitalize() is 'Mysql+pymysql', not 'Mysql'.
    """
    scheme = db_url.split('://')[0]
    dialect = scheme.split('+')[0]
    try:
        return globals()[dialect.capitalize()]
    except KeyError:
        raise Exception(_("database %s not supported") % db_url)
# Migrate
# Path to the sqlalchemy-migrate repository, relative to the CWD the tool
# is run from (i.e. the nova source tree root).
MIGRATE_REPO = os.path.join(os.getcwd(), "nova/db/sqlalchemy/migrate_repo")

def _migrate(db_url, migration_version):
    """Version-control the fresh database, then upgrade it.

    ``migration_version`` may be the literal string 'latest' to upgrade all
    the way to the newest migration.
    """
    earliest_version = _migrate_get_earliest_version()
    # NOTE(sirp): sqlalchemy-migrate currently cannot handle the skipping of
    # migration numbers.
    _migrate_cmd(
        db_url, 'version_control', str(earliest_version - 1))
    upgrade_cmd = ['upgrade']
    if migration_version != 'latest':
        upgrade_cmd.append(str(migration_version))
    _migrate_cmd(db_url, *upgrade_cmd)
def _migrate_cmd(db_url, *cmd):
    """Run a sqlalchemy-migrate manage.py command against *db_url*."""
    manage_py = os.path.join(MIGRATE_REPO, 'manage.py')
    args = ['python', manage_py] + list(cmd)
    args.append('--repository=%s' % MIGRATE_REPO)
    args.append('--url=%s' % db_url)
    subprocess.check_call(args)
def _migrate_get_earliest_version():
    """Return the lowest migration number in the repo's versions/ directory.

    Migration files are named ``NNN_description.py``; filenames whose
    numeric prefix does not parse are skipped.
    """
    versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py')
    versions = []
    for path in glob.iglob(versions_glob):
        filename = os.path.basename(path)
        prefix = filename.split('_', 1)[0]
        try:
            version = int(prefix)
        except ValueError:
            # BUG FIX: the original `pass` fell through and appended the
            # stale `version` from the previous iteration (or raised
            # NameError on the first file); skip unparsable names instead.
            continue
        versions.append(version)
    versions.sort()
    return versions[0]
# Git
def git_current_branch_name():
    """Return the name of the branch HEAD currently points at."""
    ref_name = git_symbolic_ref('HEAD', quiet=True)
    # Strip the refs/heads/ prefix to leave the bare branch name.
    return ref_name.replace('refs/heads/', '')
def git_symbolic_ref(ref, quiet=False):
    """Resolve a symbolic ref (e.g. 'HEAD') to the ref name it points at.

    With ``quiet=True`` git suppresses its error output for non-symbolic
    refs. Returns the ref name as a (stripped) text string.
    """
    args = ['git', 'symbolic-ref', ref]
    if quiet:
        args.append('-q')
    # universal_newlines=True makes stdout a str on Python 3 as well;
    # without it the caller's str.replace() on the result would fail.
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            universal_newlines=True)
    stdout, stderr = proc.communicate()
    return stdout.strip()
def git_checkout(branch_name):
    """Switch the working tree to *branch_name* (raises on failure)."""
    subprocess.check_call(['git', 'checkout', branch_name])
def git_has_uncommited_changes():
    # `git diff --quiet --exit-code` exits 1 when the working tree differs
    # from the index; any other status means "clean" (or a git error).
    return subprocess.call(['git', 'diff', '--quiet', '--exit-code']) == 1
# Command
def die(msg):
    """Write *msg* to stderr and exit with a failure status."""
    sys.stderr.write("ERROR: %s\n" % msg)
    sys.exit(1)
def usage(msg=None):
    """Write an optional error plus the usage line to stderr, then exit(1)."""
    if msg:
        sys.stderr.write("ERROR: %s\n" % msg)
    arg_spec = ' '.join(["<db-url>", "<orig-branch:orig-version>",
                         "<new-branch:new-version>"])
    sys.stderr.write("usage: %s %s\n" % ("schema_diff.py", arg_spec))
    sys.exit(1)
def parse_options():
    """Parse CLI args: db_url, orig-branch:version, new-branch:version.

    Exits via usage() when an argument is missing or malformed.
    """
    try:
        db_url = sys.argv[1]
    except IndexError:
        usage("must specify DB connection url")
    # A branch spec without a ':' makes split() raise ValueError (not
    # IndexError), which the original code failed to catch -- the user got
    # a traceback instead of the usage message. Treat both the same.
    try:
        orig_branch, orig_version = sys.argv[2].split(':')
    except (IndexError, ValueError):
        usage('original branch and version required (e.g. master:82)')
    try:
        new_branch, new_version = sys.argv[3].split(':')
    except (IndexError, ValueError):
        usage('new branch and version required (e.g. master:82)')
    return db_url, orig_branch, orig_version, new_branch, new_version
def main():
    """Dump the schema at two (branch, version) points and diff the dumps.

    Switches git branches, so the working tree must be clean; the user's
    original branch is always restored and the dump files removed.
    """
    timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S")
    ORIG_DB = 'orig_db_%s' % timestamp
    NEW_DB = 'new_db_%s' % timestamp
    ORIG_DUMP = ORIG_DB + ".dump"
    NEW_DUMP = NEW_DB + ".dump"
    options = parse_options()
    db_url, orig_branch, orig_version, new_branch, new_version = options
    # Since we're going to be switching branches, ensure user doesn't have any
    # uncommited changes
    if git_has_uncommited_changes():
        die("You have uncommited changes. Please commit them before running "
            "this command.")
    db_driver = _get_db_driver_class(db_url)()
    users_branch = git_current_branch_name()
    git_checkout(orig_branch)
    try:
        # Dump Original Schema
        dump_db(db_driver, ORIG_DB, db_url, orig_version, ORIG_DUMP)
        # Dump New Schema
        git_checkout(new_branch)
        dump_db(db_driver, NEW_DB, db_url, new_version, NEW_DUMP)
        diff_files(ORIG_DUMP, NEW_DUMP)
    finally:
        # Always restore the user's branch and clean up the dump files.
        git_checkout(users_branch)
        if os.path.exists(ORIG_DUMP):
            os.unlink(ORIG_DUMP)
        if os.path.exists(NEW_DUMP):
            os.unlink(NEW_DUMP)
| apache-2.0 |
Baloc/TouSIX-Manager | tousix_manager/Member_Manager/update/UpdateMixin.py | 2 | 2449 | # Copyright 2015 Rémy Lapeyrade <remy at lapeyrade dot net>
# Copyright 2015 LAAS-CNRS
#
#
# This file is part of TouSIX-Manager.
#
# TouSIX-Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TouSIX-Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TouSIX-Manager. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.forms import PasswordChangeForm
from django.core.urlresolvers import reverse
from tousix_manager.Database.models import Hote, UserMembre
class UpdateUrlMixin(object):
    """Mixin for the member-update views: shared success URL plus assembly
    of the sibling section forms shown on the combined update page."""

    def get_success_url(self):
        # Every section form redirects back to the same combined update view.
        return reverse("update member")

    def create_context_data(self, context, **kwargs):
        """Add one bound form per page section to *context*, skipping the
        section already rendered by the current view's ``form_class``.

        NOTE(review): MemberForm, TechnicalForm, NOCForm, BillingForm and
        RouterForm are referenced here but never imported in this module --
        presumably they live in a local forms module; confirm, otherwise
        this method raises NameError at runtime.
        """
        if self.form_class != MemberForm:
            context["member"] = MemberForm(instance=self.get_membre(), prefix="member")
        if self.form_class != TechnicalForm:
            context["technical"] = TechnicalForm(instance=self.get_membre().technical, prefix="technical")
            # `empty` tells the template whether the section has saved data.
            if context["technical"].instance.pk is not None:
                context["technical"].empty = False
        if self.form_class != NOCForm:
            context["noc"] = NOCForm(instance=self.get_membre().noc, prefix="noc")
            if context["noc"].instance.pk is not None:
                context["noc"].empty = False
        if self.form_class != BillingForm:
            context["billing"] = BillingForm(instance=self.get_membre().billing, prefix="billing")
            if context["billing"].instance.pk is not None:
                context["billing"].empty = False
        if self.form_class != RouterForm:
            context["router"] = RouterForm(instance=Hote.objects.filter(idmembre=self.get_membre().idmembre).first(), prefix="router")
        if self.form_class != PasswordChangeForm:
            context["password"] = PasswordChangeForm(self.request.user, prefix="password")
        return context

    def get_membre(self):
        # Resolve the Membre record linked to the logged-in user.
        return UserMembre.objects.filter(user=self.request.user).first().membre
| gpl-3.0 |
IronLanguages/ironpython3 | Tests/test_bool.py | 1 | 4185 | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import unittest
from iptest import is_cli, long, run_test
class BoolTest(unittest.TestCase):
    """Tests for bool semantics (truthiness, dir contents, conversions)."""
    def test_types(self):
        # Type objects themselves are always truthy.
        for x in [str, int, long, float, bool]:
            if not x:
                # NOTE(review): TestCase.fail() takes a single message
                # argument; this two-arg call would raise TypeError if the
                # (never-taken) branch were ever reached.
                self.fail("should be true: %r", x)
    def test_bool_dir(self):
        # The numeric dunder protocol bool inherits from int must be present.
        bool_dir = ['__abs__', '__add__', '__and__', '__class__',
                    '__eq__', '__ne__', '__gt__', '__ge__', '__le__', '__lt__',
                    '__delattr__', '__divmod__', '__doc__',
                    '__float__', '__floordiv__', '__getattribute__', '__getnewargs__',
                    '__hash__', '__index__', '__init__', '__int__',
                    '__invert__', '__lshift__', '__mod__', '__mul__',
                    '__neg__', '__new__', '__bool__', '__or__', '__pos__',
                    '__pow__', '__radd__', '__rand__', '__rdivmod__', '__reduce__',
                    '__reduce_ex__', '__repr__', '__rfloordiv__', '__rlshift__', '__rmod__',
                    '__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__',
                    '__rsub__', '__rtruediv__', '__rxor__', '__setattr__', '__str__',
                    '__sub__', '__truediv__', '__xor__']
        for t_list in [dir(bool), dir(True), dir(False)]:
            for stuff in bool_dir:
                self.assertTrue(stuff in t_list, "%s should be in dir(bool), but is not" % (stuff))
    def test__float__(self):
        self.assertEqual(float(True), 1.0)
        self.assertEqual(float(False), 0.0)
    def test__index__(self):
        self.assertEqual(True.__index__(), 1)
        self.assertEqual(False.__index__(), 0)
    def test__long__(self):
        self.assertEqual(long(True), long(1))
        self.assertEqual(long(False), long(0))
    def test__rdivmod__(self):
        self.assertEqual(divmod(True, True), (1, 0))
        self.assertEqual(divmod(False, True), (0, 0))
        self.assertRaises(ZeroDivisionError, divmod, True, False)
        self.assertRaises(ZeroDivisionError, divmod, False, False)
    @unittest.skipUnless(is_cli, 'IronPython specific test')
    def test_decimal(self):
        # System.Decimal interop: truthiness and bool -> Decimal conversion.
        import System
        if not System.Decimal:
            # NOTE(review): `Fail` is undefined (should presumably be
            # self.fail); this dead branch would raise NameError if reached.
            Fail("should be true: %r", System.Decimal)
        self.assertEqual(bool(System.Decimal(0)), False)
        self.assertEqual(bool(System.Decimal(1)), True)
        self.assertEqual(System.Decimal(True), System.Decimal(1))
        self.assertEqual(System.Decimal(False), System.Decimal(0))
    def test__bool__(self):
        # Truthiness protocol: __bool__ takes precedence, then __len__
        # (whose result is validated: non-negative, int-convertible via
        # __index__, and within platform size limits).
        class ClassWithBool:
            def __init__(self, val):
                self.val = val
            def __bool__(self):
                return self.val
        class ClassWithLen:
            def __init__(self, val):
                self.val = val
            def __len__(self):
                return self.val
        class MyIndex:
            def __init__(self, val):
                self.val = val
            def __index__(self):
                return self.val
        class MyLong(long): pass
        bool_cases = [
            (True, True), (False, False), (MyIndex(0), TypeError),
        ]
        len_cases = [
            (1, True), (0, False), (0.0, TypeError), (-1, ValueError), (1<<64, OverflowError),
        ]
        cases = []
        cases += [(ClassWithBool(x), y) for x, y in bool_cases]
        cases += [(ClassWithLen(x), y) for x, y in len_cases]
        cases += [(ClassWithLen(long(x)), y) for x, y in len_cases if isinstance(x, int)]
        cases += [(ClassWithLen(MyLong(x)), y) for x, y in len_cases if isinstance(x, int)]
        cases += [(ClassWithLen(MyIndex(x)), y) for x, y in len_cases]
        for val, res in cases:
            if type(res) == type:
                # Expected an exception type: both bool() and `not` raise it.
                with self.assertRaises(res):
                    bool(val)
                with self.assertRaises(res):
                    not val
            else:
                self.assertEqual(bool(val), res)
                self.assertEqual(not val, not res)

run_test(__name__)
| apache-2.0 |
huanpc/IoT-1 | gui/controller/.venv/lib/python3.5/site-packages/pip/_vendor/colorama/win32.py | 535 | 5365 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# Win32 standard-handle ids, from winbase.h.
STDOUT = -11
STDERR = -12

try:
    import ctypes
    from ctypes import LibraryLoader
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    # Not on Windows (no WinDLL / wintypes): export no-op stubs so the
    # rest of the package can import this module unconditionally.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
    winapi_test = lambda *_: None
else:
from ctypes import byref, Structure, c_char, POINTER
COORD = wintypes._COORD
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        # Field layout mirrors the Win32 CONSOLE_SCREEN_BUFFER_INFO struct;
        # order and ctypes types must match the C definition exactly.
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Compact one-line dump of every field, for debugging.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
_SetConsoleTitleW.argtypes = [
wintypes.LPCSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def winapi_test():
handle = handles[STDOUT]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return bool(success)
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position, adjust=True):
position = COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = COORD(position.Y - 1, position.X - 1)
if adjust:
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = handles[stream_id]
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
    def SetConsoleTitle(title):
        # NOTE(review): despite the W-suffixed alias, this is bound above to
        # SetConsoleTitleA with an LPCSTR argument, so `title` must be a
        # byte string and non-ASCII titles will likely be mangled — confirm
        # before relying on it for Unicode titles.
        return _SetConsoleTitleW(title)
| mit |
PourroyJean/performance_modelisation | script/data visualisation/venv/lib/python3.6/site-packages/pip/_vendor/colorama/win32.py | 535 | 5365 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# Win32 standard-handle ids, from winbase.h.
STDOUT = -11
STDERR = -12

try:
    import ctypes
    from ctypes import LibraryLoader
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    # Not on Windows (no WinDLL / wintypes): export no-op stubs so the
    # rest of the package can import this module unconditionally.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
    winapi_test = lambda *_: None
else:
from ctypes import byref, Structure, c_char, POINTER
COORD = wintypes._COORD
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        # Field layout mirrors the Win32 CONSOLE_SCREEN_BUFFER_INFO struct;
        # order and ctypes types must match the C definition exactly.
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Compact one-line dump of every field, for debugging.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
_SetConsoleTitleW.argtypes = [
wintypes.LPCSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def winapi_test():
handle = handles[STDOUT]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return bool(success)
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position, adjust=True):
position = COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = COORD(position.Y - 1, position.X - 1)
if adjust:
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = handles[stream_id]
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
    def SetConsoleTitle(title):
        # NOTE(review): despite the W-suffixed alias, this is bound above to
        # SetConsoleTitleA with an LPCSTR argument, so `title` must be a
        # byte string and non-ASCII titles will likely be mangled — confirm
        # before relying on it for Unicode titles.
        return _SetConsoleTitleW(title)
| gpl-3.0 |
jbuchbinder/youtube-dl | test/test_update.py | 53 | 1109 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import json
from youtube_dl.update import rsa_verify
class TestUpdate(unittest.TestCase):
    def test_rsa_verify(self):
        """Verify the bundled versions.json against the hard-coded update key."""
        UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
        json_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'versions.json')
        with open(json_path, 'rb') as f:
            versions_info = json.loads(f.read().decode())
        # The signature covers everything except the signature field itself.
        signature = versions_info.pop('signature')
        payload = json.dumps(versions_info, sort_keys=True).encode('utf-8')
        self.assertTrue(rsa_verify(payload, signature, UPDATES_RSA_KEY))
if __name__ == '__main__':
unittest.main()
| unlicense |
zcoinofficial/zcoin | src/tor/scripts/codegen/makedesc.py | 1 | 10850 | #!/usr/bin/python
# Copyright 2014-2019, The Tor Project, Inc.
# See LICENSE for license information
# This is a kludgey python script that uses ctypes and openssl to sign
# router descriptors and extrainfo documents and put all the keys in
# the right places. There are examples at the end of the file.
# I've used this to make inputs for unit tests. I wouldn't suggest
# using it for anything else.
import base64
import binascii
import ctypes
import ctypes.util
import hashlib
import optparse
import os
import re
import struct
import time
import UserDict
import slow_ed25519
import slownacl_curve25519
import ed25519_exts_ref
# Pull in the openssl stuff we need.
crypt = ctypes.CDLL(ctypes.util.find_library('crypto'))
BIO_s_mem = crypt.BIO_s_mem
BIO_s_mem.argtypes = []
BIO_s_mem.restype = ctypes.c_void_p
BIO_new = crypt.BIO_new
BIO_new.argtypes = [ctypes.c_void_p]
BIO_new.restype = ctypes.c_void_p
crypt.BIO_free.argtypes = [ctypes.c_void_p]
crypt.BIO_free.restype = ctypes.c_int
crypt.BIO_ctrl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_long, ctypes.c_void_p ]
crypt.BIO_ctrl.restype = ctypes.c_long
crypt.PEM_write_bio_RSAPublicKey.argtypes = [ ctypes.c_void_p, ctypes.c_void_p ]
crypt.PEM_write_bio_RSAPublicKey.restype = ctypes.c_int
RSA_generate_key = crypt.RSA_generate_key
RSA_generate_key.argtypes = [ctypes.c_int, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_void_p]
RSA_generate_key.restype = ctypes.c_void_p
RSA_private_encrypt = crypt.RSA_private_encrypt
RSA_private_encrypt.argtypes = [
ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int ]
RSA_private_encrypt.restype = ctypes.c_int
i2d_RSAPublicKey = crypt.i2d_RSAPublicKey
i2d_RSAPublicKey.argtypes = [
ctypes.c_void_p, ctypes.POINTER(ctypes.c_char_p)
]
i2d_RSAPublicKey.restype = ctypes.c_int
def rsa_sign(msg, rsa):
    # Sign `msg` with the given OpenSSL RSA key (PKCS#1 padding) and return
    # the raw signature bytes.
    out = ctypes.create_string_buffer(1024)
    siglen = RSA_private_encrypt(len(msg), msg, out, rsa, 1)
    if siglen <= 0:
        raise Exception()
    return out.raw[:siglen]
def b64(x):
    # Base64-encode `x` and wrap the output at 64 columns, with a trailing
    # newline on every line (PEM-style body formatting).
    encoded = base64.b64encode(x)
    return "".join(encoded[i:i+64] + "\n" for i in xrange(0, len(encoded), 64))
def bio_extract(bio):
    # Copy the contents of an OpenSSL memory BIO into a Python string.
    # cmd 3 is presumably BIO_CTRL_INFO (pointer + length) — matches the
    # original call; confirm against the OpenSSL headers if changing.
    data = ctypes.c_char_p()
    length = crypt.BIO_ctrl(bio, 3, 0, ctypes.byref(data))
    return ctypes.string_at(data, length)
def make_rsa_key(e=65537):
    """Generate a 1024-bit RSA key; return (key handle, public PEM, SHA1 digest
    of the DER-encoded public key)."""
    rsa = crypt.RSA_generate_key(1024, e, None, None)
    # Render the public key as PEM via a memory BIO.
    bio = BIO_new(BIO_s_mem())
    crypt.PEM_write_bio_RSAPublicKey(bio, rsa)
    pem = bio_extract(bio).rstrip()
    crypt.BIO_free(bio)
    # DER-encode the public key to compute its identity digest.
    buf = ctypes.create_string_buffer(1024)
    pBuf = ctypes.c_char_p(ctypes.addressof(buf))
    n = crypt.i2d_RSAPublicKey(rsa, ctypes.byref(pBuf))
    s = buf.raw[:n]
    digest = hashlib.sha1(s).digest()
    return (rsa,pem,digest)
def makeEdSigningKeyCert(sk_master, pk_master, pk_signing, date,
                         includeSigning=False, certType=1):
    """Build a binary Ed25519 signing-key certificate signed by the master key.

    `date` is an absolute time in seconds; it is stored as hours.  When
    `includeSigning` is true the master public key is embedded as an
    extension.  Returns the signed certificate bytes (body + 64-byte sig).
    """
    assert len(pk_signing) == len(pk_master) == 32
    # Expiration is encoded in hours as a big-endian 32-bit integer.
    expiration = struct.pack("!L", date//3600)
    if includeSigning:
        # One extension: type 0x04, length 0x0020, carrying the master key.
        extensions = "\x01\x00\x20\x04\x00%s"%(pk_master)
    else:
        extensions = "\x00"
    # version (1), cert type, expiration, key type (1), signing key, extensions.
    signed = "\x01%s%s\x01%s%s" % (
        chr(certType), expiration, pk_signing, extensions)
    signature = ed25519_exts_ref.signatureWithESK(signed, sk_master, pk_master)
    assert len(signature) == 64
    return signed+signature
def objwrap(identifier, body):
    # PEM-style wrapper: BEGIN line, raw body (caller supplies its trailing
    # newline), then an END line without a trailing newline.
    header = "-----BEGIN %s-----" % identifier
    footer = "-----END %s-----" % identifier
    return header + "\n" + body + footer
# Placeholder sentinels substituted into the document templates and later
# replaced with the real RSA / Ed25519 signatures by sign_desc().
MAGIC1 = "<<<<<<MAGIC>>>>>>"
MAGIC2 = "<<<<<!#!#!#XYZZY#!#!#!>>>>>"
class OnDemandKeys(object):
    """Lazily generates router keys, certs and cross-certs.

    Each property is created on first access so a template only pays for
    the material it actually references.
    """

    def __init__(self, certDate=None):
        # Default cert expiration: one day from now.
        if certDate is None:
            certDate = time.time() + 86400
        self.certDate = certDate
        self.rsa_id = None
        self.rsa_onion_key = None
        self.ed_id_sk = None
        self.ntor_sk = None
        self.ntor_crosscert = None
        self.rsa_crosscert_ed = None
        self.rsa_crosscert_noed = None

    @property
    def RSA_IDENTITY(self):
        """Public PEM of the RSA identity key (generated on first use)."""
        if self.rsa_id is None:
            self.rsa_id, self.rsa_ident_pem, self.rsa_id_digest = make_rsa_key()
        return self.rsa_ident_pem

    @property
    def RSA_ID_DIGEST(self):
        # Touch RSA_IDENTITY for its key-generation side effect.
        self.RSA_IDENTITY
        return self.rsa_id_digest

    @property
    def RSA_FINGERPRINT_NOSPACE(self):
        return binascii.b2a_hex(self.RSA_ID_DIGEST).upper()

    @property
    def RSA_ONION_KEY(self):
        if self.rsa_onion_key is None:
            self.rsa_onion_key, self.rsa_onion_pem, _ = make_rsa_key()
        return self.rsa_onion_pem

    @property
    def RSA_FINGERPRINT(self):
        # BUG FIX: previously read self.RSA_FINGERPRINT_NOSPACEK, a
        # nonexistent attribute (AttributeError on first use).
        hexdigest = self.RSA_FINGERPRINT_NOSPACE
        return " ".join(hexdigest[i:i+4] for i in range(0,len(hexdigest),4))

    @property
    def RSA_SIGNATURE(self):
        # Placeholder; replaced with the real signature by sign_desc().
        return MAGIC1

    @property
    def ED_SIGNATURE(self):
        # Placeholder; replaced with the real signature by sign_desc().
        return MAGIC2

    @property
    def NTOR_ONION_KEY(self):
        if self.ntor_sk is None:
            self.ntor_sk = slownacl_curve25519.Private()
            self.ntor_pk = self.ntor_sk.get_public()
        return base64.b64encode(self.ntor_pk.serialize())

    @property
    def ED_CERT(self):
        """Ed25519 signing-key cert (type 4), master key embedded."""
        if self.ed_id_sk is None:
            self.ed_id_sk = ed25519_exts_ref.expandSK(os.urandom(32))
            self.ed_signing_sk = ed25519_exts_ref.expandSK(os.urandom(32))
            self.ed_id_pk = ed25519_exts_ref.publickeyFromESK(self.ed_id_sk)
            self.ed_signing_pk = ed25519_exts_ref.publickeyFromESK(self.ed_signing_sk)
            self.ed_cert = makeEdSigningKeyCert(self.ed_id_sk, self.ed_id_pk, self.ed_signing_pk, self.certDate, includeSigning=True, certType=4)
        return objwrap('ED25519 CERT', b64(self.ed_cert))

    @property
    def NTOR_CROSSCERT(self):
        """Cross-cert (type 10) binding the ntor key to the Ed25519 identity."""
        if self.ntor_crosscert is None:
            self.ED_CERT
            self.NTOR_ONION_KEY

            ed_privkey = self.ntor_sk.serialize() + os.urandom(32)
            ed_pub0 = ed25519_exts_ref.publickeyFromESK(ed_privkey)
            sign = (ord(ed_pub0[31]) & 255) >> 7

            self.ntor_crosscert = makeEdSigningKeyCert(self.ntor_sk.serialize() + os.urandom(32), ed_pub0, self.ed_id_pk, self.certDate, certType=10)
            self.ntor_crosscert_sign = sign
        return objwrap('ED25519 CERT', b64(self.ntor_crosscert))

    @property
    def NTOR_CROSSCERT_SIGN(self):
        # Touch NTOR_CROSSCERT for its side effect of computing the sign bit.
        self.NTOR_CROSSCERT
        return self.ntor_crosscert_sign

    @property
    def RSA_CROSSCERT_NOED(self):
        """Onion-key signature over the RSA identity digest only."""
        if self.rsa_crosscert_noed is None:
            self.RSA_ONION_KEY
            signed = self.RSA_ID_DIGEST
            self.rsa_crosscert_noed = rsa_sign(signed, self.rsa_onion_key)
        return objwrap("CROSSCERT",b64(self.rsa_crosscert_noed))

    @property
    def RSA_CROSSCERT_ED(self):
        """Onion-key signature over the RSA digest plus the Ed25519 identity."""
        if self.rsa_crosscert_ed is None:
            self.RSA_ONION_KEY
            self.ED_CERT
            signed = self.RSA_ID_DIGEST + self.ed_id_pk
            self.rsa_crosscert_ed = rsa_sign(signed, self.rsa_onion_key)
        return objwrap("CROSSCERT",b64(self.rsa_crosscert_ed))

    def sign_desc(self, body):
        """Replace the MAGIC placeholders in *body* with real signatures.

        The Ed25519 signature (if requested) must be computed first, since
        the RSA signature covers the document including it.
        """
        idx = body.rfind("\nrouter-sig-ed25519 ")
        if idx >= 0:
            self.ED_CERT
            signed_part = body[:idx+len("\nrouter-sig-ed25519 ")]
            signed_part = "Tor router descriptor signature v1" + signed_part
            digest = hashlib.sha256(signed_part).digest()
            ed_sig = ed25519_exts_ref.signatureWithESK(digest,
                                      self.ed_signing_sk, self.ed_signing_pk)

            body = body.replace(MAGIC2, base64.b64encode(ed_sig).replace("=",""))

        idx = body.rindex("\nrouter-signature")
        end_of_sig = body.index("\n", idx+1)
        signed_part = body[:end_of_sig+1]

        digest = hashlib.sha1(signed_part).digest()
        assert len(digest) == 20

        rsasig = rsa_sign(digest, self.rsa_id)

        body = body.replace(MAGIC1, objwrap("SIGNATURE", b64(rsasig)))

        return body
def signdesc(body, args_out=None):
    """Fill a descriptor template with fresh keys and RSA-sign it.

    NOTE(review): this legacy helper appears superseded by OnDemandKeys /
    sign_desc; it previously called the nonexistent make_key() and omitted
    the required `date` argument to makeEdSigningKeyCert() (both fixed
    below).
    """
    # BUG FIX: make_key() was never defined; make_rsa_key() returns the
    # same (key handle, pem, digest) tuple.
    rsa, ident_pem, id_digest = make_rsa_key()
    _, onion_pem, _ = make_rsa_key()

    need_ed = '{ED25519-CERT}' in body or '{ED25519-SIGNATURE}' in body
    if need_ed:
        sk_master = os.urandom(32)
        sk_signing = os.urandom(32)
        pk_master = slow_ed25519.pubkey(sk_master)
        pk_signing = slow_ed25519.pubkey(sk_signing)

    hexdigest = binascii.b2a_hex(id_digest).upper()
    fingerprint = " ".join(hexdigest[i:i+4] for i in range(0,len(hexdigest),4))

    MAGIC = "<<<<<<MAGIC>>>>>>"
    MORE_MAGIC = "<<<<<!#!#!#XYZZY#!#!#!>>>>>"
    args = {
        "RSA-IDENTITY" : ident_pem,
        "ONION-KEY" : onion_pem,
        "FINGERPRINT" : fingerprint,
        "FINGERPRINT-NOSPACE" : hexdigest,
        "RSA-SIGNATURE" : MAGIC
    }
    if need_ed:
        # BUG FIX: makeEdSigningKeyCert requires an expiration date; use
        # one day from now, matching OnDemandKeys' default.
        args['ED25519-CERT'] = makeEdSigningKeyCert(
            sk_master, pk_master, pk_signing, time.time() + 86400)
        args['ED25519-SIGNATURE'] = MORE_MAGIC

    if args_out:
        args_out.update(args)
    body = body.format(**args)

    idx = body.rindex("\nrouter-signature")
    end_of_sig = body.index("\n", idx+1)
    signed_part = body[:end_of_sig+1]

    digest = hashlib.sha1(signed_part).digest()
    assert len(digest) == 20

    buf = ctypes.create_string_buffer(1024)
    n = RSA_private_encrypt(20, digest, buf, rsa, 1)
    sig = buf.raw[:n]

    sig = """-----BEGIN SIGNATURE-----
%s
-----END SIGNATURE-----""" % b64(sig).rstrip()
    body = body.replace(MAGIC, sig)

    return body.rstrip()
def print_c_string(ident, body):
print "static const char %s[] =" % ident
for line in body.split("\n"):
print ' "%s\\n"' %(line)
print " ;"
def emit_ri(name, body):
    # Render a router-descriptor template, sign it, and dump it as a C literal.
    keys = OnDemandKeys()
    signed = keys.sign_desc(body.format(d=keys))
    print_c_string("EX_RI_%s" % name.upper(), signed)
def emit_ei(name, body):
info = OnDemandKeys()
body = body.format(d=info)
body = info.sign_desc(body)
print_c_string("EX_EI_%s"%name.upper(), body)
print 'const char EX_EI_{NAME}_FP[] = "{d.RSA_FINGERPRINT_NOSPACE}";'.format(
d=info, NAME=name.upper())
print_c_string("EX_EI_%s_KEY"%name.upper(), info.RSA_IDENTITY)
def analyze(s):
    """Split leading ':::key=value' header lines off *s*.

    Returns (fields dict, remaining text); raises ValueError on a
    malformed header line.
    """
    fields = {}
    while s.startswith(":::"):
        line, s = s.split("\n", 1)
        m = re.match(r'^:::(\w+)=(.*)', line)
        if m is None:
            raise ValueError(line)
        fields[m.group(1)] = m.group(2)
    return fields, s
def process_file(s):
    # Dispatch a template file to the right emitter based on its ':::' header.
    fields, body = analyze(s)
    try:
        name = fields['name']
        tp = fields['type']
    except KeyError:
        raise ValueError("missing required field")
    if tp == 'ri':
        emit_ri(name, body)
    elif tp == 'ei':
        emit_ei(name, body)
    else:
        raise ValueError("unrecognized type")
if __name__ == '__main__':
    import sys
    # Process each template file named on the command line.
    for fn in sys.argv[1:]:
        # Use a context manager so the file handle is closed deterministically
        # (previously leaked an open() with no close()).
        with open(fn) as f:
            process_file(f.read())
| mit |
zackdever/kafka-python | test/test_consumer_group.py | 1 | 4801 | import collections
import logging
import threading
import time
import pytest
import six
from kafka import SimpleClient
from kafka.conn import ConnectionStates
from kafka.consumer.group import KafkaConsumer
from kafka.structs import TopicPartition
from test.conftest import version
from test.testutil import random_string
def get_connect_str(kafka_broker):
    """Return a 'host:port' bootstrap string for the local test broker."""
    return 'localhost:{}'.format(kafka_broker.port)
@pytest.fixture
def simple_client(kafka_broker):
    # A SimpleClient wired to the local test broker.
    return SimpleClient(get_connect_str(kafka_broker))
@pytest.fixture
def topic(simple_client):
    # Create a fresh random 5-char topic and return its name.
    topic = random_string(5)
    simple_client.ensure_topic_exists(topic)
    return topic
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_consumer(kafka_broker, version):
    # 0.8.2 brokers need a topic to function well
    # NOTE(review): calling the `topic`/`simple_client` fixture functions
    # directly is rejected by modern pytest; confirm the pinned pytest
    # version still permits this.
    if version >= (0, 8, 2) and version < (0, 9):
        topic(simple_client(kafka_broker))
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    consumer.poll(500)
    # Polling should have opened at least one broker connection, and the
    # first one should be fully connected.
    assert len(consumer._client._conns) > 0
    node_id = list(consumer._client._conns.keys())[0]
    assert consumer._client._conns[node_id].state is ConnectionStates.CONNECTED
@pytest.mark.skipif(version() < (0, 9), reason='Unsupported Kafka Version')
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_group(kafka_broker, topic):
    """Group rebalance test: four consumers must converge on one generation
    with a disjoint partition assignment that covers the whole topic."""
    num_partitions = 4
    connect_str = get_connect_str(kafka_broker)
    consumers = {}
    stop = {}
    threads = {}
    # BUG FIX: messages is indexed as messages[i][tp], so each consumer's
    # entry must itself be a defaultdict(list), not a bare list.
    messages = collections.defaultdict(lambda: collections.defaultdict(list))

    def consumer_thread(i):
        assert i not in consumers
        assert i not in stop
        stop[i] = threading.Event()
        consumers[i] = KafkaConsumer(topic,
                                     bootstrap_servers=connect_str,
                                     heartbeat_interval_ms=500)
        while not stop[i].is_set():
            # BUG FIX: poll() returns {TopicPartition: [records]}; iterating
            # itervalues() and unpacking as (tp, records) was wrong.
            for tp, records in six.iteritems(consumers[i].poll(100)):
                messages[i][tp].extend(records)
        consumers[i].close()
        del consumers[i]
        del stop[i]

    num_consumers = 4
    for i in range(num_consumers):
        t = threading.Thread(target=consumer_thread, args=(i,))
        t.start()
        threads[i] = t

    try:
        timeout = time.time() + 35
        while True:
            for c in range(num_consumers):
                # Verify all consumers have been created
                if c not in consumers:
                    break
                # Verify all consumers have an assignment
                elif not consumers[c].assignment():
                    break
            # Verify all consumers are in the same generation
            generations = set()
            for consumer in six.itervalues(consumers):
                generations.add(consumer._coordinator.generation)
            if len(generations) != 1:
                break
            # If all checks passed, log state and break while loop
            else:
                for c in range(num_consumers):
                    logging.info("[%s] %s %s: %s", c,
                                 consumers[c]._coordinator.generation,
                                 consumers[c]._coordinator.member_id,
                                 consumers[c].assignment())
                break
            assert time.time() < timeout, "timeout waiting for assignments"

        # Assignments must be non-empty, pairwise disjoint, and cover the topic.
        group_assignment = set()
        for c in range(num_consumers):
            assert len(consumers[c].assignment()) != 0
            assert set.isdisjoint(consumers[c].assignment(), group_assignment)
            group_assignment.update(consumers[c].assignment())

        assert group_assignment == set([
            TopicPartition(topic, partition)
            for partition in range(num_partitions)])
    finally:
        # Always signal and join the worker threads, even on assertion failure.
        for c in range(num_consumers):
            stop[c].set()
            threads[c].join()
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_paused(kafka_broker, topic):
    # pause()/resume() bookkeeping: paused() must reflect exactly the
    # partitions paused since the last resume/unsubscribe.
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    topics = [TopicPartition(topic, 1)]
    consumer.assign(topics)
    assert set(topics) == consumer.assignment()
    assert set() == consumer.paused()

    consumer.pause(topics[0])
    assert set([topics[0]]) == consumer.paused()

    consumer.resume(topics[0])
    assert set() == consumer.paused()

    # Unsubscribing clears any pause state as well.
    consumer.unsubscribe()
    assert set() == consumer.paused()
def test_heartbeat_timeout(conn, mocker):
    # With time.time() pinned to 1234 and the heartbeat ttl forced to 0,
    # the consumer's next wakeup deadline is "now", i.e. 1234.
    mocker.patch('kafka.client_async.KafkaClient.check_version', return_value = '0.9')
    mocker.patch('time.time', return_value = 1234)
    consumer = KafkaConsumer('foobar')
    mocker.patch.object(consumer._coordinator.heartbeat, 'ttl', return_value = 0)
    assert consumer._next_timeout() == 1234
| apache-2.0 |
georgemarshall/django | django/contrib/auth/__init__.py | 20 | 7733 | import inspect
import re
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.middleware.csrf import rotate_token
from django.utils.crypto import constant_time_compare
from django.utils.module_loading import import_string
from .signals import user_logged_in, user_logged_out, user_login_failed
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
HASH_SESSION_KEY = '_auth_user_hash'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
    """Import the backend class at dotted *path* and return a new instance."""
    return import_string(path)()
def _get_backends(return_tuples=False):
    """Instantiate every configured authentication backend.

    Returns a list of backend instances, or of (instance, dotted_path)
    pairs when return_tuples is True.  Raises ImproperlyConfigured when
    AUTHENTICATION_BACKENDS is empty.
    """
    backends = []
    for path in settings.AUTHENTICATION_BACKENDS:
        instance = load_backend(path)
        backends.append((instance, path) if return_tuples else instance)
    if not backends:
        raise ImproperlyConfigured(
            'No authentication backends have been defined. Does '
            'AUTHENTICATION_BACKENDS contain anything?'
        )
    return backends
def get_backends():
    """Return instances of all configured authentication backends."""
    return _get_backends(return_tuples=False)
def _clean_credentials(credentials):
"""
Clean a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
def _get_user_session_key(request):
    """Return the primary key of the logged-in user, typed per the pk field."""
    # This value in the session is always serialized to a string, so we need
    # to convert it back to Python whenever we access it.
    return get_user_model()._meta.pk.to_python(request.session[SESSION_KEY])
def authenticate(request=None, **credentials):
    """
    If the given credentials are valid, return a User object.

    Tries each configured backend in order; returns None (implicitly) and
    fires user_login_failed when no backend accepts the credentials.
    """
    for backend, backend_path in _get_backends(return_tuples=True):
        try:
            # NOTE(review): inspect.getcallargs is deprecated in newer
            # Pythons; used here only to test argument compatibility.
            inspect.getcallargs(backend.authenticate, request, **credentials)
        except TypeError:
            # This backend doesn't accept these credentials as arguments. Try the next one.
            continue
        try:
            user = backend.authenticate(request, **credentials)
        except PermissionDenied:
            # This backend says to stop in our tracks - this user should not be allowed in at all.
            break
        if user is None:
            continue
        # Annotate the user object with the path of the backend.
        user.backend = backend_path
        return user

    # The credentials supplied are invalid to all backends, fire signal
    user_login_failed.send(sender=__name__, credentials=_clean_credentials(credentials), request=request)
def login(request, user, backend=None):
    """
    Persist a user id and a backend in the request. This way a user doesn't
    have to reauthenticate on every request. Note that data set during
    the anonymous session is retained when the user logs in.

    Raises ValueError when `backend` cannot be determined unambiguously,
    TypeError when it is not a dotted-path string.
    """
    session_auth_hash = ''
    if user is None:
        user = request.user
    if hasattr(user, 'get_session_auth_hash'):
        session_auth_hash = user.get_session_auth_hash()

    if SESSION_KEY in request.session:
        if _get_user_session_key(request) != user.pk or (
                session_auth_hash and
                not constant_time_compare(request.session.get(HASH_SESSION_KEY, ''), session_auth_hash)):
            # To avoid reusing another user's session, create a new, empty
            # session if the existing session corresponds to a different
            # authenticated user.
            request.session.flush()
    else:
        # Same anonymous session data, but a fresh session key (fixation defense).
        request.session.cycle_key()

    try:
        backend = backend or user.backend
    except AttributeError:
        # No explicit backend and none annotated on the user: only valid
        # when exactly one backend is configured.
        backends = _get_backends(return_tuples=True)
        if len(backends) == 1:
            _, backend = backends[0]
        else:
            raise ValueError(
                'You have multiple authentication backends configured and '
                'therefore must provide the `backend` argument or set the '
                '`backend` attribute on the user.'
            )
    else:
        if not isinstance(backend, str):
            raise TypeError('backend must be a dotted import path string (got %r).' % backend)

    request.session[SESSION_KEY] = user._meta.pk.value_to_string(user)
    request.session[BACKEND_SESSION_KEY] = backend
    request.session[HASH_SESSION_KEY] = session_auth_hash
    if hasattr(request, 'user'):
        request.user = user
    # New CSRF token on privilege change.
    rotate_token(request)
    user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
    """
    Remove the authenticated user's ID from the request and flush their session
    data.
    """
    # Dispatch the signal before the user is logged out so the receivers have a
    # chance to find out *who* logged out.
    user = getattr(request, 'user', None)
    if not getattr(user, 'is_authenticated', True):
        user = None
    user_logged_out.send(sender=user.__class__, request=request, user=user)
    request.session.flush()
    if hasattr(request, 'user'):
        # Local import avoids a circular dependency at module load time.
        from django.contrib.auth.models import AnonymousUser
        request.user = AnonymousUser()
def get_user_model():
    """
    Return the User model that is active in this project.
    """
    model_path = settings.AUTH_USER_MODEL
    try:
        return django_apps.get_model(model_path, require_ready=False)
    except ValueError:
        # Not in 'app_label.model_name' form.
        raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        raise ImproperlyConfigured(
            "AUTH_USER_MODEL refers to model '%s' that has not been installed" % model_path
        )
def get_user(request):
    """
    Return the user model instance associated with the given request session.
    If no user is retrieved, return an instance of `AnonymousUser`.
    """
    # Local import avoids a circular dependency at module load time.
    from .models import AnonymousUser
    user = None
    try:
        user_id = _get_user_session_key(request)
        backend_path = request.session[BACKEND_SESSION_KEY]
    except KeyError:
        pass
    else:
        # Only trust backends that are still configured.
        if backend_path in settings.AUTHENTICATION_BACKENDS:
            backend = load_backend(backend_path)
            user = backend.get_user(user_id)
            # Verify the session
            if hasattr(user, 'get_session_auth_hash'):
                session_hash = request.session.get(HASH_SESSION_KEY)
                session_hash_verified = session_hash and constant_time_compare(
                    session_hash,
                    user.get_session_auth_hash()
                )
                if not session_hash_verified:
                    # Stale hash (e.g. password changed): invalidate the session.
                    request.session.flush()
                    user = None

    return user or AnonymousUser()
def get_permission_codename(action, opts):
    """
    Return the codename of the permission for the specified action.
    """
    return '{}_{}'.format(action, opts.model_name)
def update_session_auth_hash(request, user):
    """
    Updating a user's password logs out all sessions for the user.

    Take the current request and the updated user object from which the new
    session hash will be derived and update the session hash appropriately to
    prevent a password change from logging out the session from which the
    password was changed.
    """
    # Fresh session key, then store the hash derived from the new password.
    request.session.cycle_key()
    if hasattr(user, 'get_session_auth_hash') and request.user == user:
        request.session[HASH_SESSION_KEY] = user.get_session_auth_hash()
default_app_config = 'django.contrib.auth.apps.AuthConfig'
| bsd-3-clause |
samabhi/pstHealth | venv/lib/python2.7/site-packages/PIL/McIdasImagePlugin.py | 40 | 1769 | #
# The Python Imaging Library.
# $Id$
#
# Basic McIdas support for PIL
#
# History:
# 1997-05-05 fl Created (8-bit images only)
# 2009-03-08 fl Added 16/32-bit support.
#
# Thanks to Richard Jones and Craig Swank for specs and samples.
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
import struct
from PIL import Image, ImageFile
__version__ = "0.2"  # module version string
def _accept(s):
return s[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04"
##
# Image plugin for McIdas area images.
class McIdasImageFile(ImageFile.ImageFile):
    """PIL image plugin for McIdas area files (see module header)."""
    format = "MCIDAS"
    format_description = "McIdas area file"
    def _open(self):
        # parse area file directory
        s = self.fp.read(256)
        if not _accept(s) or len(s) != 256:
            raise SyntaxError("not an McIdas area file")
        self.area_descriptor_raw = s
        # Prepend a dummy element so the 64 big-endian directory words can be
        # addressed with 1-based indices below.
        self.area_descriptor = w = [0] + list(struct.unpack("!64i", s))
        # get mode -- w[11] appears to be bytes per data element (1/2/4);
        # TODO confirm against the McIdas area-file spec.
        if w[11] == 1:
            mode = rawmode = "L"
        elif w[11] == 2:
            # FIXME: add memory map support
            mode = "I"
            rawmode = "I;16B"
        elif w[11] == 4:
            # FIXME: add memory map support
            mode = "I"
            rawmode = "I;32B"
        else:
            raise SyntaxError("unsupported McIdas format")
        self.mode = mode
        # Size is (width, height) taken from directory words 10 and 9.
        self.size = w[10], w[9]
        # Pixel data location: w[34] + w[15] gives the file offset of the
        # first row; each row is a prefix of w[15] bytes plus the raw pixels.
        offset = w[34] + w[15]
        stride = w[15] + w[10]*w[11]*w[14]
        self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))]
# --------------------------------------------------------------------
# registry
# Register the opener with PIL so Image.open() can recognise McIdas files
# cheaply through the _accept() magic check.
Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept)
# no default extension
| mit |
#!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable with C++ define specified by a gyp define using
various special characters such as quotes, commas, etc.
"""
import os
import TestGyp
# Driver object that knows how to run gyp and build the result for the
# generator format under test.
test = TestGyp.TestGyp()
# Tests string literals, percents, and backslash escapes.
try:
  os.environ['GYP_DEFINES'] = (
      r"""test_format='\n%s\n' """
      r"""test_args='"Simple test of %s with a literal"'""")
  test.run_gyp('defines-escaping.gyp')
finally:
  del os.environ['GYP_DEFINES']
test.build('defines-escaping.gyp')
expect = """
Simple test of %s with a literal
"""
test.run_built_executable('defines_escaping', stdout=expect)
# Test multiple comma-and-space-separated string literals.
try:
  os.environ['GYP_DEFINES'] = \
      r"""test_format='\n%s and %s\n' test_args='"foo", "bar"'"""
  test.run_gyp('defines-escaping.gyp')
finally:
  del os.environ['GYP_DEFINES']
# Bump the source's mtime so the new define is actually recompiled in.
test.sleep()
test.touch('defines-escaping.c')
test.build('defines-escaping.gyp')
expect = """
foo and bar
"""
test.run_built_executable('defines_escaping', stdout=expect)
# Test string literals containing quotes.
try:
  os.environ['GYP_DEFINES'] = (
      r"""test_format='\n%s %s %s %s %s\n' """
      r"""test_args='"\"These,\"","""
      r""" "\"words,\"","""
      r""" "\"are,\"","""
      r""" "\"in,\"","""
      r""" "\"quotes.\""'""")
  test.run_gyp('defines-escaping.gyp')
finally:
  del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines-escaping.c')
test.build('defines-escaping.gyp')
expect = """
"These," "words," "are," "in," "quotes."
"""
test.run_built_executable('defines_escaping', stdout=expect)
# Test string literals containing single quotes.
try:
  os.environ['GYP_DEFINES'] = (
      r"""test_format='\n%s %s %s %s %s\n' """
      r"""test_args="\"'These,'\","""
      r""" \"'words,'\","""
      r""" \"'are,'\","""
      r""" \"'in,'\","""
      r""" \"'quotes.'\"" """)
  test.run_gyp('defines-escaping.gyp')
finally:
  del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines-escaping.c')
test.build('defines-escaping.gyp')
expect = """
'These,' 'words,' 'are,' 'in,' 'quotes.'
"""
test.run_built_executable('defines_escaping', stdout=expect)
# Test string literals containing different numbers of backslashes before quotes
# (to exercise Windows' quoting behaviour).
try:
  os.environ['GYP_DEFINES'] = (
      r"""test_format='\n%s\n%s\n%s\n' """
      r"""test_args='"\\\"1 visible slash\\\"","""
      r""" "\\\\\"2 visible slashes\\\\\"","""
      r""" "\\\\\\\"3 visible slashes\\\\\\\""'""")
  test.run_gyp('defines-escaping.gyp')
finally:
  del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines-escaping.c')
test.build('defines-escaping.gyp')
expect = r"""
\"1 visible slash\"
\\"2 visible slashes\\"
\\\"3 visible slashes\\\"
"""
test.run_built_executable('defines_escaping', stdout=expect)
# Test that various scary sequences are passed unfettered.
try:
  os.environ['GYP_DEFINES'] = (
      r"""test_format='\n%s\n' """
      r"""test_args='"$foo, " `foo`;"'""")
  test.run_gyp('defines-escaping.gyp')
finally:
  del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines-escaping.c')
test.build('defines-escaping.gyp')
expect = """
$foo, " `foo`;
"""
test.run_built_executable('defines_escaping', stdout=expect)
# VisualStudio 2010 can't handle passing %PATH%
if not (test.format == 'msvs' and test.uses_msbuild):
  try:
    os.environ['GYP_DEFINES'] = (
        """test_format='%s' """
        """test_args='"%PATH%"'""")
    test.run_gyp('defines-escaping.gyp')
  finally:
    del os.environ['GYP_DEFINES']
  test.sleep()
  test.touch('defines-escaping.c')
  test.build('defines-escaping.gyp')
  expect = "%PATH%"
  test.run_built_executable('defines_escaping', stdout=expect)
# Test commas and semi-colons preceded by backslashes (to exercise Windows'
# quoting behaviour).
try:
  os.environ['GYP_DEFINES'] = (
      r"""test_format='\n%s\n%s\n' """
      r"""test_args='"\\, \\\\;","""
      # Same thing again, but enclosed in visible quotes.
      r""" "\"\\, \\\\;\""'""")
  test.run_gyp('defines-escaping.gyp')
finally:
  del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines-escaping.c')
test.build('defines-escaping.gyp')
expect = r"""
\, \\;
"\, \\;"
"""
test.run_built_executable('defines_escaping', stdout=expect)
# We deliberately do not test having an odd number of quotes in a string
# literal because that isn't feasible in MSVS.
test.pass_test()
| bsd-3-clause |
"""Restricted execution facilities.
The class RExec exports methods r_exec(), r_eval(), r_execfile(), and
r_import(), which correspond roughly to the built-in operations
exec, eval(), execfile() and import, but executing the code in an
environment that only exposes those built-in operations that are
deemed safe. To this end, a modest collection of 'fake' modules is
created which mimics the standard modules by the same names. It is a
policy decision which built-in modules and operations are made
available; this module provides a reasonable default, but derived
classes can change the policies e.g. by overriding or extending class
variables like ok_builtin_modules or methods like make_sys().
XXX To do:
- r_open should allow writing tmp dir
- r_exec etc. with explicit globals/locals? (Use rexec("exec ... in ...")?)
"""
# rexec was removed in Python 3.0 (it was never actually secure); emit the
# py3k deprecation warning as soon as this module is imported.
from warnings import warnpy3k
warnpy3k("the rexec module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import sys
import __builtin__
import os
import ihooks
import imp
# Only RExec is part of the public API of this module.
__all__ = ["RExec"]
class FileBase:
    # Whitelist of file-object methods that restricted code is allowed to
    # call on wrapped/delegated file objects (shared by FileWrapper and
    # FileDelegate below).
    ok_file_methods = ('fileno', 'flush', 'isatty', 'read', 'readline',
            'readlines', 'seek', 'tell', 'write', 'writelines', 'xreadlines',
            '__iter__')
class FileWrapper(FileBase):
    """Bastion-style wrapper around a real file object.
    Only the bound methods whitelisted in FileBase.ok_file_methods are
    copied onto the wrapper, so restricted code never gets a reference to
    the underlying file itself.
    """
    # XXX This is just like a Bastion -- should use that!
    def __init__(self, f):
        for name in self.ok_file_methods:
            if hasattr(f, name) and not hasattr(self, name):
                setattr(self, name, getattr(f, name))
    def close(self):
        # Flush rather than close: the trusted caller still owns the
        # underlying file object.
        self.flush()
TEMPLATE = """
def %s(self, *args):
return getattr(self.mod, self.name).%s(*args)
"""
class FileDelegate(FileBase):
    # File-like stand-in that forwards every whitelisted method call to
    # getattr(self.mod, self.name) *at call time*, so reassigning e.g.
    # sys.stdout on the target module is picked up transparently.
    def __init__(self, mod, name):
        self.mod = mod
        self.name = name
    # Generate one forwarding method per whitelisted name (plus close) by
    # exec'ing TEMPLATE directly in the class namespace (Python 2 idiom).
    for m in FileBase.ok_file_methods + ('close',):
        exec TEMPLATE % (m, m)
class RHooks(ihooks.Hooks):
    """ihooks.Hooks subclass that routes the import-machinery callbacks
    back to an owning RExec instance (set via the constructor or
    set_rexec())."""
    def __init__(self, *args):
        # Hacks to support both old and new interfaces:
        # old interface was RHooks(rexec[, verbose])
        # new interface is RHooks([verbose])
        verbose = 0
        rexec = None
        if args and type(args[-1]) == type(0):
            verbose = args[-1]
            args = args[:-1]
        if args and hasattr(args[0], '__class__'):
            rexec = args[0]
            args = args[1:]
        if args:
            raise TypeError, "too many arguments"
        ihooks.Hooks.__init__(self, verbose)
        self.rexec = rexec
    def set_rexec(self, rexec):
        # Called by RExec instance to complete initialization
        self.rexec = rexec
    def get_suffixes(self):
        return self.rexec.get_suffixes()
    def is_builtin(self, name):
        return self.rexec.is_builtin(name)
    def init_builtin(self, name):
        # Import the real builtin, then hand back a sanitised copy.
        m = __import__(name)
        return self.rexec.copy_except(m, ())
    # Loading frozen/source/compiled modules and packages directly would
    # bypass the restricted importer, so these are hard errors.
    def init_frozen(self, name): raise SystemError, "don't use this"
    def load_source(self, *args): raise SystemError, "don't use this"
    def load_compiled(self, *args): raise SystemError, "don't use this"
    def load_package(self, *args): raise SystemError, "don't use this"
    def load_dynamic(self, name, filename, file):
        return self.rexec.load_dynamic(name, filename, file)
    def add_module(self, name):
        return self.rexec.add_module(name)
    def modules_dict(self):
        return self.rexec.modules
    def default_path(self):
        return self.rexec.modules['sys'].path
# XXX Backwards compatibility
# Old public names kept as plain aliases of their ihooks implementations.
RModuleLoader = ihooks.FancyModuleLoader
RModuleImporter = ihooks.ModuleImporter
class RExec(ihooks._Verbose):
    """Basic restricted execution framework.
    Code executed in this restricted environment will only have access to
    modules and functions that are deemed safe; you can subclass RExec to
    add or remove capabilities as desired.
    The RExec class can prevent code from performing unsafe operations like
    reading or writing disk files, or using TCP/IP sockets. However, it does
    not protect against code using extremely large amounts of memory or
    processor time.
    """
    # Policy knobs: subclasses override these tuples to widen or narrow
    # what restricted code may see.
    ok_path = tuple(sys.path) # That's a policy decision
    ok_builtin_modules = ('audioop', 'array', 'binascii',
                          'cmath', 'errno', 'imageop',
                          'marshal', 'math', 'md5', 'operator',
                          'parser', 'select',
                          'sha', '_sre', 'strop', 'struct', 'time',
                          '_weakref')
    ok_posix_names = ('error', 'fstat', 'listdir', 'lstat', 'readlink',
                      'stat', 'times', 'uname', 'getpid', 'getppid',
                      'getcwd', 'getuid', 'getgid', 'geteuid', 'getegid')
    ok_sys_names = ('byteorder', 'copyright', 'exit', 'getdefaultencoding',
                    'getrefcount', 'hexversion', 'maxint', 'maxunicode',
                    'platform', 'ps1', 'ps2', 'version', 'version_info')
    # Builtins explicitly removed from the restricted __builtin__ copy.
    nok_builtin_names = ('open', 'file', 'reload', '__import__')
    ok_file_types = (imp.C_EXTENSION, imp.PY_SOURCE)
    def __init__(self, hooks = None, verbose = 0):
        """Returns an instance of the RExec class.
        The hooks parameter is an instance of the RHooks class or a subclass
        of it. If it is omitted or None, the default RHooks class is
        instantiated.
        Whenever the RExec module searches for a module (even a built-in one)
        or reads a module's code, it doesn't actually go out to the file
        system itself. Rather, it calls methods of an RHooks instance that
        was passed to or created by its constructor. (Actually, the RExec
        object doesn't make these calls --- they are made by a module loader
        object that's part of the RExec object. This allows another level of
        flexibility, which can be useful when changing the mechanics of
        import within the restricted environment.)
        By providing an alternate RHooks object, we can control the file
        system accesses made to import a module, without changing the
        actual algorithm that controls the order in which those accesses are
        made. For instance, we could substitute an RHooks object that
        passes all filesystem requests to a file server elsewhere, via some
        RPC mechanism such as ILU. Grail's applet loader uses this to support
        importing applets from a URL for a directory.
        If the verbose parameter is true, additional debugging output may be
        sent to standard output.
        """
        # The guard below makes the class unusable on purpose; everything
        # after it is dead code kept for reference.
        raise RuntimeError, "This code is not secure in Python 2.2 and later"
        ihooks._Verbose.__init__(self, verbose)
        # XXX There's a circular reference here:
        self.hooks = hooks or RHooks(verbose)
        self.hooks.set_rexec(self)
        self.modules = {}
        self.ok_dynamic_modules = self.ok_builtin_modules
        list = []
        for mname in self.ok_builtin_modules:
            if mname in sys.builtin_module_names:
                list.append(mname)
        self.ok_builtin_modules = tuple(list)
        self.set_trusted_path()
        self.make_builtin()
        self.make_initial_modules()
        # make_sys must be last because it adds the already created
        # modules to its builtin_module_names
        self.make_sys()
        self.loader = RModuleLoader(self.hooks, verbose)
        self.importer = RModuleImporter(self.loader, verbose)
    def set_trusted_path(self):
        # Set the path from which dynamic modules may be loaded.
        # Those dynamic modules must also occur in ok_builtin_modules
        self.trusted_path = filter(os.path.isabs, sys.path)
    def load_dynamic(self, name, filename, file):
        # Load a C extension only if it is explicitly whitelisted, and hand
        # back a sanitised copy rather than the real module object.
        if name not in self.ok_dynamic_modules:
            raise ImportError, "untrusted dynamic module: %s" % name
        if name in sys.modules:
            src = sys.modules[name]
        else:
            src = imp.load_dynamic(name, filename, file)
        dst = self.copy_except(src, [])
        return dst
    def make_initial_modules(self):
        # Pre-populate the restricted environment with __main__ and the
        # sanitised os-like module.
        self.make_main()
        self.make_osname()
    # Helpers for RHooks
    def get_suffixes(self):
        # Only suffixes whose file type is in ok_file_types are importable.
        return [item # (suff, mode, type)
                for item in imp.get_suffixes()
                if item[2] in self.ok_file_types]
    def is_builtin(self, mname):
        return mname in self.ok_builtin_modules
    # The make_* methods create specific built-in modules
    def make_builtin(self):
        # Restricted __builtin__: drop the dangerous names, then rebind the
        # import/reload/open entry points to the r_* policy methods.
        m = self.copy_except(__builtin__, self.nok_builtin_names)
        m.__import__ = self.r_import
        m.reload = self.r_reload
        m.open = m.file = self.r_open
    def make_main(self):
        self.add_module('__main__')
    def make_osname(self):
        # Build a sanitised stand-in for the platform os module (e.g.
        # 'posix') containing only read-only names plus a *copy* of environ.
        osname = os.name
        src = __import__(osname)
        dst = self.copy_only(src, self.ok_posix_names)
        dst.environ = e = {}
        for key, value in os.environ.items():
            e[key] = value
    def make_sys(self):
        # Sanitised sys: whitelisted names only, plus a private modules dict,
        # argv, path copy, and the traceback-stripping exc_info.
        m = self.copy_only(sys, self.ok_sys_names)
        m.modules = self.modules
        m.argv = ['RESTRICTED']
        m.path = map(None, self.ok_path)
        m.exc_info = self.r_exc_info
        m = self.modules['sys']
        l = self.modules.keys() + list(self.ok_builtin_modules)
        l.sort()
        m.builtin_module_names = tuple(l)
    # The copy_* methods copy existing modules with some changes
    def copy_except(self, src, exceptions):
        # Copy every attribute of src, then strip the listed names.
        dst = self.copy_none(src)
        for name in dir(src):
            setattr(dst, name, getattr(src, name))
        for name in exceptions:
            try:
                delattr(dst, name)
            except AttributeError:
                pass
        return dst
    def copy_only(self, src, names):
        # Copy only the listed attributes of src (missing ones are skipped).
        dst = self.copy_none(src)
        for name in names:
            try:
                value = getattr(src, name)
            except AttributeError:
                continue
            setattr(dst, name, value)
        return dst
    def copy_none(self, src):
        # Create an empty restricted module with the same name and docstring.
        m = self.add_module(src.__name__)
        m.__doc__ = src.__doc__
        return m
    # Add a module -- return an existing module or create one
    def add_module(self, mname):
        m = self.modules.get(mname)
        if m is None:
            self.modules[mname] = m = self.hooks.new_module(mname)
        # Always rebind __builtins__ so the module sees the restricted set.
        m.__builtins__ = self.modules['__builtin__']
        return m
    # The r* methods are public interfaces
    def r_exec(self, code):
        """Execute code within a restricted environment.
        The code parameter must either be a string containing one or more
        lines of Python code, or a compiled code object, which will be
        executed in the restricted environment's __main__ module.
        """
        m = self.add_module('__main__')
        exec code in m.__dict__
    def r_eval(self, code):
        """Evaluate code within a restricted environment.
        The code parameter must either be a string containing a Python
        expression, or a compiled code object, which will be evaluated in
        the restricted environment's __main__ module. The value of the
        expression or code object will be returned.
        """
        m = self.add_module('__main__')
        return eval(code, m.__dict__)
    def r_execfile(self, file):
        """Execute the Python code in the file in the restricted
        environment's __main__ module.
        """
        m = self.add_module('__main__')
        execfile(file, m.__dict__)
    def r_import(self, mname, globals={}, locals={}, fromlist=[]):
        """Import a module, raising an ImportError exception if the module
        is considered unsafe.
        This method is implicitly called by code executing in the
        restricted environment. Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.
        """
        # NOTE(review): the mutable default args are shared across calls;
        # here they are only passed through to the importer.
        return self.importer.import_module(mname, globals, locals, fromlist)
    def r_reload(self, m):
        """Reload the module object, re-parsing and re-initializing it.
        This method is implicitly called by code executing in the
        restricted environment. Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.
        """
        return self.importer.reload(m)
    def r_unload(self, m):
        """Unload the module.
        Removes it from the restricted environment's sys.modules dictionary.
        This method is implicitly called by code executing in the
        restricted environment. Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.
        """
        return self.importer.unload(m)
    # The s_* methods are similar but also swap std{in,out,err}
    def make_delegate_files(self):
        # Delegates track the *restricted* sys module's streams; wrappers
        # expose the real streams to restricted code (whitelisted methods
        # only).
        s = self.modules['sys']
        self.delegate_stdin = FileDelegate(s, 'stdin')
        self.delegate_stdout = FileDelegate(s, 'stdout')
        self.delegate_stderr = FileDelegate(s, 'stderr')
        self.restricted_stdin = FileWrapper(sys.stdin)
        self.restricted_stdout = FileWrapper(sys.stdout)
        self.restricted_stderr = FileWrapper(sys.stderr)
    def set_files(self):
        # Swap the real and restricted standard streams into place.
        if not hasattr(self, 'save_stdin'):
            self.save_files()
        if not hasattr(self, 'delegate_stdin'):
            self.make_delegate_files()
        s = self.modules['sys']
        s.stdin = self.restricted_stdin
        s.stdout = self.restricted_stdout
        s.stderr = self.restricted_stderr
        sys.stdin = self.delegate_stdin
        sys.stdout = self.delegate_stdout
        sys.stderr = self.delegate_stderr
    def reset_files(self):
        # Restore the saved real streams and re-capture whatever the
        # restricted code left behind as the new restricted streams.
        self.restore_files()
        s = self.modules['sys']
        self.restricted_stdin = s.stdin
        self.restricted_stdout = s.stdout
        self.restricted_stderr = s.stderr
    def save_files(self):
        self.save_stdin = sys.stdin
        self.save_stdout = sys.stdout
        self.save_stderr = sys.stderr
    def restore_files(self):
        sys.stdin = self.save_stdin
        sys.stdout = self.save_stdout
        sys.stderr = self.save_stderr
    def s_apply(self, func, args=(), kw={}):
        # Run func with the restricted streams in place, always restoring
        # the real streams afterwards.
        self.save_files()
        try:
            self.set_files()
            r = func(*args, **kw)
        finally:
            self.restore_files()
        return r
    def s_exec(self, *args):
        """Execute code within a restricted environment.
        Similar to the r_exec() method, but the code will be granted access
        to restricted versions of the standard I/O streams sys.stdin,
        sys.stderr, and sys.stdout.
        The code parameter must either be a string containing one or more
        lines of Python code, or a compiled code object, which will be
        executed in the restricted environment's __main__ module.
        """
        return self.s_apply(self.r_exec, args)
    def s_eval(self, *args):
        """Evaluate code within a restricted environment.
        Similar to the r_eval() method, but the code will be granted access
        to restricted versions of the standard I/O streams sys.stdin,
        sys.stderr, and sys.stdout.
        The code parameter must either be a string containing a Python
        expression, or a compiled code object, which will be evaluated in
        the restricted environment's __main__ module. The value of the
        expression or code object will be returned.
        """
        return self.s_apply(self.r_eval, args)
    def s_execfile(self, *args):
        """Execute the Python code in the file in the restricted
        environment's __main__ module.
        Similar to the r_execfile() method, but the code will be granted
        access to restricted versions of the standard I/O streams sys.stdin,
        sys.stderr, and sys.stdout.
        """
        return self.s_apply(self.r_execfile, args)
    def s_import(self, *args):
        """Import a module, raising an ImportError exception if the module
        is considered unsafe.
        This method is implicitly called by code executing in the
        restricted environment. Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.
        Similar to the r_import() method, but has access to restricted
        versions of the standard I/O streams sys.stdin, sys.stderr, and
        sys.stdout.
        """
        return self.s_apply(self.r_import, args)
    def s_reload(self, *args):
        """Reload the module object, re-parsing and re-initializing it.
        This method is implicitly called by code executing in the
        restricted environment. Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.
        Similar to the r_reload() method, but has access to restricted
        versions of the standard I/O streams sys.stdin, sys.stderr, and
        sys.stdout.
        """
        return self.s_apply(self.r_reload, args)
    def s_unload(self, *args):
        """Unload the module.
        Removes it from the restricted environment's sys.modules dictionary.
        This method is implicitly called by code executing in the
        restricted environment. Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.
        Similar to the r_unload() method, but has access to restricted
        versions of the standard I/O streams sys.stdin, sys.stderr, and
        sys.stdout.
        """
        return self.s_apply(self.r_unload, args)
    # Restricted open(...)
    def r_open(self, file, mode='r', buf=-1):
        """Method called when open() is called in the restricted environment.
        The arguments are identical to those of the open() function, and a
        file object (or a class instance compatible with file objects)
        should be returned. RExec's default behaviour is allow opening
        any file for reading, but forbidding any attempt to write a file.
        This method is implicitly called by code executing in the
        restricted environment. Overriding this method in a subclass is
        used to change the policies enforced by a restricted environment.
        """
        mode = str(mode)
        if mode not in ('r', 'rb'):
            raise IOError, "can't open files for writing in restricted mode"
        return open(file, mode, buf)
    # Restricted version of sys.exc_info()
    def r_exc_info(self):
        # Strip the traceback so restricted code cannot walk frame objects.
        ty, va, tr = sys.exc_info()
        tr = None
        return ty, va, tr
def test():
    """Command-line driver: run a script (or an interactive console) inside
    a fresh RExec environment.
    Options: -v increases verbosity, -t NAME adds NAME to the trusted
    builtin modules. Returns the process exit status.
    """
    import getopt, traceback
    opts, args = getopt.getopt(sys.argv[1:], 'vt:')
    verbose = 0
    trusted = []
    for o, a in opts:
        if o == '-v':
            verbose = verbose+1
        if o == '-t':
            trusted.append(a)
    r = RExec(verbose=verbose)
    if trusted:
        r.ok_builtin_modules = r.ok_builtin_modules + tuple(trusted)
    if args:
        # Mirror normal interpreter behaviour: restricted argv/path reflect
        # the script being run.
        r.modules['sys'].argv = args
        r.modules['sys'].path.insert(0, os.path.dirname(args[0]))
    else:
        r.modules['sys'].path.insert(0, "")
    fp = sys.stdin
    if args and args[0] != '-':
        try:
            fp = open(args[0])
        except IOError, msg:
            print "%s: can't open file %r" % (sys.argv[0], args[0])
            return 1
    if fp.isatty():
        # Interactive: run a REPL whose code executes via r.s_apply so it
        # sees the restricted builtins and swapped std streams.
        try:
            import readline
        except ImportError:
            pass
        import code
        class RestrictedConsole(code.InteractiveConsole):
            def runcode(self, co):
                self.locals['__builtins__'] = r.modules['__builtin__']
                r.s_apply(code.InteractiveConsole.runcode, (self, co))
        try:
            RestrictedConsole(r.modules['__main__'].__dict__).interact()
        except SystemExit, n:
            return n
    else:
        # Non-interactive: compile and execute the whole input at once.
        text = fp.read()
        fp.close()
        c = compile(text, fp.name, 'exec')
        try:
            r.s_exec(c)
        except SystemExit, n:
            return n
        except:
            traceback.print_exc()
            return 1
if __name__ == '__main__':
    # Propagate the driver's status as the process exit code.
    sys.exit(test())
| gpl-3.0 |
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,xbmc,xbmcaddon,xbmcplugin,xbmcgui,xbmcvfs
# Short aliases for the Kodi (xbmc*) APIs used throughout this add-on.
lang = xbmcaddon.Addon().getLocalizedString
setting = xbmcaddon.Addon().getSetting
addon = xbmcaddon.Addon
addItem = xbmcplugin.addDirectoryItem
item = xbmcgui.ListItem
directory = xbmcplugin.endOfDirectory
content = xbmcplugin.setContent
# NOTE(review): this shadows the builtin property(); kept for compatibility.
property = xbmcplugin.setProperty
addonInfo = xbmcaddon.Addon().getAddonInfo
infoLabel = xbmc.getInfoLabel
condVisibility = xbmc.getCondVisibility
jsonrpc = xbmc.executeJSONRPC
# Shared GUI objects/classes.
window = xbmcgui.Window(10000)
dialog = xbmcgui.Dialog()
progressDialog = xbmcgui.DialogProgress()
windowDialog = xbmcgui.WindowDialog()
button = xbmcgui.ControlButton
image = xbmcgui.ControlImage
keyboard = xbmc.Keyboard
sleep = xbmc.sleep
execute = xbmc.executebuiltin
skin = xbmc.getSkinDir()
player = xbmc.Player()
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
resolve = xbmcplugin.setResolvedUrl
# Virtual file system helpers.
openFile = xbmcvfs.File
makeFile = xbmcvfs.mkdir
deleteFile = xbmcvfs.delete
listDir = xbmcvfs.listdir
transPath = xbmc.translatePath
# Resolved filesystem locations for this add-on (Python 2: profile path is
# decoded to unicode).
skinPath = xbmc.translatePath('special://skin/')
addonPath = xbmc.translatePath(addonInfo('path'))
dataPath = xbmc.translatePath(addonInfo('profile')).decode('utf-8')
settingsFile = os.path.join(dataPath, 'settings.xml')
databaseFile = os.path.join(dataPath, 'settings.db')
favouritesFile = os.path.join(dataPath, 'favourites.db')
sourcescacheFile = os.path.join(dataPath, 'sources.db')
cachemetaFile = os.path.join(dataPath, 'metacache.db')
libcacheFile = os.path.join(dataPath, 'library.db')
metacacheFile = os.path.join(dataPath, 'meta.db')
cacheFile = os.path.join(dataPath, 'cache.db')
def addonIcon():
    """Return the icon path for the configured appearance skin, falling
    back to the add-on's own icon."""
    look = setting('appearance').lower()
    if look not in ('-', ''):
        return os.path.join(addonPath, 'resources', 'media', look, 'icon.png')
    return addonInfo('icon')
def addonPoster():
    """Return the poster image for the configured appearance skin, or the
    stock Kodi video icon when no skin is selected."""
    look = setting('appearance').lower()
    if look not in ('-', ''):
        return os.path.join(addonPath, 'resources', 'media', look, 'poster.png')
    return 'DefaultVideo.png'
def addonBanner():
    """Return the banner image for the configured appearance skin, or the
    stock Kodi video icon when no skin is selected."""
    look = setting('appearance').lower()
    if look not in ('-', ''):
        return os.path.join(addonPath, 'resources', 'media', look, 'banner.png')
    return 'DefaultVideo.png'
def addonThumb():
    """Return the folder thumbnail for the configured appearance: the stock
    folder icon for '-', the add-on icon for '', otherwise the skin icon."""
    look = setting('appearance').lower()
    if look == '-':
        return 'DefaultFolder.png'
    if look == '':
        return addonInfo('icon')
    return os.path.join(addonPath, 'resources', 'media', look, 'icon.png')
def addonFanart():
    """Return the fanart image for the configured appearance, the add-on
    fanart for '', or None when fanart is disabled ('-')."""
    look = setting('appearance').lower()
    if look == '-':
        return None
    if look == '':
        return addonInfo('fanart')
    return os.path.join(addonPath, 'resources', 'media', look, 'fanart.jpg')
def addonNext():
    """Return the 'next page' artwork for the configured appearance skin,
    or the stock folder-back icon when no skin is selected."""
    look = setting('appearance').lower()
    if look not in ('-', ''):
        return os.path.join(addonPath, 'resources', 'media', look, 'next.jpg')
    return 'DefaultFolderBack.png'
def artPath():
    """Return the media directory of the configured appearance skin, or
    None when no skin is selected."""
    look = setting('appearance').lower()
    if look in ('-', ''):
        return None
    return os.path.join(addonPath, 'resources', 'media', look)
def infoDialog(message, heading=addonInfo('name'), icon=addonIcon(), time=3000):
    """Show a non-blocking Kodi notification toast.
    Falls back to the Notification() builtin for Kodi versions whose
    Dialog class lacks notification().
    """
    try:
        dialog.notification(heading, message, icon, time, sound=False)
    except Exception:
        # Narrowed from a bare except: still covers the missing-API case
        # without swallowing SystemExit/KeyboardInterrupt.
        execute("Notification(%s,%s, %s, %s)" % (heading, message, time, icon))
def yesnoDialog(line1, line2, line3, heading=addonInfo('name'), nolabel='', yeslabel=''):
    """Show a Kodi yes/no dialog and return its result."""
    answer = dialog.yesno(heading, line1, line2, line3, nolabel, yeslabel)
    return answer
def selectDialog(list, heading=addonInfo('name')):
    """Show a Kodi selection dialog over *list* and return its result."""
    choice = dialog.select(heading, list)
    return choice
def version():
    """Return the leading digits of the Kodi core add-on version as an int.
    Falls back to 999 when the version cannot be read or contains no
    leading digits.
    """
    try:
        ver = addon('xbmc.addon').getAddonInfo('version')
    except Exception:
        ver = '999'
    num = ''
    for ch in ver:
        if not ch.isdigit():
            break
        num += ch
    # Guard against version strings that do not start with a digit, which
    # previously crashed with ValueError on int('').
    return int(num) if num else 999
def refresh():
    """Ask Kodi to refresh the current container listing."""
    command = 'Container.Refresh'
    return execute(command)
def idle():
    """Close the busy spinner dialog if it is showing."""
    command = 'Dialog.Close(busydialog)'
    return execute(command)
def queueItem():
    """Queue the focused item onto the playlist."""
    command = 'Action(Queue)'
    return execute(command)
def openPlaylist():
    """Open Kodi's video playlist window."""
    command = 'ActivateWindow(VideoPlaylist)'
    return execute(command)
def openSettings(query=None, id=addonInfo('id')):
    """Open the settings dialog of the add-on *id*, optionally focusing a
    'category.field' pair given as *query*. Failures are silently ignored.
    """
    # NOTE: the parameter deliberately shadows the builtin id() -- the name
    # is part of the public keyword interface.
    try:
        idle()
        execute('Addon.OpenSettings(%s)' % id)
        if query is None:
            return
        category, field = query.split('.')
        execute('SetFocus(%i)' % (int(category) + 100))
        execute('SetFocus(%i)' % (int(field) + 200))
    except:
        return
def get_keyboard(heading, default=''):
    """Prompt with Kodi's on-screen keyboard and return the entered text,
    or None when the dialog is cancelled."""
    kb = xbmc.Keyboard()
    kb.setHeading(heading)
    if default:
        kb.setDefault(default)
    kb.doModal()
    if not kb.isConfirmed():
        return None
    return kb.getText()
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Transient per-thread state while a thread is blocked in FUTEX_WAIT.
thread_thislock = {}   # tid -> futex address currently being waited on
thread_blocktime = {}  # tid -> timestamp (ns) when the wait began
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
# (The redundant duplicate initialisation of process_names was removed.)
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, uaddr, op, val, utime, uaddr2, val3):
	# Record the moment a thread starts blocking in FUTEX_WAIT: remember
	# its execname, the futex address, and the wait start time (ns).
	cmd = op & FUTEX_CMD_MASK
	if cmd != FUTEX_WAIT:
		return # we don't care about originators of WAKE events
	process_names[tid] = comm
	thread_thislock[tid] = uaddr
	thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, ret):
	# On return from sys_futex: if this thread was blocked in FUTEX_WAIT,
	# fold its elapsed block time into the long-lived (tid, lock) stats
	# and drop the transient per-thread state.
	# ('in' replaces the Python-2-only dict.has_key; behaviour identical.)
	if tid in thread_blocktime:
		elapsed = nsecs(s, ns) - thread_blocktime[tid]
		add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
		del thread_blocktime[tid]
		del thread_thislock[tid]
def trace_begin():
	# Called once by perf before event processing starts.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called once when tracing stops: print per-(thread, lock) contention.
	for (tid, lock) in lock_waits:
		# NOTE: min/max of the stats tuple are unused here (and shadow
		# the builtins of the same name within this loop).
		min, max, avg, count = lock_waits[tid, lock]
		print "%s[%d] lock %x contended %d times, %d avg ns" % \
			(process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial port support for Windows.
Requires PySerial and pywin32.
"""
# system imports
import serial
from serial import PARITY_NONE, PARITY_EVEN, PARITY_ODD
from serial import STOPBITS_ONE, STOPBITS_TWO
from serial import FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS
import win32file, win32event
# twisted imports
from twisted.internet import abstract
# sibling imports
from serialport import BaseSerialPort
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
"""A serial device, acting as a transport, that uses a win32 event."""
connected = 1
def __init__(self, protocol, deviceNameOrPortNumber, reactor,
baudrate = 9600, bytesize = EIGHTBITS, parity = PARITY_NONE,
stopbits = STOPBITS_ONE, xonxoff = 0, rtscts = 0):
self._serial = self._serialFactory(
deviceNameOrPortNumber, baudrate=baudrate, bytesize=bytesize,
parity=parity, stopbits=stopbits, timeout=None,
xonxoff=xonxoff, rtscts=rtscts)
self.flushInput()
self.flushOutput()
self.reactor = reactor
self.protocol = protocol
self.outQueue = []
self.closed = 0
self.closedNotifies = 0
self.writeInProgress = 0
self.protocol = protocol
self._overlappedRead = win32file.OVERLAPPED()
self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
self._overlappedWrite = win32file.OVERLAPPED()
self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)
self.reactor.addEvent(self._overlappedRead.hEvent, self, 'serialReadEvent')
self.reactor.addEvent(self._overlappedWrite.hEvent, self, 'serialWriteEvent')
self.protocol.makeConnection(self)
self._finishPortSetup()
def _finishPortSetup(self):
"""
Finish setting up the serial port.
This is a separate method to facilitate testing.
"""
flags, comstat = win32file.ClearCommError(self._serial.hComPort)
rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
win32file.AllocateReadBuffer(1),
self._overlappedRead)
def serialReadEvent(self):
#get that character we set up
n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 0)
if n:
first = str(self.read_buf[:n])
#now we should get everything that is already in the buffer
flags, comstat = win32file.ClearCommError(self._serial.hComPort)
if comstat.cbInQue:
win32event.ResetEvent(self._overlappedRead.hEvent)
rc, buf = win32file.ReadFile(self._serial.hComPort,
win32file.AllocateReadBuffer(comstat.cbInQue),
self._overlappedRead)
n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 1)
#handle all the received data:
self.protocol.dataReceived(first + str(buf[:n]))
else:
#handle all the received data:
self.protocol.dataReceived(first)
#set up next one
win32event.ResetEvent(self._overlappedRead.hEvent)
rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
win32file.AllocateReadBuffer(1),
self._overlappedRead)
def write(self, data):
if data:
if self.writeInProgress:
self.outQueue.append(data)
else:
self.writeInProgress = 1
win32file.WriteFile(self._serial.hComPort, data, self._overlappedWrite)
def serialWriteEvent(self):
try:
dataToWrite = self.outQueue.pop(0)
except IndexError:
self.writeInProgress = 0
return
else:
win32file.WriteFile(self._serial.hComPort, dataToWrite, self._overlappedWrite)
    def connectionLost(self, reason):
        """
        Called when the serial port disconnects.

        Will call C{connectionLost} on the protocol that is handling the
        serial data.
        """
        # Stop dispatching the overlapped-I/O events before tearing down.
        self.reactor.removeEvent(self._overlappedRead.hEvent)
        self.reactor.removeEvent(self._overlappedWrite.hEvent)
        abstract.FileDescriptor.connectionLost(self, reason)
        self._serial.close()
        # Notify the protocol last, once the port is fully closed.
        self.protocol.connectionLost(reason)
| gpl-2.0 |
cheral/orange3 | Orange/widgets/utils/plot/owplot.py | 4 | 69148 | '''
#################
Plot (``owplot``)
#################
.. autoclass:: OrangeWidgets.plot.OWPlot
'''
from AnyQt.QtWidgets import \
QGraphicsView, QGraphicsScene, QGraphicsRectItem, QGraphicsTextItem,\
QToolTip, QApplication
from AnyQt.QtGui import QPen, QBrush, QColor, QPainter, QTransform, QPolygonF
from AnyQt.QtCore import \
QPointF, QRectF, QLineF, QPoint, QRect, QPropertyAnimation, Qt, QEvent, \
pyqtProperty
from Orange.widgets.gui import OWComponent
from Orange.widgets.settings import Setting
LeftLegend = 0
RightLegend = 1
BottomLegend = 2
TopLegend = 3
ExternalLegend = 4
UNUSED_ATTRIBUTES_STR = 'unused attributes'
from .owaxis import *
from .owcurve import *
from .owlegend import *
from .owplotgui import OWPlotGUI
from .owtools import *
from ..colorpalette import ColorPaletteGenerator
## Color values copied from orngView.SchemaView for consistency
SelectionPen = QPen(QBrush(QColor(51, 153, 255, 192)),
1, Qt.SolidLine, Qt.RoundCap)
SelectionBrush = QBrush(QColor(168, 202, 236, 192))
#from OWDlgs import OWChooseImageSizeDlg
#from OWColorPalette import * # color palletes, ...
#from Orange.utils import deprecated_members, deprecated_attribute
import orangeqt
def n_min(*args):
    """Return the smallest non-None value.

    Accepts either a single iterable or several separate values; ``None``
    entries are ignored.  Returns ``None`` when nothing usable remains.
    """
    values = args[0] if len(args) == 1 else args
    usable = [v for v in values if v is not None]
    if not usable:
        return None
    return min(usable)
def n_max(*args):
    """Return the largest non-None value.

    Accepts either a single iterable or several separate values; ``None``
    entries are ignored.  Returns ``None`` when nothing usable remains.
    """
    values = args[0] if len(args) == 1 else args
    usable = [v for v in values if v is not None]
    if not usable:
        return None
    return max(usable)
# Mapping from historical camelCase OWGraph method names to the current
# snake_case names; intended for the @deprecated_members decorator (see the
# commented-out application below) so old callers keep working.
name_map = {
    "saveToFileDirect": "save_to_file_direct",
    "saveToFile" : "save_to_file",
    "addCurve" : "add_curve",
    "addMarker" : "add_marker",
    "updateLayout" : "update_layout",
    "activateZooming" : "activate_zooming",
    "activateSelection" : "activate_selection",
    "activateRectangleSelection" : "activate_rectangle_selection",
    "activatePolygonSelection" : "activate_polygon_selection",
    "activatePanning" : "activate_panning",
    "getSelectedPoints" : "get_selected_points",
    "setAxisScale" : "set_axis_scale",
    "setAxisLabels" : "set_axis_labels",
    "setAxisAutoScale" : "set_axis_autoscale",
    "setTickLength" : "set_axis_tick_length",
    "updateCurves" : "update_curves",
    "itemList" : "plot_items",
    "setShowMainTitle" : "set_show_main_title",
    "setMainTitle" : "set_main_title",
    "invTransform" : "inv_transform",
    "setAxisTitle" : "set_axis_title",
    "setShowAxisTitle" : "set_show_axis_title"
}
#@deprecated_members(name_map, wrap_methods=list(name_map.keys()))
class OWPlot(orangeqt.Plot, OWComponent):
"""
The base class for all plots in Orange. It uses the Qt Graphics View Framework
to draw elements on a graph.
**Plot layout**
.. attribute:: show_legend
A boolean controlling whether the legend is displayed or not
.. attribute:: show_main_title
Controls whether or not the main plot title is displayed
.. attribute:: main_title
The plot title, usually show on top of the plot
.. automethod:: set_main_title
.. automethod:: set_show_main_title
.. attribute:: axis_margin
How much space (in pixels) should be left on each side for the axis, its label and its title.
.. attribute:: title_margin
How much space (in pixels) should be left at the top of the plot for the title, if the title is shown.
.. seealso:: attribute :attr:`show_main_title`
.. attribute:: plot_margin
How much space (in pixels) should be left at each side of the plot as whitespace.
**Coordinate transformation**
There are several coordinate systems used by OWPlot:
* `widget` coordinates.
This is the coordinate system of the position returned by :meth:`.QEvent.pos()`.
No calculations or positioning are done with these coordinates; they must first be converted
to scene coordinates with :meth:`mapToScene`.
* `data` coordinates.
The value used internally in Orange to specify the values of attributes.
For example, this can be age in years, the number of legs, or any other numeric value.
* `plot` coordinates.
These coordinates specify where the plot items are placed on the graph, but doesn't account for zoom.
They can be retrieved for a particular plot item with :meth:`.PlotItem.pos()`.
* `scene` or `zoom` coordinates.
Like plot coordinates, except that they take the :attr:`zoom_transform` into account. They represent the
actual position of an item on the scene.
These are the coordinates returned by :meth:`.PlotItem.scenePos()` and :meth:`mapToScene`.
For example, they can be used to determine what is under the cursor.
In most cases, you will use data coordinates for interacting with the actual data, and scene coordinates for
interacting with the plot items. The other two sets are mostly used for converting.
.. automethod:: map_to_graph
.. automethod:: map_from_graph
.. automethod:: transform
.. automethod:: inv_transform
.. method:: nearest_point(pos)
Returns the point nearest to ``pos``, or ``None`` if no point is close enough.
:param pos: The position in scene coordinates
:type pos: QPointF
:rtype: :obj:`.OWPoint`
.. method:: point_at(pos)
If there is a point with data coordinates equal to ``pos``, it is returned.
Otherwise, this function returns None.
:param pos: The position in data coordinates
:type pos: tuple of float float
:rtype: :obj:`.OWPoint`
**Data curves**
The preferred method for showing a series of data points is :meth:`set_main_curve_data`.
It allows you to specify point positions, colors, labels, sizes and shapes.
.. automethod:: set_main_curve_data
.. automethod:: add_curve
.. automethod:: add_custom_curve
.. automethod:: add_marker
.. method:: add_item(item)
Adds any PlotItem ``item`` to this plot.
Calling this function directly is useful for adding a :obj:`.Marker` or another object that does not have to appear in the legend.
For data curves, consider using :meth:`add_custom_curve` instead.
.. method:: plot_items()
Returns the list of all plot items added to this graph with :meth:`add_item` or :meth:`.PlotItem.attach`.
**Axes**
.. automethod:: add_axis
.. automethod:: add_custom_axis
.. automethod:: set_axis_enabled
.. automethod:: set_axis_labels
.. automethod:: set_axis_scale
**Settings**
.. attribute:: gui
An :obj:`.OWPlotGUI` object associated with this graph
**Point Selection and Marking**
There are four possible selection behaviors used for selecting or marking points in OWPlot.
They are used in :meth:`select_points` and :meth:`mark_points` and are the same for both operations.
.. data:: AddSelection
The points are added to the selection, without affecting the currently selected points
.. data:: RemoveSelection
The points are removed from the selection, without affecting the currently selected points
.. data:: ToggleSelection
The points' selection state is toggled
.. data:: ReplaceSelection
The current selection is replaced with the new one
.. note:: There are exactly the same functions for point selection and marking.
For simplicity, they are only documented once.
.. method:: select_points(area, behavior)
.. method:: mark_points(area, behavior)
Selects or marks all points inside the ``area``
:param area: The newly selected/marked area
:type area: QRectF or QPolygonF
:param behavior: :data:`AddSelection`, :data:`RemoveSelection`, :data:`ToggleSelection` or :data:`ReplaceSelection`
:type behavior: int
.. method:: unselect_all_points()
.. method:: unmark_all_points()
Unselects or unmarks all the points in the plot
.. method:: selected_points()
.. method:: marked_points()
Returns a list of all selected or marked points
:rtype: list of OWPoint
.. method:: selected_points(xData, yData)
For each of the point specified by ``xData`` and ``yData``, the point's selection state is returned.
:param xData: The list of x coordinates
:type xData: list of float
:param yData: The list of y coordinates
:type yData: list of float
:rtype: list of int
**Color schemes**
By default, OWPlot uses the application's system palette for drawing everything
except data curves and points. This way, it maintains consistency with other application
with regards to the user interface.
If data is plotted with no color specified, it will use a system color as well,
so that a good contrast with the background is guaranteed.
OWPlot uses the :meth:`.OWidget.palette` to determine its color scheme, so it can be
changed using :meth:`.QWidget.setPalette`. There are also two predefined color schemes:
``OWPalette.Dark`` and ``OWPalette.Light``, which provides a dark and a light scheme
respectively.
.. attribute:: theme_name
A string attribute with three possible values:
============== ===========================
Value Meaning
-------------- ---------------------------
"default" The system palette is used
"dark" The dark theme is used
"light" The light theme is used
============== ===========================
To apply the settings, first set this attribute's value, and then call :meth:`update_theme`
.. automethod:: update_theme
On the other hand, curves with a specified color will use colors from Orange's palette,
which can be configured within Orange. Each plot contains two separate palettes:
one for continuous attributes, and one for discrete ones. Both are created by
:obj:`.OWColorPalette.ColorPaletteGenerator`
.. attribute:: continuous_palette
The palette used when point color represents a continuous attribute
.. attribute:: discrete_palette
The palette used when point color represents a discrete attribute
"""
point_settings = ["point_width", "alpha_value"]
plot_settings = ["show_legend", "show_grid"]
alpha_value = Setting(255)
show_legend = Setting(False)
show_grid = Setting(False)
appearance_settings = ["antialias_plot", "animate_plot", "animate_points", "disable_animations_threshold", "auto_adjust_performance"]
def settings_list(self, graph_name, settings):
return [graph_name + '.' + setting for setting in settings]
    def __init__(self, parent = None, name = "None", show_legend = 1, axes = [xBottom, yLeft], widget = None):
        """
        Creates a new graph

        If your visualization uses axes other than ``xBottom`` and ``yLeft``, specify them in the
        ``axes`` parameter. To use non-cartesian axes, set ``axes`` to an empty list
        and add custom axes with :meth:`add_axis` or :meth:`add_custom_axis`
        """
        orangeqt.Plot.__init__(self, parent)
        OWComponent.__init__(self, widget)
        self.widget = widget
        self.parent_name = name
        self.title_item = None
        self.setRenderHints(QPainter.Antialiasing | QPainter.TextAntialiasing)
        # Legend: created once; drawn above other items via LegendZValue.
        self._legend = OWLegend(self, self.scene())
        self._legend.setZValue(LegendZValue)
        self._legend_margin = QRectF(0, 0, 100, 0)
        self._legend_moved = False
        # Layout parameters (pixels); see the class docstring for semantics.
        self.axes = dict()
        self.axis_margin = 50
        self.y_axis_extra_margin = 30
        self.title_margin = 40
        self.graph_margin = 10
        self.mainTitle = None
        self.showMainTitle = False
        self.XaxisTitle = None
        self.YLaxisTitle = None
        self.YRaxisTitle = None
        # Method aliases, because there are some methods with different names but same functions
        self.setCanvasBackground = self.setCanvasColor
        self.map_from_widget = self.mapToScene
        # OWScatterPlot needs these:
        self.point_width = 5
        self.show_filled_symbols = True
        self.show_grid = True
        self.curveSymbols = list(range(13))
        self.tips = TooltipManager(self)
        self.setMouseTracking(True)
        self.grabGesture(Qt.PinchGesture)
        self.grabGesture(Qt.PanGesture)
        # Interaction state (zooming/selection/panning) and mouse bookkeeping.
        self.state = NOTHING
        self._pressed_mouse_button = Qt.NoButton
        self._pressed_point = None
        self.selection_items = []
        self._current_rs_item = None
        self._current_ps_item = None
        self.polygon_close_treshold = 10
        self.sendSelectionOnUpdate = False
        self.auto_send_selection_callback = None
        # Data-to-plot mapping; data_range holds per-axis (min, max) pairs.
        self.data_range = {}
        self.map_transform = QTransform()
        self.graph_area = QRectF()
        ## Performance optimization
        self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
        self.scene().setItemIndexMethod(QGraphicsScene.NoIndex)
        self.animate_plot = True
        self.animate_points = True
        self.antialias_plot = True
        self.antialias_points = True
        self.antialias_lines = True
        self.auto_adjust_performance = True
        self.disable_animations_threshold = 5000
        # self.setInteractive(False)
        self.warn_unused_attributes = False
        # Caches invalidated whenever axes/curves change; see replot().
        self._bounds_cache = {}
        self._transform_cache = {}
        self.block_update = False
        self.use_animations = True
        self._animations = []
        ## Mouse event handlers
        self.mousePressEventHandler = None
        self.mouseMoveEventHandler = None
        self.mouseReleaseEventHandler = None
        self.mouseStaticClickHandler = self.mouseStaticClick
        self.static_click = False
        self._marker_items = []
        self.grid_curve = PlotGrid(self)
        # Zoom state: current rect, cumulative transform, and history stack.
        self._zoom_rect = None
        self._zoom_transform = QTransform()
        self.zoom_stack = []
        self.old_legend_margin = None
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        ## Add specified axes:
        for key in axes:
            if key in [yLeft, xTop]:
                self.add_axis(key, title_above=1)
            else:
                self.add_axis(key)
        self.continuous_palette = ColorPaletteGenerator(number_of_colors= -1)
        self.discrete_palette = ColorPaletteGenerator()
        self.gui = OWPlotGUI(self)
        """
        An :obj:`.OWPlotGUI` object associated with this plot
        """
        self.activate_zooming()
        self.selection_behavior = self.AddSelection
        self.main_curve = None
        self.replot()
# selectionCurveList = deprecated_attribute("selectionCurveList", "selection_items")
# autoSendSelectionCallback = deprecated_attribute("autoSendSelectionCallback", "auto_send_selection_callback")
# showLegend = deprecated_attribute("showLegend", "show_legend")
# pointWidth = deprecated_attribute("pointWidth", "point_width")
# alphaValue = deprecated_attribute("alphaValue", "alpha_value")
# useAntialiasing = deprecated_attribute("useAntialiasing", "use_antialiasing")
# showFilledSymbols = deprecated_attribute("showFilledSymbols", "show_filled_symbols")
# mainTitle = deprecated_attribute("mainTitle", "main_title")
# showMainTitle = deprecated_attribute("showMainTitle", "show_main_title")
# gridCurve = deprecated_attribute("gridCurve", "grid_curve")
# contPalette = deprecated_attribute("contPalette", "continuous_palette")
# discPalette = deprecated_attribute("discPalette", "discrete_palette")
def scrollContentsBy(self, dx, dy):
# This is overriden here to prevent scrolling with mouse and keyboard
# Instead of moving the contents, we simply do nothing
pass
def graph_area_rect(self):
return self.graph_area
def map_to_graph(self, point, axes = None, zoom = False):
'''
Maps ``point``, which can be ether a tuple of (x,y), a QPoint or a QPointF, from data coordinates
to plot coordinates.
:param point: The point in data coordinates
:type point: tuple or QPointF
:param axes: The pair of axes along which to transform the point.
If none are specified, (xBottom, yLeft) will be used.
:type axes: tuple of float float
:param zoom: if ``True``, the current :attr:`zoom_transform` will be considered in the transformation, and the result will be in scene coordinates instead.
:type zoom: int
:return: The transformed point in scene coordinates
:type: tuple of float float
'''
if type(point) == tuple:
(x, y) = point
point = QPointF(x, y)
if axes:
x_id, y_id = axes
point = point * self.transform_for_axes(x_id, y_id)
else:
point = point * self.map_transform
if zoom:
point = point * self._zoom_transform
return (point.x(), point.y())
def map_from_graph(self, point, axes = None, zoom = False):
'''
Maps ``point``, which can be ether a tuple of (x,y), a QPoint or a QPointF, from plot coordinates
to data coordinates.
:param point: The point in data coordinates
:type point: tuple or QPointF
:param axes: The pair of axes along which to transform the point. If none are specified, (xBottom, yLeft) will be used.
:type axes: tuple of float float
:param zoom: if ``True``, the current :attr:`zoom_transform` will be considered in the transformation, and the ``point`` should be in scene coordinates instead.
:type zoom: int
:returns: The transformed point in data coordinates
:rtype: tuple of float float
'''
if type(point) == tuple:
(x, y) = point
point = QPointF(x,y)
if zoom:
t, ok = self._zoom_transform.inverted()
point = point * t
if axes:
x_id, y_id = axes
t, ok = self.transform_for_axes(x_id, y_id).inverted()
else:
t, ok = self.map_transform.inverted()
ret = point * t
return (ret.x(), ret.y())
    def save_to_file(self, extraButtons = []):
        # Show the image-size dialog and let the user export the plot.
        # NOTE(review): the OWChooseImageSizeDlg import is commented out at
        # the top of this module, so calling this raises NameError — confirm
        # whether these export helpers are still used.
        sizeDlg = OWChooseImageSizeDlg(self, extraButtons, parent=self)
        sizeDlg.exec_()

    def save_to_file_direct(self, fileName, size = None):
        # Save the plot straight to ``fileName`` without showing the dialog.
        # NOTE(review): depends on the same missing OWChooseImageSizeDlg import.
        sizeDlg = OWChooseImageSizeDlg(self)
        sizeDlg.saveImage(fileName, size)
def activate_zooming(self):
'''
Activates the zooming mode, where the user can zoom in and out with a single mouse click
or by dragging the mouse to form a rectangular area
'''
self.state = ZOOMING
def activate_rectangle_selection(self):
'''
Activates the rectangle selection mode, where the user can select points in a rectangular area
by dragging the mouse over them
'''
self.state = SELECT_RECTANGLE
def activate_selection(self):
'''
Activates the point selection mode, where the user can select points by clicking on them
'''
self.state = SELECT
def activate_polygon_selection(self):
'''
Activates the polygon selection mode, where the user can select points by drawing a polygon around them
'''
self.state = SELECT_POLYGON
def activate_panning(self):
'''
Activates the panning mode, where the user can move the zoom projection by dragging the mouse
'''
self.state = PANNING
def set_show_main_title(self, b):
'''
Shows the main title if ``b`` is ``True``, and hides it otherwise.
'''
self.showMainTitle = b
self.replot()
def set_main_title(self, t):
'''
Sets the main title to ``t``
'''
self.mainTitle = t
self.replot()
def setShowXaxisTitle(self, b = -1):
if b == -1 and hasattr(self, 'showXaxisTitle'):
b = self.showXaxisTitle
self.set_show_axis_title(xBottom, b)
def setXaxisTitle(self, title):
self.set_axis_title(xBottom, title)
def setShowYLaxisTitle(self, b = -1):
if b == -1 and hasattr(self, 'showYLaxisTitle'):
b = self.showYLaxisTitle
self.set_show_axis_title(yLeft, b)
def setYLaxisTitle(self, title):
self.set_axis_title(yLeft, title)
def setShowYRaxisTitle(self, b = -1):
if b == -1 and hasattr(self, 'showYRaxisTitle'):
b = self.showYRaxisTitle
self.set_show_axis_title(yRight, b)
def setYRaxisTitle(self, title):
self.set_axis_title(yRight, title)
def enableGridXB(self, b):
self.grid_curve.set_x_enabled(b)
self.replot()
def enableGridYL(self, b):
self.grid_curve.set_y_enabled(b)
self.replot()
def setGridColor(self, c):
self.grid_curve.set_pen(QPen(c))
self.replot()
def setCanvasColor(self, c):
p = self.palette()
p.setColor(OWPalette.Canvas, c)
self.set_palette(p)
def setData(self, data):
self.clear()
self.replot()
def setXlabels(self, labels):
if xBottom in self.axes:
self.set_axis_labels(xBottom, labels)
elif xTop in self.axes:
self.set_axis_labels(xTop, labels)
def set_axis_autoscale(self, axis_id):
if axis_id in self.axes:
self.axes[axis_id].auto_scale = True
elif axis_id in self.data_range:
del self.data_range[axis_id]
def set_axis_labels(self, axis_id, labels, values=None):
'''
Sets the labels of axis ``axis_id`` to ``labels``. This is used for axes displaying a discrete data type.
:param labels: The ID of the axis to change
:type labels: int
:param labels: The list of labels to be displayed along the axis
:type labels: A list of strings
.. note:: This changes the axis scale and removes any previous scale set with :meth:`set_axis_scale`.
'''
if axis_id in self._bounds_cache:
del self._bounds_cache[axis_id]
self._transform_cache = {}
self.axes[axis_id].set_labels(labels, values)
def set_axis_scale(self, axis_id, min, max, step_size=0):
'''
Sets the scale of axis ``axis_id`` to show an interval between ``min`` and ``max``.
If ``step`` is specified and non-zero, it determines the steps between label on the axis.
Otherwise, they are calculated automatically.
.. note:: This changes the axis scale and removes any previous labels set with :meth:`set_axis_labels`.
'''
if axis_id in self._bounds_cache:
del self._bounds_cache[axis_id]
self._transform_cache = {}
if axis_id in self.axes:
self.axes[axis_id].set_scale(min, max, step_size)
else:
self.data_range[axis_id] = (min, max)
def set_axis_title(self, axis_id, title):
if axis_id in self.axes:
self.axes[axis_id].set_title(title)
def set_show_axis_title(self, axis_id, b):
if axis_id in self.axes:
if b == -1:
b = not self.axes[axis_id].show_title
self.axes[axis_id].set_show_title(b)
self.replot()
def set_axis_tick_length(self, axis_id, minor, medium, major):
if axis_id in self.axes:
self.axes[axis_id].set_tick_legth(minor, medium, major)
def setYLlabels(self, labels):
self.set_axis_labels(yLeft, labels)
def setYRlabels(self, labels):
self.set_axis_labels(yRight, labels)
    def add_custom_curve(self, curve, enableLegend = False):
        '''
        Adds a custom PlotItem ``curve`` to the plot.

        If ``enableLegend`` is ``True``, a curve symbol defined by
        :meth:`.OWCurve.point_item` and the ``curve``'s name
        :obj:`.OWCurve.name` is added to the legend.

        This function recalculates axis bounds and replots the plot if needed.

        :param curve: The curve to add
        :type curve: :obj:`.OWCurve`
        '''
        self.add_item(curve)
        if enableLegend:
            self.legend().add_curve(curve)
        # Invalidate cached bounds for this curve's (x, y) axes pair.
        for key in [curve.axes()]:
            if key in self._bounds_cache:
                del self._bounds_cache[key]
        self._transform_cache = {}
        if hasattr(curve, 'tooltip'):
            curve.setToolTip(curve.tooltip)
        x,y = curve.axes()
        # An auto-scaled curve on an auto-scaled axis may change the axis
        # ranges, so a full replot is needed; otherwise just place the curve.
        if curve.is_auto_scale() and (self.is_axis_auto_scale(x) or self.is_axis_auto_scale(y)):
            self.set_dirty()
            self.replot()
        else:
            curve.set_graph_transform(self.transform_for_axes(x,y))
            curve.update_properties()
        return curve
def add_curve(self, name, brushColor = None, penColor = None, size = 5, style = Qt.NoPen,
symbol = OWPoint.Ellipse, enableLegend = False, xData = [], yData = [], showFilledSymbols = None,
lineWidth = 1, pen = None, autoScale = 0, antiAlias = None, penAlpha = 255, brushAlpha = 255,
x_axis_key = xBottom, y_axis_key = yLeft):
'''
Creates a new :obj:`.OWCurve` with the specified parameters and adds it to the graph.
If ``enableLegend`` is ``True``, a curve symbol is added to the legend.
'''
c = OWCurve(xData, yData, x_axis_key, y_axis_key, tooltip=name)
c.set_zoom_transform(self._zoom_transform)
c.name = name
c.set_style(style)
if not brushColor:
brushColor = self.color(OWPalette.Data)
if not penColor:
penColor = self.color(OWPalette.Data)
c.set_color(penColor)
if pen:
p = pen
else:
p = QPen()
p.setColor(penColor)
p.setWidth(lineWidth)
c.set_pen(p)
c.set_brush(brushColor)
c.set_symbol(symbol)
c.set_point_size(size)
c.set_data(xData, yData)
c.set_auto_scale(autoScale)
return self.add_custom_curve(c, enableLegend)
def set_main_curve_data(self, x_data, y_data, color_data, label_data, size_data, shape_data, marked_data = [], valid_data = [], x_axis_key=xBottom, y_axis_key=yLeft):
"""
Creates a single curve that can have points of different colors, shapes and sizes.
This is the preferred method for visualization that show a series of different points.
:param x_data: The list of X coordinates of the points
:type x_data: list of float
:param y_data: The list of Y coordinates of the points
:type y_data: list of float
:param color_data: The list of point colors
:type color_data: list of QColor
:param label_data: The list of point labels
:type label_data: list of str
:param size_data: The list of point sizes
:type size_data: list of int
:param shape_data: The list of point symbols
:type shape_data: list of int
The number of points in the curve will be equal to min(len(x_data), len(y_data)).
The other four list can be empty, in which case a default value will be used.
If they contain only one element, its value will be used for all points.
.. note:: This function does not add items to the legend automatically.
You will have to add them yourself with :meth:`.OWLegend.add_item`.
.. seealso:: :obj:`.OWMultiCurve`, :obj:`.OWPoint`
"""
if not self.main_curve:
self.main_curve = OWMultiCurve([], [])
self.add_item(self.main_curve)
self.update_performance(len(x_data))
if len(valid_data):
import numpy
x_data = numpy.compress(valid_data, x_data)
y_data = numpy.compress(valid_data, y_data)
if len(color_data) > 1:
color_data = numpy.compress(valid_data, color_data)
if len(size_data) > 1:
size_data = numpy.compress(valid_data, size_data)
if len(shape_data) > 1:
shape_data = numpy.compress(valid_data, shape_data)
if len(label_data) > 1:
label_data = numpy.compress(valid_data, label_data)
if len(marked_data) > 1:
marked_data = numpy.compress(valid_data, marked_data).tolist()
c = self.main_curve
c.set_data(x_data, y_data)
c.set_axes(x_axis_key, y_axis_key)
c.set_point_colors(color_data)
c.set_point_labels(label_data)
c.set_point_sizes(size_data)
c.set_point_symbols(shape_data)
if len(marked_data):
c.set_points_marked(marked_data)
self.marked_points_changed.emit()
c.name = 'Main Curve'
self.replot()
def remove_curve(self, item):
'''
Removes ``item`` from the plot
'''
self.remove_item(item)
self.legend().remove_curve(item)
def plot_data(self, xData, yData, colors, labels, shapes, sizes):
pass
def add_axis(self, axis_id, title='', title_above=False, title_location=AxisMiddle,
line=None, arrows=0, zoomable=False, bounds=None):
'''
Creates an :obj:`OrangeWidgets.plot.OWAxis` with the specified ``axis_id`` and ``title``.
'''
a = OWAxis(axis_id, title, title_above, title_location, line, arrows, self, bounds=bounds)
self.scene().addItem(a)
a.zoomable = zoomable
a.update_callback = self.replot
if axis_id in self._bounds_cache:
del self._bounds_cache[axis_id]
self._transform_cache = {}
self.axes[axis_id] = a
if not axis_id in CartesianAxes:
self.set_show_axis_title(axis_id, True)
return a
def remove_all_axes(self, user_only = True):
'''
Removes all axes from the plot
'''
ids = []
for id,item in self.axes.items():
if not user_only or id >= UserAxis:
ids.append(id)
self.scene().removeItem(item)
for id in ids:
del self.axes[id]
def add_custom_axis(self, axis_id, axis):
'''
Adds a custom ``axis`` with id ``axis_id`` to the plot
'''
self.axes[axis_id] = axis
self.replot()
def add_marker(self, name, x, y, alignment = -1, bold = 0, color = None, brushColor = None, size=None, antiAlias = None,
x_axis_key = xBottom, y_axis_key = yLeft):
m = Marker(name, x, y, alignment, bold, color, brushColor)
self._marker_items.append((m, x, y, x_axis_key, y_axis_key))
self.add_custom_curve(m)
return m
    def removeAllSelections(self):
        ## TODO: not implemented; kept as a no-op for API compatibility with
        ## the old OWGraph interface.
        pass

    def clear(self):
        """
        Clears the plot, removing all curves, markers and tooltips.

        Axes and the grid are not removed
        """
        for i in self.plot_items():
            if i is not self.grid_curve:
                self.remove_item(i)
        self.main_curve = None
        # Invalidate cached bounds/transforms that referred to removed items.
        self._bounds_cache = {}
        self._transform_cache = {}
        self.clear_markers()
        self.tips.removeAll()
        self.legend().clear()
        self.old_legend_margin = None
        self.update_grid()
def clear_markers(self):
"""
Removes all markers added with :meth:`add_marker` from the plot
"""
for item,x,y,x_axis,y_axis in self._marker_items:
item.detach()
self._marker_items = []
    def update_layout(self):
        '''
        Updates the plot layout.

        This function recalculates the position of titles, axes, the legend
        and the main plot area.  It does not update the curves or the other
        plot items.
        '''
        if not self.isVisible():
            # No point in updating the graph if it's still hidden
            return
        graph_rect = QRectF(self.contentsRect())
        self.centerOn(graph_rect.center())
        m = self.graph_margin
        graph_rect.adjust(m, m, -m, -m)
        # Reserve room for the main title at the top, if shown.
        if self.showMainTitle and self.mainTitle:
            if self.title_item:
                self.scene().remove_item(self.title_item)
                del self.title_item
            self.title_item = QGraphicsTextItem(self.mainTitle, scene=self.scene())
            title_size = self.title_item.boundingRect().size()
            ## TODO: Check if the title is too big
            self.title_item.setPos( graph_rect.width()/2 - title_size.width()/2, self.title_margin/2 - title_size.height()/2 )
            graph_rect.setTop(graph_rect.top() + self.title_margin)
        # Reserve the legend's margin rectangle on the relevant sides.
        if self.show_legend:
            self._legend_outside_area = QRectF(graph_rect)
            self._legend.max_size = self._legend_outside_area.size()
            r = self._legend_margin
            graph_rect.adjust(r.left(), r.top(), -r.right(), -r.bottom())
        self._legend.update_items()
        # Carve one rectangle per visible cartesian axis off graph_rect.
        axis_rects = dict()
        # NOTE(review): height()/4 appears twice in this min(); presumably one
        # of them was meant to be width()/4 — confirm before changing.
        base_margin = min(self.axis_margin, graph_rect.height()/4, graph_rect.height()/4)
        if xBottom in self.axes and self.axes[xBottom].isVisible():
            margin = base_margin
            if self.axes[xBottom].should_be_expanded():
                margin += min(20, graph_rect.height()/8, graph_rect.width() / 8)
            bottom_rect = QRectF(graph_rect)
            bottom_rect.setTop( bottom_rect.bottom() - margin)
            axis_rects[xBottom] = bottom_rect
            graph_rect.setBottom( graph_rect.bottom() - margin)
        if xTop in self.axes and self.axes[xTop].isVisible():
            margin = base_margin
            if self.axes[xTop].should_be_expanded():
                margin += min(20, graph_rect.height()/8, graph_rect.width() / 8)
            top_rect = QRectF(graph_rect)
            top_rect.setBottom(top_rect.top() + margin)
            axis_rects[xTop] = top_rect
            graph_rect.setTop(graph_rect.top() + margin)
        if yLeft in self.axes and self.axes[yLeft].isVisible():
            margin = base_margin
            if self.axes[yLeft].should_be_expanded():
                margin += min(20, graph_rect.height()/8, graph_rect.width() / 8)
            left_rect = QRectF(graph_rect)
            left = graph_rect.left() + margin + self.y_axis_extra_margin
            left_rect.setRight(left)
            graph_rect.setLeft(left)
            axis_rects[yLeft] = left_rect
            # Trim the horizontal axis rects so they don't overlap this one.
            if xBottom in axis_rects:
                axis_rects[xBottom].setLeft(left)
            if xTop in axis_rects:
                axis_rects[xTop].setLeft(left)
        if yRight in self.axes and self.axes[yRight].isVisible():
            margin = base_margin
            if self.axes[yRight].should_be_expanded():
                margin += min(20, graph_rect.height()/8, graph_rect.width() / 8)
            right_rect = QRectF(graph_rect)
            right = graph_rect.right() - margin - self.y_axis_extra_margin
            right_rect.setLeft(right)
            graph_rect.setRight(right)
            axis_rects[yRight] = right_rect
            # Trim the horizontal axis rects so they don't overlap this one.
            if xBottom in axis_rects:
                axis_rects[xBottom].setRight(right)
            if xTop in axis_rects:
                axis_rects[xTop].setRight(right)
        if self.graph_area != graph_rect:
            self.graph_area = QRectF(graph_rect)
            self.set_graph_rect(self.graph_area)
            self._transform_cache = {}
            # Keep the zoom rectangle anchored to the same data region by
            # round-tripping it through data coordinates.
            if self._zoom_rect:
                data_zoom_rect = self.map_transform.inverted()[0].mapRect(self._zoom_rect)
                self.map_transform = self.transform_for_axes()
                self.set_zoom_rect(self.map_transform.mapRect(data_zoom_rect))
        self.map_transform = self.transform_for_axes()
        # Push the recomputed transforms down to every plot item.
        for c in self.plot_items():
            x,y = c.axes()
            c.set_graph_transform(self.transform_for_axes(x,y))
            c.update_properties()
def update_zoom(self):
'''
Updates the zoom transformation of the plot items.
'''
zt = self.zoom_transform()
self._zoom_transform = zt
self.set_zoom_transform(zt)
self.update_axes(zoom_only=True)
self.viewport().update()
    def update_axes(self, zoom_only=False):
        """
        Updates the axes.

        If ``zoom_only`` is ``True``, only the positions of the axes and their labels are recalculated.
        Otherwise, all their labels are updated.
        """
        if self.warn_unused_attributes and not zoom_only:
            # The "unused attributes" legend category is rebuilt below for
            # every axis that ends up hidden.
            self._legend.remove_category(UNUSED_ATTRIBUTES_STR)
        for id, item in self.axes.items():
            if item.scale is None and item.labels is None:
                item.auto_range = self.bounds_for_axis(id)
            # Pick the (x, y) axis pair this axis's line lives in.
            if id in XAxes:
                (x,y) = (id, yLeft)
            elif id in YAxes:
                (x,y) = (xBottom, id)
            else:
                (x,y) = (xBottom, yLeft)
            if id in CartesianAxes:
                ## This class only sets the lines for these four axes, widgets are responsible for the rest
                if x in self.axes and y in self.axes:
                    item.data_line = self.axis_line(self.data_rect_for_axes(x,y), id)
            if id in CartesianAxes:
                item.graph_line = self.axis_line(self.graph_area, id, invert_y = True)
            elif item.data_line:
                t = self.transform_for_axes(x, y)
                item.graph_line = t.map(item.data_line)
            if item.graph_line and item.zoomable:
                item.graph_line = self._zoom_transform.map(item.graph_line)
            if not zoom_only:
                # Axes without a line to draw are hidden (and optionally
                # reported in the legend as unused attributes).
                if item.graph_line:
                    item.show()
                else:
                    item.hide()
                    if self.warn_unused_attributes:
                        self._legend.add_item(UNUSED_ATTRIBUTES_STR, item.title, None)
            item.zoom_transform = self._zoom_transform
            item.update(zoom_only)
def replot(self):
    '''
        Replot the entire graph.

        This functions redraws everything on the graph, so it can be very slow
    '''
    #self.setBackgroundBrush(self.color(OWPalette.Canvas))
    # Invalidate cached bounds and transforms, then rebuild every layer in
    # dependency order (legend/layout before zoom, zoom before axes).
    self._bounds_cache = {}
    self._transform_cache = {}
    self.set_clean()
    self.update_antialiasing()
    self.update_legend()
    self.update_layout()
    self.update_zoom()
    self.update_axes()
    self.update_grid()
    self.update_filled_symbols()
    self.setSceneRect(QRectF(self.contentsRect()))
    self.viewport().update()
def update_legend(self):
    # Dock the legend to the top-right corner unless the user has moved it,
    # then animate it in or out when its visibility changed.
    if self.show_legend and not self._legend_moved:
        ## If the legend hasn't been moved it, we set it outside, in the top right corner
        m = self.graph_margin
        r = QRectF(self.contentsRect())
        r.adjust(m, m, -m, -m)
        self._legend.max_size = r.size()
        self._legend.update_items()
        w = self._legend.boundingRect().width()
        self._legend_margin = QRectF(0, 0, w, 0)
        self._legend.set_floating(False)
        self._legend.set_orientation(Qt.Vertical)
        self._legend.setPos(QRectF(self.contentsRect()).topRight() + QPointF(-w, 0))
    if (self._legend.isVisible() == self.show_legend):
        # Visibility already matches the setting; nothing to animate.
        return
    self._legend.setVisible(self.show_legend)
    if self.show_legend:
        if self.old_legend_margin is not None:
            self.animate(self, 'legend_margin', self.old_legend_margin, duration = 100)
        else:
            # No saved margin: just make sure the legend is inside the widget.
            r = self.legend_rect()
            self.ensure_inside(r, self.contentsRect())
            self._legend.setPos(r.topLeft())
            self.notify_legend_moved(r.topLeft())
    else:
        # Remember the margin so the legend can come back to the same place.
        self.old_legend_margin = self.legend_margin
        self.animate(self, 'legend_margin', QRectF(), duration=100)
def update_filled_symbols(self):
    ## TODO: Implement this in Curve.cpp
    # Currently a no-op; symbol fill cannot yet be updated from here.
    pass
def update_grid(self):
    # Toggle both grid directions together and repaint the grid curve.
    self.grid_curve.set_x_enabled(self.show_grid)
    self.grid_curve.set_y_enabled(self.show_grid)
    self.grid_curve.update_properties()
def legend(self):
    '''
        Return the plot's legend (an :obj:`OrangeWidgets.plot.OWLegend` instance).
    '''
    return self._legend
def legend_rect(self):
    """Return the legend's bounding rect in scene coordinates, or an empty
    rect when the legend is hidden."""
    if not self.show_legend:
        return QRectF()
    return self._legend.mapRectToScene(self._legend.boundingRect())
def isLegendEvent(self, event, function):
    """If ``event`` falls on the visible legend, forward it to ``function``
    (a QGraphicsView handler) and return True; otherwise return False."""
    if not self.show_legend:
        return False
    if not self.legend_rect().contains(self.mapToScene(event.pos())):
        return False
    function(self, event)
    return True
def mouse_action(self, event):
    # Map the event's button/modifier combination onto an interaction-state
    # constant (PANNING, ZOOMING, SELECT, SELECT_RIGHTCLICK, ...).
    b = event.buttons() | event.button()
    m = event.modifiers()
    if b == Qt.LeftButton | Qt.RightButton:
        # Left+right pressed together emulates the middle button.
        b = Qt.MidButton
    if m & Qt.AltModifier and b == Qt.LeftButton:
        # Alt+left also emulates the middle button.
        m = m & ~Qt.AltModifier
        b = Qt.MidButton
    if b == Qt.LeftButton and not m:
        return self.state
    if b == Qt.RightButton and not m and self.state == SELECT:
        return SELECT_RIGHTCLICK
    if b == Qt.MidButton:
        return PANNING
    if b in [Qt.LeftButton, Qt.RightButton] and (self.state == ZOOMING or m == Qt.ControlModifier):
        return ZOOMING
    if b == Qt.LeftButton and m == Qt.ShiftModifier:
        return SELECT
    # NOTE(review): unhandled combinations fall through and return None;
    # callers only compare the result against specific constants, so this
    # appears intentional -- confirm.
## Event handling
def event(self, event):
    # Dispatch touch gestures to our own handler; everything else goes to
    # the base class implementation.
    if event.type() == QEvent.Gesture:
        return self.gestureEvent(event)
    else:
        return orangeqt.Plot.event(self, event)
def gestureEvent(self, event):
    # Handle pinch (zoom) and pan gestures from a touch device.
    for gesture in event.gestures():
        if gesture.state() == Qt.GestureStarted:
            # Reset the reference scale at the start of every gesture.
            self.current_gesture_scale = 1.
            event.accept(gesture)
            continue
        elif gesture.gestureType() == Qt.PinchGesture:
            # Zoom around the pinch centre by the scale delta since the last
            # event; animations are suspended so the view tracks the fingers.
            old_animate_plot = self.animate_plot
            self.animate_plot = False
            self.zoom(gesture.centerPoint(), gesture.scaleFactor()/self.current_gesture_scale )
            self.current_gesture_scale = gesture.scaleFactor()
            self.animate_plot = old_animate_plot
        elif gesture.gestureType() == Qt.PanGesture:
            self.pan(gesture.delta())
    return True
def resizeEvent(self, event):
    # Re-layout for the new size, then keep a right- or bottom-docked legend
    # anchored to its edge by shifting it with the size delta.
    self.replot()
    s = event.size() - event.oldSize()
    if self.legend_margin.right() > 0:
        self._legend.setPos(self._legend.pos() + QPointF(s.width(), 0))
    if self.legend_margin.bottom() > 0:
        self._legend.setPos(self._legend.pos() + QPointF(0, s.height()))
def showEvent(self, event):
    # A full replot is needed because the widget geometry only becomes
    # valid when the widget is first shown.
    self.replot()
def mousePressEvent(self, event):
    # Record press state for later static-click / drag discrimination.
    self.static_click = True
    self._pressed_mouse_button = event.button()
    self._pressed_mouse_pos = event.pos()
    # A user-installed handler gets first refusal.
    if self.mousePressEventHandler and self.mousePressEventHandler(event):
        event.accept()
        return
    if self.isLegendEvent(event, QGraphicsView.mousePressEvent):
        return
    point = self.mapToScene(event.pos())
    a = self.mouse_action(event)
    if a == SELECT and hasattr(self, 'move_selected_points'):
        # Remember the point under the cursor (and its coordinates) so a
        # subsequent drag can move the current selection.
        self._pressed_point = self.nearest_point(point)
        self._pressed_point_coor = None
        if self._pressed_point is not None:
            self._pressed_point_coor = self._pressed_point.coordinates()
    if a == PANNING:
        self._last_pan_pos = point
        event.accept()
    else:
        orangeqt.Plot.mousePressEvent(self, event)
def mouseMoveEvent(self, event):
    # Moving farther than the platform drag threshold means this is no
    # longer a "static" click.
    if event.buttons() and (self._pressed_mouse_pos - event.pos()).manhattanLength() > QApplication.instance().startDragDistance():
        self.static_click = False
    if self.mouseMoveEventHandler and self.mouseMoveEventHandler(event):
        event.accept()
        return
    if self.isLegendEvent(event, QGraphicsView.mouseMoveEvent):
        return
    point = self.mapToScene(event.pos())
    if not self._pressed_mouse_button:
        # Pure hover: notify listeners about the nearest point.
        if self.receivers(self.point_hovered) > 0:
            self.point_hovered.emit(self.nearest_point(point))
    ## We implement a workaround here, because sometimes mouseMoveEvents are not fast enough
    ## so the moving legend gets left behind while dragging, and it's left in a pressed state
    if self._legend.mouse_down:
        QGraphicsView.mouseMoveEvent(self, event)
        return
    a = self.mouse_action(event)
    if a == SELECT and self._pressed_point is not None and self._pressed_point.is_selected() and hasattr(self, 'move_selected_points'):
        # Drag the selected points by the delta from the press position;
        # point animations are suspended so the drag tracks the cursor.
        animate_points = self.animate_points
        self.animate_points = False
        x1, y1 = self._pressed_point_coor
        x2, y2 = self.map_from_graph(point, zoom=True)
        self.move_selected_points((x2 - x1, y2 - y1))
        self.replot()
        if self._pressed_point is not None:
            self._pressed_point_coor = self._pressed_point.coordinates()
        self.animate_points = animate_points
    elif a in [SELECT, ZOOMING] and self.graph_area.contains(point):
        # Stretch (creating on first move) the rubber-band rectangle.
        if not self._current_rs_item:
            self._selection_start_point = self.mapToScene(self._pressed_mouse_pos)
            self._current_rs_item = QGraphicsRectItem(scene=self.scene())
            self._current_rs_item.setPen(SelectionPen)
            self._current_rs_item.setBrush(SelectionBrush)
            self._current_rs_item.setZValue(SelectionZValue)
        self._current_rs_item.setRect(QRectF(self._selection_start_point, point).normalized())
    elif a == PANNING:
        if not self._last_pan_pos:
            self._last_pan_pos = self.mapToScene(self._pressed_mouse_pos)
        self.pan(point - self._last_pan_pos)
        self._last_pan_pos = point
    else:
        # No drag action: try to show a tooltip for the hovered location.
        x, y = self.map_from_graph(point, zoom=True)
        text, x, y = self.tips.maybeTip(x, y)
        if type(text) == int:
            text = self.buildTooltip(text)
        if text and x is not None and y is not None:
            tp = self.mapFromScene(QPointF(x,y) * self.map_transform * self._zoom_transform)
            self.showTip(tp.x(), tp.y(), text)
        else:
            orangeqt.Plot.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event):
    self._pressed_mouse_button = Qt.NoButton
    if self.mouseReleaseEventHandler and self.mouseReleaseEventHandler(event):
        event.accept()
        return
    # A click that never moved past the drag threshold counts as "static".
    if self.static_click and self.mouseStaticClickHandler and self.mouseStaticClickHandler(event):
        event.accept()
        return
    if self.isLegendEvent(event, QGraphicsView.mouseReleaseEvent):
        return
    a = self.mouse_action(event)
    if a == SELECT and self._pressed_point is not None:
        self._pressed_point = None
    if a in [ZOOMING, SELECT] and self._current_rs_item:
        # Finish the rubber band: zoom to it (in un-zoomed coordinates) or
        # select within it, then remove the visual item from the scene.
        rect = self._current_rs_item.rect()
        if a == ZOOMING:
            self.zoom_to_rect(self._zoom_transform.inverted()[0].mapRect(rect))
        else:
            self.add_selection(rect)
        self.scene().removeItem(self._current_rs_item)
        self._current_rs_item = None
        return
    orangeqt.Plot.mouseReleaseEvent(self, event)
def mouseStaticClick(self, event):
    # Handle a click that did not turn into a drag.  Returns True when the
    # event was consumed, False when it should be processed further.
    point = self.mapToScene(event.pos())
    if point not in self.graph_area:
        return False
    a = self.mouse_action(event)
    b = event.buttons() | event.button()
    if a == ZOOMING:
        # Left click zooms in around the point, right click steps back.
        if event.button() == Qt.LeftButton:
            self.zoom_in(point)
        elif event.button() == Qt.RightButton:
            self.zoom_back()
        else:
            return False
        return True
    elif a == SELECT and b == Qt.LeftButton:
        point_item = self.nearest_point(point)
        b = self.selection_behavior
        if b == self.ReplaceSelection:
            self.unselect_all_points()
            b = self.AddSelection
        if point_item:
            point_item.set_selected(b == self.AddSelection or (b == self.ToggleSelection and not point_item.is_selected()))
        self.selection_changed.emit()
    elif a == SELECT and b == Qt.RightButton:
        point_item = self.nearest_point(point)
        if point_item:
            self.point_rightclicked.emit(self.nearest_point(point))
        else:
            self.unselect_all_points()
    else:
        return False
    # NOTE(review): the SELECT branches fall through and return None (falsy),
    # so the caller treats handled selection clicks as unconsumed -- confirm
    # this is intended before changing it.
def wheelEvent(self, event):
    """Zoom around the cursor position; each wheel notch (120 units)
    doubles or halves the magnification."""
    center = self.mapToScene(event.pos())
    steps = event.delta() / 120.0
    self.zoom(center, 2 ** steps)
@staticmethod
def transform_from_rects(r1, r2):
    """
    Returns a QTransform that maps from rectangle ``r1`` to ``r2``.

    An identity transform is returned when either rectangle is missing
    or degenerate (zero width or height).
    """
    if r1 is None or r2 is None or 0 in (r1.width(), r1.height(),
                                         r2.width(), r2.height()):
        return QTransform()
    # Move r1's origin to (0, 0), scale to r2's size, then move to r2's origin.
    to_origin = QTransform().translate(-r1.left(), -r1.top())
    rescale = QTransform().scale(r2.width()/r1.width(), r2.height()/r1.height())
    to_target = QTransform().translate(r2.left(), r2.top())
    return to_origin * rescale * to_target
def transform_for_zoom(self, factor, point, rect):
    """Return a transform that scales by ``factor`` around ``point``
    (identity when ``factor`` is 1).  ``rect`` is unused."""
    if factor == 1:
        return QTransform()
    transform = QTransform()
    # Scale about ``point``: shift it to the origin, scale, shift back.
    transform.translate(point.x(), point.y())
    transform.scale(factor, factor)
    transform.translate(-point.x(), -point.y())
    return transform
def rect_for_zoom(self, point, old_rect, scale = 2):
    """Return ``old_rect`` shrunk by ``scale``, centred on ``point`` and
    clamped to the graph area."""
    zoomed = QRectF(0, 0, old_rect.width() / scale, old_rect.height() / scale)
    zoomed.moveCenter(point)
    self.ensure_inside(zoomed, self.graph_area)
    return zoomed
def set_state(self, state):
    # Switch the interaction mode; drop references to in-progress rubber-band
    # items that belong to other modes.  (The items are only dereferenced
    # here -- scene removal happens in the mouse handlers.)
    self.state = state
    if state != SELECT_RECTANGLE:
        self._current_rs_item = None
    if state != SELECT_POLYGON:
        self._current_ps_item = None
def get_selected_points(self, xData, yData, validData):
    """Return parallel (selected, unselected) boolean lists with one entry
    per element of ``validData``; invalid rows are never selected."""
    if self.main_curve:
        # Curve points exist only for valid rows, so consume them in order.
        point_iter = iter(self.main_curve.points())
        selected = [next(point_iter).is_selected() if valid else False
                    for valid in validData]
    else:
        selected = self.selected_points(xData, yData)
    unselected = [not sel for sel in selected]
    return selected, unselected
def add_selection(self, reg):
    """
    Selects all points in the region ``reg`` using the current :attr:`selection_behavior`.
    """
    self.select_points(reg, self.selection_behavior)
    self.viewport().update()
    # Notify the owner widget so it can propagate the new selection.
    if self.auto_send_selection_callback:
        self.auto_send_selection_callback()
def points_equal(self, p1, p2):
    """True when the two points (tuples or QPointF) lie within the
    polygon-close threshold of each other (Manhattan distance)."""
    if type(p1) == tuple:
        p1 = QPointF(*p1)
    if type(p2) == tuple:
        p2 = QPointF(*p2)
    distance = (QPointF(p1) - QPointF(p2)).manhattanLength()
    return distance < self.polygon_close_treshold
def data_rect_for_axes(self, x_axis = xBottom, y_axis = yLeft):
    """
    Calculates the bounding rectangle in data coordinates for the axes ``x_axis`` and ``y_axis``.
    """
    # Prefer explicit bounds when both axes define them.
    if x_axis in self.axes and y_axis in self.axes:
        x_min, x_max = self.bounds_for_axis(x_axis, try_auto_scale=True)
        y_min, y_max = self.bounds_for_axis(y_axis, try_auto_scale=True)
        if (x_min or x_max) and (y_min or y_max):
            r = QRectF(x_min, y_min, x_max-x_min, y_max-y_min)
            return r
    # Otherwise fall back to the C++ plot's data rect, extended with the
    # lines of any custom (non-Cartesian) axes.
    r = orangeqt.Plot.data_rect_for_axes(self, x_axis, y_axis)
    for id, axis in self.axes.items():
        if id not in CartesianAxes and axis.data_line:
            r |= QRectF(axis.data_line.p1(), axis.data_line.p2())
    ## We leave a 5% margin on each side so the graph doesn't look overcrowded
    ## TODO: Perhaps change this from a fixed percentage to always round to a round number
    dx = r.width() / 20.0
    dy = r.height() / 20.0
    r.adjust(-dx, -dy, dx, dy)
    return r
def transform_for_axes(self, x_axis = xBottom, y_axis = yLeft):
    """
    Returns the graph transform that maps from data to scene coordinates using axes ``x_axis`` and ``y_axis``.

    Results are cached per axis pair; the cache is reset on re-layout.
    """
    key = (x_axis, y_axis)
    if key not in self._transform_cache:
        # Flip the graph area vertically: Qt's y axis grows downwards,
        # while graph coordinates grow upwards.
        flipped = QRectF(self.graph_area)
        old_top = flipped.top()
        flipped.setTop(flipped.bottom())
        flipped.setBottom(old_top)
        self._transform_cache[key] = self.transform_from_rects(
            self.data_rect_for_axes(x_axis, y_axis), flipped)
    return self._transform_cache[key]
def transform(self, axis_id, value):
    """
    Transforms the ``value`` from data to plot coordinates along the axis ``axis_id``.

    This function always ignores zoom. If you need to account for zooming, use :meth:`map_to_graph`.
    """
    if axis_id in XAxes:
        size, margin = self.graph_area.width(), self.graph_area.left()
    else:
        size, margin = self.graph_area.height(), self.graph_area.top()
    low, high = self.bounds_for_axis(axis_id)
    # Degenerate or unknown bounds map everything to 0.
    if low is None or high is None or high == low:
        return 0
    return margin + (value - low) / (high - low) * size
def inv_transform(self, axis_id, value):
    """
    Transforms the ``value`` from plot to data coordinates along the axis ``axis_id``.

    This function always ignores zoom. If you need to account for zooming, use :meth:`map_from_graph`.
    """
    if axis_id in XAxes:
        size, margin = self.graph_area.width(), self.graph_area.left()
    else:
        size, margin = self.graph_area.height(), self.graph_area.top()
    low, high = self.bounds_for_axis(axis_id)
    # Unknown bounds map everything to 0.
    if low is None or high is None:
        return 0
    return low + (value - margin) / size * (high - low)
def bounds_for_axis(self, axis_id, try_auto_scale=True):
    """
    Return the ``(lower, upper)`` bounds for ``axis_id``.

    Explicit (non-auto-scale) axis bounds take precedence; otherwise, when
    ``try_auto_scale`` is true, the bounds are taken from the underlying
    plot and padded by 5% on each side.  Returns ``(None, None)`` when no
    bounds are available.
    """
    if axis_id in self.axes and not self.axes[axis_id].auto_scale:
        return self.axes[axis_id].bounds()
    if try_auto_scale:
        lower, upper = orangeqt.Plot.bounds_for_axis(self, axis_id)
        if lower != upper:
            # Compute the margin once so the padding is symmetric.
            # (Previously the upper margin was derived from the already
            # shifted lower bound, giving slightly more padding on top.)
            margin = (upper - lower) / 20.0
            lower -= margin
            upper += margin
        return lower, upper
    else:
        return None, None
def enableYRaxis(self, enable=1):
    # Legacy helper: toggle the right y axis.
    self.set_axis_enabled(yRight, enable)
def enableLRaxis(self, enable=1):
    # Legacy helper: toggles the *left* y axis despite the "LR" in the name.
    self.set_axis_enabled(yLeft, enable)
def enableXaxis(self, enable=1):
    # Legacy helper: toggle the bottom x axis.
    self.set_axis_enabled(xBottom, enable)
def set_axis_enabled(self, axis, enable):
    # Create the axis on demand, then show/hide it and replot.
    if axis not in self.axes:
        self.add_axis(axis)
    self.axes[axis].setVisible(enable)
    self.replot()
@staticmethod
def axis_coordinate(point, axis_id):
    """Return the coordinate of ``point`` along ``axis_id`` (x for X axes,
    y for Y axes, None for anything else)."""
    if axis_id in XAxes:
        return point.x()
    if axis_id in YAxes:
        return point.y()
    return None
# ####################################################################
# return string with attribute names and their values for example example
def getExampleTooltipText(self, example, indices=None, maxIndices=20):
    # Build an HTML tooltip listing ``example``'s attribute values, its
    # class value and any meta attributes.  ``indices`` may hold attribute
    # names or positional indices; at most ``maxIndices`` attributes shown.
    if indices and type(indices[0]) == str:
        indices = [self.attributeNameIndex[i] for i in indices]
    if not indices:
        indices = list(range(len(self.dataDomain.attributes)))
    # don't show the class value twice
    if example.domain.classVar:
        classIndex = self.attributeNameIndex[example.domain.classVar.name]
        while classIndex in indices:
            indices.remove(classIndex)
    text = "<b>Attributes:</b><br>"
    for index in indices[:maxIndices]:
        attr = self.attributeNames[index]
        if attr not in example.domain: text += " "*4 + "%s = ?<br>" % (Qt.escape(attr))
        elif example[attr].isSpecial(): text += " "*4 + "%s = ?<br>" % (Qt.escape(attr))
        else: text += " "*4 + "%s = %s<br>" % (Qt.escape(attr), Qt.escape(str(example[attr])))
    if len(indices) > maxIndices:
        text += " "*4 + " ... <br>"
    if example.domain.classVar:
        # text[:-4] drops the trailing "<br>" before the separator.
        text = text[:-4]
        text += "<hr><b>Class:</b><br>"
        if example.getclass().isSpecial(): text += " "*4 + "%s = ?<br>" % (Qt.escape(example.domain.classVar.name))
        else: text += " "*4 + "%s = %s<br>" % (Qt.escape(example.domain.classVar.name), Qt.escape(str(example.getclass())))
    if len(example.domain.getmetas()) != 0:
        text = text[:-4]
        text += "<hr><b>Meta attributes:</b><br>"
        # show values of meta attributes
        for key in example.domain.getmetas():
            try: text += " "*4 + "%s = %s<br>" % (Qt.escape(example.domain[key].name), Qt.escape(str(example[key])))
            except: pass
    return text[:-4]  # remove the last <br>
# show a tooltip at x,y with text. if the mouse will move for more than 2 pixels it will be removed
def showTip(self, x, y, text):
    # The QRect around (x, y) tells Qt how far the cursor may move before
    # the tooltip is hidden again.
    QToolTip.showText(self.mapToGlobal(QPoint(x, y)), text, self, QRect(x-3,y-3,6,6))
def notify_legend_moved(self, pos):
    """
    Called while the user drags the legend to ``pos`` (scene coordinates).

    Docks the legend to the nearest edge of the outside area when the drop
    position is within ``offset`` pixels of it, otherwise lets the legend
    float freely; the legend margin is animated to match.
    """
    self._legend_moved = True
    # (The previous implementation also computed an unused ``legend_rect()``
    # local here; ``legend_rect`` is side-effect-free, so it was removed.)
    g = getattr(self, '_legend_outside_area', QRectF())
    p = QPointF()
    rect = QRectF()
    offset = 20
    if pos.x() > g.right() - offset:
        # Dock to the right edge, vertical orientation.
        self._legend.set_orientation(Qt.Vertical)
        rect.setRight(self._legend.boundingRect().width())
        p = g.topRight() - self._legend.boundingRect().topRight()
    elif pos.x() < g.left() + offset:
        # Dock to the left edge.
        self._legend.set_orientation(Qt.Vertical)
        rect.setLeft(self._legend.boundingRect().width())
        p = g.topLeft()
    elif pos.y() < g.top() + offset:
        # Dock to the top edge, horizontal orientation.
        self._legend.set_orientation(Qt.Horizontal)
        rect.setTop(self._legend.boundingRect().height())
        p = g.topLeft()
    elif pos.y() > g.bottom() - offset:
        # Dock to the bottom edge.
        self._legend.set_orientation(Qt.Horizontal)
        rect.setBottom(self._legend.boundingRect().height())
        p = g.bottomLeft() - self._legend.boundingRect().bottomLeft()
    if p.isNull():
        # Not near any edge: the legend floats freely at the drop position.
        self._legend.set_floating(True, pos)
    else:
        self._legend.set_floating(False, p)
    if rect != self._legend_margin:
        orientation = Qt.Horizontal if rect.top() or rect.bottom() else Qt.Vertical
        self._legend.set_orientation(orientation)
        self.animate(self, 'legend_margin', rect, duration=100)
def get_legend_margin(self):
    # Property getter for ``legend_margin``.
    return self._legend_margin
def set_legend_margin(self, value):
    # Property setter: a changed margin affects layout and axis geometry.
    self._legend_margin = value
    self.update_layout()
    self.update_axes()
legend_margin = pyqtProperty(QRectF, get_legend_margin, set_legend_margin)
def update_curves(self):
    # Re-apply the configured point size and alpha value to all curves
    # (or just to the single main curve, when present).
    if self.main_curve:
        self.main_curve.set_alpha_value(self.alpha_value)
    else:
        for c in self.plot_items():
            if isinstance(c, orangeqt.Curve) and not getattr(c, 'ignore_alpha', False):
                # Suspend auto-update so all changes are applied in one pass.
                au = c.auto_update()
                c.set_auto_update(False)
                c.set_point_size(self.point_width)
                color = c.color()
                color.setAlpha(self.alpha_value)
                c.set_color(color)
                c.set_auto_update(au)
                c.update_properties()
    self.viewport().update()
# Changing point size or alpha value uses the same refresh path.
update_point_size = update_curves
update_alpha_value = update_curves
def update_antialiasing(self, use_antialiasing=None):
    # Optionally change the setting, then (re)apply it to the view.
    if use_antialiasing is not None:
        self.antialias_plot = use_antialiasing
    self.setRenderHint(QPainter.Antialiasing, self.antialias_plot)
def update_animations(self, use_animations=None):
    # Toggle plot-level and per-point animations together.
    if use_animations is not None:
        self.animate_plot = use_animations
        self.animate_points = use_animations
def update_performance(self, num_points = None):
    # With many points, disable expensive eye-candy (animations); restore
    # the saved settings once the point count drops below the threshold.
    if self.auto_adjust_performance:
        if not num_points:
            if self.main_curve:
                num_points = len(self.main_curve.points())
            else:
                num_points = sum( len(c.points()) for c in self.curves )
        if num_points > self.disable_animations_threshold:
            self.disabled_animate_points = self.animate_points
            self.animate_points = False
            self.disabled_animate_plot = self.animate_plot
            self.animate_plot = False
            # NOTE(review): this saves ``animate_points`` instead of
            # ``antialias_lines`` and then forces antialiasing *on*; looks
            # like a leftover -- confirm the intended save/restore pair.
            self.disabled_antialias_lines = self.animate_points
            self.antialias_lines = True
        elif hasattr(self, 'disabled_animate_points'):
            self.animate_points = self.disabled_animate_points
            del self.disabled_animate_points
            self.animate_plot = self.disabled_animate_plot
            del self.disabled_animate_plot
            self.antialias_lines = True # self.disabled_antialias_lines
            del self.disabled_antialias_lines
def animate(self, target, prop_name, end_val, duration = None, start_val = None):
    """
    Animate ``prop_name`` of ``target`` towards ``end_val``; when plot
    animations are disabled, the property is simply set instead.

    ``duration`` is in milliseconds; ``start_val`` overrides the property's
    current value as the animation's starting point.
    """
    # Prune finished animations.  Rebuilding the list avoids the
    # remove-while-iterating bug of the previous implementation, which
    # could skip entries that followed a removed one.
    self._animations = [a for a in self._animations
                        if a.state() != QPropertyAnimation.Stopped]
    if self.animate_plot:
        a = QPropertyAnimation(target, prop_name)
        a.setEndValue(end_val)
        if start_val is not None:
            a.setStartValue(start_val)
        if duration:
            a.setDuration(duration)
        # Keep a reference so the animation is not garbage-collected mid-run.
        self._animations.append(a)
        a.start(QPropertyAnimation.KeepWhenStopped)
    else:
        target.setProperty(prop_name, end_val)
def clear_selection(self):
    # Deselect every point on the plot.
    self.unselect_all_points()
def send_selection(self):
    # Push the current selection to the owner widget, if a callback is set.
    if self.auto_send_selection_callback:
        self.auto_send_selection_callback()
def pan(self, delta):
    """Pan the zoomed view by ``delta`` (a tuple or a QPointF, in scene
    pixels), keeping the zoom rect inside the graph area."""
    if type(delta) == tuple:
        dx, dy = delta
    else:
        dx, dy = delta.x(), delta.y()
    # Convert the scene-space delta into zoom-rect space using the current
    # zoom transform's scale factors.
    zt = self.zoom_transform()
    shift = QPointF(dx / zt.m11(), dy / zt.m22())
    shifted = QRectF(self.zoom_rect)
    shifted.translate(-shift)
    self.ensure_inside(shifted, self.graph_area)
    self.zoom_rect = shifted
def zoom_to_rect(self, rect):
    # Animate the zoom to ``rect`` (clamped to the graph area); the larger
    # previous rect is pushed so zoom_back() can return to it.
    self.ensure_inside(rect, self.graph_area)
    # add to zoom_stack if zoom_rect is larger
    if self.zoom_rect.width() > rect.width() or self.zoom_rect.height() > rect.height():
        self.zoom_stack.append(self.zoom_rect)
    self.animate(self, 'zoom_rect', rect, start_val = self.get_zoom_rect())
def zoom_back(self):
    # Return to the previous zoom level, if any.
    if self.zoom_stack:
        rect = self.zoom_stack.pop()
        self.animate(self, 'zoom_rect', rect, start_val = self.get_zoom_rect())
def reset_zoom(self):
    # Drop the explicit zoom rect so the full graph area is shown again.
    self._zoom_rect = None
    self.update_zoom()
def zoom_transform(self):
    # The transform mapping the current zoom rect onto the full graph area.
    return self.transform_from_rects(self.zoom_rect, self.graph_area)
def zoom_in(self, point):
    # Double the magnification around ``point``.
    self.zoom(point, scale = 2)
def zoom_out(self, point):
    # Halve the magnification around ``point``.
    self.zoom(point, scale = 0.5)
def zoom(self, point, scale):
    """
    Zoom by ``scale`` around ``point`` (scene coordinates); ``scale`` > 1
    zooms in, ``scale`` < 1 zooms out.
    """
    # (Removed a leftover debug ``print(len(self.zoom_stack))``.)
    # Map the anchor point back into un-zoomed coordinates.
    t, ok = self._zoom_transform.inverted()
    point = point * t
    r = QRectF(self.zoom_rect)
    i = 1.0/scale
    # Shrink (or grow) the zoom rect around the anchor point.
    r.setTopLeft(point*(1-i) + r.topLeft()*i)
    r.setBottomRight(point*(1-i) + r.bottomRight()*i)
    self.ensure_inside(r, self.graph_area)
    # remove smaller zoom rects from stack
    while len(self.zoom_stack) > 0 and r.width() >= self.zoom_stack[-1].width() and r.height() >= self.zoom_stack[-1].height():
        self.zoom_stack.pop()
    self.zoom_to_rect(r)
def get_zoom_rect(self):
    """Return the active zoom rect, falling back to the whole graph area."""
    return self._zoom_rect if self._zoom_rect else self.graph_area
def set_zoom_rect(self, rect):
    # Setting the zoom rect recomputes the zoom transform and refreshes the view.
    self._zoom_rect = rect
    self._zoom_transform = self.transform_from_rects(rect, self.graph_area)
    self.update_zoom()
zoom_rect = pyqtProperty(QRectF, get_zoom_rect, set_zoom_rect)
@staticmethod
def ensure_inside(small_rect, big_rect):
    # Clamp ``small_rect`` (modified in place) so it fits inside
    # ``big_rect``: first shrink it if it is too large, then shift it back
    # inside edge by edge.
    if small_rect.width() > big_rect.width():
        small_rect.setWidth(big_rect.width())
    if small_rect.height() > big_rect.height():
        small_rect.setHeight(big_rect.height())
    if small_rect.right() > big_rect.right():
        small_rect.moveRight(big_rect.right())
    elif small_rect.left() < big_rect.left():
        small_rect.moveLeft(big_rect.left())
    if small_rect.bottom() > big_rect.bottom():
        small_rect.moveBottom(big_rect.bottom())
    elif small_rect.top() < big_rect.top():
        small_rect.moveTop(big_rect.top())
def shuffle_points(self):
    # Randomize the drawing order of the main curve's points, if present.
    if self.main_curve:
        self.main_curve.shuffle_points()
def set_progress(self, done, total):
    # Forward progress to the owner widget's progress bar (no-op without one).
    if not self.widget:
        return
    if done == total:
        self.widget.progressBarFinished()
    else:
        self.widget.progressBarSet(100.0 * done / total)
def start_progress(self):
    # Initialise the owner widget's progress bar, if attached.
    if self.widget:
        self.widget.progressBarInit()
def end_progress(self):
    # Close the owner widget's progress bar, if attached.
    if self.widget:
        self.widget.progressBarFinished()
def is_axis_auto_scale(self, axis_id):
    # Axes without an explicit entry auto-scale unless a data range is set.
    if axis_id not in self.axes:
        return axis_id not in self.data_range
    return self.axes[axis_id].auto_scale
def axis_line(self, rect, id, invert_y = False):
    """Return the QLineF along the edge of ``rect`` on which axis ``id``
    lies, or None for an unknown axis.  With ``invert_y`` the rectangle is
    flipped vertically first (scene vs. graph y direction)."""
    if invert_y:
        flipped = QRectF(rect)
        flipped.setTop(rect.bottom())
        flipped.setBottom(rect.top())
        rect = flipped
    if id == xBottom:
        return QLineF(rect.topLeft(), rect.topRight())
    if id == xTop:
        return QLineF(rect.bottomLeft(), rect.bottomRight())
    if id == yLeft:
        return QLineF(rect.topLeft(), rect.bottomLeft())
    if id == yRight:
        return QLineF(rect.topRight(), rect.bottomRight())
    return None
def color(self, role, group = None):
    """Return the palette color for ``role``, optionally within ``group``."""
    palette = self.palette()
    return palette.color(group, role) if group else palette.color(role)
def set_palette(self, p):
    '''
        Sets the plot palette to ``p``.

        :param p: The new color palette
        :type p: :obj:`.QPalette`
    '''
    # A palette change affects every painted element, so replot everything.
    self.setPalette(p)
    self.replot()
def update_theme(self):
    '''
        Updates the current color theme, depending on the value of :attr:`theme_name`.
    '''
    palettes = {
        'default': OWPalette.System,
        'light': OWPalette.Light,
        'dark': OWPalette.Dark,
    }
    palette = palettes.get(self.theme_name.lower())
    # Unknown theme names are ignored, matching the old if/elif chain.
    if palette is not None:
        self.set_palette(palette)
| bsd-2-clause |
lgarren/spack | var/spack/repos/builtin/packages/r-packrat/package.py | 3 | 1642 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RPackrat(RPackage):
    """Manage the R packages your project depends on in an isolated, portable,
    and reproducible way."""

    homepage = "https://github.com/rstudio/packrat/"
    url = "https://cran.r-project.org/src/contrib/packrat_0.4.7-1.tar.gz"

    # Checksums are MD5 digests of the CRAN source tarballs.
    version('0.4.8-1', '14e82feba55fcda923396282fc490038')
    version('0.4.7-1', '80c2413269b292ade163a70ba5053e84')
| lgpl-2.1 |
dan1/horizon-x509 | openstack_dashboard/dashboards/admin/networks/tables.py | 9 | 4497 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks \
import tables as project_tables
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
class DeleteNetwork(policy.PolicyTargetMixin, tables.DeleteAction):
    """Table action that deletes the selected networks via Neutron."""

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Network",
            u"Delete Networks",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Network",
            u"Deleted Networks",
            count
        )

    policy_rules = (("network", "delete_network"),)

    def delete(self, request, obj_id):
        # Delete one network; on failure, report the error and redirect to
        # the index page (exceptions.handle raises a redirect exception).
        try:
            api.neutron.network_delete(request, obj_id)
        except Exception:
            msg = _('Failed to delete network %s') % obj_id
            LOG.info(msg)
            redirect = reverse('horizon:admin:networks:index')
            exceptions.handle(request, msg, redirect=redirect)
class CreateNetwork(tables.LinkAction):
    """Link action that opens the network-creation modal dialog."""
    name = "create"
    verbose_name = _("Create Network")
    url = "horizon:admin:networks:create"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("network", "create_network"),)
class EditNetwork(policy.PolicyTargetMixin, tables.LinkAction):
    """Per-row link action that opens the network-editing modal dialog."""
    name = "update"
    verbose_name = _("Edit Network")
    url = "horizon:admin:networks:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("network", "update_network"),)
# def _get_subnets(network):
# cidrs = [subnet.get('cidr') for subnet in network.subnets]
# return ','.join(cidrs)
DISPLAY_CHOICES = (
("up", pgettext_lazy("Admin state of a Network", u"UP")),
("down", pgettext_lazy("Admin state of a Network", u"DOWN")),
)
class NetworksTable(tables.DataTable):
    """Admin-dashboard table of Neutron networks, including owning project."""
    tenant = tables.Column("tenant_name", verbose_name=_("Project"))
    name = tables.Column("name_or_id", verbose_name=_("Network Name"),
                         link='horizon:admin:networks:detail')
    subnets = tables.Column(project_tables.get_subnets,
                            verbose_name=_("Subnets Associated"),)
    num_agents = tables.Column("num_agents",
                               verbose_name=_("DHCP Agents"))
    shared = tables.Column("shared", verbose_name=_("Shared"),
                           filters=(filters.yesno, filters.capfirst))
    external = tables.Column("router:external",
                             verbose_name=_("External"),
                             filters=(filters.yesno, filters.capfirst))
    status = tables.Column(
        "status", verbose_name=_("Status"),
        display_choices=project_tables.STATUS_DISPLAY_CHOICES)
    admin_state = tables.Column("admin_state",
                                verbose_name=_("Admin State"),
                                display_choices=DISPLAY_CHOICES)

    class Meta(object):
        name = "networks"
        verbose_name = _("Networks")
        table_actions = (CreateNetwork, DeleteNetwork,
                         project_tables.NetworksFilterAction)
        row_actions = (EditNetwork, DeleteNetwork)

    def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
        super(NetworksTable, self).__init__(
            request, data=data,
            needs_form_wrapper=needs_form_wrapper,
            **kwargs)
        # Hide the DHCP agents column when the scheduler extension is not
        # available, since its data cannot be fetched in that case.
        if not api.neutron.is_extension_supported(request,
                                                  'dhcp_agent_scheduler'):
            del self.columns['num_agents']
| apache-2.0 |
coala-analyzer/coala-bears | bears/perl/PerlCriticBear.py | 3 | 1838 | from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
# The regex parses perlcritic's default output, e.g.:
#   "<message> at line <L>, column <C>. <policy> (Severity: <1-5>)"
# perlcritic severities 1-2 map to MAJOR, 3-4 to NORMAL, 5 to INFO.
@linter(executable='perlcritic',
        output_format='regex',
        output_regex=r'(?P<message>.+) at '
                     r'line (?P<line>\d+), '
                     r'column (?P<column>\d+)\. '
                     r'(?P<origin>.+) '
                     r'\(Severity: (?P<severity>\d+)\)',
        severity_map={'1': RESULT_SEVERITY.MAJOR,
                      '2': RESULT_SEVERITY.MAJOR,
                      '3': RESULT_SEVERITY.NORMAL,
                      '4': RESULT_SEVERITY.NORMAL,
                      '5': RESULT_SEVERITY.INFO})
class PerlCriticBear:
    """
    Check the code with perlcritic. This will run perlcritic over
    each of the files seperately.
    """
    LANGUAGES = {'Perl'}
    REQUIREMENTS = {
        DistributionRequirement(
            apt_get='libperl-critic-perl',
            brew=None,
            dnf='perl-Perl-Critic',
            portage='dev-perl/Perl-Critic',
            xbps=None,
            yum='perl-Perl-Critic',
            zypper='perl-Perl-Critic',
        ),
    }
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Syntax', 'Formatting', 'Code Simplification'}

    @staticmethod
    def create_arguments(filename, file, config_file,
                         perlcritic_profile: str = '',
                         ):
        """
        :param perlcritic_profile: Location of the perlcriticrc config file.
        """
        # --no-color keeps the output machine-parseable by the regex above.
        args = ('--no-color',)
        if perlcritic_profile:
            args += ('--profile', perlcritic_profile)
        return args + (filename,)
| agpl-3.0 |
stackforge/python-openstacksdk | openstack/tests/unit/identity/v3/test_role_assignment.py | 3 | 1553 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
from openstack.identity.v3 import role_assignment
IDENTIFIER = 'IDENTIFIER'
# Canned role-assignment payload exercised by the tests below.
EXAMPLE = {
    'id': IDENTIFIER,
    'links': {'self': 'http://example.com/user1'},
    'scope': {'domain': {'id': '2'}},
    'user': {'id': '3'},
    'group': {'id': '4'}
}
class TestRoleAssignment(base.TestCase):
    """Unit tests for the identity v3 RoleAssignment resource."""

    def test_basic(self):
        # Resource/collection keys, base path and allowed operations.
        sot = role_assignment.RoleAssignment()
        self.assertEqual('role_assignment', sot.resource_key)
        self.assertEqual('role_assignments', sot.resources_key)
        self.assertEqual('/role_assignments',
                         sot.base_path)
        self.assertTrue(sot.allow_list)

    def test_make_it(self):
        # All EXAMPLE fields must round-trip through the constructor.
        sot = role_assignment.RoleAssignment(**EXAMPLE)
        self.assertEqual(EXAMPLE['id'], sot.id)
        self.assertEqual(EXAMPLE['links'], sot.links)
        self.assertEqual(EXAMPLE['scope'], sot.scope)
        self.assertEqual(EXAMPLE['user'], sot.user)
        self.assertEqual(EXAMPLE['group'], sot.group)
| apache-2.0 |
Antiun/odoomrp-utils | stock_warehouse_orderpoint_stock_info/models/stock_warehouse_orderpoint.py | 7 | 1053 | # -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class StockWarehouseOrderpoint(models.Model):
    """Reordering rule extended with stock-availability helper fields."""
    _inherit = 'stock.warehouse.orderpoint'
    @api.one
    def _product_available_qty(self):
        # Quantity on hand of this rule's product, evaluated at the rule's
        # own stock location (location passed through the context).
        self.product_location_qty = self.product_id.with_context(
            location=self.location_id.id
        )._product_available()[self.product_id.id]['qty_available']
    @api.one
    @api.depends('product_location_qty', 'product_min_qty')
    def _product_available(self):
        # True when on-hand stock strictly exceeds the configured minimum.
        self.available = self.product_location_qty > self.product_min_qty
    # Computed, non-stored: current on-hand quantity at the rule's location.
    product_location_qty = fields.Float(
        string='Quantity On Location', compute='_product_available_qty')
    # Stored so it can be searched/grouped; recomputed via @api.depends.
    available = fields.Boolean(
        string='Is enough product available?', compute='_product_available',
        store=True)
| agpl-3.0 |
Jon-ICS/mraa | examples/python/mcp3004.py | 10 | 1458 | #!/usr/bin/env python
# Author: Henry Bruce <henry.bruce@intel.com>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
# Read from MCP3004 ADC pin 0 in single ended mode
import mraa
import time

# SPI bus 0; the MCP3004 must be wired to this bus's chip select.
dev = mraa.Spi(0)

# MCP3004 request frame (3 bytes): start bit, then single-ended read of
# channel 0, then a don't-care byte that clocks out the low result bits.
txbuf = bytearray(3)
txbuf[0] = 0x01  # start bit
txbuf[1] = 0x80  # single-ended mode, channel 0
txbuf[2] = 0x00  # padding byte

# Poll the ADC forever, printing the 10-bit conversion twice a second.
while True:
    rxbuf = dev.write(txbuf)
    # The 10-bit result spans the low 2 bits of byte 1 and all of byte 2.
    value = ((rxbuf[1] & 0x03) << 8) | rxbuf[2]
    # Parenthesized print works on both Python 2 and 3 (the bare
    # "print value" statement was Python-2 only).
    print(value)
    time.sleep(0.5)
| mit |
lamondlab/sipify | CppHeaderParser-2.7/CppHeaderParser/CppHeaderParser.py | 1 | 114661 | #!/usr/bin/python
#
# Author: Jashua R. Cloutier (contact via https://bitbucket.org/senex)
# Project: http://senexcanis.com/open-source/cppheaderparser/
#
# Copyright (C) 2011, Jashua R. Cloutier
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Jashua R. Cloutier nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission. Stories,
# blog entries etc making reference to this project may mention the
# name Jashua R. Cloutier in terms of project originator/creator etc.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# The CppHeaderParser.py script is written in Python 2.4 and released to
# the open source community for continuous improvements under the BSD
# 2.0 new license, which can be found at:
#
# http://www.opensource.org/licenses/bsd-license.php
#
"""Parse C++ header files and generate a data structure
representing the class
"""
import ply.lex as lex
import os
import sys
import re
import inspect
def lineno():
    """Returns the current line number in our program."""
    # f_back is the caller's frame, so this reports the call site's line.
    return inspect.currentframe().f_back.f_lineno
version = __version__ = "2.7"
# Token names used by the ply lexer; each t_<NAME> definition below
# supplies the matching regex (or a rule function for the complex ones).
tokens = [
    'NUMBER',
    'FLOAT_NUMBER',
    'TEMPLATE_NAME',
    'NAME',
    'OPEN_PAREN',
    'CLOSE_PAREN',
    'OPEN_BRACE',
    'CLOSE_BRACE',
    'OPEN_SQUARE_BRACKET',
    'CLOSE_SQUARE_BRACKET',
    'COLON',
    'SEMI_COLON',
    'COMMA',
    'TAB',
    'BACKSLASH',
    'PIPE',
    'PERCENT',
    'EXCLAMATION',
    'CARET',
    'COMMENT_SINGLELINE',
    'COMMENT_MULTILINE',
    'PRECOMP_MACRO',
    'PRECOMP_MACRO_CONT',
    'ASTERISK',
    'AMPERSTAND',
    'EQUALS',
    'MINUS',
    'PLUS',
    'DIVIDE',
    'CHAR_LITERAL',
    'STRING_LITERAL',
    'NEW_LINE',
    'SQUOTE',
]
# Characters the lexer silently skips (whitespace plus a few punctuation
# characters this parser does not care about).
t_ignore = " \r.?@\f"
t_NUMBER = r'[0-9][0-9XxA-Fa-f]*'
t_FLOAT_NUMBER = r'[-+]?[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?'
# Placeholder names that template expressions are replaced with pre-lexing.
t_TEMPLATE_NAME = r'CppHeaderParser_template_[0-9]+'
t_NAME = r'[<>A-Za-z_~][A-Za-z0-9_]*'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
t_OPEN_BRACE = r'{'
t_CLOSE_BRACE = r'}'
t_OPEN_SQUARE_BRACKET = r'\['
t_CLOSE_SQUARE_BRACKET = r'\]'
t_SEMI_COLON = r';'
t_COLON = r':'
t_COMMA = r','
t_TAB = r'\t'
t_BACKSLASH = r'\\'
t_PIPE = r'\|'
t_PERCENT = r'%'
t_CARET = r'\^'
t_EXCLAMATION = r'!'
# Preprocessor directives (#include, #define, ...) and line continuations.
t_PRECOMP_MACRO = r'\#.*'
t_PRECOMP_MACRO_CONT = r'.*\\\n'
def t_COMMENT_SINGLELINE(t):
    r'\/\/.*\n'
    # "///" and "//!" mark doxygen comments: accumulate them so the next
    # declaration parsed can pick them up from the module-level cache.
    global doxygenCommentCache
    if t.value.startswith("///") or t.value.startswith("//!"):
        if doxygenCommentCache:
            doxygenCommentCache += "\n"
        if t.value.endswith("\n"):
            doxygenCommentCache += t.value[:-1]
        else:
            doxygenCommentCache += t.value
    # Keep ply's line counter in sync with the newline(s) consumed.
    t.lexer.lineno += len([a for a in t.value if a=="\n"])
t_ASTERISK = r'\*'
t_MINUS = r'\-'
t_PLUS = r'\+'
# Negative lookahead so "//" lexes as a comment, not two divides.
t_DIVIDE = r'/(?!/)'
t_AMPERSTAND = r'&'
t_EQUALS = r'='
t_CHAR_LITERAL = "'.'"
t_SQUOTE = "'"
#found at http://wordaligned.org/articles/string-literals-and-regular-expressions
#TODO: This does not work with the string "bla \" bla"
t_STRING_LITERAL = r'"([^"\\]|\\.)*"'
#Found at http://ostermiller.org/findcomment.html
def t_COMMENT_MULTILINE(t):
    r'/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/'
    # "/**" and "/*!" mark doxygen blocks: normalize whitespace and cache
    # the text for the next declaration parsed.
    global doxygenCommentCache
    if t.value.startswith("/**") or t.value.startswith("/*!"):
        #not sure why, but get double new lines
        v = t.value.replace("\n\n", "\n")
        #strip prefixing whitespace
        v = re.sub("\n[\s]+\*", "\n*", v)
        doxygenCommentCache += v
    # Keep ply's line counter in sync with newlines inside the comment.
    t.lexer.lineno += len([a for a in t.value if a=="\n"])
def t_NEWLINE(t):
    r'\n+'
    # Count every newline in the run so reported line numbers stay accurate.
    t.lexer.lineno += len(t.value)
def t_error(v):
    # ply calls this on an unmatchable character.
    # NOTE(review): the doubled parens print a single tuple -- this looks
    # like a 2to3 artifact; output format preserved deliberately.
    print(( "Lex error: ", v ))
lex.lex()  # build the module-level lexer from the t_* rules above
# Diagnostic output switches; 1/0 truthy flags checked by the helpers below.
# Controls error_print
print_errors = 1
# Controls warning_print
print_warnings = 1
# Controls debug_print
debug = 0
# Controls trace_print
debug_trace = 0
def error_print(arg):
    """Print a parser error (tagged with the caller's line) unless silenced."""
    if print_errors: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))
def warning_print(arg):
    """Print a parser warning (tagged with the caller's line) unless silenced."""
    if print_warnings: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))
def debug_print(arg):
    """Print a debug message (tagged with the caller's line) when `debug` is set."""
    global debug
    if debug: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))
def trace_print(*arg):
    """Print a low-level trace line when `debug_trace` is set."""
    global debug_trace
    if debug_trace:
        sys.stdout.write("[%s] "%(inspect.currentframe().f_back.f_lineno))
        for a in arg: sys.stdout.write("%s "%a)
        sys.stdout.write("\n")
# Access specifiers recognized inside class bodies.  Qt slot/signal section
# headers are included because they act like access specifiers when parsing.
supportedAccessSpecifier = [
    'public',
    'protected',
    'private',
    'public slots',
    'protected slots',
    'private slots',
    'public Q_SLOTS',
    'protected Q_SLOTS',
    'private Q_SLOTS',
    'signals',
    'Q_SIGNALS',
]
#Symbols to ignore, usually special macros
ignoreSymbols = [
    'Q_OBJECT',
    'Q_PROPERTY()',
    'Q_DECLARE_FLAGS()',
    'Q_INVOKABLE',
]
# Doxygen text gathered by the lexer, consumed by the next parsed declaration.
doxygenCommentCache = ""
#Track what was added in what order and at what depth
parseHistory = []
def is_namespace(nameStack):
    """Determines if a namespace is being specified"""
    # An empty stack declares nothing; otherwise the declaration kind is
    # always carried by the first token.
    return len(nameStack) > 0 and nameStack[0] == "namespace"
def is_enum_namestack(nameStack):
    """Determines if a namestack is an enum namestack"""
    if not nameStack:
        return False
    # Either a plain "enum ..." or the C-style "typedef enum ..." form.
    if nameStack[0] == "enum":
        return True
    return len(nameStack) > 1 and nameStack[0] == "typedef" and nameStack[1] == "enum"
def is_fundamental(s):
    """True when every whitespace-separated token of s is a C/C++ builtin
    type keyword (or '*'); vacuously True for an empty string."""
    builtin_tokens = {"size_t", "struct", "union", "unsigned", "signed",
                      "bool", "char", "short", "int", "float", "double",
                      "long", "void", "*"}
    return all(tok in builtin_tokens for tok in s.split())
def is_function_pointer_stack(stack):
    """Count how many non-nested paranthesis are in the stack. Useful for determining if a stack is a function pointer"""
    depth = 0            # current '(' nesting level
    closed_groups = 0    # top-level "(...)" groups fully closed so far
    star_opens_first = False  # '*' right after the very first '('
    prev = None
    for tok in stack:
        if tok == "(":
            depth += 1
        elif tok == ")" and depth > 0:
            depth -= 1
            if depth == 0:
                closed_groups += 1
        elif tok == "*" and prev == "(" and closed_groups == 0 and depth == 1:
            star_opens_first = True
        prev = tok
    # "ret ( * name ) ( args )" has exactly two top-level groups, the first
    # opening with a star.
    return star_opens_first and closed_groups == 2
def is_method_namestack(stack):
    """Heuristic: does this token stack declare or define a method?

    The elif chain below is order-sensitive: each branch assumes all the
    previous ones failed.
    """
    r = False
    if '(' not in stack: r = False
    elif stack[0] == 'typedef': r = False # TODO deal with typedef function prototypes
    #elif '=' in stack and stack.index('=') < stack.index('(') and stack[stack.index('=')-1] != 'operator': r = False #disabled July6th - allow all operators
    elif 'operator' in stack: r = True # allow all operators
    elif '{' in stack and stack.index('{') < stack.index('('): r = False # struct that looks like a method/class
    elif '(' in stack and ')' in stack:
        # A full definition carries braces; a prototype ends in ';' unless
        # it is actually a function-pointer variable declaration.
        if '{' in stack and '}' in stack: r = True
        elif stack[-1] == ';':
            if is_function_pointer_stack(stack):
                r = False
            else:
                r = True
        elif '{' in stack: r = True # ideally we catch both braces... TODO
    else: r = False
    #Test for case of property set to something with parens such as "static const int CONST_A = (1 << 7) - 1;"
    if r and "(" in stack and "=" in stack and 'operator' not in stack:
        if stack.index("=") < stack.index("("): r = False
    return r
def is_property_namestack(nameStack):
    """Heuristic: does this token stack declare a property (member variable)?"""
    if '(' not in nameStack and ')' not in nameStack:
        # No parens at all: plain variable declaration.
        found = True
    elif ("(" in nameStack and "=" in nameStack
          and nameStack.index("=") < nameStack.index("(")):
        # Parens only appear on the right of '=': an initializer expression.
        found = True
    else:
        found = False
    # A function-pointer declaration is still a property (short-circuit
    # keeps the helper call conditional, as before).
    return found or is_function_pointer_stack(nameStack)
def detect_lineno(s):
    """Detect the line number for a given token string"""
    # TagStr tokens carry their own line number; anything else (or an
    # untagged TagStr, which reports -1) falls back to the parser's current
    # line, tracked in the module-global curLine.
    try:
        rtn = s.lineno()
        if rtn != -1:
            return rtn
    except: pass
    global curLine
    return curLine
def filter_out_attribute_keyword(stack):
    """Strips __attribute__ and its parenthetical expression from the stack"""
    if "__attribute__" not in stack:
        return stack
    try:
        debug_print("Stripping __attribute__ from %s"% stack)
        start = stack.index("__attribute__")
        end = start + 1  # default: strip only the bare keyword
        if stack[start + 1] == '(':
            # Walk to the matching close paren of the attribute expression.
            depth = 1
            for pos in range(start + 2, len(stack)):
                tok = stack[pos]
                if tok == '(':
                    depth += 1
                elif tok == ')':
                    depth -= 1
                    if depth == 0:
                        end = pos + 1
                        break
        stripped = stack[:start] + stack[end:]
        debug_print("stripped stack is %s"% stripped)
        return stripped
    except:
        # Malformed/truncated input: return the stack untouched.
        return stack
class TagStr(str):
    """A str subclass that can carry the source line number it came from.

    Line numbers are kept in a class-level registry keyed by object id so
    the string instances themselves stay immutable.
    """
    lineno_reg = {}

    def __new__(cls, *args, **kw):
        obj = str.__new__(cls, *args)
        try:
            TagStr.lineno_reg[id(obj)] = kw["lineno"]
        except KeyError:
            pass  # no line number supplied; lineno() will report -1
        return obj

    def __del__(self):
        # Drop the registry entry so recycled ids cannot alias stale lines.
        TagStr.lineno_reg.pop(id(self), None)

    def lineno(self):
        """Return the tagged line number, or -1 when none was recorded."""
        return TagStr.lineno_reg.get(id(self), -1)
class CppParseError(Exception): pass
class CppClass(dict):
    """Takes a name stack and turns it into a class

    Contains the following Keys:
    self['name'] - Name of the class
    self['doxygen'] - Doxygen comments associated with the class if they exist
    self['inherits'] - List of Classes that this one inherits where the values
       are of the form {"access": Anything in supportedAccessSpecifier
                        "class": Name of the class}
    self['methods'] - Dictionary where keys are from supportedAccessSpecifier
       and values are a lists of CppMethod's
    self['properties'] - Dictionary where keys are from supportedAccessSpecifier
       and values are lists of CppVariable's
    self['enums'] - Dictionary where keys are from supportedAccessSpecifier and
       values are lists of CppEnum's
    self['structs'] - Dictionary where keys are from supportedAccessSpecifier and
       values are lists of nested Struct's

    An example of how this could look is as follows:
    #self =
    {
        'name': ""
        'inherits':[]
        'methods':
        {
            'public':[],
            'protected':[],
            'private':[]
        },
        'properties':
        {
            'public':[],
            'protected':[],
            'private':[]
        },
        'enums':
        {
            'public':[],
            'protected':[],
            'private':[]
        }
    }
    """
    def get_all_methods(self):
        """All methods across every access specifier, as one list."""
        r = []
        for typ in supportedAccessSpecifier: r += self['methods'][typ]
        return r
    def get_all_method_names( self ):
        """All method names across every access specifier."""
        r = []
        for typ in supportedAccessSpecifier: r += self.get_method_names(typ) # returns list
        return r
    def get_all_pure_virtual_methods( self ):
        """Mapping of name -> CppMethod for every pure virtual method."""
        r = {}
        for typ in supportedAccessSpecifier: r.update(self.get_pure_virtual_methods(typ)) # returns dict
        return r
    def get_method_names( self, type='public' ): return [ meth['name'] for meth in self['methods'][ type ] ]  # names for one access specifier
    def get_pure_virtual_methods( self, type='public' ):
        """Mapping of name -> CppMethod for pure virtuals of one access specifier."""
        r = {}
        for meth in self['methods'][ type ]:
            if meth['pure_virtual']: r[ meth['name'] ] = meth
        return r
    def __init__(self, nameStack, curTemplate):
        """Parse a class/struct declaration token stack.

        nameStack is the token list from the lexer (e.g. ['class', 'Foo',
        ':', 'public', 'Bar']); curTemplate is the template<> text that
        preceded it, or None.
        """
        self['nested_classes'] = []
        self['parent'] = None
        self['abstract'] = False
        self._public_enums = {}
        self._public_structs = {}
        self._public_typedefs = {}
        self._public_forward_declares = []
        self['namespace'] = ""
        debug_print( "Class: %s"%nameStack )
        debug_print( "Template: %s"%curTemplate)
        if (len(nameStack) < 2):
            nameStack.insert(1, "")#anonymous struct
        # Claim any doxygen text the lexer cached before this declaration.
        global doxygenCommentCache
        if len(doxygenCommentCache):
            self["doxygen"] = doxygenCommentCache
            doxygenCommentCache = ""
        if "::" in "".join(nameStack):
            #Re-Join class paths (ex ['class', 'Bar', ':', ':', 'Foo'] -> ['class', 'Bar::Foo']
            try:
                new_nameStack = []
                for name in nameStack:
                    if len(new_nameStack) == 0:
                        new_nameStack.append(name)
                    elif name == ":" and new_nameStack[-1].endswith(":"):
                        new_nameStack[-1] += name
                    elif new_nameStack[-1].endswith("::"):
                        new_nameStack[-2] += new_nameStack[-1] + name
                        del new_nameStack[-1]
                    else:
                        new_nameStack.append(name)
                trace_print("Convert from namestack\n %s\nto\n%s"%(nameStack, new_nameStack))
                nameStack = new_nameStack
            except: pass
        # Handle final specifier
        self["final"] = False
        try:
            final_index = nameStack.index("final")
            # Dont trip up the rest of the logic
            del nameStack[final_index]
            self["final"] = True
            trace_print("final")
        except: pass
        self["name"] = nameStack[1]
        self["line_number"] = detect_lineno(nameStack[0])
        #Handle template classes
        if len(nameStack) > 3 and nameStack[2].startswith("<"):
            # Re-join the tokens of the template parameter list into the name.
            open_template_count = 0
            param_separator = 0
            found_first = False
            i = 0
            for elm in nameStack:
                if '<' in elm :
                    open_template_count += 1
                    found_first = True
                elif '>' in elm:
                    open_template_count -= 1
                    if found_first and open_template_count == 0:
                        self["name"] = "".join(nameStack[1:i + 1])
                        break;
                i += 1
        elif ":" in nameStack:
            self['name'] = nameStack[ nameStack.index(':') - 1 ]
        inheritList = []
        if nameStack.count(':') == 1:
            # Single ':' -> everything after it is the inheritance list.
            nameStack = nameStack[nameStack.index(":") + 1:]
            while len(nameStack):
                tmpStack = []
                tmpInheritClass = {"access":"private", "virtual": False}
                if "," in nameStack:
                    tmpStack = nameStack[:nameStack.index(",")]
                    nameStack = nameStack[nameStack.index(",") + 1:]
                else:
                    tmpStack = nameStack
                    nameStack = []
                # Convert template classes to one name in the last index
                for i in range(0, len(tmpStack)):
                    if '<' in tmpStack[i]:
                        tmpStack2 = tmpStack[:i-1]
                        tmpStack2.append("".join(tmpStack[i-1:]))
                        tmpStack = tmpStack2
                        break
                if len(tmpStack) == 0:
                    break;
                elif len(tmpStack) == 1:
                    tmpInheritClass["class"] = tmpStack[0]
                elif len(tmpStack) == 2:
                    tmpInheritClass["access"] = tmpStack[0]
                    tmpInheritClass["class"] = tmpStack[1]
                elif len(tmpStack) == 3 and "virtual" in tmpStack:
                    tmpInheritClass["access"] = tmpStack[1] if tmpStack[1] != "virtual" else tmpStack[0]
                    tmpInheritClass["class"] = tmpStack[2]
                    tmpInheritClass["virtual"] = True
                else:
                    warning_print( "Warning: can not parse inheriting class %s"%(" ".join(tmpStack)))
                    if '>' in tmpStack: pass # allow skip templates for now
                    else: raise NotImplemented
                if 'class' in tmpInheritClass: inheritList.append(tmpInheritClass)
        elif nameStack.count(':') == 2: self['parent'] = self['name']; self['name'] = nameStack[-1]
        elif nameStack.count(':') > 2 and nameStack[0] in ("class", "struct"):
            # Multiple ':' tokens: base classes qualified with namespaces
            # ("a : : b"); rebuild each base name with '::' separators.
            tmpStack = nameStack[nameStack.index(":") + 1:]
            superTmpStack = [[]]
            for tok in tmpStack:
                if tok == ',':
                    superTmpStack.append([])
                else:
                    superTmpStack[-1].append(tok)
            for tmpStack in superTmpStack:
                tmpInheritClass = {"access":"private"}
                if len(tmpStack) and tmpStack[0] in supportedAccessSpecifier:
                    tmpInheritClass["access"] = tmpStack[0]
                    tmpStack = tmpStack[1:]
                inheritNSStack = []
                while len(tmpStack) > 3:
                    if tmpStack[0] == ':': break;
                    if tmpStack[1] != ':': break;
                    if tmpStack[2] != ':': break;
                    inheritNSStack.append(tmpStack[0])
                    tmpStack = tmpStack[3:]
                if len(tmpStack) == 1 and tmpStack[0] != ':':
                    inheritNSStack.append(tmpStack[0])
                tmpInheritClass["class"] = "::".join(inheritNSStack)
                inheritList.append(tmpInheritClass)
        self['inherits'] = inheritList
        if curTemplate:
            self["template"] = curTemplate
            trace_print("Setting template to '%s'"%self["template"])
        # One empty bucket per access specifier for each member kind.
        methodAccessSpecificList = {}
        propertyAccessSpecificList = {}
        enumAccessSpecificList = {}
        structAccessSpecificList = {}
        typedefAccessSpecificList = {}
        forwardAccessSpecificList = {}
        for accessSpecifier in supportedAccessSpecifier:
            methodAccessSpecificList[accessSpecifier] = []
            propertyAccessSpecificList[accessSpecifier] = []
            enumAccessSpecificList[accessSpecifier] = []
            structAccessSpecificList[accessSpecifier] = []
            typedefAccessSpecificList[accessSpecifier] = []
            forwardAccessSpecificList[accessSpecifier] = []
        self['methods'] = methodAccessSpecificList
        self['properties'] = propertyAccessSpecificList
        self['enums'] = enumAccessSpecificList
        self['structs'] = structAccessSpecificList
        self['typedefs'] = typedefAccessSpecificList
        self['forward_declares'] = forwardAccessSpecificList
    def show(self):
        """Convert class to a string"""
        namespace_prefix = ""
        if self["namespace"]: namespace_prefix = self["namespace"] + "::"
        rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
        if self["final"]: rtn += " final"
        if self['abstract']: rtn += ' (abstract)\n'
        else: rtn += '\n'
        if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
        if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
        if "inherits" in list(self.keys()):
            rtn += " Inherits: "
            for inheritClass in self["inherits"]:
                if inheritClass["virtual"]: rtn += "virtual "
                rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"])
            rtn += "\n"
        rtn += " {\n"
        for accessSpecifier in supportedAccessSpecifier:
            rtn += " %s\n"%(accessSpecifier)
            #Enums
            if (len(self["enums"][accessSpecifier])):
                rtn += " <Enums>\n"
                for enum in self["enums"][accessSpecifier]:
                    rtn += " %s\n"%(repr(enum))
            #Properties
            if (len(self["properties"][accessSpecifier])):
                rtn += " <Properties>\n"
                for property in self["properties"][accessSpecifier]:
                    rtn += " %s\n"%(repr(property))
            #Methods
            if (len(self["methods"][accessSpecifier])):
                rtn += " <Methods>\n"
                for method in self["methods"][accessSpecifier]:
                    rtn += "\t\t" + method.show() + '\n'
        rtn += " }\n"
        print(rtn)
    def __str__(self):
        """Convert class to a string"""
        namespace_prefix = ""
        if self["namespace"]: namespace_prefix = self["namespace"] + "::"
        rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
        if self["final"]: rtn += " final"
        if self['abstract']: rtn += ' (abstract)\n'
        else: rtn += '\n'
        if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
        if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
        if "inherits" in list(self.keys()) and len(self["inherits"]):
            rtn += "Inherits: "
            for inheritClass in self["inherits"]:
                if inheritClass.get("virtual", False): rtn += "virtual "
                rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"])
            rtn += "\n"
        rtn += "{\n"
        for accessSpecifier in supportedAccessSpecifier:
            rtn += "%s\n"%(accessSpecifier)
            #Enums
            if (len(self["enums"][accessSpecifier])):
                rtn += " // Enums\n"
                for enum in self["enums"][accessSpecifier]:
                    rtn += " %s\n"%(repr(enum))
            #Properties
            if (len(self["properties"][accessSpecifier])):
                rtn += " // Properties\n"
                for property in self["properties"][accessSpecifier]:
                    rtn += " %s\n"%(repr(property))
            #Methods
            if (len(self["methods"][accessSpecifier])):
                rtn += " // Methods\n"
                for method in self["methods"][accessSpecifier]:
                    rtn += " %s\n"%(repr(method))
        rtn += "}\n"
        return rtn
class CppUnion( CppClass ):
    """Takes a name stack and turns it into a union

    Contains the following Keys:
    self['name'] - Name of the union
    self['doxygen'] - Doxygen comments associated with the union if they exist
    self['members'] - List of members the union has

    An example of how this could look is as follows:
    #self =
    {
        'name': ""
        'members': []
    }
    """
    def __init__(self, nameStack):
        # Reuse the CppClass parser, then expose the public properties
        # under the union-specific 'members' key.
        CppClass.__init__(self, nameStack, None)
        self["name"] = "union " + self["name"]
        self["members"] = self["properties"]["public"]
    def transform_to_union_keys(self):
        """Remove class-only keys that make no sense for a union."""
        # NOTE(review): unconditional print looks like leftover debugging.
        print("union keys: %s"%list(self.keys()))
        for key in ['inherits', 'parent', 'abstract', 'namespace', 'typedefs', 'methods']:
            del self[key]
    def show(self):
        """Convert class to a string"""
        print(self)
    def __str__(self):
        """Convert class to a string"""
        namespace_prefix = ""
        if self["namespace"]: namespace_prefix = self["namespace"] + "::"
        rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"])
        if self['abstract']: rtn += ' (abstract)\n'
        else: rtn += '\n'
        if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n'
        if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n'
        rtn += "{\n"
        for member in self["members"]:
            rtn += " %s\n"%(repr(member))
        rtn += "}\n"
        return rtn
class _CppMethod( dict ):
    def _params_helper1( self, stack ):
        """Extract the raw parameter tokens from a method token stack.

        Drops throw() specifications and GCC __attribute__ blocks, strips
        the surrounding parentheses, and re-joins parenthesized default
        values into single tokens.  May set self['constructor'].
        """
        # deal with "throw" keyword
        if 'throw' in stack: stack = stack[ : stack.index('throw') ]
        ## remove GCC keyword __attribute__(...) and preserve returns ##
        cleaned = []
        hit = False; hitOpen = 0; hitClose = 0
        for a in stack:
            if a == '__attribute__': hit = True
            if hit:
                if a == '(': hitOpen += 1
                elif a == ')': hitClose += 1
                if a==')' and hitOpen == hitClose:
                    hit = False
            else:
                cleaned.append( a )
        stack = cleaned
        # also deal with attribute((const)) function prefix #
        # TODO this needs to be better #
        if len(stack) > 5:
            a = ''.join(stack)
            if a.startswith('((__const__))'): stack = stack[ 5 : ]
            elif a.startswith('__attribute__((__const__))'): stack = stack[ 6 : ]
        stack = stack[stack.index('(') + 1: ]
        if not stack: return []
        if len(stack)>=3 and stack[0]==')' and stack[1]==':': # is this always a constructor?
            self['constructor'] = True
            return []
        # Trim everything after the matching close paren (found by
        # locating the last ')' via a double reverse).
        stack.reverse(); _end_ = stack.index(')'); stack.reverse()
        stack = stack[ : len(stack)-(_end_+1) ]
        if '(' not in stack: return stack # safe to return, no defaults that init a class
        # transforms ['someclass', '(', '0', '0', '0', ')'] into "someclass(0,0,0)'"
        r = []; hit=False
        for a in stack:
            if a == '(': hit=True
            elif a == ')': hit=False
            if hit or a == ')': r[-1] = r[-1] + a
            else: r.append( a )
        return r
    def _params_helper2( self, params ):
        """Attach parent-method and namespace info to parsed params (in place)."""
        for p in params:
            p['method'] = self # save reference in variable to parent method
            if '::' in p['type']:
                ns = p['type'].split('::')[0]
                if ns not in Resolver.NAMESPACES and ns in Resolver.CLASSES:
                    p['type'] = self['namespace'] + p['type']
            else: p['namespace'] = self[ 'namespace' ]
class CppMethod( _CppMethod ):
    """Takes a name stack and turns it into a method

    Contains the following Keys:
    self['rtnType'] - Return type of the method (ex. "int")
    self['name'] - Name of the method (ex. "getSize")
    self['doxygen'] - Doxygen comments associated with the method if they exist
    self['parameters'] - List of CppVariables
    """
    def show(self):
        """Return a short multi-line human-readable summary of the method."""
        r = ['method name: %s (%s)' %(self['name'],self['debug']) ]
        if self['returns']: r.append( 'returns: %s'%self['returns'] )
        if self['parameters']: r.append( 'number arguments: %s' %len(self['parameters']))
        if self['pure_virtual']: r.append( 'pure virtual: %s'%self['pure_virtual'] )
        if self['constructor']: r.append( 'constructor' )
        if self['destructor']: r.append( 'destructor' )
        return '\n\t\t '.join( r )
    def __init__(self, nameStack, curClass, methinfo, curTemplate):
        """Parse a method token stack.

        curClass is the enclosing class name (used to detect constructors),
        methinfo is a dict of extra attributes merged in via update(), and
        curTemplate is the preceding template<> text, if any.
        """
        debug_print( "Method: %s"%nameStack )
        debug_print( "Template: %s"%curTemplate )
        # Claim any doxygen text the lexer cached before this declaration.
        global doxygenCommentCache
        if len(doxygenCommentCache):
            self["doxygen"] = doxygenCommentCache
            doxygenCommentCache = ""
        if "operator" in nameStack:
            self["rtnType"] = " ".join(nameStack[:nameStack.index('operator')])
            self["name"] = "".join(nameStack[nameStack.index('operator'):nameStack.index('(')])
        else:
            self["rtnType"] = " ".join(nameStack[:nameStack.index('(') - 1])
            self["name"] = " ".join(nameStack[nameStack.index('(') - 1:nameStack.index('(')])
        if self["rtnType"].startswith("virtual"):
            self["rtnType"] = self["rtnType"][len("virtual"):].strip()
        # Constructors/destructors (name == class) have no return type.
        if len(self["rtnType"]) == 0 or self["name"] == curClass:
            self["rtnType"] = "void"
        # Canonicalize spacing around scoping/template punctuation.
        self["rtnType"] = self["rtnType"].replace(' : : ', '::' )
        self["rtnType"] = self["rtnType"].replace(" <","<")
        self["rtnType"] = self["rtnType"].replace(" >",">").replace(">>", "> >").replace(">>", "> >")
        self["rtnType"] = self["rtnType"].replace(" ,",",")
        # Trailing specifiers appear between the closing ')' and the end.
        for spec in ["const", "final", "override"]:
            self[spec] = False
            for i in reversed(nameStack):
                if i == spec:
                    self[spec] = True
                    break
                elif i == ")":
                    break
        self.update( methinfo )
        self["line_number"] = detect_lineno(nameStack[0])
        #Filter out initializer lists used in constructors
        try:
            paren_depth_counter = 0
            for i in range(0, len(nameStack)):
                elm = nameStack[i]
                if elm == "(":
                    paren_depth_counter += 1
                if elm == ")":
                    paren_depth_counter -=1
                    if paren_depth_counter == 0 and nameStack[i+1] == ':':
                        debug_print("Stripping out initializer list")
                        nameStack = nameStack[:i+1]
                        break
        except: pass
        paramsStack = self._params_helper1( nameStack )
        debug_print( "curTemplate: %s"%curTemplate)
        if curTemplate:
            self["template"] = curTemplate
            debug_print( "SET self['template'] to `%s`"%self["template"])
        params = []
        #See if there is a doxygen comment for the variable
        doxyVarDesc = {}
        if "doxygen" in self:
            # Map @param names to their descriptions (with continuation lines).
            doxyLines = self["doxygen"].split("\n")
            lastParamDesc = ""
            for doxyLine in doxyLines:
                if " @param " in doxyLine or " \param " in doxyLine:
                    try:
                        #Strip out the param
                        doxyLine = doxyLine[doxyLine.find("param ") + 6:]
                        (var, desc) = doxyLine.split(" ", 1)
                        doxyVarDesc[var] = desc.strip()
                        lastParamDesc = var
                    except: pass
                elif " @return " in doxyLine or " \return " in doxyLine:
                    lastParamDesc = ""
                    # not handled for now
                elif lastParamDesc:
                    try:
                        doxyLine = doxyLine.strip()
                        if " " not in doxyLine:
                            lastParamDesc = ""
                            continue
                        doxyLine = doxyLine[doxyLine.find(" ") + 1:]
                        doxyVarDesc[lastParamDesc] += " " + doxyLine
                    except: pass
        #Create the variable now
        while (len(paramsStack)):
            # Find commas that are not nexted in <>'s like template types
            open_template_count = 0
            param_separator = 0
            i = 0
            for elm in paramsStack:
                if '<' in elm :
                    open_template_count += 1
                elif '>' in elm:
                    open_template_count -= 1
                elif elm == ',' and open_template_count == 0:
                    param_separator = i
                    break
                i += 1
            if param_separator:
                param = CppVariable(paramsStack[0:param_separator], doxyVarDesc=doxyVarDesc)
                if len(list(param.keys())): params.append(param)
                paramsStack = paramsStack[param_separator + 1:]
            else:
                param = CppVariable(paramsStack, doxyVarDesc=doxyVarDesc)
                if len(list(param.keys())): params.append(param)
                break
        self["parameters"] = params
        #self._params_helper2( params ) # mods params inplace
    def __str__(self):
        """Dict-style repr with internal bookkeeping keys filtered out."""
        filter_keys = ("parent", "defined", "operator", "returns_reference")
        cpy = dict((k,v) for (k,v) in list(self.items()) if k not in filter_keys)
        return "%s"%cpy
class _CppVariable(dict):
    def _name_stack_helper( self, stack ):
        """Normalize a variable name stack: fold trailing array digits into
        self['array'] and strip bitfield-style "name:" colons."""
        stack = list(stack)
        if '=' not in stack: # TODO refactor me
            # check for array[n] and deal with funny array syntax: "int myvar:99"
            array = []
            while stack and stack[-1].isdigit(): array.append( stack.pop() )
            if array: array.reverse(); self['array'] = int(''.join(array))
            if stack and stack[-1].endswith(':'): stack[-1] = stack[-1][:-1]
        while stack and not stack[-1]: stack.pop() # can be empty
        return stack
    def init(self):
        """Install default bookkeeping keys and canonicalize self['type']."""
        #assert self['name'] # allow unnamed variables, methods like this: "void func(void);"
        a = []
        self['aliases'] = []; self['parent'] = None; self['typedef'] = None
        for key in 'constant reference pointer static typedefs class fundamental unresolved'.split():
            self[ key ] = 0
        for b in self['type'].split():
            if b == '__const__': b = 'const'
            a.append( b )
        self['type'] = ' '.join( a )
class CppVariable( _CppVariable ):
    """Takes a name stack and turns it into a variable

    Contains the following Keys:
    self['type'] - Type for the variable (ex. "const string &")
    self['name'] - Name of the variable (ex. "numItems")
    self['namespace'] - Namespace containing the enum
    self['desc'] - Description of the variable if part of a method (optional)
    self['doxygen'] - Doxygen comments associated with the method if they exist
    self['defaultValue'] - Default value of the variable, this key will only
       exist if there is a default value
    self['extern'] - True if its an extern, false if not
    """
    Vars = []  # every variable parsed, kept for later cross-resolution
    def __init__(self, nameStack, **kwargs):
        debug_print("trace %s"%nameStack)
        # An "extern" prefix is recorded and removed before further parsing.
        if len(nameStack) and nameStack[0] == "extern":
            self['extern'] = True
            del nameStack[0]
        else:
            self['extern'] = False
        _stack_ = nameStack
        if "[" in nameStack: #strip off array information
            arrayStack = nameStack[nameStack.index("["):]
            if nameStack.count("[") > 1:
                debug_print("Multi dimensional array")
                debug_print("arrayStack=%s"%arrayStack)
                # BUG FIX: use a list comprehension instead of filter().
                # Under Python 3 filter() returns a one-shot iterator, so
                # the size loop below exhausted it and the subsequent
                # "x".join(nums) always produced an empty string.
                nums = [tok for tok in arrayStack if tok.isdigit()]
                # Calculate size by multiplying all dimensions
                p = 1
                for n in nums:
                    p *= int(n)
                #Multi dimensional array
                self["array_size"] = p
                self["multi_dimensional_array"] = 1
                self["multi_dimensional_array_size"] = "x".join(nums)
            else:
                debug_print("Array")
                if len(arrayStack) == 3:
                    # NOTE(review): the single-dimension size is kept as the
                    # raw token (a string), unlike the multi-dim int above.
                    self["array_size"] = arrayStack[1]
            nameStack = nameStack[:nameStack.index("[")]
            self["array"] = 1
        else:
            self["array"] = 0
        nameStack = self._name_stack_helper( nameStack )
        # Claim any doxygen text the lexer cached before this declaration.
        global doxygenCommentCache
        if len(doxygenCommentCache):
            self["doxygen"] = doxygenCommentCache
            doxygenCommentCache = ""
        debug_print( "Variable: %s"%nameStack )
        self["line_number"] = detect_lineno(nameStack[0])
        self["function_pointer"] = 0
        if (len(nameStack) < 2): # +++
            if len(nameStack) == 1: self['type'] = nameStack[0]; self['name'] = ''
            else: error_print(_stack_); assert 0
        elif is_function_pointer_stack(nameStack): #function pointer
            self["type"] = " ".join(nameStack[:nameStack.index("(") + 2] + nameStack[nameStack.index(")") :])
            self["name"] = " ".join(nameStack[nameStack.index("(") + 2 : nameStack.index(")")])
            self["function_pointer"] = 1
        elif ("=" in nameStack):
            self["type"] = " ".join(nameStack[:nameStack.index("=") - 1])
            self["name"] = nameStack[nameStack.index("=") - 1]
            self["defaultValue"] = " ".join(nameStack[nameStack.index("=") + 1:]) # deprecate camelCase in dicts
            self['default'] = " ".join(nameStack[nameStack.index("=") + 1:])
        elif is_fundamental(nameStack[-1]) or nameStack[-1] in ['>', '<' , ':', '.']:
            #Un named parameter
            self["type"] = " ".join(nameStack)
            self["name"] = ""
        else: # common case
            self["type"] = " ".join(nameStack[:-1])
            self["name"] = nameStack[-1]
        # Canonicalize spacing around scoping/template punctuation.
        self["type"] = self["type"].replace(" :",":")
        self["type"] = self["type"].replace(": ",":")
        self["type"] = self["type"].replace(" <","<")
        self["type"] = self["type"].replace(" >",">").replace(">>", "> >").replace(">>", "> >")
        self["type"] = self["type"].replace(" ,",",")
        #Optional doxygen description
        try:
            self["desc"] = kwargs["doxyVarDesc"][self["name"]]
        except: pass
        self.init()
        CppVariable.Vars.append( self ) # save and resolve later
    def __str__(self):
        """Dict-style repr limited to the externally interesting keys."""
        keys_white_list = ['constant','name','reference','type','static','pointer','desc', 'line_number', 'extern']
        cpy = dict((k,v) for (k,v) in list(self.items()) if k in keys_white_list)
        if "array_size" in self: cpy["array_size"] = self["array_size"]
        return "%s"%cpy
class _CppEnum(dict):
def resolve_enum_values( self, values ):
"""Evaluates the values list of dictionaries passed in and figures out what the enum value
for each enum is editing in place:
Example:
From: [{'name': 'ORANGE'},
{'name': 'RED'},
{'name': 'GREEN', 'value': '8'}]
To: [{'name': 'ORANGE', 'value': 0},
{'name': 'RED', 'value': 1},
{'name': 'GREEN', 'value': 8}]
"""
t = int; i = 0
names = [ v['name'] for v in values ]
for v in values:
if 'value' in v:
a = v['value'].strip()
# Remove single quotes from single quoted chars (unless part of some expression
if len(a) == 3 and a[0] == "'" and a[2] == "'":
a = v['value'] = a[1]
if a.lower().startswith("0x"):
try:
i = a = int(a , 16)
except:pass
elif a.isdigit():
i = a = int( a )
elif a in names:
for other in values:
if other['name'] == a:
v['value'] = other['value']
break
elif '"' in a or "'" in a: t = str # only if there are quotes it this a string enum
else:
try:
a = i = ord(a)
except: pass
#Allow access of what is in the file pre-convert if converted
if v['value'] != str(a):
v['raw_value'] = v['value']
v['value'] = a
else: v['value'] = i
try:
v['value'] = v['value'].replace(" < < ", " << ").replace(" >> ", " >> ")
except: pass
i += 1
return t
class CppEnum(_CppEnum):
    """Takes a name stack and turns it into an Enum

    Contains the following Keys:
    self['name'] - Name of the enum (ex. "ItemState")
    self['namespace'] - Namespace containing the enum
    self['values'] - List of values where the values are a dictionary of the
    form {"name": name of the key (ex. "PARSING_HEADER"),
    "value": Specified value of the enum, this key will only exist
    if a value for a given enum value was defined
    }
    """

    def __init__(self, nameStack):
        # nameStack is the token list of the full declaration, e.g.
        # ['enum', 'Color', '{', 'RED', ',', 'GREEN', '=', '8', '}'].
        global doxygenCommentCache
        # Attach any pending doxygen comment to this enum, then clear the cache.
        if len(doxygenCommentCache):
            self["doxygen"] = doxygenCommentCache
            doxygenCommentCache = ""
        if len(nameStack) == 3 and nameStack[0] == "enum":
            debug_print("Created enum as just name/value")
            self["name"] = nameStack[1]
            self["instances"]=[nameStack[2]]
            # NOTE(review): deliberately no return here -- a 3-token stack has
            # no braces, so the "Bad enum" guard below still ends __init__.
        if len(nameStack) < 4 or "{" not in nameStack or "}" not in nameStack:
            #Not enough stuff for an enum
            debug_print("Bad enum")
            return
        valueList = []
        self["line_number"] = detect_lineno(nameStack[0])
        #Figure out what values it has
        valueStack = nameStack[nameStack.index('{') + 1: nameStack.index('}')]
        while len(valueStack):
            # Peel off the tokens for one enumerator (up to the next comma).
            tmpStack = []
            if "," in valueStack:
                tmpStack = valueStack[:valueStack.index(",")]
                valueStack = valueStack[valueStack.index(",") + 1:]
            else:
                tmpStack = valueStack
                valueStack = []
            d = {}
            if len(tmpStack) == 1: d["name"] = tmpStack[0]
            elif len(tmpStack) >= 3 and tmpStack[1] == "=":
                d["name"] = tmpStack[0]; d["value"] = " ".join(tmpStack[2:])
            elif len(tmpStack) == 2 and tmpStack[1] == "=":
                # '=' with no right-hand side: the lexer lost the value.
                debug_print( "WARN-enum: parser missed value for %s"%tmpStack[0] )
                d["name"] = tmpStack[0]
            if d: valueList.append( d )
        if len(valueList):
            self['type'] = self.resolve_enum_values( valueList ) # returns int for standard enum
            self["values"] = valueList
        else:
            warning_print( 'WARN-enum: empty enum %s'%nameStack )
            return
        #Figure out if it has a name
        preBraceStack = nameStack[:nameStack.index("{")]
        postBraceStack = nameStack[nameStack.index("}") + 1:]
        self["typedef"] = False
        if (len(preBraceStack) == 2 and "typedef" not in nameStack):
            # Plain 'enum Name { ... }'
            self["name"] = preBraceStack[1]
        elif len(postBraceStack) and "typedef" in nameStack:
            # 'typedef enum { ... } Name'
            self["name"] = " ".join(postBraceStack)
            self["typedef"] = True
        else: warning_print( 'WARN-enum: nameless enum %s'%nameStack )
        #See if there are instances of this
        if "typedef" not in nameStack and len(postBraceStack):
            self["instances"] = []
            for var in postBraceStack:
                if "," in var:
                    continue
                self["instances"].append(var)
        self["namespace"] = ""
class CppStruct(dict):
    """Dict-backed record for a parsed C/C++ ``struct`` declaration."""

    # Registry of every struct created during a parse run.
    Structs = []

    def __init__(self, nameStack):
        """Build a struct record from *nameStack* (token list of the declaration)."""
        global curLine
        # The token after the 'struct' keyword is the type name, when present.
        self['type'] = nameStack[1] if len(nameStack) >= 2 else None
        self['fields'] = []
        CppStruct.Structs.append(self)
        self["line_number"] = curLine
# Mapping of common C99-style non-standard integer type names to their
# standard C equivalents; used to normalize fundamental types.
C99_NONSTANDARD = {
    'int8' : 'signed char',
    'int16' : 'short int',
    'int32' : 'int',
    'int64' : 'int64_t', # this can be: long int (64bit), or long long int (32bit)
    'uint' : 'unsigned int',
    'uint8' : 'unsigned char',
    'uint16' : 'unsigned short int',
    'uint32' : 'unsigned int',
    'uint64' : 'uint64_t', # depends on host bits
}


def standardize_fundamental(s):
    """Return the standard C spelling of type name *s*.

    Non-standard C99-style aliases (e.g. ``uint8``) are mapped through
    ``C99_NONSTANDARD``; any other name is returned unchanged.
    """
    # dict.get with a default performs a single lookup, replacing the
    # original membership-test-then-index pair.
    return C99_NONSTANDARD.get(s, s)
class Resolver(object):
    """Mixin holding the type-resolution machinery of the parser.

    Maps raw C++ type strings gathered during parsing onto concrete types
    and best-guess ctypes equivalents.  Several registries are class-level
    and therefore shared across parser instances.
    """

    # Fundamental C type keywords (plus the aggregate keywords).
    C_FUNDAMENTAL = 'size_t unsigned signed bool char wchar short int float double long void'.split()
    C_FUNDAMENTAL += 'struct union enum'.split()

    # Shared (class-level) registries.
    SubTypedefs = {} # TODO deprecate?
    NAMESPACES = []
    CLASSES = {}
    STRUCTS = {}

    def initextra(self):
        """Initialize the per-parse resolver state; called before a parse run."""
        self.typedefs = {}
        self.typedefs_order = []
        self.classes_order = []
        self.structs = Resolver.STRUCTS
        self.structs_order = []
        self.namespaces = Resolver.NAMESPACES # save all namespaces
        self.curStruct = None
        self.stack = [] # full name stack, good idea to keep both stacks? (simple stack and full stack)
        self._classes_brace_level = {} # class name : level
        self._structs_brace_level = {} # struct type : level
        self._method_body = None
        self._forward_decls = []
        self._template_typenames = [] # template<typename XXX>

    def current_namespace(self): return self.cur_namespace(True)

    def cur_namespace(self, add_double_colon=False):
        """Return the currently open namespace path joined with '::'.

        NOTE(review): reads ``self.nameSpaces``, which is maintained by the
        parser subclass, not by this mixin -- confirm when editing.
        """
        rtn = ""
        i = 0
        while i < len(self.nameSpaces):
            rtn += self.nameSpaces[i]
            # Separator between components; trailing '::' only when requested.
            if add_double_colon or i < len(self.nameSpaces) - 1: rtn += "::"
            i+=1
        return rtn

    def guess_ctypes_type( self, string ):
        """Best-effort mapping of a C type string to a ctypes type name string."""
        pointers = string.count('*')
        string = string.replace('*','')
        a = string.split()
        if 'unsigned' in a: u = 'u'
        else: u = ''
        if 'long' in a and 'double' in a: b = 'longdouble' # there is no ctypes.c_ulongdouble (this is a 64bit float?)
        elif a.count('long') == 2 and 'int' in a: b = '%sint64' %u
        elif a.count('long') == 2: b = '%slonglong' %u
        elif 'long' in a: b = '%slong' %u
        elif 'double' in a: b = 'double' # no udouble in ctypes
        elif 'short' in a: b = '%sshort' %u
        elif 'char' in a: b = '%schar' %u
        elif 'wchar' in a: b = 'wchar'
        elif 'bool' in a: b = 'bool'
        elif 'float' in a: b = 'float'
        elif 'int' in a: b = '%sint' %u
        elif 'int8' in a: b = 'int8'
        elif 'int16' in a: b = 'int16'
        elif 'int32' in a: b = 'int32'
        elif 'int64' in a: b = 'int64'
        elif 'uint' in a: b = 'uint'
        elif 'uint8' in a: b = 'uint8'
        elif 'uint16' in a: b = 'uint16'
        elif 'uint32' in a: b = 'uint32'
        elif 'uint64' in a: b = 'uint64'
        elif 'size_t' in a: b = 'size_t'
        elif 'void' in a: b = 'void_p'
        elif string in 'struct union'.split(): b = 'void_p' # what should be done here? don't trust struct, it could be a class, no need to expose via ctypes
        else: b = 'void_p'
        if not pointers: return 'ctypes.c_%s' %b
        else:
            # Wrap in one POINTER() per level of indirection.
            x = ''
            for i in range(pointers): x += 'ctypes.POINTER('
            x += 'ctypes.c_%s' %b
            x += ')' * pointers
            return x

    def resolve_type( self, string, result ): # recursive
        '''
        keeps track of useful things like: how many pointers, number of typedefs, is fundamental or a class, etc...
        '''
        ## be careful with templates, what is inside <something*> can be a pointer but the overall type is not a pointer
        ## these come before a template
        s = string.split('<')[0]
        result[ 'constant' ] += s.split().count('const')
        result[ 'static' ] += s.split().count('static')
        result[ 'mutable' ] = 'mutable' in s.split()
        ## these come after a template
        s = string.split('>')[-1]
        result[ 'pointer' ] += s.count('*')
        result[ 'reference' ] += s.count('&')
        x = string; alias = False
        for a in '* & const static mutable'.split(): x = x.replace(a,'')
        # The first word that is not a fundamental keyword is treated as an alias.
        for y in x.split():
            if y not in self.C_FUNDAMENTAL: alias = y; break
        #if alias == 'class':
        #    result['class'] = result['name'] # forward decl of class
        #    result['forward_decl'] = True
        if alias == '__extension__': result['fundamental_extension'] = True
        elif alias:
            result['aliases'].append( alias )
            if alias in C99_NONSTANDARD:
                result['type'] = C99_NONSTANDARD[ alias ]
                result['typedef'] = alias
                result['typedefs'] += 1
            elif alias in self.typedefs:
                # Follow the typedef chain recursively.
                result['typedefs'] += 1
                result['typedef'] = alias
                self.resolve_type( self.typedefs[alias], result )
            elif alias in self.classes:
                klass = self.classes[alias]; result['fundamental'] = False
                result['class'] = klass
                result['unresolved'] = False
            else: result['unresolved'] = True
        else:
            result['fundamental'] = True
            result['unresolved'] = False

    def finalize_vars(self):
        """Resolve every collected CppVariable: concrete type, ctypes guess,
        and a namespace-qualified stripped ``raw_type``.  Also flushes the
        buffered preprocessor macros into defines/pragmas/includes.

        The elif cascade below is order-dependent; treat with care.
        """
        for s in CppStruct.Structs: # vars within structs can be ignored if they do not resolve
            for var in s['fields']: var['parent'] = s['type']
        #for c in self.classes.values():
        #    for var in c.get_all_properties(): var['parent'] = c['name']
        ## RESOLVE ##
        for var in CppVariable.Vars:
            self.resolve_type( var['type'], var )
            #if 'method' in var and var['method']['name'] ==  '_notifyCurrentCamera': print(var); assert 0
        # then find concrete type and best guess ctypes type #
        for var in CppVariable.Vars:
            if not var['aliases']: #var['fundamental']:
                var['ctypes_type'] = self.guess_ctypes_type( var['type'] )
            else:
                var['unresolved'] = False # below may test to True
                if var['class']:
                    var['ctypes_type'] = 'ctypes.c_void_p'
                else:
                    assert var['aliases']
                    tag = var['aliases'][0]
                    klass = None
                    nestedEnum = None
                    nestedStruct = None
                    nestedTypedef = None
                    # Check whether the alias is something nested in the
                    # method's parent class (enum/struct/typedef).
                    if 'method' in var and 'parent' in list(var['method'].keys()):
                        klass = var['method']['parent']
                        if tag in var['method']['parent']._public_enums:
                            nestedEnum = var['method']['parent']._public_enums[ tag ]
                        elif tag in var['method']['parent']._public_structs:
                            nestedStruct = var['method']['parent']._public_structs[ tag ]
                        elif tag in var['method']['parent']._public_typedefs:
                            nestedTypedef = var['method']['parent']._public_typedefs[ tag ]
                    if '<' in tag: # should also contain '>'
                        var['template'] = tag # do not resolve templates
                        var['ctypes_type'] = 'ctypes.c_void_p'
                        var['unresolved'] = True
                    elif nestedEnum:
                        enum = nestedEnum
                        if enum['type'] is int:
                            var['ctypes_type'] = 'ctypes.c_int'
                            var['raw_type'] = 'int'
                        elif enum['type'] is str:
                            var['ctypes_type'] = 'ctypes.c_char_p'
                            var['raw_type'] = 'char*'
                        var['enum'] = var['method']['path'] + '::' + enum['name']
                        var['fundamental'] = True
                    elif nestedStruct:
                        var['ctypes_type'] = 'ctypes.c_void_p'
                        var['raw_type'] = var['method']['path'] + '::' + nestedStruct['type']
                        var['fundamental'] = False
                    elif nestedTypedef:
                        var['fundamental'] = is_fundamental( nestedTypedef )
                        if not var['fundamental']:
                            var['raw_type'] = var['method']['path'] + '::' + tag
                    else:
                        _tag = tag
                        # Strip a leading known-namespace qualifier for lookups.
                        if '::' in tag and tag.split('::')[0] in self.namespaces: tag = tag.split('::')[-1]
                        con = self.concrete_typedef( _tag )
                        if con:
                            var['concrete_type'] = con
                            var['ctypes_type'] = self.guess_ctypes_type( var['concrete_type'] )
                        elif tag in self.structs:
                            trace_print( 'STRUCT', var )
                            var['struct'] = tag
                            var['ctypes_type'] = 'ctypes.c_void_p'
                            var['raw_type'] = self.structs[tag]['namespace'] + '::' + tag
                        elif tag in self._forward_decls:
                            var['forward_declared'] = tag
                            var['ctypes_type'] = 'ctypes.c_void_p'
                        elif tag in self.global_enums:
                            enum = self.global_enums[ tag ]
                            if enum['type'] is int:
                                var['ctypes_type'] = 'ctypes.c_int'
                                var['raw_type'] = 'int'
                            elif enum['type'] is str:
                                var['ctypes_type'] = 'ctypes.c_char_p'
                                var['raw_type'] = 'char*'
                            var['enum'] = enum['namespace'] + enum['name']
                            var['fundamental'] = True
                        elif var['parent']:
                            warning_print( 'WARN unresolved %s'%_tag)
                            var['ctypes_type'] = 'ctypes.c_void_p'
                            var['unresolved'] = True
                        elif tag.count('::')==1:
                            ## try to resolve 'a::b' as something nested in a class or namespace ##
                            trace_print( 'trying to find nested something in', tag )
                            a = tag.split('::')[0]
                            b = tag.split('::')[-1]
                            if a in self.classes: # a::b is most likely something nested in a class
                                klass = self.classes[ a ]
                                if b in klass._public_enums:
                                    trace_print( '...found nested enum', b )
                                    enum = klass._public_enums[ b ]
                                    if enum['type'] is int:
                                        var['ctypes_type'] = 'ctypes.c_int'
                                        var['raw_type'] = 'int'
                                    elif enum['type'] is str:
                                        var['ctypes_type'] = 'ctypes.c_char_p'
                                        var['raw_type'] = 'char*'
                                    try:
                                        if 'method' in var: var['enum'] = var['method']['path'] + '::' + enum['name']
                                        else: # class property
                                            var['unresolved'] = True
                                    except:
                                        var['unresolved'] = True
                                    var['fundamental'] = True
                                else: var['unresolved'] = True # TODO klass._public_xxx
                            elif a in self.namespaces: # a::b can also be a nested namespace
                                if b in self.global_enums:
                                    enum = self.global_enums[ b ]
                                    trace_print(enum)
                                    trace_print(var)
                                    assert 0
                            elif b in self.global_enums: # falling back, this is a big ugly
                                enum = self.global_enums[ b ]
                                assert a in enum['namespace'].split('::')
                                if enum['type'] is int:
                                    var['ctypes_type'] = 'ctypes.c_int'
                                    var['raw_type'] = 'int'
                                elif enum['type'] is str:
                                    var['ctypes_type'] = 'ctypes.c_char_p'
                                    var['raw_type'] = 'char*'
                                var['fundamental'] = True
                            else: # boost::gets::crazy
                                trace_print('NAMESPACES', self.namespaces)
                                trace_print( a, b )
                                trace_print( '---- boost gets crazy ----' )
                                var['ctypes_type'] = 'ctypes.c_void_p'
                                var['unresolved'] = True
                        elif 'namespace' in var and self.concrete_typedef(var['namespace']+tag):
                            #print( 'TRYING WITH NS', var['namespace'] )
                            con = self.concrete_typedef( var['namespace']+tag )
                            if con:
                                var['typedef'] = var['namespace']+tag
                                var['type'] = con
                                if 'struct' in con.split():
                                    var['raw_type'] = var['typedef']
                                    var['ctypes_type'] = 'ctypes.c_void_p'
                                else:
                                    self.resolve_type( var['type'], var )
                                    var['ctypes_type'] = self.guess_ctypes_type( var['type'] )
                        elif '::' in var:
                            var['ctypes_type'] = 'ctypes.c_void_p'
                            var['unresolved'] = True
                        elif tag in self.SubTypedefs: # TODO remove SubTypedefs
                            if 'property_of_class' in var or 'property_of_struct' in var:
                                trace_print( 'class:', self.SubTypedefs[ tag ], 'tag:', tag )
                                var['typedef'] = self.SubTypedefs[ tag ] # class name
                                var['ctypes_type'] = 'ctypes.c_void_p'
                            else:
                                trace_print( "WARN-this should almost never happen!" )
                                trace_print( var ); trace_print('-'*80)
                                var['unresolved'] = True
                        elif tag in self._template_typenames:
                            var['typename'] = tag
                            var['ctypes_type'] = 'ctypes.c_void_p'
                            var['unresolved'] = True # TODO, how to deal with templates?
                        elif tag.startswith('_'): # assume starting with underscore is not important for wrapping
                            warning_print( 'WARN unresolved %s'%_tag)
                            var['ctypes_type'] = 'ctypes.c_void_p'
                            var['unresolved'] = True
                        else:
                            trace_print( 'WARN: unknown type', var )
                            # NOTE(review): the second operand below is a truthy
                            # string literal, so this assert can never fail --
                            # likely intended: 'property_of_struct' in var.
                            assert 'property_of_class' in var or 'property_of_struct' # only allow this case
                            var['unresolved'] = True
            ## if not resolved and is a method param, not going to wrap these methods ##
            if var['unresolved'] and 'method' in var: var['method']['unresolved_parameters'] = True
        # create stripped raw_type #
        p = '* & const static mutable'.split() # +++ new July7: "mutable"
        for var in CppVariable.Vars:
            if 'raw_type' not in var:
                raw = []
                for x in var['type'].split():
                    if x not in p: raw.append( x )
                var['raw_type'] = ' '.join( raw )
                #if 'AutoConstantEntry' in var['raw_type']: print(var); assert 0
                if var['class']:
                    if '::' not in var['raw_type']:
                        if not var['class']['parent']:
                            var['raw_type'] = var['class']['namespace'] + '::' + var['raw_type']
                        elif var['class']['parent'] in self.classes:
                            parent = self.classes[ var['class']['parent'] ]
                            var['raw_type'] = parent['namespace'] + '::' + var['class']['name'] + '::' + var['raw_type']
                        else:
                            var['unresolved'] = True
                    elif '::' in var['raw_type'] and var['raw_type'].split('::')[0] not in self.namespaces:
                        var['raw_type'] = var['class']['namespace'] + '::' + var['raw_type']
                    else:
                        var['unresolved'] = True
                elif 'forward_declared' in var and 'namespace' in var:
                    if '::' not in var['raw_type']:
                        var['raw_type'] = var['namespace'] + var['raw_type']
                    elif '::' in var['raw_type'] and var['raw_type'].split('::')[0] in self.namespaces:
                        pass
                    else: trace_print('-'*80); trace_print(var); raise NotImplemented
                ## need full name space for classes in raw type ##
                if var['raw_type'].startswith( '::' ):
                    #print(var)
                    #print('NAMESPACE', var['class']['namespace'])
                    #print( 'PARENT NS', var['class']['parent']['namespace'] )
                    #assert 0
                    var['unresolved'] = True
                    if 'method' in var: var['method']['unresolved_parameters'] = True
                    #var['raw_type'] = var['raw_type'][2:]
        # Take care of #defines and #pragmas etc
        trace_print("Processing precomp_macro_buf: %s"%self._precomp_macro_buf)
        for m in self._precomp_macro_buf:
            macro = m.replace("<CppHeaderParser_newline_temp_replacement>\\n", "\n")
            try:
                if macro.lower().startswith("#define"):
                    trace_print("Adding #define %s"%macro)
                    self.defines.append(macro.split(" ", 1)[1].strip())
                elif macro.lower().startswith("#if") or macro.lower().startswith("#endif") or macro.lower().startswith("#else"):
                    self.conditionals.append(macro)
                elif macro.lower().startswith("#pragma"):
                    trace_print("Adding #pragma %s"%macro)
                    self.pragmas.append(macro.split(" ", 1)[1].strip())
                elif macro.lower().startswith("#include"):
                    trace_print("Adding #include %s"%macro)
                    self.includes.append(macro.split(" ", 1)[1].strip())
                else:
                    debug_print("Cant detect what to do with precomp macro '%s'"%macro)
            except: pass
        self._precomp_macro_buf = None

    def concrete_typedef( self, key ):
        """Follow the typedef chain for *key* to its most concrete type.

        Stops at templates (returning the previous link) and at std::
        types; returns None when *key* is not a known typedef.
        """
        if key not in self.typedefs:
            #print( 'FAILED typedef', key )
            return None
        while key in self.typedefs:
            prev = key
            key = self.typedefs[ key ]
            if '<' in key or '>' in key: return prev # stop at template
            if key.startswith('std::'): return key # stop at std lib
        return key
class _CppHeader( Resolver ):
def finalize(self):
self.finalize_vars()
# finalize classes and method returns types
for cls in list(self.classes.values()):
for meth in cls.get_all_methods():
if meth['pure_virtual']: cls['abstract'] = True
if not meth['returns_fundamental'] and meth['returns'] in C99_NONSTANDARD:
meth['returns'] = C99_NONSTANDARD[meth['returns']]
meth['returns_fundamental'] = True
elif not meth['returns_fundamental']: # describe the return type
con = None
if cls['namespace'] and '::' not in meth['returns']:
con = self.concrete_typedef( cls['namespace'] + '::' + meth['returns'] )
else: con = self.concrete_typedef( meth['returns'] )
if con:
meth['returns_concrete'] = con
meth['returns_fundamental'] = is_fundamental( con )
elif meth['returns'] in self.classes:
trace_print( 'meth returns class:', meth['returns'] )
meth['returns_class'] = True
elif meth['returns'] in self.SubTypedefs:
meth['returns_class'] = True
meth['returns_nested'] = self.SubTypedefs[ meth['returns'] ]
elif meth['returns'] in cls._public_enums:
enum = cls._public_enums[ meth['returns'] ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif meth['returns'] in self.global_enums:
enum = self.global_enums[ meth['returns'] ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif meth['returns'].count('::')==1:
trace_print( meth )
a,b = meth['returns'].split('::')
if a in self.namespaces:
if b in self.classes:
klass = self.classes[ b ]
meth['returns_class'] = a + '::' + b
elif '<' in b and '>' in b:
warning_print( 'WARN-can not return template: %s'%b )
meth['returns_unknown'] = True
elif b in self.global_enums:
enum = self.global_enums[ b ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
else: trace_print( a, b); trace_print( meth); meth['returns_unknown'] = True # +++
elif a in self.classes:
klass = self.classes[ a ]
if b in klass._public_enums:
trace_print( '...found nested enum', b )
enum = klass._public_enums[ b ]
meth['returns_enum'] = enum['type']
meth['returns_fundamental'] = True
if enum['type'] == int: meth['returns'] = 'int'
else: meth['returns'] = 'char*'
elif b in klass._public_forward_declares:
meth['returns_class'] = True
elif b in klass._public_typedefs:
typedef = klass._public_typedefs[ b ]
meth['returns_fundamental'] = is_fundamental( typedef )
else:
trace_print( meth ) # should be a nested class, TODO fix me.
meth['returns_unknown'] = True
elif '::' in meth['returns']:
trace_print('TODO namespace or extra nested return:', meth)
meth['returns_unknown'] = True
else:
trace_print( 'WARN: UNKNOWN RETURN', meth['name'], meth['returns'])
meth['returns_unknown'] = True
if meth["returns"].startswith(": : "):
meth["returns"] = meth["returns"].replace(": : ", "::")
for cls in list(self.classes.values()):
methnames = cls.get_all_method_names()
pvm = cls.get_all_pure_virtual_methods()
for d in cls['inherits']:
c = d['class']
a = d['access'] # do not depend on this to be 'public'
trace_print( 'PARENT CLASS:', c )
if c not in self.classes: trace_print('WARN: parent class not found')
if c in self.classes and self.classes[c]['abstract']:
p = self.classes[ c ]
for meth in p.get_all_methods(): #p["methods"]["public"]:
trace_print( '\t\tmeth', meth['name'], 'pure virtual', meth['pure_virtual'] )
if meth['pure_virtual'] and meth['name'] not in methnames: cls['abstract'] = True; break
def evaluate_struct_stack(self):
"""Create a Struct out of the name stack (but not its parts)"""
#print( 'eval struct stack', self.nameStack )
#if self.braceDepth != len(self.nameSpaces): return
struct = CppStruct(self.nameStack)
struct["namespace"] = self.cur_namespace()
self.structs[ struct['type'] ] = struct
self.structs_order.append( struct )
if self.curClass:
struct['parent'] = self.curClass
klass = self.classes[ self.curClass ]
klass['structs'][self.curAccessSpecifier].append( struct )
if self.curAccessSpecifier == 'public': klass._public_structs[ struct['type'] ] = struct
self.curStruct = struct
self._structs_brace_level[ struct['type'] ] = self.braceDepth
def parse_method_type( self, stack ):
trace_print( 'meth type info', stack )
if stack[0] in ':;' and stack[1] != ':': stack = stack[1:]
info = {
'debug': ' '.join(stack).replace(' : : ', '::' ).replace(' < ', '<' ).replace(' > ', '> ' ).replace(" >",">").replace(">>", "> >").replace(">>", "> >"),
'class':None,
'namespace':self.cur_namespace(add_double_colon=True),
}
for tag in 'defined pure_virtual operator constructor destructor extern template virtual static explicit inline friend returns returns_pointer returns_fundamental returns_class'.split(): info[tag]=False
header = stack[ : stack.index('(') ]
header = ' '.join( header )
header = header.replace(' : : ', '::' )
header = header.replace(' < ', '<' )
header = header.replace(' > ', '> ' )
header = header.strip()
if '{' in stack:
info['defined'] = True
self._method_body = self.braceDepth + 1
trace_print( 'NEW METHOD WITH BODY', self.braceDepth )
elif stack[-1] == ';':
info['defined'] = False
self._method_body = None # not a great idea to be clearing here
else: assert 0
if len(stack) > 3 and stack[-1] == ';' and stack[-2] == '0' and stack[-3] == '=':
info['pure_virtual'] = True
r = header.split()
name = None
if 'operator' in stack: # rare case op overload defined outside of class
op = stack[ stack.index('operator')+1 : stack.index('(') ]
op = ''.join(op)
if not op:
if " ".join(['operator', '(', ')', '(']) in " ".join(stack):
op = "()"
else:
trace_print( 'Error parsing operator')
return None
info['operator'] = op
name = 'operator' + op
a = stack[ : stack.index('operator') ]
elif r:
name = r[-1]
a = r[ : -1 ] # strip name
if name is None: return None
#if name.startswith('~'): name = name[1:]
while a and a[0] == '}': # strip - can have multiple } }
a = a[1:]
if '::' in name:
#klass,name = name.split('::') # methods can be defined outside of class
klass = name[ : name.rindex('::') ]
name = name.split('::')[-1]
info['class'] = klass
if klass in self.classes and not self.curClass:
#Class function defined outside the class
return None
# info['name'] = name
#else: info['name'] = name
if name.startswith('~'):
info['destructor'] = True
name = name[1:]
elif not a or (name == self.curClass and len(self.curClass)):
info['constructor'] = True
info['name'] = name
for tag in 'extern virtual static explicit inline friend'.split():
if tag in a: info[ tag ] = True; a.remove( tag ) # inplace
if 'template' in a:
a.remove('template')
b = ' '.join( a )
if '>' in b:
info['template'] = b[ : b.index('>')+1 ]
info['returns'] = b[ b.index('>')+1 : ] # find return type, could be incorrect... TODO
if '<typename' in info['template'].split():
typname = info['template'].split()[-1]
typname = typname[ : -1 ] # strip '>'
if typname not in self._template_typenames: self._template_typenames.append( typname )
else: info['returns'] = ' '.join( a )
else: info['returns'] = ' '.join( a )
info['returns'] = info['returns'].replace(' <', '<').strip()
## be careful with templates, do not count pointers inside template
info['returns_pointer'] = info['returns'].split('>')[-1].count('*')
if info['returns_pointer']: info['returns'] = info['returns'].replace('*','').strip()
info['returns_reference'] = '&' in info['returns']
if info['returns']: info['returns'] = info['returns'].replace('&','').strip()
a = []
for b in info['returns'].split():
if b == '__const__': info['returns_const'] = True
elif b == 'const': info['returns_const'] = True
else: a.append( b )
info['returns'] = ' '.join( a )
info['returns_fundamental'] = is_fundamental( info['returns'] )
return info
def evaluate_method_stack(self):
"""Create a method out of the name stack"""
if self.curStruct:
trace_print( 'WARN - struct contains methods - skipping' )
trace_print( self.stack )
assert 0
info = self.parse_method_type( self.stack )
if info:
if info[ 'class' ] and info['class'] in self.classes: # case where methods are defined outside of class
newMethod = CppMethod(self.nameStack, info['name'], info, self.curTemplate)
klass = self.classes[ info['class'] ]
klass[ 'methods' ][ 'public' ].append( newMethod )
newMethod['parent'] = klass
if klass['namespace']: newMethod['path'] = klass['namespace'] + '::' + klass['name']
else: newMethod['path'] = klass['name']
elif self.curClass: # normal case
newMethod = CppMethod(self.nameStack, self.curClass, info, self.curTemplate)
klass = self.classes[self.curClass]
klass['methods'][self.curAccessSpecifier].append(newMethod)
newMethod['parent'] = klass
if klass['namespace']: newMethod['path'] = klass['namespace'] + '::' + klass['name']
else: newMethod['path'] = klass['name']
else: #non class functions
debug_print("FREE FUNCTION")
newMethod = CppMethod(self.nameStack, None, info, self.curTemplate)
self.functions.append(newMethod)
global parseHistory
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "method", "item": newMethod})
else:
trace_print( 'free function?', self.nameStack )
self.stack = []
def _parse_typedef( self, stack, namespace='' ):
if not stack or 'typedef' not in stack: return
stack = list( stack ) # copy just to be safe
if stack[-1] == ';': stack.pop()
while stack and stack[-1].isdigit(): stack.pop() # throw away array size for now
idx = stack.index('typedef')
if stack[-1] == "]":
try:
name = namespace + "".join(stack[-4:])
# Strip off the array part so the rest of the parsing is better
stack = stack[:-3]
except:
name = namespace + stack[-1]
else:
name = namespace + stack[-1]
s = ''
for a in stack[idx+1:-1]:
if a == '{': break
if not s or s[-1] in ':<>' or a in ':<>': s += a # keep compact
else: s += ' ' + a # spacing
r = {'name':name, 'raw':s, 'type':s}
if not is_fundamental(s):
if 'struct' in s.split(): pass # TODO is this right? "struct ns::something"
elif '::' not in s: s = namespace + s # only add the current name space if no namespace given
r['type'] = s
if s: return r
def evaluate_typedef(self):
ns = self.cur_namespace(add_double_colon=True)
res = self._parse_typedef( self.stack, ns )
if res:
name = res['name']
self.typedefs[ name ] = res['type']
if name not in self.typedefs_order: self.typedefs_order.append( name )
def evaluate_property_stack(self):
"""Create a Property out of the name stack"""
global parseHistory
assert self.stack[-1] == ';'
debug_print( "trace" )
if self.nameStack[0] == 'typedef':
if self.curClass:
typedef = self._parse_typedef( self.stack )
name = typedef['name']
klass = self.classes[ self.curClass ]
klass[ 'typedefs' ][ self.curAccessSpecifier ].append( name )
if self.curAccessSpecifier == 'public': klass._public_typedefs[ name ] = typedef['type']
Resolver.SubTypedefs[ name ] = self.curClass
else: assert 0
elif self.curStruct or self.curClass:
if len(self.nameStack) == 1:
#See if we can de anonymize the type
filteredParseHistory = [h for h in parseHistory if h["braceDepth"] == self.braceDepth]
if len(filteredParseHistory) and filteredParseHistory[-1]["item_type"] == "class":
self.nameStack.insert(0, filteredParseHistory[-1]["item"]["name"])
debug_print("DEANONYMOIZING %s to type '%s'"%(self.nameStack[1], self.nameStack[0]))
if "," in self.nameStack: #Maybe we have a variable list
#Figure out what part is the variable separator but remember templates of function pointer
#First find left most comma outside of a > and )
leftMostComma = 0;
for i in range(0, len(self.nameStack)):
name = self.nameStack[i]
if name in (">", ")"): leftMostComma = 0
if leftMostComma == 0 and name == ",": leftMostComma = i
# Is it really a list of variables?
if leftMostComma != 0:
trace_print("Multiple variables for namestack in %s. Separating processing"%self.nameStack)
orig_nameStack = self.nameStack[:]
orig_stack = self.stack[:]
type_nameStack = orig_nameStack[:leftMostComma-1]
for name in orig_nameStack[leftMostComma - 1::2]:
self.nameStack = type_nameStack + [name]
self.stack = orig_stack[:] # Not maintained for mucking, but this path it doesnt matter
self.evaluate_property_stack()
return
newVar = CppVariable(self.nameStack)
newVar['namespace'] = self.current_namespace()
if self.curStruct:
self.curStruct[ 'fields' ].append( newVar )
newVar['property_of_struct'] = self.curStruct
elif self.curClass:
klass = self.classes[self.curClass]
klass["properties"][self.curAccessSpecifier].append(newVar)
newVar['property_of_class'] = klass['name']
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "variable", "item": newVar})
else:
debug_print( "Found Global variable" )
newVar = CppVariable(self.nameStack)
self.variables.append(newVar)
self.stack = [] # CLEAR STACK
def evaluate_class_stack(self):
"""Create a Class out of the name stack (but not its parts)"""
#dont support sub classes today
#print( 'eval class stack', self.nameStack )
parent = self.curClass
if self.braceDepth > len( self.nameSpaces) and parent:
trace_print( 'HIT NESTED SUBCLASS' )
self.accessSpecifierStack.append(self.curAccessSpecifier)
elif self.braceDepth != len(self.nameSpaces):
error_print( 'ERROR: WRONG BRACE DEPTH' )
return
# When dealing with typedefed structs, get rid of typedef keyword to handle later on
if self.nameStack[0] == "typedef":
del self.nameStack[0]
if len(self.nameStack) == 1:
self.anon_struct_counter += 1
# We cant handle more than 1 anonymous struct, so name them uniquely
self.nameStack.append("<anon-struct-%d>"%self.anon_struct_counter)
if self.nameStack[0] == "class":
self.curAccessSpecifier = 'private'
else:#struct
self.curAccessSpecifier = 'public'
debug_print("curAccessSpecifier changed/defaulted to %s"%self.curAccessSpecifier)
if self.nameStack[0] == "union":
newClass = CppUnion(self.nameStack)
self.anon_union_counter = [self.braceDepth, 2]
trace_print( 'NEW UNION', newClass['name'] )
else:
newClass = CppClass(self.nameStack, self.curTemplate)
trace_print( 'NEW CLASS', newClass['name'] )
newClass["declaration_method"] = self.nameStack[0]
self.classes_order.append( newClass ) # good idea to save ordering
self.stack = [] # fixes if class declared with ';' in closing brace
if parent:
newClass["namespace"] = self.classes[ parent ]['namespace'] + '::' + parent
newClass['parent'] = parent
self.classes[ parent ]['nested_classes'].append( newClass )
## supports nested classes with the same name ##
self.curClass = key = parent+'::'+newClass['name']
self._classes_brace_level[ key ] = self.braceDepth
elif newClass['parent']: # nested class defined outside of parent. A::B {...}
parent = newClass['parent']
newClass["namespace"] = self.classes[ parent ]['namespace'] + '::' + parent
self.classes[ parent ]['nested_classes'].append( newClass )
## supports nested classes with the same name ##
self.curClass = key = parent+'::'+newClass['name']
self._classes_brace_level[ key ] = self.braceDepth
else:
newClass["namespace"] = self.cur_namespace()
key = newClass['name']
self.curClass = newClass["name"]
self._classes_brace_level[ newClass['name'] ] = self.braceDepth
if not key.endswith("::") and not key.endswith(" ") and len(key) != 0:
if key in self.classes:
trace_print( 'ERROR name collision:', key )
self.classes[key].show()
trace_print('-'*80)
newClass.show()
assert key not in self.classes # namespace collision
self.classes[ key ] = newClass
global parseHistory
parseHistory.append({"braceDepth": self.braceDepth, "item_type": "class", "item": newClass})
    def evalute_forward_decl(self):
        """Record a forward declaration (``class X;`` / ``struct X;``).

        Inside a class body the name is appended to the current class's
        ``forward_declares`` bucket for the active access specifier; at file
        scope it goes to the module-wide ``_forward_decls`` list.

        NOTE: the method name is misspelled ("evalute") but is part of the
        de-facto interface -- evaluate_stack() calls it under this name, so
        it must not be renamed.
        """
        trace_print( 'FORWARD DECL', self.nameStack )
        assert self.nameStack[0] in ('class', 'struct')
        name = self.nameStack[-1]
        if self.curClass:
            klass = self.classes[ self.curClass ]
            klass['forward_declares'][self.curAccessSpecifier].append( name )
            # public declarations are additionally mirrored on a convenience list
            if self.curAccessSpecifier == 'public': klass._public_forward_declares.append( name )
        else: self._forward_decls.append( name )
class CppHeader( _CppHeader ):
"""Parsed C++ class header
Variables produced:
self.classes - Dictionary of classes found in a given header file where the
key is the name of the class
"""
IGNORE_NAMES = '__extension__'.split()
def show(self):
for className in list(self.classes.keys()):self.classes[className].show()
    def __init__(self, headerFileName, argType="file", **kwargs):
        """Create the parsed C++ header file parse tree
        headerFileName - Name of the file to parse OR actual file contents (depends on argType)
        argType - Indicates how to interpret headerFileName as a file string or file name
        kwargs - Supports the following keywords

        The constructor does all the work: it preprocesses the raw header text
        (templates, multi-line #defines, extern "C", ignored macro calls), then
        feeds it through the lexer and drives the evaluate_*_stack() methods
        off brace/colon/semicolon tokens, and finally deletes its scratch state.
        """
        ## reset global state ##
        global doxygenCommentCache
        doxygenCommentCache = ""
        CppVariable.Vars = []
        CppStruct.Structs = []

        if (argType == "file"):
            self.headerFileName = os.path.expandvars(headerFileName)
            self.mainClass = os.path.split(self.headerFileName)[1][:-2]
            headerFileStr = ""
        elif argType == "string":
            self.headerFileName = ""
            self.mainClass = "???"
            headerFileStr = headerFileName
        else:
            raise Exception("Arg type must be either file or string")
        self.curClass = ""

        # nested classes have parent::nested, but no extra namespace,
        # this keeps the API compatible, TODO proper namespace for everything.
        Resolver.CLASSES = {}
        self.classes = Resolver.CLASSES

        #Functions that are not part of a class
        self.functions = []

        self.pragmas = []
        self.defines = []
        self.includes = []
        self.conditionals = []
        self._precomp_macro_buf = [] #for internal purposes, will end up filling out pragmras and defines at the end

        self.enums = []
        self.variables = []
        self.global_enums = {}
        self.nameStack = []          # tokens of the declaration currently being built
        self.nameSpaces = []         # stack of open namespace names
        self.curAccessSpecifier = 'private'    # private is default
        self.curTemplate = None
        self.accessSpecifierStack = []
        self.accessSpecifierScratch = []
        debug_print("curAccessSpecifier changed/defaulted to %s"%self.curAccessSpecifier)
        self.initextra()

        # Old namestacks for a given level
        self.nameStackHistory = []
        self.anon_struct_counter = 0
        self.anon_union_counter = [-1, 0]
        self.templateRegistry = []

        if (len(self.headerFileName)):
            fd = open(self.headerFileName)
            headerFileStr = "".join(fd.readlines())
            fd.close()

        # Make sure supportedAccessSpecifier are sane
        for i in range(0, len(supportedAccessSpecifier)):
            if " " not in supportedAccessSpecifier[i]: continue
            supportedAccessSpecifier[i] = re.sub("[ ]+", " ", supportedAccessSpecifier[i]).strip()

        # Strip out template declarations
        templateSectionsToSliceOut = []
        try:
            for m in re.finditer("template[\t ]*<[^>]*>", headerFileStr):
                start = m.start()
                # Search for the final '>' which may or may not be caught in the case of nexted <>'s
                for i in range(start, len(headerFileStr)):
                    if headerFileStr[i] == '<':
                        firstBracket = i
                        break

                ltgtStackCount = 1
                #Now look for fianl '>'
                for i in range(firstBracket + 1, len(headerFileStr)):
                    if headerFileStr[i] == '<':
                        ltgtStackCount += 1
                    elif headerFileStr[i] == '>':
                        ltgtStackCount -= 1
                    if ltgtStackCount == 0:
                        end = i
                        break
                templateSectionsToSliceOut.append((start, end))

            # Now strip out all instances of the template
            templateSectionsToSliceOut.reverse()
            for tslice in templateSectionsToSliceOut:
                # Replace the template symbol with a single symbol
                template_symbol="CppHeaderParser_template_%d"%len(self.templateRegistry)
                self.templateRegistry.append(headerFileStr[tslice[0]: tslice[1]+1])
                newlines = headerFileStr[tslice[0]: tslice[1]].count("\n") * "\n" #Keep line numbers the same
                headerFileStr = headerFileStr[:tslice[0]] + newlines + " " + template_symbol + " " + headerFileStr[tslice[1] + 1:]
        except:
            pass

        # Change multi line #defines and expressions to single lines maintaining line nubmers
        # Based from http://stackoverflow.com/questions/2424458/regular-expression-to-match-cs-multiline-preprocessor-statements
        matches = re.findall(r'(?m)^(?:.*\\\r?\n)+.*$', headerFileStr)
        is_define = re.compile(r'[ \t\v]*#[Dd][Ee][Ff][Ii][Nn][Ee]')
        for m in matches:
            #Keep the newlines so that linecount doesnt break
            num_newlines = len([a for a in m if a=="\n"])
            if is_define.match(m):
                new_m = m.replace("\n", "<CppHeaderParser_newline_temp_replacement>\\n")
            else:
                # Just expression taking up multiple lines, make it take 1 line for easier parsing
                new_m = m.replace("\\\n", " ")
            if (num_newlines > 0):
                new_m += "\n"*(num_newlines)
            headerFileStr = headerFileStr.replace(m, new_m)

        #Filter out Extern "C" statements.  These are order dependent
        matches = re.findall(re.compile(r'extern[\t ]+"[Cc]"[\t \n\r]*{', re.DOTALL), headerFileStr)
        for m in matches:
            #Keep the newlines so that linecount doesnt break
            num_newlines = len([a for a in m if a=="\n"])
            headerFileStr = headerFileStr.replace(m, "\n" * num_newlines)
        headerFileStr = re.sub(r'extern[ ]+"[Cc]"[ ]*', "", headerFileStr)

        #Filter out any ignore symbols that end with "()" to account for #define magic functions
        for ignore in ignoreSymbols:
            if not ignore.endswith("()"): continue
            while True:
                locStart = headerFileStr.find(ignore[:-1])
                if locStart == -1:
                    break;
                locEnd = None
                #Now walk till we find the last paren and account for sub parens
                parenCount = 1
                inQuotes = False
                for i in range(locStart + len(ignore) - 1, len(headerFileStr)):
                    c = headerFileStr[i]
                    if not inQuotes:
                        if c == "(":
                            parenCount += 1
                        elif c == ")":
                            parenCount -= 1
                        elif c == '"':
                            inQuotes = True
                        if parenCount == 0:
                            locEnd = i + 1
                            break;
                    else:
                        if c == '"' and headerFileStr[i-1] != '\\':
                            inQuotes = False
                if locEnd:
                    #Strip it out but keep the linecount the same so line numbers are right
                    match_str = headerFileStr[locStart:locEnd]
                    debug_print("Striping out '%s'"%match_str)
                    num_newlines = len([a for a in match_str if a=="\n"])
                    headerFileStr = headerFileStr.replace(headerFileStr[locStart:locEnd], "\n"*num_newlines)

        # --- main lexer-driven parse loop ---
        self.braceDepth = 0
        lex.lex()
        lex.input(headerFileStr)
        global curLine
        global curChar
        curLine = 0
        curChar = 0
        try:
            while True:
                tok = lex.token()
                if not tok: break
                if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]:
                    self.anon_union_counter[1] -= 1
                tok.value = TagStr(tok.value, lineno=tok.lineno)
                #debug_print("TOK: %s"%tok)
                if tok.type == 'NAME' and tok.value in self.IGNORE_NAMES: continue
                if tok.type != 'TEMPLATE_NAME':
                    self.stack.append( tok.value )
                curLine = tok.lineno
                curChar = tok.lexpos
                if (tok.type in ('PRECOMP_MACRO', 'PRECOMP_MACRO_CONT')):
                    debug_print("PRECOMP: %s"%tok)
                    self._precomp_macro_buf.append(tok.value)
                    self.stack = []
                    self.nameStack = []
                    continue
                if tok.type == 'TEMPLATE_NAME':
                    try:
                        templateId = int(tok.value.replace("CppHeaderParser_template_",""))
                        self.curTemplate = self.templateRegistry[templateId]
                    except: pass
                if (tok.type == 'OPEN_BRACE'):
                    if len(self.nameStack) >= 2 and is_namespace(self.nameStack):    # namespace {} with no name used in boost, this sets default?
                        if self.nameStack[1] == "__IGNORED_NAMESPACE__CppHeaderParser__":#Used in filtering extern "C"
                            self.nameStack[1] = ""
                        self.nameSpaces.append(self.nameStack[1])
                        ns = self.cur_namespace(); self.stack = []
                        if ns not in self.namespaces: self.namespaces.append( ns )
                    # Detect special condition of macro magic before class declaration so we
                    # can filter it out
                    if 'class' in self.nameStack and self.nameStack[0] != 'class':
                        classLocationNS = self.nameStack.index("class")
                        classLocationS = self.stack.index("class")
                        if "(" not in self.nameStack[classLocationNS:]:
                            debug_print("keyword 'class' found in unexpected location in nameStack, must be following #define magic. Process that before moving on")
                            origNameStack = self.nameStack
                            origStack = self.stack
                            #Process first part of stack which is probably #define macro magic and may cause issues
                            self.nameStack = self.nameStack[:classLocationNS]
                            self.stack = self.stack[:classLocationS]
                            try:
                                self.evaluate_stack()
                            except:
                                debug_print("Error processing #define magic... Oh well")
                            #Process rest of stack
                            self.nameStack = origNameStack[classLocationNS:]
                            self.stack = origStack[classLocationS:]
                    if len(self.nameStack) and not is_enum_namestack(self.nameStack):
                        self.evaluate_stack()
                    else:
                        self.nameStack.append(tok.value)
                    if self.stack and self.stack[0] == 'class': self.stack = []
                    self.braceDepth += 1
                elif (tok.type == 'CLOSE_BRACE'):
                    if self.braceDepth == 0:
                        continue
                    if (self.braceDepth == len(self.nameSpaces)):
                        tmp = self.nameSpaces.pop()
                        self.stack = []    # clear stack when namespace ends?
                    if len(self.nameStack) and is_enum_namestack(self.nameStack):
                        self.nameStack.append(tok.value)
                    elif self.braceDepth < 10:
                        self.evaluate_stack()
                    else:
                        self.nameStack = []
                    self.braceDepth -= 1
                    #self.stack = []; print 'BRACE DEPTH', self.braceDepth, 'NS', len(self.nameSpaces)
                    if self.curClass: debug_print( 'CURBD %s'%self._classes_brace_level[ self.curClass ] )
                    if (self.braceDepth == 0) or (self.curClass and self._classes_brace_level[self.curClass]==self.braceDepth):
                        trace_print( 'END OF CLASS DEF' )
                        if self.accessSpecifierStack:
                            self.curAccessSpecifier = self.accessSpecifierStack[-1]
                            self.accessSpecifierStack = self.accessSpecifierStack[:-1]
                        if self.curClass and self.classes[ self.curClass ]['parent']: self.curClass = self.classes[ self.curClass ]['parent']
                        else: self.curClass = ""; #self.curStruct = None
                        self.stack = []
                    #if self.curStruct: self.curStruct = None
                    if self.braceDepth == 0 or (self.curStruct and self._structs_brace_level[self.curStruct['type']]==self.braceDepth):
                        trace_print( 'END OF STRUCT DEF' )
                        self.curStruct = None
                    if self._method_body and (self.braceDepth + 1) <= self._method_body:
                        self._method_body = None; self.stack = []; self.nameStack = []; trace_print( 'FORCE CLEAR METHBODY' )
                # Most punctuation/literal tokens simply accumulate on nameStack
                # until a brace/colon/semicolon triggers evaluation.
                if (tok.type == 'OPEN_PAREN'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'CLOSE_PAREN'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'OPEN_SQUARE_BRACKET'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'CLOSE_SQUARE_BRACKET'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'TAB'): pass
                elif (tok.type == 'EQUALS'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'COMMA'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'BACKSLASH'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'DIVIDE'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'PIPE'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'PERCENT'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'CARET'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'EXCLAMATION'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'SQUOTE'): pass
                elif (tok.type == 'NUMBER' or tok.type == 'FLOAT_NUMBER'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'MINUS'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'PLUS'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'STRING_LITERAL'):
                    self.nameStack.append(tok.value)
                elif (tok.type == 'NAME' or tok.type == 'AMPERSTAND' or tok.type == 'ASTERISK' or tok.type == 'CHAR_LITERAL'):
                    if tok.value in ignoreSymbols:
                        debug_print("Ignore symbol %s"%tok.value)
                    elif (tok.value == 'class'):
                        self.nameStack.append(tok.value)
                    elif tok.value in supportedAccessSpecifier:
                        if len(self.nameStack) and self.nameStack[0] in ("class", "struct", "union"):
                            self.nameStack.append(tok.value)
                        elif self.braceDepth == len(self.nameSpaces) + 1 or self.braceDepth == (len(self.nameSpaces) + len(self.curClass.split("::"))):
                            self.curAccessSpecifier = tok.value;
                            self.accessSpecifierScratch.append(tok.value)
                            debug_print("curAccessSpecifier updated to %s"%self.curAccessSpecifier)
                            self.stack = []
                    else:
                        self.nameStack.append(tok.value)
                        if self.anon_union_counter[0] == self.braceDepth:
                            self.anon_union_counter = [-1, 0]
                elif (tok.type == 'COLON'):
                    #Dont want colon to be first in stack
                    if len(self.nameStack) == 0:
                        self.accessSpecifierScratch = []
                        continue

                    # Handle situation where access specifiers can be multi words such as "public slots"
                    jns = " ".join(self.accessSpecifierScratch + self.nameStack)
                    if jns in supportedAccessSpecifier:
                        self.curAccessSpecifier = jns;
                        debug_print("curAccessSpecifier updated to %s"%self.curAccessSpecifier)
                        self.stack = []
                        self.nameStack = []
                    else:
                        self.nameStack.append(tok.value)
                    self.accessSpecifierScratch = []
                elif (tok.type == 'SEMI_COLON'):
                    if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]:
                        debug_print("Creating anonymous union")
                        #Force the processing of an anonymous union
                        saved_namestack = self.nameStack[:]
                        saved_stack = self.stack[:]
                        self.nameStack = [""]
                        self.stack = self.nameStack + [";"]
                        self.nameStack = self.nameStack[0:1]
                        debug_print("pre eval anon stack")
                        self.evaluate_stack( tok.type )
                        debug_print("post eval anon stack")
                        self.nameStack = saved_namestack
                        self.stack = saved_stack
                        self.anon_union_counter = [-1, 0];
                    if (self.braceDepth < 10): self.evaluate_stack( tok.type )
                    self.stack = []
                    self.nameStack = []
        except:
            if (debug): raise
            raise CppParseError("Not able to parse %s on line %d evaluating \"%s\"\nError around: %s"
                                % (self.headerFileName, tok.lineno, tok.value, " ".join(self.nameStack)))

        self.finalize()
        global parseHistory
        parseHistory = []
        # Delete some temporary variables
        for key in ["_precomp_macro_buf", "nameStack", "nameSpaces", "curAccessSpecifier", "accessSpecifierStack",
                    "accessSpecifierScratch", "nameStackHistory", "anon_struct_counter", "anon_union_counter",
                    "_classes_brace_level", "_forward_decls", "stack", "mainClass", "curStruct", "_template_typenames",
                    "_method_body", "braceDepth", "_structs_brace_level", "typedefs_order", "curTemplate", "templateRegistry"]:
            del self.__dict__[key]
    def evaluate_stack(self, token=None):
        """Evaluates the current name stack

        Dispatches the accumulated ``nameStack`` tokens to the appropriate
        evaluate_*_stack() handler (typedef, enum, method, class, property,
        forward declaration) based on keyword/shape heuristics, then records
        the stack in ``nameStackHistory`` and clears working state.
        """
        global doxygenCommentCache
        self.nameStack = filter_out_attribute_keyword(self.nameStack)
        self.stack = filter_out_attribute_keyword(self.stack)
        nameStackCopy = self.nameStack[:]
        debug_print( "Evaluating stack %s\n BraceDepth: %s (called from %d)" %(self.nameStack,self.braceDepth, inspect.currentframe().f_back.f_lineno))

        #Handle special case of overloading operator ()
        if "operator()(" in "".join(self.nameStack):
            operator_index = self.nameStack.index("operator")
            self.nameStack.pop(operator_index + 2)
            self.nameStack.pop(operator_index + 1)
            self.nameStack[operator_index] = "operator()"

        if (len(self.curClass)):
            debug_print( "%s (%s) "%(self.curClass, self.curAccessSpecifier))
        else:
            debug_print( "<anonymous> (%s) "%self.curAccessSpecifier)

        #Filter special case of array with casting in it
        try:
            bracePos = self.nameStack.index("[")
            parenPos = self.nameStack.index("(")
            if bracePos == parenPos - 1:
                endParen = self.nameStack.index(")")
                self.nameStack = self.nameStack[:bracePos + 1] + self.nameStack[endParen + 1:]
                debug_print("Filtered namestack to=%s"%self.nameStack)
        except: pass

        #if 'typedef' in self.nameStack: self.evaluate_typedef() # allows nested typedefs, probably a bad idea
        if (not self.curClass and 'typedef' in self.nameStack and
            (('struct' not in self.nameStack and 'union' not in self.nameStack) or self.stack[-1] == ";") and
            not is_enum_namestack(self.nameStack)):
            trace_print('STACK', self.stack)
            self.evaluate_typedef()
            return
        elif (len(self.nameStack) == 0):
            debug_print( "trace" )
            debug_print( "(Empty Stack)" )
            return
        elif (self.nameStack[0] == "namespace"):
            #Taken care of outside of here
            pass
        elif len(self.nameStack) == 2 and self.nameStack[0] == "friend":#friend class declaration
            pass
        elif len(self.nameStack) >= 2 and self.nameStack[0] == 'using' and self.nameStack[1] == 'namespace': pass # TODO
        elif is_enum_namestack(self.nameStack):
            debug_print( "trace" )
            self.evaluate_enum_stack()
        elif self._method_body and (self.braceDepth + 1) > self._method_body: trace_print( 'INSIDE METHOD DEF' )
        elif is_method_namestack(self.stack) and not self.curStruct and '(' in self.nameStack:
            debug_print( "trace" )
            if self.braceDepth > 0:
                if "{" in self.stack and self.stack[0] != '{' and self.stack[-1] == ';' and self.braceDepth == 1:
                    #Special case of a method defined outside a class that has a body
                    pass
                else:
                    self.evaluate_method_stack()
            else:
                #Free function
                self.evaluate_method_stack()
        elif (len(self.nameStack) == 1 and len(self.nameStackHistory) > self.braceDepth
              and (self.nameStackHistory[self.braceDepth][0][0:2] == ["typedef", "struct"] or
                   self.nameStackHistory[self.braceDepth][0][0:2] == ["typedef", "union"])):
            # Look for the name of a typedef struct: struct typedef {...] StructName; or unions to get renamed
            debug_print("found the naming of a union")
            type_name_to_rename = self.nameStackHistory[self.braceDepth][1]
            new_name = self.nameStack[0]
            type_to_rename = self.classes[type_name_to_rename]
            type_to_rename["name"] = self.nameStack[0]
            #Now re install it in its new location
            self.classes[new_name] = type_to_rename
            del self.classes[type_name_to_rename]
        elif is_property_namestack(self.nameStack) and self.stack[-1] == ';':
            debug_print( "trace" )
            if self.nameStack[0] in ('class', 'struct') and len(self.stack) == 3: self.evalute_forward_decl()
            elif len(self.nameStack) >= 2 and (self.nameStack[0]=='friend' and self.nameStack[1]=='class'): pass
            else: self.evaluate_property_stack() # catches class props and structs in a namespace
        elif self.nameStack[0] in ("class", "struct", "union") or self.nameStack[0] == 'typedef' and self.nameStack[1] in ('struct', 'union'):
            #Parsing a union can reuse much of the class parsing
            debug_print( "trace" )
            self.evaluate_class_stack()
        elif not self.curClass:
            debug_print( "trace" )
            if is_enum_namestack(self.nameStack): self.evaluate_enum_stack()
            elif self.curStruct and self.stack[-1] == ';': self.evaluate_property_stack() # this catches fields of global structs
            self.nameStack = []
            doxygenCommentCache = ""
        elif (self.braceDepth < 1):
            debug_print( "trace" )
            #Ignore global stuff for now
            debug_print( "Global stuff: %s"%self.nameStack )
            self.nameStack = []
            doxygenCommentCache = ""
        elif (self.braceDepth > len(self.nameSpaces) + 1):
            debug_print( "trace" )
            self.nameStack = []
            doxygenCommentCache = ""

        # Remember what was seen at this depth (used by the typedef-struct
        # renaming branch above on the next call).
        try:
            self.nameStackHistory[self.braceDepth] = (nameStackCopy, self.curClass)
        except:
            self.nameStackHistory.append((nameStackCopy, self.curClass))
        self.nameStack = [] # its a little confusing to have some if/else above return and others not, and then clearning the nameStack down here
        doxygenCommentCache = ""
        self.curTemplate = None
    def evaluate_enum_stack(self):
        """Create an Enum out of the name stack

        The enum is attached to the current class (under the active access
        specifier) or to the global enum collections; any enum *instances*
        declared inline are re-dispatched as properties.
        """
        debug_print( "evaluating enum" )
        newEnum = CppEnum(self.nameStack)
        if len(list(newEnum.keys())):
            if len(self.curClass):
                newEnum["namespace"] = self.cur_namespace(False)
                klass = self.classes[self.curClass]
                klass["enums"][self.curAccessSpecifier].append(newEnum)
                if self.curAccessSpecifier == 'public' and 'name' in newEnum: klass._public_enums[ newEnum['name'] ] = newEnum
            else:
                newEnum["namespace"] = self.cur_namespace(True)
                self.enums.append(newEnum)
                if 'name' in newEnum and newEnum['name']: self.global_enums[ newEnum['name'] ] = newEnum

            #This enum has instances, turn them into properties
            if "instances" in newEnum:
                instanceType = "enum"
                if "name" in newEnum:
                    instanceType = newEnum["name"]
                for instance in newEnum["instances"]:
                    self.nameStack = [instanceType, instance]
                    self.evaluate_property_stack()
                del newEnum["instances"]
    def strip_parent_keys(self):
        """Strip all parent keys to prevent loops

        Breadth-less walk over the whole parse tree (dict-like CppClass/...
        objects and plain containers): deletes every ``parent`` back-reference
        so the structure becomes acyclic (needed before JSON serialization),
        and replaces raw type objects with placeholder strings.  Broad
        ``except`` clauses are intentional -- the walk is best-effort.
        """
        obj_queue = [self]
        while len(obj_queue):
            obj = obj_queue.pop()
            trace_print("pop %s type %s"%(obj, type(obj)))
            try:
                if "parent" in obj.keys():
                    del obj["parent"]
                    trace_print("Stripped parent from %s"%obj.keys())
            except: pass
            # Figure out what sub types are one of ours
            try:
                if not hasattr(obj, 'keys'):
                    obj = obj.__dict__
                for k in obj.keys():
                    trace_print("-Try key %s"%(k))
                    trace_print("-type %s"%(type(obj[k])))
                    if k in ["nameStackHistory", "parent", "_public_typedefs"]: continue
                    if type(obj[k]) == list:
                        for i in obj[k]:
                            trace_print("push l %s"%i)
                            obj_queue.append(i)
                    elif type(obj[k]) == dict:
                        if len(obj):
                            trace_print("push d %s"%obj[k])
                            obj_queue.append(obj[k])
                    elif type(obj[k]) == type(type(0)):
                        # value is itself a type object -- replace with a tag string
                        if type(obj[k]) == int:
                            obj[k] = "int"
                        elif type(obj[k]) == str:
                            obj[k] = "string"
                        else:
                            obj[k] = "???"
                    trace_print("next key\n")
            except:
                trace_print("Exception")
def toJSON(self, indent=4):
"""Converts a parsed structure to JSON"""
import json
self.strip_parent_keys()
try:
del self.__dict__["classes_order"]
except: pass
return json.dumps(self.__dict__, indent=indent)
def __repr__(self):
rtn = {
"classes": self.classes,
"functions": self.functions,
"enums": self.enums,
"variables": self.variables,
}
return repr(rtn)
def __str__(self):
rtn = ""
for className in list(self.classes.keys()):
rtn += "%s\n"%self.classes[className]
if self.functions:
rtn += "// functions\n"
for f in self.functions:
rtn += "%s\n"%f
if self.variables:
rtn += "// variables\n"
for f in self.variables:
rtn += "%s\n"%f
if self.enums:
rtn += "// enums\n"
for f in self.enums:
rtn += "%s\n"%f
return rtn
| apache-2.0 |
nschaetti/EchoTorch | echotorch/nn/ICACell.py | 1 | 2909 | # -*- coding: utf-8 -*-
#
# File : echotorch/nn/ESN.py
# Description : An Echo State Network module.
# Date : 26th of January, 2018
#
# This file is part of EchoTorch. EchoTorch is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Nils Schaetti <nils.schaetti@unine.ch>
"""
Created on 26 January 2018
@author: Nils Schaetti
"""
# Imports
import torch.sparse
import torch
import torch.nn as nn
from torch.autograd import Variable
# Independent Component Analysis layer
class ICACell(nn.Module):
    """
    Independent Component Analysis layer. It can be used to handle different
    batch-mode algorithms for ICA.
    (Docstring fixed: it previously said "Principal Component Analysis".)

    NOTE: the actual ICA transform is not implemented yet -- forward() only
    validates/augments its input and finalize() is a placeholder.
    """

    # Constructor
    def __init__(self, input_dim, output_dim):
        """
        Constructor
        :param input_dim: Inputs dimension.
        :param output_dim: Reservoir size
        """
        super(ICACell, self).__init__()
        # Keep the dimensions around for subclasses / future training code
        # (they were previously accepted and silently discarded).
        self.input_dim = input_dim
        self.output_dim = output_dim
        # Bug fix: forward() reads self.with_bias, which was never defined
        # anywhere, so every call raised AttributeError.  Default to False
        # (no constant bias column prepended to the input).
        self.with_bias = False
    # end __init__

    ###############################################
    # PROPERTIES
    ###############################################

    ###############################################
    # PUBLIC
    ###############################################

    # Reset learning
    def reset(self):
        """
        Reset learning
        :return:
        """
        # Training mode again
        self.train(True)
    # end reset

    # Forward
    def forward(self, x, y=None):
        """
        Forward
        :param x: Input signal, shaped (batch, time, features).
        :param y: Target outputs
        :return: None -- the ICA computation itself is not implemented yet.
        """
        # Batch size
        batch_size = x.size()[0]

        # Time length
        time_length = x.size()[1]

        # Add bias
        if self.with_bias:
            x = self._add_constant(x)
        # end if
    # end forward

    # Finish training
    def finalize(self):
        """
        Finalize training with LU factorization or Pseudo-inverse
        """
        pass
    # end finalize

    ###############################################
    # PRIVATE
    ###############################################

    # Add constant
    def _add_constant(self, x):
        """
        Prepend a constant ones column to the feature dimension.
        :param x: (batch, time, features) tensor
        :return: (batch, time, features + 1) tensor
        """
        bias = Variable(torch.ones((x.size()[0], x.size()[1], 1)), requires_grad=False)
        return torch.cat((bias, x), dim=2)
    # end _add_constant

# end ICACell
| gpl-3.0 |
hehaichi/django-imagemanagement | imageserver/settings.py | 1 | 3326 | """
Django settings for imageserver project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Absolute path of the project root (two levels up from this file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# from the environment before any production deployment.
SECRET_KEY = 'fvmacrow6pe#wtxg01(9_m01inqisms+255x%uvj0eftaft0xm'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'imagemanagement',  # project app: image upload/management
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'imageserver.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,  # look for templates inside each app's templates/ dir
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'imageserver.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

#Data Max upload size
# 10x Django's 2.5 MiB default, i.e. 25 MiB, to allow large image uploads.
DATA_UPLOAD_MAX_MEMORY_SIZE=2621440*10

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Uploaded media lives inside the imagemanagement app directory.
MEDIA_ROOT = os.path.join(BASE_DIR, 'imagemanagement/media')
MEDIA_URL = '/media/'
| mit |
poulpito/Flexget | flexget/api/plugins/irc.py | 4 | 4254 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flask import jsonify
from flask_restplus import inputs
from flexget.api import api, APIResource
from flexget.api.app import BadRequest, NotFoundError, success_response, base_message_schema, empty_response
irc_api = api.namespace('irc', description='View and manage IRC connections')

# Shared query-string parser: every endpoint accepts an optional connection
# name; omitting it applies the operation to all connections.
irc_parser = api.parser()
irc_parser.add_argument('name', help='Name of connection. Leave empty to apply to all connections.')
class ObjectsContainer(object):
    """JSON-schema fragments describing the IRC status response payload."""

    # Status of one IRC connection as reported by the IRC daemon.
    connection_object = {
        'type': 'object',
        'properties': {
            'alive': {'type': 'boolean'},
            'channels': {
                'type': 'array', 'items': {
                    'type': 'object',
                    'patternProperties': {
                        '\w': {'type': 'integer'}
                    }
                }
            },
            'connected_channels': {'type': 'array', 'items': {'type': 'string'}},
            'port': {'type': 'integer'},
            'server': {'type': 'string'}
        }
    }

    # Mapping of connection name -> connection_object.
    connection = {
        'type': 'object',
        'patternProperties': {
            '\w': connection_object
        }
    }

    # Top-level response: a list of such connection mappings.
    return_response = {'type': 'array', 'items': connection}
# Register the response schema with the API so @api.response can reference it.
return_schema = api.schema('irc.connections', ObjectsContainer.return_response)
@irc_api.route('/connections/')
@api.doc(parser=irc_parser)
class IRCStatus(APIResource):
    @api.response(200, model=return_schema)
    @api.response(NotFoundError)
    @api.response(BadRequest)
    def get(self, session=None):
        """Returns status of IRC connections"""
        # Imported lazily: the IRC plugin is optional and may not be loaded.
        from flexget.plugins.daemon.irc import irc_manager
        if irc_manager is None:
            raise BadRequest('IRC daemon does not appear to be running')
        args = irc_parser.parse_args()
        name = args.get('name')  # None -> status of every connection
        try:
            status = irc_manager.status(name)
        except ValueError as e:
            # Raised by the manager for an unknown connection name.
            raise NotFoundError(e.args[0])
        return jsonify(status)
@irc_api.route('/connections/enums/')
class IRCEnums(APIResource):
    @api.response(200, model=empty_response)
    def get(self, session=None):
        """Get channel status enumeration meaning"""
        try:
            from irc_bot import irc_bot
        except ImportError:
            # NOTE(review): BadRequest is raised here but not declared via an
            # @api.response decorator above -- presumably an oversight.
            raise BadRequest('irc_bot dep is not installed')
        # Maps numeric channel-status codes to their human-readable names.
        return jsonify(irc_bot.IRCChannelStatus().enum_dict)
@irc_api.route('/restart/')
@api.doc(parser=irc_parser)
class IRCRestart(APIResource):
    @api.response(200, model=base_message_schema)
    @api.response(NotFoundError)
    @api.response(BadRequest)
    def get(self, session=None):
        """Restarts IRC connections"""
        # NOTE(review): this GET endpoint has side effects (restarts
        # connections); a POST would be more REST-correct -- confirm intent.
        from flexget.plugins.daemon.irc import irc_manager
        if irc_manager is None:
            raise BadRequest('IRC daemon does not appear to be running')
        args = irc_parser.parse_args()
        connection = args.get('name')  # None -> restart all connections
        try:
            irc_manager.restart_connections(connection)
        except KeyError:
            raise NotFoundError('Connection {} is not a valid IRC connection'.format(connection))
        return success_response('Successfully restarted connection(s)')
# /stop/ additionally accepts ?wait=true to shut connections down gracefully.
irc_stop_parser = irc_parser.copy()
irc_stop_parser.add_argument('wait', type=inputs.boolean, default=False, help='Wait for connection to exit gracefully')
@irc_api.route('/stop/')
@api.doc(parser=irc_stop_parser)
class IRCStop(APIResource):
    @api.response(200, model=base_message_schema)
    @api.response(NotFoundError)
    @api.response(BadRequest)
    def get(self, session=None):
        """Stops IRC connections"""
        # NOTE(review): GET with side effects (stops connections) -- same
        # REST-correctness caveat as IRCRestart.
        from flexget.plugins.daemon.irc import irc_manager
        if irc_manager is None:
            raise BadRequest('IRC daemon does not appear to be running')
        args = irc_stop_parser.parse_args()
        name = args.get('name')  # None -> stop all connections
        wait = args.get('wait')  # True -> block until graceful shutdown
        try:
            irc_manager.stop_connections(wait=wait, name=name)
        except KeyError:
            raise NotFoundError('Connection {} is not a valid IRC connection'.format(name))
        return success_response('Successfully stopped connection(s)')
ChameleonCloud/horizon | openstack_dashboard/dashboards/identity/roles/forms.py | 91 | 1701 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
class CreateRoleForm(forms.SelfHandlingForm):
    """Self-handling form that creates a new Keystone role."""
    name = forms.CharField(label=_("Role Name"))

    def handle(self, request, data):
        """Create the role named in ``data``.

        Returns the new role on success; on failure the exception is routed
        through horizon's handler and None is returned (signalling failure
        to SelfHandlingForm).
        """
        try:
            # NOTE(review): local is named new_user but holds a role object.
            new_user = api.keystone.role_create(request, data["name"])
            messages.success(request, _("Role created successfully."))
            return new_user
        except Exception:
            exceptions.handle(request, _('Unable to create role.'))
class UpdateRoleForm(forms.SelfHandlingForm):
    """Self-handling form that renames an existing Keystone role."""
    # Role id is carried in a hidden field; only the name is editable.
    id = forms.CharField(label=_("ID"), widget=forms.HiddenInput)
    name = forms.CharField(label=_("Role Name"))

    def handle(self, request, data):
        """Apply the rename.

        Returns True on success; on failure the exception is routed through
        horizon's handler and None is returned (signalling failure).
        """
        try:
            api.keystone.role_update(request, data['id'], data["name"])
            messages.success(request, _("Role updated successfully."))
            return True
        except Exception:
            exceptions.handle(request, _('Unable to update role.'))
| apache-2.0 |
defionscode/ansible-modules-core | network/nxos/nxos_evpn_vni.py | 19 | 16296 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_evpn_vni
version_added: "2.2"
short_description: Manages Cisco EVPN VXLAN Network Identifier (VNI).
description:
- Manages Cisco Ethernet Virtual Private Network (EVPN) VXLAN Network
Identifier (VNI) configurations of a Nexus device.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
- default, where supported, restores params default value.
- RD override is not permitted. You should set it to the default values
first and then reconfigure it.
- C(route_target_both), C(route_target_import) and
C(route_target_export valid) values are a list of extended communities,
(i.e. ['1.2.3.4:5', '33:55']) or the keywords 'auto' or 'default'.
- The C(route_target_both) property is discouraged due to the inconsistent
behavior of the property across Nexus platforms and image versions.
For this reason it is recommended to use explicit C(route_target_export)
and C(route_target_import) properties instead of C(route_target_both).
- RD valid values are a string in one of the route-distinguisher formats,
the keyword 'auto', or the keyword 'default'.
options:
vni:
description:
- The EVPN VXLAN Network Identifier.
required: true
default: null
route_distinguisher:
description:
- The VPN Route Distinguisher (RD). The RD is combined with
the IPv4 or IPv6 prefix learned by the PE router to create a
globally unique address.
required: true
default: null
route_target_both:
description:
- Enables/Disables route-target settings for both import and
export target communities using a single property.
required: false
default: null
route_target_import:
description:
- Sets the route-target 'import' extended communities.
required: false
default: null
route_target_export:
description:
- Sets the route-target 'import' extended communities.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_evpn_vni:
vni: 6000
route_distinguisher: "60:10"
route_target_import:
- "5000:10"
- "4100:100"
route_target_export: auto
route_target_both: default
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"route_target_import": ["5000:10", "4100:100",
"5001:10"],"vni": "6000"}
existing:
description: k/v pairs of existing EVPN VNI configuration
returned: verbose mode
type: dict
sample: {"route_distinguisher": "70:10", "route_target_both": [],
"route_target_export": [], "route_target_import": [
"4100:100", "5000:10"], "vni": "6000"}
end_state:
description: k/v pairs of EVPN VNI configuration after module execution
returned: verbose mode
type: dict
sample: {"route_distinguisher": "70:10", "route_target_both": [],
"route_target_export": [], "route_target_import": [
"4100:100", "5000:10", "5001:10"], "vni": "6000"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["evpn", "vni 6000 l2", "route-target import 5001:10"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
import ansible.module_utils.nxos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
from ansible.module_utils.network import NetworkModule
def to_list(val):
    """Coerce *val* into a list: sequences are copied, None becomes [],
    any other single value is wrapped."""
    if val is None:
        return list()
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
class CustomNetworkConfig(NetworkConfig):
    # Extends Ansible's NetworkConfig with section-aware helpers used by the
    # migrated nxos modules (part of the "COMMON CODE FOR MIGRATION" block).

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj plus all of its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Find the config line whose text and full parent chain match *path*.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Render a list of ConfigLine objects back into raw CLI text.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Return the section at *path* as text, or an empty list if absent.
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)


    def add(self, lines, parents=None):
        """Add one or more lines of configuration, creating missing parents."""
        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)

        else:
            # Walk/create the parent hierarchy one level at a time.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)

                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    # for/else: only reached when no existing child matched
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
def get_network_module(**kwargs):
    """Return a module object, preferring the legacy get_module factory."""
    try:
        return get_module(**kwargs)
    except NameError:
        # get_module does not exist on newer Ansible; fall back to NetworkModule
        return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
    """Return the device running-config wrapped in a CustomNetworkConfig."""
    config = module.params['config']
    if not config:
        try:
            # legacy module API
            config = module.get_config()
        except AttributeError:
            # newer NetworkModule API
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
    """Diff *candidate* against the device config and push the delta.

    Returns a dict with 'changed' and, when commands were sent, 'updates'
    (the list of CLI lines). Honors check mode and the 'save' parameter.
    """
    config = get_config(module)

    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            try:
                # legacy module API
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    # older APIs: issue the raw copy command instead
                    module.execute(['copy running-config startup-config'])

        result['changed'] = True
        result['updates'] = commands

    return result
# END OF COMMON CODE
# Maps module argument names to the NX-OS CLI keywords they configure.
PARAM_TO_COMMAND_KEYMAP = {
    'vni': 'vni',
    'route_target_both': 'route-target both',
    'route_target_import': 'route-target import',
    'route_target_export': 'route-target export',
    'route_distinguisher': 'rd'
}

# Warnings accumulated during the run and returned via exit_json.
WARNINGS = []

# used by main() to wait after removing an RD before reapplying config
import time
def invoke(name, *args, **kwargs):
    """Call the module-level function *name* with the given arguments.

    Returns None when no such function exists in this module's globals.
    """
    func = globals().get(name)
    return func(*args, **kwargs) if func else None
def get_value(arg, config, module):
    """Return the single value that follows *arg*'s CLI keyword in *config*,
    or '' when the keyword is absent."""
    keyword = PARAM_TO_COMMAND_KEYMAP[arg]
    pattern = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(keyword), re.M)
    if keyword not in config:
        return ''
    return pattern.search(config).group('value')
def get_route_target_value(arg, config, module):
    """Collect every value of *arg*'s (repeatable) route-target keyword,
    scanning *config* line by line."""
    keyword = PARAM_TO_COMMAND_KEYMAP[arg]
    pattern = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(keyword), re.M)
    value_list = []
    for line in config.splitlines():
        if keyword in line.strip():
            value_list.append(pattern.search(line).group('value'))
    return value_list
def get_existing(module, args):
    """Parse the current 'evpn / vni <id> l2' section into a dict keyed by args."""
    existing = {}
    netcfg = get_config(module)

    parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]
    config = netcfg.get_section(parents)

    if config:
        for arg in args:
            if arg != 'vni':
                if arg == 'route_distinguisher':
                    # rd is a single value; route-targets may repeat
                    existing[arg] = get_value(arg, config, module)
                else:
                    existing[arg] = get_route_target_value(arg, config, module)

        # Python 2 API (iteritems): keep only keys that actually have values
        existing_fix = dict((k, v) for k, v in existing.iteritems() if v)
        if existing_fix:
            existing['vni'] = module.params['vni']
        else:
            # nothing meaningful configured under the section: report empty
            existing = existing_fix

    return existing
def apply_key_map(key_map, table):
    """Translate the keys of *table* through *key_map*.

    Keys without a (truthy) mapping are dropped; values are carried over
    unchanged, including falsy ones.
    """
    new_dict = {}
    for key, value in table.items():
        new_key = key_map.get(key)
        # The original code branched on the value's truthiness but both
        # branches performed the identical assignment; the dead conditional
        # is removed here.
        if new_key:
            new_dict[new_key] = value
    return new_dict
def state_present(module, existing, proposed):
    """Build the CLI command list needed to converge *proposed* onto *existing*.

    Returns (commands, parents); parents locates the 'evpn / vni <id> l2'
    section the commands belong under.
    """
    commands = list()
    parents = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)

    # Python 2 API (iteritems)
    for key, value in proposed_commands.iteritems():
        if key.startswith('route-target'):
            if value == ['default']:
                # 'default' means: remove every currently configured target
                existing_value = existing_commands.get(key)

                if existing_value:
                    for target in existing_value:
                        commands.append('no {0} {1}'.format(key, target))
            else:
                if not isinstance(value, list):
                    value = [value]

                for target in value:
                    if existing:
                        # translate the CLI key back to the module arg name
                        # to look up what is already configured
                        if target not in existing.get(key.replace('-', '_').replace(' ', '_')):
                            commands.append('{0} {1}'.format(key, target))
                    else:
                        commands.append('{0} {1}'.format(key, target))
        else:
            if value == 'default':
                existing_value = existing_commands.get(key)
                if existing_value:
                    commands.append('no {0} {1}'.format(key, existing_value))
            else:
                command = '{0} {1}'.format(key, value)
                commands.append(command)

    if commands:
        parents = ['evpn', 'vni {0} l2'.format(module.params['vni'])]

    return commands, parents
def state_absent(module, existing, proposed):
    """Build the commands that remove the whole EVPN VNI stanza."""
    removal = 'no vni {0} l2'.format(module.params['vni'])
    return [removal], ['evpn']
def execute_config(module, candidate):
    """Push *candidate* to the device, converting ShellError to fail_json."""
    result = {}
    try:
        response = load_config(module, candidate)
        result.update(response)
    except ShellError:
        # get_exception() keeps Python 2.4 compatibility for old Ansible
        exc = get_exception()
        module.fail_json(msg=str(exc))
    return result
def main():
    # Module entry point: gather params, compute the config delta for the
    # requested state and apply it, then report results.
    argument_spec = dict(
        vni=dict(required=True, type='str'),
        route_distinguisher=dict(required=False, type='str'),
        route_target_both=dict(required=False, type='list'),
        route_target_import=dict(required=False, type='list'),
        route_target_export=dict(required=False, type='list'),
        state=dict(choices=['present', 'absent'], default='present',
                   required=False),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    state = module.params['state']
    args =  [
        'vni',
        'route_distinguisher',
        'route_target_both',
        'route_target_import',
        'route_target_export'
    ]

    existing = invoke('get_existing', module, args)
    end_state = existing
    # Python 2 API (iteritems): keep only args the user actually supplied
    proposed_args = dict((k, v) for k, v in module.params.iteritems()
                         if v is not None and k in args)

    proposed = {}
    for key, value in proposed_args.iteritems():
        if key != 'vni':
            if value == 'true':
                value = True
            elif value == 'false':
                value = False
            if existing.get(key) or (not existing.get(key) and value):
                proposed[key] = value

    result = {}
    if state == 'present' or (state == 'absent' and existing):
        candidate = CustomNetworkConfig(indent=3)
        commands, parents = invoke('state_%s' % state, module, existing,
                                   proposed)

        if commands:
            if (existing.get('route_distinguisher') and
                    proposed.get('route_distinguisher')):
                if (existing['route_distinguisher'] != proposed[
                    'route_distinguisher'] and
                    proposed['route_distinguisher'] != 'default'):
                    # RD cannot be overridden in place (see module notes):
                    # remove the old RD first and warn the user about it.
                    WARNINGS.append('EVPN RD {0} was automatically removed. '
                                    'It is highly recommended to use a task '
                                    '(with default as value) to explicitly '
                                    'unconfigure it.'.format(
                                        existing['route_distinguisher']))
                    remove_commands = ['no rd {0}'.format(
                        existing['route_distinguisher'])]

                    candidate.add(remove_commands, parents=parents)
                    result = execute_config(module, candidate)
                    # give the device time to flush the removed RD before
                    # the new configuration is applied
                    time.sleep(30)

            candidate = CustomNetworkConfig(indent=3)
            candidate.add(commands, parents=parents)
            result = execute_config(module, candidate)
    else:
        result['updates'] = []

    result['connected'] = module.connected
    if module._verbosity > 0:
        # verbose mode: include before/after snapshots in the result
        end_state = invoke('get_existing', module, args)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed_args
    if WARNINGS:
        result['warnings'] = WARNINGS

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
open-homeautomation/home-assistant | homeassistant/components/device_tracker/ping.py | 5 | 2962 | """
Tracks devices by sending a ICMP ping.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.ping/
device_tracker:
- platform: ping
count: 2
hosts:
host_one: pc.local
host_two: 192.168.2.25
"""
import logging
import subprocess
import sys
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.device_tracker import (
PLATFORM_SCHEMA, DEFAULT_SCAN_INTERVAL, SOURCE_TYPE_ROUTER)
from homeassistant.helpers.event import track_point_in_utc_time
from homeassistant import util
from homeassistant import const
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = []
_LOGGER = logging.getLogger(__name__)
CONF_PING_COUNT = 'count'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(const.CONF_HOSTS): {cv.string: cv.string},
vol.Optional(CONF_PING_COUNT, default=1): cv.positive_int,
})
class Host:
    """A single tracked host, probed with the system ``ping`` command."""

    def __init__(self, ip_address, dev_id, hass, config):
        """Initialize the Host pinger."""
        self.hass = hass
        self.ip_address = ip_address
        self.dev_id = dev_id
        self._count = config[CONF_PING_COUNT]
        if sys.platform == "win32":
            # Bug fix: each option and its value must be separate list
            # elements. Passing '-n 1' as one element makes subprocess
            # quote it ("-n 1") on Windows, and ping then rejects it as a
            # single unknown option instead of -n with value 1.
            self._ping_cmd = ['ping', '-n', '1', '-w', '1000',
                              self.ip_address]
        else:
            self._ping_cmd = ['ping', '-n', '-q', '-c1', '-W1',
                              self.ip_address]

    def ping(self):
        """Send one ICMP echo request; return True on reply (exit code 0)."""
        pinger = subprocess.Popen(self._ping_cmd, stdout=subprocess.PIPE)
        try:
            pinger.communicate()
            return pinger.returncode == 0
        except subprocess.CalledProcessError:
            return False

    def update(self, see):
        """Ping up to self._count times; report the device on first success."""
        failed = 0
        while failed < self._count:  # check more times if host is unreachable
            if self.ping():
                see(dev_id=self.dev_id, source_type=SOURCE_TYPE_ROUTER)
                return True
            failed += 1
            _LOGGER.debug("ping KO on ip=%s failed=%d", self.ip_address, failed)
def setup_scanner(hass, config, see, discovery_info=None):
    """Setup the Host objects and return the update function."""
    hosts = [Host(ip, dev_id, hass, config) for (dev_id, ip) in
             config[const.CONF_HOSTS].items()]
    # Size the interval so every host can be pinged `count` times (worst
    # case ~1 second per attempt) within one cycle.
    interval = timedelta(seconds=len(hosts) * config[CONF_PING_COUNT]) + \
        DEFAULT_SCAN_INTERVAL
    _LOGGER.info("Started ping tracker with interval=%s on hosts: %s",
                 interval, ",".join([host.ip_address for host in hosts]))

    def update(now):
        """Update all the hosts on every interval time."""
        for host in hosts:
            host.update(see)
        # re-arm: schedule the next run instead of looping/polling
        track_point_in_utc_time(hass, update, now + interval)
        return True

    # run once immediately; the call also schedules all future runs
    return update(util.dt.utcnow())
| apache-2.0 |
rzhxeo/youtube-dl | youtube_dl/extractor/nuvid.py | 127 | 2665 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
parse_duration,
unified_strdate,
)
class NuvidIE(InfoExtractor):
    # Extractor for nuvid.com; scrapes the mobile site (m.nuvid.com) pages.
    _VALID_URL = r'https?://(?:www|m)\.nuvid\.com/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://m.nuvid.com/video/1310741/',
        'md5': 'eab207b7ac4fccfb4e23c86201f11277',
        'info_dict': {
            'id': '1310741',
            'ext': 'mp4',
            'title': 'Horny babes show their awesome bodeis and',
            'duration': 129,
            'upload_date': '20140508',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        formats = []

        # The site serves different qualities based on the dwnld_speed
        # cookie: 0 yields the 3gp variant, 5 the mp4 variant.
        for dwnld_speed, format_id in [(0, '3gp'), (5, 'mp4')]:
            request = compat_urllib_request.Request(
                'http://m.nuvid.com/play/%s' % video_id)
            request.add_header('Cookie', 'skip_download_page=1; dwnld_speed=%d; adv_show=1' % dwnld_speed)
            webpage = self._download_webpage(
                request, video_id, 'Downloading %s page' % format_id)
            video_url = self._html_search_regex(
                r'<a\s+href="([^"]+)"\s+class="b_link">', webpage, '%s video URL' % format_id, fatal=False)
            if not video_url:
                # this quality is not available; skip it silently
                continue
            formats.append({
                'url': video_url,
                'format_id': format_id,
            })

        # Metadata (title, thumbnails, duration, date) come from the
        # regular video page, not the play pages above.
        webpage = self._download_webpage(
            'http://m.nuvid.com/video/%s' % video_id, video_id, 'Downloading video page')
        title = self._html_search_regex(
            [r'<span title="([^"]+)">',
             r'<div class="thumb-holder video">\s*<h5[^>]*>([^<]+)</h5>'], webpage, 'title').strip()
        thumbnails = [
            {
                'url': thumb_url,
            } for thumb_url in re.findall(r'<img src="([^"]+)" alt="" />', webpage)
        ]
        thumbnail = thumbnails[0]['url'] if thumbnails else None
        duration = parse_duration(self._html_search_regex(
            r'<i class="fa fa-clock-o"></i>\s*(\d{2}:\d{2})', webpage, 'duration', fatal=False))
        upload_date = unified_strdate(self._html_search_regex(
            r'<i class="fa fa-user"></i>\s*(\d{4}-\d{2}-\d{2})', webpage, 'upload date', fatal=False))

        return {
            'id': video_id,
            'title': title,
            'thumbnails': thumbnails,
            'thumbnail': thumbnail,
            'duration': duration,
            'upload_date': upload_date,
            'age_limit': 18,
            'formats': formats,
        }
| unlicense |
cchurch/pywinrm | winrm/tests/test_session.py | 4 | 1370 | from winrm import Session
def test_run_cmd(protocol_fake):
    # TODO this test should cover __init__ method
    session = Session('windows-host', auth=('john.smith', 'secret'))
    session.protocol = protocol_fake

    result = session.run_cmd('ipconfig', ['/all'])
    assert result.status_code == 0
    assert 'Windows IP Configuration' in result.std_out
    assert len(result.std_err) == 0
def test_target_as_hostname():
    """A bare hostname gets the default scheme and port applied."""
    session = Session('windows-host', auth=('john.smith', 'secret'))
    assert session.url == 'http://windows-host:5985/wsman'
def test_target_as_hostname_then_port():
    """An explicit port is preserved while the default scheme is applied."""
    session = Session('windows-host:1111', auth=('john.smith', 'secret'))
    assert session.url == 'http://windows-host:1111/wsman'
def test_target_as_schema_then_hostname():
    """An explicit scheme is preserved while the default port is applied."""
    session = Session('http://windows-host', auth=('john.smith', 'secret'))
    assert session.url == 'http://windows-host:5985/wsman'
def test_target_as_schema_then_hostname_then_port():
    """Explicit scheme and port are both preserved; only the path is added."""
    session = Session('http://windows-host:1111', auth=('john.smith', 'secret'))
    assert session.url == 'http://windows-host:1111/wsman'
def test_target_as_full_url():
    """A fully qualified endpoint URL is used verbatim."""
    session = Session('http://windows-host:1111/wsman', auth=(
        'john.smith', 'secret'))
    assert session.url == 'http://windows-host:1111/wsman'
def test_target_with_dots():
    """Dotted FQDNs are not mistaken for scheme/port separators."""
    session = Session('windows-host.example.com', auth=('john.smith', 'secret'))
    assert session.url == 'http://windows-host.example.com:5985/wsman'
| mit |
openwisp/netjsonconfig | netjsonconfig/backends/openvpn/openvpn.py | 1 | 5835 | from ...schema import X509_FILE_MODE
from ..base.backend import BaseBackend
from . import converters
from .parser import OpenVpnParser, config_suffix, vpn_pattern
from .renderer import OpenVpnRenderer
from .schema import schema
class OpenVpn(BaseBackend):
    """
    OpenVPN 2.x Configuration Backend
    """

    schema = schema
    converters = [converters.OpenVpn]
    parser = OpenVpnParser
    renderer = OpenVpnRenderer
    list_identifiers = ['name']

    def _generate_contents(self, tar):
        """
        Adds configuration files to tarfile instance.

        :param tar: tarfile instance
        :returns: None
        """
        text = self.render(files=False)
        # create a list with all the packages (and remove empty entries)
        vpn_instances = vpn_pattern.split(text)
        if '' in vpn_instances:
            vpn_instances.remove('')
        # create a file for each VPN
        for vpn in vpn_instances:
            lines = vpn.split('\n')
            # first line holds the VPN name; the config body starts at line 3
            vpn_name = lines[0]
            text_contents = '\n'.join(lines[2:])
            # do not end with double new line
            if text_contents.endswith('\n\n'):
                text_contents = text_contents[0:-1]
            self._add_file(
                tar=tar,
                name='{0}{1}'.format(vpn_name, config_suffix),
                contents=text_contents,
            )

    @classmethod
    def auto_client(
        cls,
        host,
        server,
        ca_path=None,
        ca_contents=None,
        cert_path=None,
        cert_contents=None,
        key_path=None,
        key_contents=None,
    ):
        """
        Returns a configuration dictionary representing an OpenVPN client configuration
        that is compatible with the passed server configuration.

        :param host: remote VPN server
        :param server: dictionary representing a single OpenVPN server configuration
        :param ca_path: optional string representing path to CA, will consequently add
                        a file in the resulting configuration dictionary
        :param ca_contents: optional string representing contents of CA file
        :param cert_path: optional string representing path to certificate, will consequently add
                          a file in the resulting configuration dictionary
        :param cert_contents: optional string representing contents of cert file
        :param key_path: optional string representing path to key, will consequently add
                         a file in the resulting configuration dictionary
        :param key_contents: optional string representing contents of key file
        :returns: dictionary representing a single OpenVPN client configuration
        """
        # client defaults
        client = {
            "mode": "p2p",
            "nobind": True,
            "resolv_retry": "infinite",
            "tls_client": True,
        }
        # remote
        port = server.get('port') or 1195
        client['remote'] = [{'host': host, 'port': port}]
        # proto: mirror the server's transport
        if server.get('proto') == 'tcp-server':
            client['proto'] = 'tcp-client'
        else:
            client['proto'] = 'udp'
        # determine if pull must be True
        if 'server' in server or 'server_bridge' in server:
            client['pull'] = True
        # tls_client: disabled when the server is not a TLS server
        if 'tls_server' not in server or not server['tls_server']:
            client['tls_client'] = False
        # ns_cert_type: server value 'client' maps to client value 'server'
        ns_cert_type = {None: '', '': '', 'client': 'server'}
        client['ns_cert_type'] = ns_cert_type[server.get('ns_cert_type')]
        # remote_cert_tls: same client/server mirroring as above
        remote_cert_tls = {None: '', '': '', 'client': 'server'}
        client['remote_cert_tls'] = remote_cert_tls[server.get('remote_cert_tls')]
        # options copied verbatim from the server config when present
        copy_keys = [
            'name',
            'dev_type',
            'dev',
            'comp_lzo',
            'auth',
            'cipher',
            'ca',
            'cert',
            'key',
            'pkcs12',
            'mtu_test',
            'fragment',
            'mssfix',
            'keepalive',
            'persist_tun',
            'mute',
            'persist_key',
            'script_security',
            'user',
            'group',
            'log',
            'mute_replay_warnings',
            'secret',
            'reneg_sec',
            'tls_timeout',
            'tls_cipher',
            'float',
            'fast_io',
            'verb',
        ]
        for key in copy_keys:
            if key in server:
                client[key] = server[key]
        files = cls._auto_client_files(
            client,
            ca_path,
            ca_contents,
            cert_path,
            cert_contents,
            key_path,
            key_contents,
        )
        return {'openvpn': [client], 'files': files}

    @classmethod
    def _auto_client_files(
        cls,
        client,
        ca_path=None,
        ca_contents=None,
        cert_path=None,
        cert_contents=None,
        key_path=None,
        key_contents=None,
    ):
        """
        returns a list of NetJSON extra files for automatically generated clients
        produces side effects in ``client`` dictionary
        """
        files = []
        # each x509 artifact is added only when both path and contents are
        # supplied; the client dict is updated to reference the path
        if ca_path and ca_contents:
            client['ca'] = ca_path
            files.append(dict(path=ca_path, contents=ca_contents, mode=X509_FILE_MODE))
        if cert_path and cert_contents:
            client['cert'] = cert_path
            files.append(
                dict(path=cert_path, contents=cert_contents, mode=X509_FILE_MODE)
            )
        if key_path and key_contents:
            client['key'] = key_path
            files.append(
                dict(path=key_path, contents=key_contents, mode=X509_FILE_MODE,)
            )
        return files
| gpl-3.0 |
mbauskar/frappe | frappe/utils/oauth.py | 3 | 9622 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.utils
import json
from frappe import _
from six import string_types
class SignupDisabledError(frappe.PermissionError): pass
def get_oauth2_providers():
	"""Return the OAuth2 provider registry: flow params, redirect URIs and
	profile endpoints for google, github, facebook and (optionally) a remote
	frappe server configured in Social Login Keys."""
	out = {
		"google": {
			"flow_params": {
				"name": "google",
				"authorize_url": "https://accounts.google.com/o/oauth2/auth",
				"access_token_url": "https://accounts.google.com/o/oauth2/token",
				"base_url": "https://www.googleapis.com",
			},
			"redirect_uri": "/api/method/frappe.www.login.login_via_google",
			"auth_url_data": {
				"scope": "https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email",
				"response_type": "code"
			},

			# relative to base_url
			"api_endpoint": "oauth2/v2/userinfo"
		},

		"github": {
			"flow_params": {
				"name": "github",
				"authorize_url": "https://github.com/login/oauth/authorize",
				"access_token_url": "https://github.com/login/oauth/access_token",
				"base_url": "https://api.github.com/"
			},
			"redirect_uri": "/api/method/frappe.www.login.login_via_github",

			# relative to base_url
			"api_endpoint": "user"
		},

		"facebook": {
			"flow_params": {
				"name": "facebook",
				"authorize_url": "https://www.facebook.com/dialog/oauth",
				"access_token_url": "https://graph.facebook.com/oauth/access_token",
				"base_url": "https://graph.facebook.com"
			},
			"redirect_uri": "/api/method/frappe.www.login.login_via_facebook",
			"auth_url_data": {
				"display": "page",
				"response_type": "code",
				"scope": "email,public_profile"
			},

			# relative to base_url
			"api_endpoint": "/v2.5/me",
			"api_endpoint_args": {
				"fields": "first_name,last_name,email,gender,location,verified,picture"
			},
		}
	}

	# a remote frappe server can act as an OpenID-style provider when its
	# URL is configured in Social Login Keys
	frappe_server_url = frappe.db.get_value("Social Login Keys", None, "frappe_server_url")
	if frappe_server_url:
		out['frappe'] = {
			"flow_params": {
				"name": "frappe",
				"authorize_url": frappe_server_url + "/api/method/frappe.integrations.oauth2.authorize",
				"access_token_url": frappe_server_url + "/api/method/frappe.integrations.oauth2.get_token",
				"base_url": frappe_server_url
			},
			"redirect_uri": "/api/method/frappe.www.login.login_via_frappe",
			"auth_url_data": {
				"response_type": "code",
				"scope": "openid"
			},

			# relative to base_url
			"api_endpoint": "/api/method/frappe.integrations.oauth2.openid_profile"
		}

	return out
def get_oauth_keys(provider):
	"""Return {client_id, client_secret} for *provider*.

	Site config (``<provider>_login``) wins; otherwise the values come from
	the Social Login Keys doctype. Returns {} when either key is missing.
	"""
	conf_keys = frappe.conf.get("{provider}_login".format(provider=provider))
	if conf_keys:
		# conf may carry extra entries (e.g. redirect_uri); expose only two
		return {
			"client_id": conf_keys["client_id"],
			"client_secret": conf_keys["client_secret"]
		}

	# fall back to the database-backed settings
	social = frappe.get_doc("Social Login Keys", "Social Login Keys")
	keys = {}
	for fieldname in ("client_id", "client_secret"):
		value = social.get("{provider}_{fieldname}".format(provider=provider, fieldname=fieldname))
		if not value:
			return {}
		keys[fieldname] = value
	return keys
def get_oauth2_authorize_url(provider):
	"""Build the provider's authorize URL, registering a short-lived
	anti-forgery token that login_oauth_user later validates."""
	flow = get_oauth2_flow(provider)

	state = {"site": frappe.utils.get_url(), "token": frappe.generate_hash()}
	# the token expires after two minutes; it is checked on callback
	frappe.cache().set_value("{0}:{1}".format(provider, state["token"]), True, expires_in_sec=120)

	# relative to absolute url
	data = {
		"redirect_uri": get_redirect_uri(provider),
		"state": json.dumps(state)
	}
	# provider specific query params (scope, response_type, ...) if any
	data.update(get_oauth2_providers()[provider].get("auth_url_data", {}))

	return flow.get_authorize_url(**data)
def get_oauth2_flow(provider):
	"""Return a rauth OAuth2Service built from stored keys + flow params."""
	from rauth import OAuth2Service

	# client_id / client_secret plus the provider's endpoint urls
	service_params = dict(get_oauth_keys(provider))
	service_params.update(get_oauth2_providers()[provider]["flow_params"])

	return OAuth2Service(**service_params)
def get_redirect_uri(provider):
	"""Return the absolute OAuth redirect URI for *provider*.

	A fully qualified ``redirect_uri`` in site config takes precedence;
	otherwise the provider's relative path is joined with the site URL.
	"""
	conf_keys = frappe.conf.get("{provider}_login".format(provider=provider))
	if conf_keys and conf_keys.get("redirect_uri"):
		return conf_keys["redirect_uri"]

	relative_uri = get_oauth2_providers()[provider]["redirect_uri"]
	return frappe.utils.get_url(relative_uri)
def login_via_oauth2(provider, code, state, decoder=None):
	"""Complete the OAuth2 exchange for *provider* and log the user in."""
	user_info = get_info_via_oauth(provider, code, decoder)
	login_oauth_user(user_info, provider=provider, state=state)
def get_info_via_oauth(provider, code, decoder=None):
	"""Exchange the authorization *code* for a session and fetch the user's
	profile from the provider's API endpoint.

	:param decoder: optional response decoder (e.g. for facebook's
	                urlencoded token responses)
	Aborts via frappe.throw when the provider reports an unverified email.
	"""
	flow = get_oauth2_flow(provider)
	oauth2_providers = get_oauth2_providers()

	args = {
		"data": {
			"code": code,
			"redirect_uri": get_redirect_uri(provider),
			"grant_type": "authorization_code"
		}
	}
	if decoder:
		args["decoder"] = decoder

	session = flow.get_auth_session(**args)

	api_endpoint = oauth2_providers[provider].get("api_endpoint")
	api_endpoint_args = oauth2_providers[provider].get("api_endpoint_args")
	info = session.get(api_endpoint, params=api_endpoint_args).json()

	if (("verified_email" in info and not info.get("verified_email"))
		or ("verified" in info and not info.get("verified"))):
		# bug fix: the message used positional index {1} with a single
		# format argument, raising IndexError instead of the intended error
		frappe.throw(_("Email not verified with {0}").format(provider.title()))

	return info
def login_oauth_user(data=None, provider=None, state=None, email_id=None, key=None, generate_login_token=False):
	"""Validate the anti-forgery *state* token, create/update the User from
	the provider payload and start a session (or emit a login token)."""
	# NOTE: This could lead to security issue as the signed in user can type any email address in complete_signup
	# if email_id and key:
	# 	data = json.loads(frappe.db.get_temp(key))
	# 	# What if data is missing because of an invalid key
	# 	data["email"] = email_id
	#
	# elif not (data.get("email") and get_first_name(data)) and not frappe.db.exists("User", data.get("email")):
	# 	# ask for user email
	# 	key = frappe.db.set_temp(json.dumps(data))
	# 	frappe.db.commit()
	# 	frappe.local.response["type"] = "redirect"
	# 	frappe.local.response["location"] = "/complete_signup?key=" + key
	# 	return

	# json.loads data and state
	if isinstance(data, string_types):
		data = json.loads(data)

	if isinstance(state, string_types):
		state = json.loads(state)

	if not (state and state["token"]):
		frappe.respond_as_web_page(_("Invalid Request"), _("Token is missing"), http_status_code=417)
		return

	# token was registered by get_oauth2_authorize_url with a 2 minute TTL
	token = frappe.cache().get_value("{0}:{1}".format(provider, state["token"]), expires=True)
	if not token:
		frappe.respond_as_web_page(_("Invalid Request"), _("Invalid Token"), http_status_code=417)
		return

	user = data["email"]

	if not user:
		frappe.respond_as_web_page(_("Invalid Request"), _("Please ensure that your profile has an email address"))
		return

	try:
		# returns False when the user exists but is disabled
		if update_oauth_user(user, data, provider) is False:
			return

	except SignupDisabledError:
		return frappe.respond_as_web_page("Signup is Disabled", "Sorry. Signup from Website is disabled.",
			success=False, http_status_code=403)

	frappe.local.login_manager.user = user
	frappe.local.login_manager.post_login()

	# because of a GET request!
	frappe.db.commit()

	if frappe.utils.cint(generate_login_token):
		# API flow: hand back a one-time token instead of redirecting
		login_token = frappe.generate_hash(length=32)
		frappe.cache().set_value("login_token:{0}".format(login_token), frappe.local.session.sid, expires_in_sec=120)

		frappe.response["login_token"] = login_token

	else:
		redirect_post_login(desk_user=frappe.local.response.get('message') == 'Logged In')
def update_oauth_user(user, data, provider):
	"""Create or update the User document from an OAuth profile payload.

	Returns False when the existing user is disabled. Raises
	SignupDisabledError when a new user would be created but website signup
	is turned off.
	"""
	if isinstance(data.get("location"), dict):
		# facebook sends location as an object; keep only its display name
		data["location"] = data.get("location").get("name")

	save = False

	if not frappe.db.exists("User", user):

		# is signup disabled?
		if frappe.utils.cint(frappe.db.get_single_value("Website Settings", "disable_signup")):
			raise SignupDisabledError

		save = True
		user = frappe.new_doc("User")
		user.update({
			"doctype":"User",
			"first_name": get_first_name(data),
			"last_name": get_last_name(data),
			"email": data["email"],
			"gender": (data.get("gender") or "").title(),
			"enabled": 1,
			"new_password": frappe.generate_hash(data["email"]),
			"location": data.get("location"),
			"user_type": "Website User",
			"user_image": data.get("picture") or data.get("avatar_url")
		})

	else:
		user = frappe.get_doc("User", user)
		if not user.enabled:
			frappe.respond_as_web_page(_('Not Allowed'), _('User {0} is disabled').format(user.email))
			return False

	# link the provider-specific user id the first time each provider logs in
	if provider=="facebook" and not user.get("fb_userid"):
		save = True
		user.update({
			"fb_username": data.get("username"),
			"fb_userid": data["id"],
			"user_image": "https://graph.facebook.com/{id}/picture".format(id=data["id"])
		})

	elif provider=="google" and not user.get("google_userid"):
		save = True
		user.google_userid = data["id"]

	elif provider=="github" and not user.get("github_userid"):
		save = True
		user.github_userid = data["id"]
		user.github_username = data["login"]

	elif provider=="frappe" and not user.get("frappe_userid"):
		save = True
		user.frappe_userid = data["sub"]

	if save:
		# system-triggered save: skip permission checks and the welcome mail
		user.flags.ignore_permissions = True
		user.flags.no_welcome_mail = True
		user.save()
def get_first_name(data):
    """Return the first non-empty of first_name / given_name / name."""
    first = data.get("first_name")
    given = data.get("given_name")
    return first or given or data.get("name")
def get_last_name(data):
    """Return last_name if set (non-empty), else family_name."""
    last = data.get("last_name")
    return last if last else data.get("family_name")
def redirect_post_login(desk_user):
    """Issue an HTTP redirect after login: desk users go to the desk, others to /."""
    # the #desktop is added to prevent a facebook redirect bug
    target = "/desk#desktop" if desk_user else "/"
    response = frappe.local.response
    response["type"] = "redirect"
    response["location"] = target
| mit |
dcowden/cadquery-freecad-module | CadQuery/Libs/pygments/lexers/smalltalk.py | 77 | 7215 | # -*- coding: utf-8 -*-
"""
pygments.lexers.smalltalk
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Smalltalk and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['SmalltalkLexer', 'NewspeakLexer']
class SmalltalkLexer(RegexLexer):
    """
    For `Smalltalk <http://www.smalltalk.org/>`_ syntax.
    Contributed by Stefan Matthias Aust.
    Rewritten by Nils Winter.

    .. versionadded:: 0.10
    """
    name = 'Smalltalk'
    filenames = ['*.st']
    aliases = ['smalltalk', 'squeak', 'st']
    mimetypes = ['text/x-smalltalk']

    # State machine overview:
    #   root            - entry point; dispatches to all other states
    #   objects         - receivers (literals, variables, classes, pseudo vars)
    #   afterobject     - tokens that may follow an object (message sends etc.)
    #   parenth/inner_parenth - literal arrays #( ... ), possibly nested
    #   squeak fileout  - optional Squeak "fileout" chunk format
    tokens = {
        'root': [
            (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)),
            include('squeak fileout'),
            include('whitespaces'),
            include('method definition'),
            (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)),
            include('objects'),
            (r'\^|\:=|\_', Operator),
            # temporaries
            (r'[\]({}.;!]', Text),
        ],
        'method definition': [
            # Not perfect can't allow whitespaces at the beginning and the
            # without breaking everything
            (r'([a-zA-Z]+\w*:)(\s*)(\w+)',
             bygroups(Name.Function, Text, Name.Variable)),
            (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)),
            (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$',
             bygroups(Name.Function, Text, Name.Variable, Text)),
        ],
        'blockvariables': [
            # block argument declarations, e.g. [:x :y | ...]
            include('whitespaces'),
            (r'(:)(\s*)(\w+)',
             bygroups(Operator, Text, Name.Variable)),
            (r'\|', Operator, '#pop'),
            default('#pop'),  # else pop
        ],
        'literals': [
            (r"'(''|[^'])*'", String, 'afterobject'),
            (r'\$.', String.Char, 'afterobject'),
            (r'#\(', String.Symbol, 'parenth'),
            (r'\)', Text, 'afterobject'),
            (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'),
        ],
        '_parenth_helper': [
            # shared rules for the contents of literal arrays
            include('whitespaces'),
            (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
            (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol),
            # literals
            (r"'(''|[^'])*'", String),
            (r'\$.', String.Char),
            (r'#*\(', String.Symbol, 'inner_parenth'),
        ],
        'parenth': [
            # This state is a bit tricky since
            # we can't just pop this state
            (r'\)', String.Symbol, ('root', 'afterobject')),
            include('_parenth_helper'),
        ],
        'inner_parenth': [
            (r'\)', String.Symbol, '#pop'),
            include('_parenth_helper'),
        ],
        'whitespaces': [
            # skip whitespace and comments
            (r'\s+', Text),
            (r'"(""|[^"])*"', Comment),
        ],
        'objects': [
            (r'\[', Text, 'blockvariables'),
            (r'\]', Text, 'afterobject'),
            (r'\b(self|super|true|false|nil|thisContext)\b',
             Name.Builtin.Pseudo, 'afterobject'),
            (r'\b[A-Z]\w*(?!:)\b', Name.Class, 'afterobject'),
            (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'),
            (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)',
             String.Symbol, 'afterobject'),
            include('literals'),
        ],
        'afterobject': [
            (r'! !$', Keyword, '#pop'),  # squeak chunk delimiter
            include('whitespaces'),
            (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)',
             Name.Builtin, '#pop'),
            (r'\b(new\b(?!:))', Name.Builtin),
            (r'\:=|\_', Operator, '#pop'),
            (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'),
            (r'\b[a-zA-Z]+\w*', Name.Function),
            (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'),
            (r'\.', Punctuation, '#pop'),
            (r';', Punctuation),
            (r'[\])}]', Text),
            (r'[\[({]', Text, '#pop'),
        ],
        'squeak fileout': [
            # Squeak fileout format (optional)
            (r'^"(""|[^"])*"!', Keyword),
            (r"^'(''|[^'])*'!", Keyword),
            (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)',
             bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)),
            (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)",
             bygroups(Keyword, Name.Class, Keyword, String, Keyword)),
            (r'^(\w+)( subclass: )(#\w+)'
             r'(\s+instanceVariableNames: )(.*?)'
             r'(\s+classVariableNames: )(.*?)'
             r'(\s+poolDictionaries: )(.*?)'
             r'(\s+category: )(.*?)(!)',
             bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword,
                      String, Keyword, String, Keyword, String, Keyword)),
            (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)',
             bygroups(Name.Class, Keyword, String, Keyword)),
            (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)),
            (r'! !$', Keyword),
        ],
    }
class NewspeakLexer(RegexLexer):
    """
    For `Newspeak <http://newspeaklanguage.org/>` syntax.

    .. versionadded:: 1.1
    """
    name = 'Newspeak'
    filenames = ['*.ns2']
    aliases = ['newspeak', ]
    mimetypes = ['text/x-newspeak']

    tokens = {
        'root': [
            (r'\b(Newsqueak2)\b', Keyword.Declaration),
            (r"'[^']*'", String),
            (r'\b(class)(\s+)(\w+)(\s*)',
             bygroups(Keyword.Declaration, Text, Name.Class, Text)),
            (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b',
             Keyword),
            (r'(\w+\:)(\s*)([a-zA-Z_]\w+)',
             bygroups(Name.Function, Text, Name.Variable)),
            (r'(\w+)(\s*)(=)',
             bygroups(Name.Attribute, Text, Operator)),
            (r'<\w+>', Comment.Special),
            include('expressionstat'),
            include('whitespace')
        ],
        # expressions and statements: numbers, names, message sends, operators
        'expressionstat': [
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'\d+', Number.Integer),
            (r':\w+', Name.Variable),
            (r'(\w+)(::)', bygroups(Name.Variable, Operator)),
            (r'\w+:', Name.Function),
            (r'\w+', Name.Variable),
            (r'\(|\)', Punctuation),
            (r'\[|\]', Punctuation),
            (r'\{|\}', Punctuation),
            (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator),
            (r'\.|;', Punctuation),
            include('whitespace'),
            include('literals'),
        ],
        # character, string and symbol literals
        'literals': [
            (r'\$.', String),
            (r"'[^']*'", String),
            (r"#'[^']*'", String.Symbol),
            (r"#\w+:?", String.Symbol),
            (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol)
        ],
        # whitespace and double-quoted comments
        'whitespace': [
            (r'\s+', Text),
            (r'"[^"]*"', Comment)
        ],
    }
| lgpl-3.0 |
cwyark/v2ex | mapreduce/main.py | 20 | 2423 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for map-reduce implementation.
This module should be specified as a handler for mapreduce URLs in app.yaml:
handlers:
- url: /mapreduce(/.*)?
login: admin
script: mapreduce/main.py
"""
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from mapreduce import handlers
from mapreduce import status
class RedirectHandler(webapp.RequestHandler):
    """Redirects the user back to the status page."""

    def get(self):
        path = self.request.path
        # append "status", inserting a slash separator when needed
        suffix = 'status' if path.endswith('/') else '/status'
        self.redirect(path + suffix)
def create_application():
    """Create new WSGIApplication and register all handlers.

    Returns:
      an instance of webapp.WSGIApplication with all mapreduce handlers
      registered.
    """
    # NOTE(review): routes appear to be matched in declaration order, so the
    # catch-all redirect must stay before the static-file rule -- confirm
    # against webapp routing semantics before reordering.
    return webapp.WSGIApplication([
        # Task queue handlers.
        (r".*/worker_callback", handlers.MapperWorkerCallbackHandler),
        (r".*/controller_callback", handlers.ControllerCallbackHandler),
        (r".*/kickoffjob_callback", handlers.KickOffJobHandler),

        # RPC requests with JSON responses
        (r".*/command/start_job", handlers.StartJobHandler),
        (r".*/command/cleanup_job", handlers.CleanUpJobHandler),
        (r".*/command/abort_job", handlers.AbortJobHandler),
        (r".*/command/list_configs", status.ListConfigsHandler),
        (r".*/command/list_jobs", status.ListJobsHandler),
        (r".*/command/get_job_detail", status.GetJobDetailHandler),

        # Catch all redirects to status page.
        (r"/[^/]+(?:/)?", RedirectHandler),

        # UI static files
        (r".+/([a-zA-Z0-9]+(?:\.(?:css|js))?)", status.ResourceHandler),
        ],
        debug=True)
# Module-level WSGI application, created once and reused across requests.
APP = create_application()


def main():
    """Entry point: serve the mapreduce application via the CGI adapter."""
    util.run_wsgi_app(APP)


if __name__ == "__main__":
    main()
| bsd-3-clause |
kaiweifan/neutron | neutron/plugins/openvswitch/common/config.py | 3 | 3468 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.plugins.openvswitch.common import constants
# Empty defaults: deployers must explicitly configure bridge mappings,
# VLAN ranges, tunnel id ranges and tunnel types.
DEFAULT_BRIDGE_MAPPINGS = []
DEFAULT_VLAN_RANGES = []
DEFAULT_TUNNEL_RANGES = []
DEFAULT_TUNNEL_TYPES = []

# Options registered under the [OVS] config section.
ovs_opts = [
    cfg.StrOpt('integration_bridge', default='br-int',
               help=_("Integration bridge to use")),
    cfg.BoolOpt('enable_tunneling', default=False,
                help=_("Enable tunneling support")),
    cfg.StrOpt('tunnel_bridge', default='br-tun',
               help=_("Tunnel bridge to use")),
    cfg.StrOpt('int_peer_patch_port', default='patch-tun',
               help=_("Peer patch port in integration bridge for tunnel "
                      "bridge")),
    cfg.StrOpt('tun_peer_patch_port', default='patch-int',
               help=_("Peer patch port in tunnel bridge for integration "
                      "bridge")),
    cfg.StrOpt('local_ip', default='',
               help=_("Local IP address of GRE tunnel endpoints.")),
    cfg.ListOpt('bridge_mappings',
                default=DEFAULT_BRIDGE_MAPPINGS,
                help=_("List of <physical_network>:<bridge>")),
    cfg.StrOpt('tenant_network_type', default='local',
               help=_("Network type for tenant networks "
                      "(local, vlan, gre, vxlan, or none)")),
    cfg.ListOpt('network_vlan_ranges',
                default=DEFAULT_VLAN_RANGES,
                help=_("List of <physical_network>:<vlan_min>:<vlan_max> "
                       "or <physical_network>")),
    cfg.ListOpt('tunnel_id_ranges',
                default=DEFAULT_TUNNEL_RANGES,
                help=_("List of <tun_min>:<tun_max>")),
    cfg.StrOpt('tunnel_type', default='',
               help=_("The type of tunnels to use when utilizing tunnels, "
                      "either 'gre' or 'vxlan'")),
]

# Options registered under the [AGENT] config section.
agent_opts = [
    cfg.IntOpt('polling_interval', default=2,
               help=_("The number of seconds the agent will wait between "
                      "polling for local device changes.")),
    cfg.ListOpt('tunnel_types', default=DEFAULT_TUNNEL_TYPES,
                help=_("Network types supported by the agent "
                       "(gre and/or vxlan)")),
    cfg.IntOpt('vxlan_udp_port', default=constants.VXLAN_UDP_PORT,
               help=_("The UDP port to use for VXLAN tunnels.")),
    cfg.IntOpt('veth_mtu', default=None,
               help=_("MTU size of veth interfaces")),
    cfg.BoolOpt('l2_population', default=False,
                help=_("Use ml2 l2population mechanism driver to learn "
                       "remote mac and IPs and improve tunnel scalability")),
]

# Register all option groups and the shared agent/root-helper options.
cfg.CONF.register_opts(ovs_opts, "OVS")
cfg.CONF.register_opts(agent_opts, "AGENT")
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
| apache-2.0 |
queenp/wakatime | wakatime/packages/pygments_py3/pygments/styles/vs.py | 135 | 1073 | # -*- coding: utf-8 -*-
"""
pygments.styles.vs
~~~~~~~~~~~~~~~~~~
Simple style with MS Visual Studio colors.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Operator, Generic
class VisualStudioStyle(Style):
    """Pygments style approximating the MS Visual Studio color scheme."""

    background_color = "#ffffff"
    default_style = ""

    # token -> style definition (colors follow the VS defaults)
    styles = {
        Comment: "#008000",
        Comment.Preproc: "#0000ff",
        Keyword: "#0000ff",
        Operator.Word: "#0000ff",
        Keyword.Type: "#2b91af",
        Name.Class: "#2b91af",
        String: "#a31515",

        Generic.Heading: "bold",
        Generic.Subheading: "bold",
        Generic.Emph: "italic",
        Generic.Strong: "bold",
        Generic.Prompt: "bold",

        Error: "border:#FF0000"
    }
| bsd-3-clause |
beagles/neutron_hacking | neutron/plugins/vmware/plugins/service.py | 1 | 81209 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import netaddr
from oslo.config import cfg
from neutron.common import exceptions as n_exc
from neutron.db.firewall import firewall_db
from neutron.db import l3_db
from neutron.db.loadbalancer import loadbalancer_db
from neutron.db import routedserviceinsertion_db as rsi_db
from neutron.db.vpn import vpn_db
from neutron.extensions import firewall as fw_ext
from neutron.extensions import l3
from neutron.extensions import routedserviceinsertion as rsi
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import config # noqa
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.dbexts import servicerouter as sr_db
from neutron.plugins.vmware.dbexts import vcns_db
from neutron.plugins.vmware.dbexts import vcns_models
from neutron.plugins.vmware.extensions import servicerouter as sr
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.plugins.vmware.plugins import base
from neutron.plugins.vmware.vshield.common import constants as vcns_const
from neutron.plugins.vmware.vshield.common.constants import RouterStatus
from neutron.plugins.vmware.vshield.common import exceptions
from neutron.plugins.vmware.vshield.tasks.constants import TaskState
from neutron.plugins.vmware.vshield.tasks.constants import TaskStatus
from neutron.plugins.vmware.vshield import vcns_driver
from sqlalchemy.orm import exc as sa_exc
LOG = logging.getLogger(__name__)

# Internal discriminator: basic routers are plain NSX logical routers,
# advanced routers additionally get a vShield Edge appliance.
ROUTER_TYPE_BASIC = 1
ROUTER_TYPE_ADVANCED = 2

# Neutron-facing status strings; the list is indexed by the numeric
# RouterStatus level (see ROUTER_STATUS_LEVEL below and its use in
# get_router/get_routers), so the ordering here is significant.
ROUTER_STATUS = [
    service_constants.ACTIVE,
    service_constants.DOWN,
    service_constants.PENDING_CREATE,
    service_constants.PENDING_DELETE,
    service_constants.ERROR
]

# Reverse mapping: Neutron status string -> numeric RouterStatus level.
ROUTER_STATUS_LEVEL = {
    service_constants.ACTIVE: RouterStatus.ROUTER_STATUS_ACTIVE,
    service_constants.DOWN: RouterStatus.ROUTER_STATUS_DOWN,
    service_constants.PENDING_CREATE: (
        RouterStatus.ROUTER_STATUS_PENDING_CREATE
    ),
    service_constants.PENDING_DELETE: (
        RouterStatus.ROUTER_STATUS_PENDING_DELETE
    ),
    service_constants.ERROR: RouterStatus.ROUTER_STATUS_ERROR
}
class NsxAdvancedPlugin(sr_db.ServiceRouter_mixin,
                        base.NsxPluginV2,
                        rsi_db.RoutedServiceInsertionDbMixin,
                        firewall_db.Firewall_db_mixin,
                        loadbalancer_db.LoadBalancerPluginDb,
                        vpn_db.VPNPluginDb):
    """NSX plugin variant adding vShield Edge backed advanced services.

    Extends the base NSX plugin with service routers and the FWaaS, LBaaS
    and VPNaaS service plugins, delegating Edge operations to vcns_driver.
    """

    # extension aliases supported in addition to the base NSX plugin's
    supported_extension_aliases = (
        base.NsxPluginV2.supported_extension_aliases + [
            "service-router",
            "routed-service-insertion",
            "fwaas",
            "lbaas",
            "vpnaas"
        ])
    def __init__(self):
        """Wire Edge-aware gateway-port drivers, caches and the vCNS driver."""
        super(NsxAdvancedPlugin, self).__init__()

        # keep the base plugin's external-gateway port handlers so the
        # Edge-aware variants below can delegate for basic routers
        self._super_create_ext_gw_port = (
            self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW])
        self._super_delete_ext_gw_port = (
            self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW])

        self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW] = (
            self._vcns_create_ext_gw_port)
        self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW] = (
            self._vcns_delete_ext_gw_port)

        # cache router type based on router id
        self._router_type = {}
        self.callbacks = [VcnsCallbacks(self)]

        # load the vCNS driver
        self._load_vcns_drivers()

        # switchlib's create_lswitch needs to be replaced in order to proxy
        # logical switch create requests to vcns
        self._set_create_lswitch_proxy()
    def _set_create_lswitch_proxy(self):
        # monkey-patch the base module's create_lswitch so all logical
        # switch creations go through vcns (see _proxy_create_lswitch)
        base.switchlib.create_lswitch = self._proxy_create_lswitch
    def _proxy_create_lswitch(self, *args, **kwargs):
        """Proxy for switchlib.create_lswitch that routes through vcns.

        Re-uses the base helper to normalize the arguments, then creates
        the logical switch via the vCNS driver.
        """
        name, tz_config, tags = (
            _process_base_create_lswitch_args(*args, **kwargs)
        )
        return self.vcns_driver.create_lswitch(
            name, tz_config, tags=tags,
            port_isolation=None, replication_mode=None)
    def _load_vcns_drivers(self):
        # instantiate the vShield/vCNS driver with our callbacks
        self.vcns_driver = vcns_driver.VcnsDriver(self.callbacks)
    def _set_router_type(self, router_id, router_type):
        # memoize the router type (ROUTER_TYPE_BASIC/ADVANCED) by router id
        self._router_type[router_id] = router_type
    def _get_router_type(self, context=None, router_id=None, router=None):
        """Return the router's type, consulting/refreshing the id-keyed cache.

        Loads the router from the DB when only an id is given and the type
        is not cached yet.
        """
        if not router:
            if router_id in self._router_type:
                return self._router_type[router_id]
            router = self._get_router(context, router_id)

        LOG.debug(_("EDGE: router = %s"), router)
        if router['nsx_attributes']['service_router']:
            router_type = ROUTER_TYPE_ADVANCED
        else:
            router_type = ROUTER_TYPE_BASIC
        self._set_router_type(router['id'], router_type)
        return router_type
def _find_router_type(self, router):
is_service_router = router.get(sr.SERVICE_ROUTER, False)
if is_service_router:
return ROUTER_TYPE_ADVANCED
else:
return ROUTER_TYPE_BASIC
def _is_advanced_service_router(self, context=None, router_id=None,
router=None):
if router:
router_type = self._get_router_type(router=router)
else:
router_type = self._get_router_type(context, router_id)
return (router_type == ROUTER_TYPE_ADVANCED)
    def _vcns_create_ext_gw_port(self, context, port_data):
        """External-gateway port creation hook, Edge-aware.

        Delegates to the base plugin for basic routers; for advanced
        routers the port is created internally by VSM, so nothing to do.
        """
        router_id = port_data['device_id']
        if not self._is_advanced_service_router(context, router_id):
            self._super_create_ext_gw_port(context, port_data)
            return

        # NOP for Edge because currently the port will be create internally
        # by VSM
        LOG.debug(_("EDGE: _vcns_create_ext_gw_port"))
    def _vcns_delete_ext_gw_port(self, context, port_data):
        """External-gateway port deletion hook, Edge-aware.

        Delegates to the base plugin for basic routers; no-op for
        advanced (Edge-backed) routers.
        """
        router_id = port_data['device_id']
        if not self._is_advanced_service_router(context, router_id):
            self._super_delete_ext_gw_port(context, port_data)
            return

        # NOP for Edge
        LOG.debug(_("EDGE: _vcns_delete_ext_gw_port"))
    def _get_external_attachment_info(self, context, router):
        """Return (ipaddress, netmask, nexthop) for the router's gw port.

        Only the first fixed IP of the gateway port and the first subnet of
        the external network are considered; each element is None when not
        available.  Raises BadRequest if the gateway network is not
        flagged as external.
        """
        gw_port = router.gw_port
        ipaddress = None
        netmask = None
        nexthop = None
        if gw_port:
            # gw_port may have multiple IPs, only configure the first one
            if gw_port.get('fixed_ips'):
                ipaddress = gw_port['fixed_ips'][0]['ip_address']

            network_id = gw_port.get('network_id')
            if network_id:
                ext_net = self._get_network(context, network_id)
                if not ext_net.external:
                    msg = (_("Network '%s' is not a valid external "
                             "network") % network_id)
                    raise n_exc.BadRequest(resource='router', msg=msg)
                if ext_net.subnets:
                    ext_subnet = ext_net.subnets[0]
                    netmask = str(netaddr.IPNetwork(ext_subnet.cidr).netmask)
                    nexthop = ext_subnet.gateway_ip

        return (ipaddress, netmask, nexthop)
def _get_external_gateway_address(self, context, router):
ipaddress, netmask, nexthop = self._get_external_attachment_info(
context, router)
return nexthop
    def _vcns_update_static_routes(self, context, **kwargs):
        """Push static routes (one per router subnet) to the router's edge.

        Optional kwargs: ``router`` or ``router_id`` (one required),
        ``edge_id`` (looked up from the binding when absent), ``nexthop``
        (explicitly passing it forces a non-skippable update) and
        ``subnets`` (defaults to all CIDRs attached to the router).
        """
        router = kwargs.get('router')
        if router is None:
            router = self._get_router(context, kwargs['router_id'])

        edge_id = kwargs.get('edge_id')
        if edge_id is None:
            binding = vcns_db.get_vcns_router_binding(context.session,
                                                      router['id'])
            edge_id = binding['edge_id']

        skippable = True
        if 'nexthop' in kwargs:
            nexthop = kwargs['nexthop']
            # The default gateway and vnic config has dependencies, if we
            # explicitly specify nexthop to change, tell the driver not to
            # skip this route update
            skippable = False
        else:
            nexthop = self._get_external_gateway_address(context,
                                                         router)

        if 'subnets' in kwargs:
            subnets = kwargs['subnets']
        else:
            subnets = self._find_router_subnets_cidrs(context.elevated(),
                                                      router['id'])

        # every subnet is routed via the integration logical router address
        routes = []
        for subnet in subnets:
            routes.append({
                'cidr': subnet,
                'nexthop': vcns_const.INTEGRATION_LR_IPADDRESS.split('/')[0]
            })
        self.vcns_driver.update_routes(router['id'], edge_id, nexthop, routes,
                                       skippable)
    def _get_nat_rules(self, context, router):
        """Build (snat, dnat) rule lists for the router.

        DNAT: one rule per floating IP with an associated fixed port.
        SNAT: one rule per attached subnet, translated to the gateway
        port's first fixed IP, only when SNAT is enabled on the router.
        """
        fip_qry = context.session.query(l3_db.FloatingIP)
        fip_db = fip_qry.filter_by(router_id=router['id']).all()

        dnat = []
        snat = []
        for fip in fip_db:
            if fip.fixed_port_id:
                dnat.append({
                    'dst': fip.floating_ip_address,
                    'translated': fip.fixed_ip_address
                })

        gw_port = router.gw_port
        if gw_port and router.enable_snat:
            if gw_port.get('fixed_ips'):
                snat_ip = gw_port['fixed_ips'][0]['ip_address']
                subnets = self._find_router_subnets_cidrs(context.elevated(),
                                                          router['id'])
                for subnet in subnets:
                    snat.append({
                        'src': subnet,
                        'translated': snat_ip
                    })

        return (snat, dnat)
def _update_nat_rules(self, context, router):
snat, dnat = self._get_nat_rules(context, router)
binding = vcns_db.get_vcns_router_binding(context.session,
router['id'])
self.vcns_driver.update_nat_rules(router['id'],
binding['edge_id'],
snat, dnat)
    def _update_interface(self, context, router, sync=False):
        """Reconfigure the edge's external vnic for the router.

        Primary address comes from the gateway port; floating IPs bound to
        ports and LB VIP addresses are configured as secondary addresses.
        With ``sync`` True, block until the driver task completes.
        """
        addr, mask, nexthop = self._get_external_attachment_info(
            context, router)

        secondary = []
        fip_qry = context.session.query(l3_db.FloatingIP)
        fip_db = fip_qry.filter_by(router_id=router['id']).all()
        for fip in fip_db:
            if fip.fixed_port_id:
                secondary.append(fip.floating_ip_address)
        #Add all vip addresses bound on the router
        vip_addrs = self._get_all_vip_addrs_by_router_id(context,
                                                         router['id'])
        secondary.extend(vip_addrs)

        binding = vcns_db.get_vcns_router_binding(context.session,
                                                  router['id'])
        task = self.vcns_driver.update_interface(
            router['id'], binding['edge_id'],
            vcns_const.EXTERNAL_VNIC_INDEX,
            self.vcns_driver.external_network,
            addr, mask, secondary=secondary)
        if sync:
            task.wait(TaskState.RESULT)
    def _update_router_gw_info(self, context, router_id, info):
        """Update gateway info; for advanced routers also reconfigure the edge.

        Compares the external network/address/nexthop/SNAT state before and
        after the DB update and applies only the necessary edge changes
        (default route removal, vnic address, default route, NAT rules).
        """
        if not self._is_advanced_service_router(context, router_id):
            super(NsxAdvancedPlugin, self)._update_router_gw_info(
                context, router_id, info)
            return

        # get original gw_port config
        router = self._get_router(context, router_id)
        org_ext_net_id = router.gw_port_id and router.gw_port.network_id
        org_enable_snat = router.enable_snat
        orgaddr, orgmask, orgnexthop = self._get_external_attachment_info(
            context, router)

        # skip the direct parent's implementation: NSX-level gateway wiring
        # does not apply to Edge-backed routers
        super(base.NsxPluginV2, self)._update_router_gw_info(
            context, router_id, info, router=router)

        new_ext_net_id = router.gw_port_id and router.gw_port.network_id
        new_enable_snat = router.enable_snat
        newaddr, newmask, newnexthop = self._get_external_attachment_info(
            context, router)

        binding = vcns_db.get_vcns_router_binding(context.session, router_id)

        if new_ext_net_id != org_ext_net_id and orgnexthop:
            # network changed, need to remove default gateway before vnic
            # can be configured
            LOG.debug(_("VCNS: delete default gateway %s"), orgnexthop)
            self._vcns_update_static_routes(context,
                                            router=router,
                                            edge_id=binding['edge_id'],
                                            nexthop=None)

        if orgaddr != newaddr or orgmask != newmask:
            self.vcns_driver.update_interface(
                router_id, binding['edge_id'],
                vcns_const.EXTERNAL_VNIC_INDEX,
                self.vcns_driver.external_network,
                newaddr, newmask)

        if orgnexthop != newnexthop:
            self._vcns_update_static_routes(context,
                                            router=router,
                                            edge_id=binding['edge_id'],
                                            nexthop=newnexthop)

        if (new_ext_net_id == org_ext_net_id and
                org_enable_snat == new_enable_snat):
            return

        self._update_nat_rules(context, router)
def _add_subnet_snat_rule(self, context, router, subnet):
# NOP for service router
if not self._is_advanced_service_router(router=router):
super(NsxAdvancedPlugin, self)._add_subnet_snat_rule(
context, router, subnet)
def _delete_subnet_snat_rule(self, context, router, subnet):
# NOP for service router
if not self._is_advanced_service_router(router=router):
super(NsxAdvancedPlugin, self)._delete_subnet_snat_rule(
context, router, subnet)
def _remove_floatingip_address(self, context, fip_db):
# NOP for service router
router_id = fip_db.router_id
if not self._is_advanced_service_router(context, router_id):
super(NsxAdvancedPlugin, self)._remove_floatingip_address(
context, fip_db)
    def _create_advanced_service_router(self, context, neutron_router_id,
                                        name, lrouter, lswitch):
        """Record the router/edge binding and kick off edge deployment.

        Returns the new binding row (edge_id is filled in asynchronously
        by the deploy callback).
        """
        # store binding
        binding = vcns_db.add_vcns_router_binding(
            context.session, neutron_router_id, None, lswitch['uuid'],
            service_constants.PENDING_CREATE)

        # deploy edge
        jobdata = {
            'neutron_router_id': neutron_router_id,
            'lrouter': lrouter,
            'lswitch': lswitch,
            'context': context
        }

        # deploy and wait until the deploy request has been requested
        # so we will have edge_id ready. The wait here should be fine
        # as we're not in a database transaction now
        self.vcns_driver.deploy_edge(
            lrouter['uuid'], name, lswitch['uuid'], jobdata=jobdata,
            wait_for_exec=True)

        return binding
    def _create_integration_lswitch(self, tenant_id, name):
        """Create the lswitch connecting the logical router to the edge."""
        # use default transport zone
        transport_zone_config = [{
            "zone_uuid": self.cluster.default_tz_uuid,
            "transport_type": cfg.CONF.NSX.default_transport_type
        }]
        return self.vcns_driver.create_lswitch(name, transport_zone_config)
    def _add_router_integration_interface(self, tenant_id, name,
                                          lrouter, lswitch):
        """Patch the logical router into the integration lswitch.

        Creates a switch port and a router port (with the well-known
        integration address) and attaches them; each failure cleans up the
        port(s) created so far before re-raising.
        """
        # create logic switch port
        try:
            ls_port = switchlib.create_lport(
                self.cluster, lswitch['uuid'], tenant_id,
                '', '', lrouter['uuid'], True)
        except api_exc.NsxApiException:
            msg = (_("An exception occurred while creating a port "
                     "on lswitch %s") % lswitch['uuid'])
            LOG.exception(msg)
            raise n_exc.NeutronException(message=msg)

        # create logic router port
        try:
            neutron_port_id = ''
            pname = name[:36] + '-lp'
            admin_status_enabled = True
            lr_port = routerlib.create_router_lport(
                self.cluster, lrouter['uuid'], tenant_id,
                neutron_port_id, pname, admin_status_enabled,
                [vcns_const.INTEGRATION_LR_IPADDRESS])
        except api_exc.NsxApiException:
            msg = (_("Unable to create port on NSX logical router %s") % name)
            LOG.exception(msg)
            switchlib.delete_port(
                self.cluster, lswitch['uuid'], ls_port['uuid'])
            raise n_exc.NeutronException(message=msg)

        # attach logic router port to switch port
        try:
            self._update_router_port_attachment(
                self.cluster, None, lrouter['uuid'], {}, lr_port['uuid'],
                'PatchAttachment', ls_port['uuid'], None)
        except api_exc.NsxApiException as e:
            # lr_port should have been deleted
            switchlib.delete_port(
                self.cluster, lswitch['uuid'], ls_port['uuid'])
            raise e
def _create_lrouter(self, context, router, nexthop):
lrouter = super(NsxAdvancedPlugin, self)._create_lrouter(
context, router, vcns_const.INTEGRATION_EDGE_IPADDRESS)
router_type = self._find_router_type(router)
self._set_router_type(lrouter['uuid'], router_type)
if router_type == ROUTER_TYPE_BASIC:
return lrouter
tenant_id = self._get_tenant_id_for_create(context, router)
name = router['name']
try:
lsname = name[:36] + '-ls'
lswitch = self._create_integration_lswitch(
tenant_id, lsname)
except Exception:
msg = _("Unable to create integration logic switch "
"for router %s") % name
LOG.exception(msg)
routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
raise n_exc.NeutronException(message=msg)
try:
self._add_router_integration_interface(tenant_id, name,
lrouter, lswitch)
except Exception:
msg = _("Unable to add router interface to integration lswitch "
"for router %s") % name
LOG.exception(msg)
routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
raise n_exc.NeutronException(message=msg)
try:
self._create_advanced_service_router(
context, router['id'], name, lrouter, lswitch)
except Exception:
msg = (_("Unable to create advance service router for %s") % name)
LOG.exception(msg)
self.vcns_driver.delete_lswitch(lswitch('uuid'))
routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
raise n_exc.NeutronException(message=msg)
lrouter['status'] = service_constants.PENDING_CREATE
return lrouter
def _delete_lrouter(self, context, router_id, nsx_router_id):
binding = vcns_db.get_vcns_router_binding(context.session, router_id)
if not binding:
super(NsxAdvancedPlugin, self)._delete_lrouter(
context, router_id, nsx_router_id)
else:
vcns_db.update_vcns_router_binding(
context.session, router_id,
status=service_constants.PENDING_DELETE)
lswitch_id = binding['lswitch_id']
edge_id = binding['edge_id']
# delete lswitch
try:
self.vcns_driver.delete_lswitch(lswitch_id)
except exceptions.ResourceNotFound:
LOG.warning(_("Did not found lswitch %s in NSX"), lswitch_id)
# delete edge
jobdata = {
'context': context
}
self.vcns_driver.delete_edge(router_id, edge_id, jobdata=jobdata)
# delete NSX logical router
routerlib.delete_lrouter(self.cluster, nsx_router_id)
if id in self._router_type:
del self._router_type[router_id]
    def _update_lrouter(self, context, router_id, name, nexthop, routes=None):
        """Update the logical router; advanced routers keep the fixed
        integration-edge nexthop regardless of the requested one.

        Returns the previous extra routes, as the base implementation does.
        """
        if not self._is_advanced_service_router(context, router_id):
            return super(NsxAdvancedPlugin, self)._update_lrouter(
                context, router_id, name, nexthop, routes=routes)

        previous_routes = super(NsxAdvancedPlugin, self)._update_lrouter(
            context, router_id, name,
            vcns_const.INTEGRATION_EDGE_IPADDRESS, routes=routes)

        # TODO(fank): Theoretically users can specify extra routes for
        # physical network, and routes for physical network needs to be
        # configured on Edge. This can be done by checking if nexthop is in
        # external network. But for now we only handle routes for logic
        # space and leave it for future enhancement.

        # Let _update_router_gw_info handle nexthop change
        #self._vcns_update_static_routes(context, router_id=router_id)

        return previous_routes
def _retrieve_and_delete_nat_rules(self, context, floating_ip_address,
internal_ip, router_id,
min_num_rules_expected=0):
# NOP for advanced service router
if not self._is_advanced_service_router(context, router_id):
super(NsxAdvancedPlugin, self)._retrieve_and_delete_nat_rules(
context, floating_ip_address, internal_ip, router_id,
min_num_rules_expected=min_num_rules_expected)
    def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
        # Update DB model only for advanced service router; for those we
        # skip the NSX-specific association logic in the direct parent and
        # call the plain L3 DB implementation instead.
        router_id = self._get_fip_assoc_data(context, fip, floatingip_db)[2]
        if (router_id and
            not self._is_advanced_service_router(context, router_id)):
            super(NsxAdvancedPlugin, self)._update_fip_assoc(
                context, fip, floatingip_db, external_port)
        else:
            super(base.NsxPluginV2, self)._update_fip_assoc(
                context, fip, floatingip_db, external_port)
def _get_nsx_lrouter_status(self, id):
try:
lrouter = routerlib.get_lrouter(self.cluster, id)
lr_status = lrouter["_relations"]["LogicalRouterStatus"]
if lr_status["fabric_status"]:
nsx_status = RouterStatus.ROUTER_STATUS_ACTIVE
else:
nsx_status = RouterStatus.ROUTER_STATUS_DOWN
except n_exc.NotFound:
nsx_status = RouterStatus.ROUTER_STATUS_ERROR
return nsx_status
def _get_vse_status(self, context, id):
binding = vcns_db.get_vcns_router_binding(context.session, id)
edge_status_level = self.vcns_driver.get_edge_status(
binding['edge_id'])
edge_db_status_level = ROUTER_STATUS_LEVEL[binding.status]
if edge_status_level > edge_db_status_level:
return edge_status_level
else:
return edge_db_status_level
    def _get_all_nsx_lrouters_statuses(self, tenant_id, fields):
        """Return {lrouter uuid: RouterStatus level} for a tenant's routers,
        based on each logical router's fabric status.
        """
        # get nsx lrouters status
        nsx_lrouters = routerlib.get_lrouters(self.cluster,
                                              tenant_id,
                                              fields)
        nsx_status = {}
        for nsx_lrouter in nsx_lrouters:
            if (nsx_lrouter["_relations"]["LogicalRouterStatus"]
                    ["fabric_status"]):
                nsx_status[nsx_lrouter['uuid']] = (
                    RouterStatus.ROUTER_STATUS_ACTIVE
                )
            else:
                nsx_status[nsx_lrouter['uuid']] = (
                    RouterStatus.ROUTER_STATUS_DOWN
                )
        return nsx_status
    def _get_all_vse_statuses(self, context):
        """Return {router_id: status level} for all edge-backed routers.

        For each router the worse (higher) of the DB binding status and the
        live edge status is reported.  Returns {} when there are no
        advanced service routers, avoiding the driver query entirely.
        """
        bindings = self._model_query(
            context, vcns_models.VcnsRouterBinding)

        vse_db_status_level = {}
        edge_id_to_router_id = {}
        router_ids = []
        for binding in bindings:
            if not binding['edge_id']:
                continue
            router_id = binding['router_id']
            router_ids.append(router_id)
            edge_id_to_router_id[binding['edge_id']] = router_id
            vse_db_status_level[router_id] = (
                ROUTER_STATUS_LEVEL[binding['status']])

        if not vse_db_status_level:
            # no advanced service router, no need to query
            return {}

        vse_status_level = {}
        edges_status_level = self.vcns_driver.get_edges_statuses()
        for edge_id, status_level in edges_status_level.iteritems():
            if edge_id in edge_id_to_router_id:
                router_id = edge_id_to_router_id[edge_id]
                db_status_level = vse_db_status_level[router_id]
                if status_level > db_status_level:
                    vse_status_level[router_id] = status_level
                else:
                    vse_status_level[router_id] = db_status_level

        return vse_status_level
def get_router(self, context, id, fields=None):
if fields and 'status' not in fields:
return super(NsxAdvancedPlugin, self).get_router(
context, id, fields=fields)
router = super(NsxAdvancedPlugin, self).get_router(context, id)
router_type = self._find_router_type(router)
if router_type == ROUTER_TYPE_ADVANCED:
vse_status_level = self._get_vse_status(context, id)
if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]:
router['status'] = ROUTER_STATUS[vse_status_level]
return self._fields(router, fields)
def get_routers(self, context, filters=None, fields=None, **kwargs):
routers = super(NsxAdvancedPlugin, self).get_routers(
context, filters=filters, **kwargs)
if fields and 'status' not in fields:
# no status checking, just return regular get_routers
return [self._fields(router, fields) for router in routers]
for router in routers:
router_type = self._find_router_type(router)
if router_type == ROUTER_TYPE_ADVANCED:
break
else:
# no advanced service router, return here
return [self._fields(router, fields) for router in routers]
vse_status_all = self._get_all_vse_statuses(context)
for router in routers:
router_type = self._find_router_type(router)
if router_type == ROUTER_TYPE_ADVANCED:
vse_status_level = vse_status_all.get(router['id'])
if vse_status_level is None:
vse_status_level = RouterStatus.ROUTER_STATUS_ERROR
if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]:
router['status'] = ROUTER_STATUS[vse_status_level]
return [self._fields(router, fields) for router in routers]
def add_router_interface(self, context, router_id, interface_info):
info = super(NsxAdvancedPlugin, self).add_router_interface(
context, router_id, interface_info)
if self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
if router.enable_snat:
self._update_nat_rules(context, router)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._vcns_update_static_routes(context, router=router)
return info
def remove_router_interface(self, context, router_id, interface_info):
info = super(NsxAdvancedPlugin, self).remove_router_interface(
context, router_id, interface_info)
if self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
if router.enable_snat:
self._update_nat_rules(context, router)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._vcns_update_static_routes(context, router=router)
return info
def create_floatingip(self, context, floatingip):
fip = super(NsxAdvancedPlugin, self).create_floatingip(
context, floatingip)
router_id = fip.get('router_id')
if router_id and self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_nat_rules(context, router)
self._update_interface(context, router)
return fip
def update_floatingip(self, context, id, floatingip):
fip = super(NsxAdvancedPlugin, self).update_floatingip(
context, id, floatingip)
router_id = fip.get('router_id')
if router_id and self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_nat_rules(context, router)
self._update_interface(context, router)
return fip
def delete_floatingip(self, context, id):
fip_db = self._get_floatingip(context, id)
router_id = None
if fip_db.fixed_port_id:
router_id = fip_db.router_id
super(NsxAdvancedPlugin, self).delete_floatingip(context, id)
if router_id and self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_interface(context, router)
self._update_nat_rules(context, router)
def disassociate_floatingips(self, context, port_id):
try:
fip_qry = context.session.query(l3_db.FloatingIP)
fip_db = fip_qry.filter_by(fixed_port_id=port_id).one()
router_id = fip_db.router_id
except sa_exc.NoResultFound:
router_id = None
super(NsxAdvancedPlugin, self).disassociate_floatingips(context,
port_id)
if router_id and self._is_advanced_service_router(context, router_id):
router = self._get_router(context, router_id)
# TODO(fank): do rollback on error, or have a dedicated thread
# do sync work (rollback, re-configure, or make router down)
self._update_interface(context, router)
self._update_nat_rules(context, router)
#
# FWaaS plugin implementation
#
def _firewall_set_status(
self, context, firewall_id, status, firewall=None):
with context.session.begin(subtransactions=True):
fw_db = self._get_firewall(context, firewall_id)
if status == service_constants.PENDING_UPDATE and (
fw_db.status == service_constants.PENDING_DELETE):
raise fw_ext.FirewallInPendingState(
firewall_id=firewall_id, pending_state=status)
else:
fw_db.status = status
if firewall:
firewall['status'] = status
def _ensure_firewall_update_allowed(self, context, firewall_id):
fwall = self.get_firewall(context, firewall_id)
if fwall['status'] in [service_constants.PENDING_CREATE,
service_constants.PENDING_UPDATE,
service_constants.PENDING_DELETE]:
raise fw_ext.FirewallInPendingState(firewall_id=firewall_id,
pending_state=fwall['status'])
def _ensure_firewall_policy_update_allowed(
self, context, firewall_policy_id):
firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
for firewall_id in firewall_policy.get('firewall_list', []):
self._ensure_firewall_update_allowed(context, firewall_id)
def _ensure_update_or_delete_firewall_rule(
self, context, firewall_rule_id):
fw_rule = self.get_firewall_rule(context, firewall_rule_id)
if fw_rule.get('firewall_policy_id'):
self._ensure_firewall_policy_update_allowed(
context, fw_rule['firewall_policy_id'])
def _make_firewall_rule_list_by_policy_id(self, context, fw_policy_id):
if not fw_policy_id:
return []
firewall_policy_db = self._get_firewall_policy(context, fw_policy_id)
return [
self._make_firewall_rule_dict(fw_rule_db)
for fw_rule_db in firewall_policy_db['firewall_rules']
]
def _get_edge_id_by_vcns_edge_binding(self, context,
router_id):
#Get vcns_router_binding mapping between router and edge
router_binding = vcns_db.get_vcns_router_binding(
context.session, router_id)
return router_binding.edge_id
def _get_firewall_list_from_firewall_policy(self, context, policy_id):
firewall_policy_db = self._get_firewall_policy(context, policy_id)
return [
self._make_firewall_dict(fw_db)
for fw_db in firewall_policy_db['firewalls']
]
def _get_firewall_list_from_firewall_rule(self, context, rule_id):
rule = self._get_firewall_rule(context, rule_id)
if not rule.firewall_policy_id:
# The firewall rule is not associated with firewall policy yet
return None
return self._get_firewall_list_from_firewall_policy(
context, rule.firewall_policy_id)
def _vcns_update_firewall(self, context, fw, router_id=None, **kwargs):
edge_id = kwargs.get('edge_id')
if not edge_id:
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, router_id)
firewall_rule_list = kwargs.get('firewall_rule_list')
if not firewall_rule_list:
firewall_rule_list = self._make_firewall_rule_list_by_policy_id(
context, fw['firewall_policy_id'])
fw_with_rules = fw
fw_with_rules['firewall_rule_list'] = firewall_rule_list
try:
self.vcns_driver.update_firewall(context, edge_id, fw_with_rules)
except exceptions.VcnsApiException as e:
self._firewall_set_status(
context, fw['id'], service_constants.ERROR)
msg = (_("Failed to create firewall on vShield Edge "
"bound on router %s") % router_id)
LOG.exception(msg)
raise e
except exceptions.VcnsBadRequest as e:
self._firewall_set_status(
context, fw['id'], service_constants.ERROR)
LOG.exception(_("Bad Firewall request Input"))
raise e
def _vcns_delete_firewall(self, context, router_id=None, **kwargs):
edge_id = kwargs.get('edge_id')
if not edge_id:
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, router_id)
#TODO(linb):do rollback on error
self.vcns_driver.delete_firewall(context, edge_id)
    def create_firewall(self, context, firewall):
        """Create a firewall bound to an advanced service router.

        Requires a router_id in the request; the router must be an
        advanced service router with no firewall already bound. On
        success the firewall is pushed to the router's edge and marked
        ACTIVE.

        :raises n_exc.BadRequest: router_id missing or not advanced.
        :raises nsx_exc.ServiceOverQuota: router already has a firewall.
        """
        LOG.debug(_("create_firewall() called"))
        router_id = firewall['firewall'].get(vcns_const.ROUTER_ID)
        if not router_id:
            msg = _("router_id is not provided!")
            LOG.error(msg)
            raise n_exc.BadRequest(resource='router', msg=msg)
        if not self._is_advanced_service_router(context, router_id):
            msg = _("router_id:%s is not an advanced router!") % router_id
            LOG.error(msg)
            raise n_exc.BadRequest(resource='router', msg=msg)
        # only one firewall may be bound per router/edge
        if self._get_resource_router_id_binding(
            context, firewall_db.Firewall, router_id=router_id):
            msg = _("A firewall is already associated with the router")
            LOG.error(msg)
            raise nsx_exc.ServiceOverQuota(
                overs='firewall', err_msg=msg)
        fw = super(NsxAdvancedPlugin, self).create_firewall(context, firewall)
        #Add router service insertion binding with firewall object
        res = {
            'id': fw['id'],
            'router_id': router_id
        }
        self._process_create_resource_router_id(
            context, res, firewall_db.Firewall)
        # Since there is only one firewall per edge,
        # here would be bulk configuration operation on firewall
        self._vcns_update_firewall(context, fw, router_id)
        self._firewall_set_status(
            context, fw['id'], service_constants.ACTIVE, fw)
        fw[rsi.ROUTER_ID] = router_id
        return fw
def update_firewall(self, context, id, firewall):
LOG.debug(_("update_firewall() called"))
self._ensure_firewall_update_allowed(context, id)
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=id)
rule_list_pre = self._make_firewall_rule_list_by_policy_id(
context,
self.get_firewall(context, id)['firewall_policy_id'])
firewall['firewall']['status'] = service_constants.PENDING_UPDATE
fw = super(NsxAdvancedPlugin, self).update_firewall(
context, id, firewall)
fw[rsi.ROUTER_ID] = service_router_binding['router_id']
rule_list_new = self._make_firewall_rule_list_by_policy_id(
context, fw['firewall_policy_id'])
if rule_list_pre == rule_list_new:
self._firewall_set_status(
context, fw['id'], service_constants.ACTIVE, fw)
return fw
else:
self._vcns_update_firewall(
context, fw, service_router_binding.router_id,
firewall_rule_list=rule_list_new)
self._firewall_set_status(
context, fw['id'], service_constants.ACTIVE, fw)
return fw
def delete_firewall(self, context, id):
LOG.debug(_("delete_firewall() called"))
self._firewall_set_status(
context, id, service_constants.PENDING_DELETE)
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=id)
self._vcns_delete_firewall(context, service_router_binding.router_id)
super(NsxAdvancedPlugin, self).delete_firewall(context, id)
self._delete_resource_router_id_binding(
context, id, firewall_db.Firewall)
def get_firewall(self, context, id, fields=None):
fw = super(NsxAdvancedPlugin, self).get_firewall(
context, id, fields)
if fields and rsi.ROUTER_ID not in fields:
return fw
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=fw['id'])
fw[rsi.ROUTER_ID] = service_router_binding['router_id']
return fw
def get_firewalls(self, context, filters=None, fields=None):
fws = super(NsxAdvancedPlugin, self).get_firewalls(
context, filters, fields)
if fields and rsi.ROUTER_ID not in fields:
return fws
service_router_bindings = self._get_resource_router_id_bindings(
context, firewall_db.Firewall,
resource_ids=[fw['id'] for fw in fws])
mapping = dict([(binding['resource_id'], binding['router_id'])
for binding in service_router_bindings])
for fw in fws:
fw[rsi.ROUTER_ID] = mapping[fw['id']]
return fws
def update_firewall_rule(self, context, id, firewall_rule):
LOG.debug(_("update_firewall_rule() called"))
self._ensure_update_or_delete_firewall_rule(context, id)
fwr_pre = self.get_firewall_rule(context, id)
fwr = super(NsxAdvancedPlugin, self).update_firewall_rule(
context, id, firewall_rule)
if fwr_pre == fwr:
return fwr
# check if this rule is associated with firewall
fw_list = self._get_firewall_list_from_firewall_rule(context, id)
if not fw_list:
return fwr
for fw in fw_list:
# get router service insertion binding with firewall id
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=fw['id'])
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, service_router_binding.router_id)
#TODO(linb): do rollback on error
self.vcns_driver.update_firewall_rule(context, id, edge_id, fwr)
return fwr
def update_firewall_policy(self, context, id, firewall_policy):
LOG.debug(_("update_firewall_policy() called"))
self._ensure_firewall_policy_update_allowed(context, id)
firewall_rules_pre = self._make_firewall_rule_list_by_policy_id(
context, id)
fwp = super(NsxAdvancedPlugin, self).update_firewall_policy(
context, id, firewall_policy)
firewall_rules = self._make_firewall_rule_list_by_policy_id(
context, id)
if firewall_rules_pre == firewall_rules:
return fwp
# check if this policy is associated with firewall
fw_list = self._get_firewall_list_from_firewall_policy(context, id)
if not fw_list:
return fwp
for fw in fw_list:
# Get the router_service insertion binding with firewall id
# TODO(fank): optimized by using _get_resource_router_id_bindings
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=fw['id'])
self._vcns_update_firewall(
context, fw, service_router_binding.router_id,
firewall_rule_list=firewall_rules)
return fwp
    def insert_rule(self, context, id, rule_info):
        """Insert a rule into a policy and realize it on affected edges.

        When the request specifies a position (insert_before/insert_after)
        the targeted VCNS insert_rule API is used; otherwise the whole
        firewall configuration is re-pushed in bulk.
        """
        LOG.debug(_("insert_rule() called"))
        self._ensure_firewall_policy_update_allowed(context, id)
        fwp = super(NsxAdvancedPlugin, self).insert_rule(
            context, id, rule_info)
        fwr = super(NsxAdvancedPlugin, self).get_firewall_rule(
            context, rule_info['firewall_rule_id'])
        # check if this policy is associated with firewall
        fw_list = self._get_firewall_list_from_firewall_policy(context, id)
        if not fw_list:
            return fwp
        for fw in fw_list:
            # TODO(fank): optimized by using _get_resource_router_id_bindings
            service_router_binding = self._get_resource_router_id_binding(
                context, firewall_db.Firewall, resource_id=fw['id'])
            edge_id = self._get_edge_id_by_vcns_edge_binding(
                context, service_router_binding.router_id)
            if rule_info.get('insert_before') or rule_info.get('insert_after'):
                #if insert_before or insert_after is set, we would call
                #VCNS insert_rule API
                #TODO(linb): do rollback on error
                self.vcns_driver.insert_rule(context, rule_info, edge_id, fwr)
            else:
                #Else we would call bulk configuration on the firewall
                self._vcns_update_firewall(context, fw, edge_id=edge_id)
        return fwp
def remove_rule(self, context, id, rule_info):
LOG.debug(_("remove_rule() called"))
self._ensure_firewall_policy_update_allowed(context, id)
fwp = super(NsxAdvancedPlugin, self).remove_rule(
context, id, rule_info)
fwr = super(NsxAdvancedPlugin, self).get_firewall_rule(
context, rule_info['firewall_rule_id'])
# check if this policy is associated with firewall
fw_list = self._get_firewall_list_from_firewall_policy(context, id)
if not fw_list:
return fwp
for fw in fw_list:
# TODO(fank): optimized by using _get_resource_router_id_bindings
service_router_binding = self._get_resource_router_id_binding(
context, firewall_db.Firewall, resource_id=fw['id'])
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, service_router_binding.router_id)
#TODO(linb): do rollback on error
self.vcns_driver.delete_firewall_rule(
context, fwr['id'], edge_id)
return fwp
#
# LBAAS service plugin implementation
#
def _get_edge_id_by_vip_id(self, context, vip_id):
try:
service_router_binding = self._get_resource_router_id_binding(
context, loadbalancer_db.Vip, resource_id=vip_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to find the edge with "
"vip_id: %s"), vip_id)
return self._get_edge_id_by_vcns_edge_binding(
context, service_router_binding.router_id)
def _get_all_vip_addrs_by_router_id(
self, context, router_id):
vip_bindings = self._get_resource_router_id_bindings(
context, loadbalancer_db.Vip, router_ids=[router_id])
vip_addrs = []
for vip_binding in vip_bindings:
vip = self.get_vip(context, vip_binding.resource_id)
vip_addrs.append(vip.get('address'))
return vip_addrs
def _add_router_service_insertion_binding(self, context, resource_id,
router_id,
model):
res = {
'id': resource_id,
'router_id': router_id
}
self._process_create_resource_router_id(context, res,
model)
    def _resource_set_status(self, context, model, id, status, obj=None,
                             pool_id=None):
        """Persist a status on an LBaaS resource row, guarding transitions.

        PoolMonitorAssociation rows are keyed by (monitor_id, pool_id);
        all other models are keyed by id. Setting PENDING_UPDATE on a row
        already in PENDING_DELETE is rejected. The optional *obj* dict is
        kept in sync on success.

        :raises nsx_exc.NsxPluginException: illegal transition or row
            not found.
        """
        with context.session.begin(subtransactions=True):
            try:
                qry = context.session.query(model)
                # association rows have a composite key, not a plain id
                if issubclass(model, loadbalancer_db.PoolMonitorAssociation):
                    res = qry.filter_by(monitor_id=id,
                                        pool_id=pool_id).one()
                else:
                    res = qry.filter_by(id=id).one()
                if status == service_constants.PENDING_UPDATE and (
                    res.get('status') == service_constants.PENDING_DELETE):
                    msg = (_("Operation can't be performed, Since resource "
                             "%(model)s : %(id)s is in DELETEing status!") %
                           {'model': model,
                            'id': id})
                    LOG.error(msg)
                    raise nsx_exc.NsxPluginException(err_msg=msg)
                else:
                    res.status = status
            except sa_exc.NoResultFound:
                msg = (_("Resource %(model)s : %(id)s not found!") %
                       {'model': model,
                        'id': id})
                LOG.exception(msg)
                raise nsx_exc.NsxPluginException(err_msg=msg)
            if obj:
                obj['status'] = status
    def _vcns_create_pool_and_monitors(self, context, pool_id, **kwargs):
        """Create a pool and its health monitors on the serving edge.

        Monitors are created first; if one fails, the monitors created
        before it are removed from the edge. The pool itself is created
        last; on failure, both the pool and all monitors are rolled back
        on the edge. Exceptions are re-raised in both cases.

        :keyword edge_id: target edge; defaults to the edge serving the
            pool's VIP.
        """
        pool = self.get_pool(context, pool_id)
        edge_id = kwargs.get('edge_id')
        if not edge_id:
            edge_id = self._get_edge_id_by_vip_id(
                context, pool['vip_id'])
        #Check wheter the pool is already created on the router
        #in case of future's M:N relation between Pool and Vip
        #Check associated HealthMonitors and then create them
        for monitor_id in pool.get('health_monitors'):
            hm = self.get_health_monitor(context, monitor_id)
            try:
                self.vcns_driver.create_health_monitor(
                    context, edge_id, hm)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_("Failed to create healthmonitor "
                                    "associated with pool id: %s!") % pool_id)
                    # roll back only the monitors created before the
                    # failing one (iteration stops at the failed id)
                    for monitor_ide in pool.get('health_monitors'):
                        if monitor_ide == monitor_id:
                            break
                        self.vcns_driver.delete_health_monitor(
                            context, monitor_ide, edge_id)
        #Create the pool on the edge
        members = [
            super(NsxAdvancedPlugin, self).get_member(
                context, member_id)
            for member_id in pool.get('members')
        ]
        try:
            self.vcns_driver.create_pool(context, edge_id, pool, members)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create pool on vshield edge"))
                # undo pool and every monitor created above
                self.vcns_driver.delete_pool(
                    context, pool_id, edge_id)
                for monitor_id in pool.get('health_monitors'):
                    self.vcns_driver.delete_health_monitor(
                        context, monitor_id, edge_id)
def _vcns_update_pool(self, context, pool, **kwargs):
edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
members = kwargs.get('members')
if not members:
members = [
super(NsxAdvancedPlugin, self).get_member(
context, member_id)
for member_id in pool.get('members')
]
self.vcns_driver.update_pool(context, edge_id, pool, members)
    def create_vip(self, context, vip):
        """Create a VIP on an advanced service router's edge.

        Requires a router_id in the request; the router must be advanced
        and the VIP subnet must live on an external network. The VIP, its
        pool and monitors are realized on the router's edge; on backend
        failure the DB VIP and its router binding are rolled back.

        :raises n_exc.BadRequest: router_id missing.
        :raises nsx_exc.NsxPluginException: router not advanced, or the
            VIP network is not external.
        """
        LOG.debug(_("create_vip() called"))
        router_id = vip['vip'].get(vcns_const.ROUTER_ID)
        if not router_id:
            msg = _("router_id is not provided!")
            LOG.error(msg)
            raise n_exc.BadRequest(resource='router', msg=msg)
        if not self._is_advanced_service_router(context, router_id):
            msg = _("router_id: %s is not an advanced router!") % router_id
            LOG.error(msg)
            raise nsx_exc.NsxPluginException(err_msg=msg)
        #Check whether the vip port is an external port
        subnet_id = vip['vip']['subnet_id']
        network_id = self.get_subnet(context, subnet_id)['network_id']
        ext_net = self._get_network(context, network_id)
        if not ext_net.external:
            msg = (_("Network '%s' is not a valid external "
                     "network") % network_id)
            raise nsx_exc.NsxPluginException(err_msg=msg)
        v = super(NsxAdvancedPlugin, self).create_vip(context, vip)
        #Get edge_id for the resource
        router_binding = vcns_db.get_vcns_router_binding(
            context.session,
            router_id)
        edge_id = router_binding.edge_id
        #Add vip_router binding
        self._add_router_service_insertion_binding(context, v['id'],
                                                   router_id,
                                                   loadbalancer_db.Vip)
        #Create the vip port on vShield Edge
        router = self._get_router(context, router_id)
        self._update_interface(context, router, sync=True)
        #Create the vip and associated pool/monitor on the corresponding edge
        try:
            self._vcns_create_pool_and_monitors(
                context, v['pool_id'], edge_id=edge_id)
            self.vcns_driver.create_vip(context, edge_id, v)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create vip!"))
                # roll back the DB VIP and its router binding
                self._delete_resource_router_id_binding(
                    context, v['id'], loadbalancer_db.Vip)
                super(NsxAdvancedPlugin, self).delete_vip(context, v['id'])
        self._resource_set_status(context, loadbalancer_db.Vip,
                                  v['id'], service_constants.ACTIVE, v)
        v[rsi.ROUTER_ID] = router_id
        return v
    def update_vip(self, context, id, vip):
        """Update a VIP, re-realizing it on the edge.

        When the VIP is moved to a different pool, the VIP plus the old
        pool and its monitors are removed from the edge and the new
        pool/monitors/VIP are created. Otherwise the VIP is updated in
        place via the driver.
        """
        edge_id = self._get_edge_id_by_vip_id(context, id)
        old_vip = self.get_vip(context, id)
        vip['vip']['status'] = service_constants.PENDING_UPDATE
        v = super(NsxAdvancedPlugin, self).update_vip(context, id, vip)
        v[rsi.ROUTER_ID] = self._get_resource_router_id_binding(
            context, loadbalancer_db.Vip, resource_id=id)['router_id']
        if old_vip['pool_id'] != v['pool_id']:
            self.vcns_driver.delete_vip(context, id)
            #Delete old pool/monitor on the edge
            #TODO(linb): Factor out procedure for removing pool and health
            #separate method
            old_pool = self.get_pool(context, old_vip['pool_id'])
            self.vcns_driver.delete_pool(
                context, old_vip['pool_id'], edge_id)
            for monitor_id in old_pool.get('health_monitors'):
                self.vcns_driver.delete_health_monitor(
                    context, monitor_id, edge_id)
            #Create new pool/monitor object on the edge
            #TODO(linb): add exception handle if error
            self._vcns_create_pool_and_monitors(
                context, v['pool_id'], edge_id=edge_id)
            self.vcns_driver.create_vip(context, edge_id, v)
            # NOTE(review): this branch returns while the VIP is still
            # PENDING_UPDATE (no ACTIVE transition) -- confirm intended
            return v
        try:
            self.vcns_driver.update_vip(context, v)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to update vip with id: %s!"), id)
                self._resource_set_status(context, loadbalancer_db.Vip,
                                          id, service_constants.ERROR, v)
        self._resource_set_status(context, loadbalancer_db.Vip,
                                  v['id'], service_constants.ACTIVE, v)
        return v
    def delete_vip(self, context, id):
        """Delete a VIP and its pool/monitor realization from the edge.

        The VIP is removed from the driver first, then the pool and its
        monitors are cleaned off the edge, the router binding and DB row
        are removed, and the edge interface is re-synced.
        """
        v = self.get_vip(context, id)
        self._resource_set_status(
            context, loadbalancer_db.Vip,
            id, service_constants.PENDING_DELETE)
        try:
            self.vcns_driver.delete_vip(context, id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to delete vip with id: %s!"), id)
                self._resource_set_status(context, loadbalancer_db.Vip,
                                          id, service_constants.ERROR)
        # binding still exists at this point, so the edge can be resolved
        edge_id = self._get_edge_id_by_vip_id(context, id)
        #Check associated HealthMonitors and then delete them
        pool = self.get_pool(context, v['pool_id'])
        self.vcns_driver.delete_pool(context, v['pool_id'], edge_id)
        for monitor_id in pool.get('health_monitors'):
            #TODO(linb): do exception handle if error
            self.vcns_driver.delete_health_monitor(
                context, monitor_id, edge_id)
        router_binding = self._get_resource_router_id_binding(
            context, loadbalancer_db.Vip, resource_id=id)
        router = self._get_router(context, router_binding.router_id)
        self._delete_resource_router_id_binding(
            context, id, loadbalancer_db.Vip)
        super(NsxAdvancedPlugin, self).delete_vip(context, id)
        self._update_interface(context, router, sync=True)
def get_vip(self, context, id, fields=None):
vip = super(NsxAdvancedPlugin, self).get_vip(context, id, fields)
if fields and rsi.ROUTER_ID not in fields:
return vip
service_router_binding = self._get_resource_router_id_binding(
context, loadbalancer_db.Vip, resource_id=vip['id'])
vip[rsi.ROUTER_ID] = service_router_binding['router_id']
return vip
def get_vips(self, context, filters=None, fields=None):
vips = super(NsxAdvancedPlugin, self).get_vips(
context, filters, fields)
if fields and rsi.ROUTER_ID not in fields:
return vips
service_router_bindings = self._get_resource_router_id_bindings(
context, loadbalancer_db.Vip,
resource_ids=[vip['id'] for vip in vips])
mapping = dict([(binding['resource_id'], binding['router_id'])
for binding in service_router_bindings])
for vip in vips:
vip[rsi.ROUTER_ID] = mapping[vip['id']]
return vips
def update_pool(self, context, id, pool):
pool['pool']['status'] = service_constants.PENDING_UPDATE
p = super(NsxAdvancedPlugin, self).update_pool(context, id, pool)
#Check whether the pool is already associated with the vip
if not p.get('vip_id'):
self._resource_set_status(context, loadbalancer_db.Pool,
p['id'], service_constants.ACTIVE, p)
return p
try:
self._vcns_update_pool(context, p)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with id: %s!"), id)
self._resource_set_status(context, loadbalancer_db.Pool,
p['id'], service_constants.ERROR, p)
self._resource_set_status(context, loadbalancer_db.Pool,
p['id'], service_constants.ACTIVE, p)
return p
def create_member(self, context, member):
m = super(NsxAdvancedPlugin, self).create_member(context, member)
pool_id = m.get('pool_id')
pool = self.get_pool(context, pool_id)
if not pool.get('vip_id'):
self._resource_set_status(context, loadbalancer_db.Member,
m['id'], service_constants.ACTIVE, m)
return m
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id,
service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with the member"))
super(NsxAdvancedPlugin, self).delete_member(context, m['id'])
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
self._resource_set_status(context, loadbalancer_db.Member,
m['id'], service_constants.ACTIVE, m)
return m
    def update_member(self, context, id, member):
        """Update a member, resyncing affected pools on the edge.

        If the member moved between pools, the old pool is re-pushed
        first (when VIP-attached), then the new pool. A backend failure
        rolls back the DB member and re-raises.
        """
        member['member']['status'] = service_constants.PENDING_UPDATE
        old_member = self.get_member(context, id)
        m = super(NsxAdvancedPlugin, self).update_member(
            context, id, member)
        if m['pool_id'] != old_member['pool_id']:
            # member moved pools: the old pool no longer contains it and
            # must be re-pushed to its edge (if realized there)
            old_pool_id = old_member['pool_id']
            old_pool = self.get_pool(context, old_pool_id)
            if old_pool.get('vip_id'):
                self._resource_set_status(
                    context, loadbalancer_db.Pool,
                    old_pool_id, service_constants.PENDING_UPDATE)
                try:
                    self._vcns_update_pool(context, old_pool)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Failed to update old pool "
                                        "with the member"))
                        super(NsxAdvancedPlugin, self).delete_member(
                            context, m['id'])
                self._resource_set_status(
                    context, loadbalancer_db.Pool,
                    old_pool_id, service_constants.ACTIVE)
        pool_id = m['pool_id']
        pool = self.get_pool(context, pool_id)
        if not pool.get('vip_id'):
            # current pool not realized on any edge; DB update is enough
            self._resource_set_status(context, loadbalancer_db.Member,
                                      m['id'], service_constants.ACTIVE, m)
            return m
        self._resource_set_status(context, loadbalancer_db.Pool,
                                  pool_id,
                                  service_constants.PENDING_UPDATE)
        try:
            self._vcns_update_pool(context, pool)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to update pool with the member"))
                super(NsxAdvancedPlugin, self).delete_member(
                    context, m['id'])
        self._resource_set_status(context, loadbalancer_db.Pool,
                                  pool_id, service_constants.ACTIVE)
        self._resource_set_status(context, loadbalancer_db.Member,
                                  m['id'], service_constants.ACTIVE, m)
        return m
def delete_member(self, context, id):
m = self.get_member(context, id)
super(NsxAdvancedPlugin, self).delete_member(context, id)
pool_id = m['pool_id']
pool = self.get_pool(context, pool_id)
if not pool.get('vip_id'):
return
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with the member"))
self._resource_set_status(context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
def update_health_monitor(self, context, id, health_monitor):
old_hm = super(NsxAdvancedPlugin, self).get_health_monitor(
context, id)
hm = super(NsxAdvancedPlugin, self).update_health_monitor(
context, id, health_monitor)
for hm_pool in hm.get('pools'):
pool_id = hm_pool['pool_id']
pool = self.get_pool(context, pool_id)
if pool.get('vip_id'):
edge_id = self._get_edge_id_by_vip_id(
context, pool['vip_id'])
try:
self.vcns_driver.update_health_monitor(
context, edge_id, old_hm, hm)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update monitor "
"with id: %s!"), id)
return hm
def delete_health_monitor(self, context, id):
with context.session.begin(subtransactions=True):
qry = context.session.query(
loadbalancer_db.PoolMonitorAssociation
).filter_by(monitor_id=id)
for assoc in qry:
pool_id = assoc['pool_id']
super(NsxAdvancedPlugin,
self).delete_pool_health_monitor(context,
id,
pool_id)
pool = self.get_pool(context, pool_id)
if not pool.get('vip_id'):
continue
edge_id = self._get_edge_id_by_vip_id(
context, pool['vip_id'])
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to update pool with monitor!"))
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
try:
self.vcns_driver.delete_health_monitor(
context, id, edge_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete monitor "
"with id: %s!"), id)
super(NsxAdvancedPlugin,
self).delete_health_monitor(context, id)
self._delete_resource_router_id_binding(
context, id, loadbalancer_db.HealthMonitor)
super(NsxAdvancedPlugin, self).delete_health_monitor(context, id)
self._delete_resource_router_id_binding(
context, id, loadbalancer_db.HealthMonitor)
def create_pool_health_monitor(self, context,
health_monitor, pool_id):
monitor_id = health_monitor['health_monitor']['id']
pool = self.get_pool(context, pool_id)
monitors = pool.get('health_monitors')
if len(monitors) > 0:
msg = _("Vcns right now can only support "
"one monitor per pool")
LOG.error(msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
#Check whether the pool is already associated with the vip
if not pool.get('vip_id'):
res = super(NsxAdvancedPlugin,
self).create_pool_health_monitor(context,
health_monitor,
pool_id)
return res
#Get the edge_id
edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
res = super(NsxAdvancedPlugin,
self).create_pool_health_monitor(context,
health_monitor,
pool_id)
monitor = self.get_health_monitor(context, monitor_id)
#TODO(linb)Add Exception handle if error
self.vcns_driver.create_health_monitor(context, edge_id, monitor)
#Get updated pool
pool['health_monitors'].append(monitor['id'])
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.PENDING_UPDATE)
try:
self._vcns_update_pool(context, pool)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to associate monitor with pool!"))
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ERROR)
super(NsxAdvancedPlugin, self).delete_pool_health_monitor(
context, monitor_id, pool_id)
self._resource_set_status(
context, loadbalancer_db.Pool,
pool_id, service_constants.ACTIVE)
self._resource_set_status(
context, loadbalancer_db.PoolMonitorAssociation,
monitor_id, service_constants.ACTIVE, res,
pool_id=pool_id)
return res
    def delete_pool_health_monitor(self, context, id, pool_id):
        """Dissociate a health monitor from a pool.

        The DB association is removed first; if the pool is realized on
        an edge (VIP-attached), the pool is re-pushed without the monitor
        and the monitor is removed from the edge.
        """
        super(NsxAdvancedPlugin, self).delete_pool_health_monitor(
            context, id, pool_id)
        pool = self.get_pool(context, pool_id)
        #Check whether the pool is already associated with the vip
        if pool.get('vip_id'):
            #Delete the monitor on vshield edge
            edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
            self._resource_set_status(
                context, loadbalancer_db.Pool,
                pool_id, service_constants.PENDING_UPDATE)
            try:
                self._vcns_update_pool(context, pool)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(
                        _("Failed to update pool with pool_monitor!"))
                    self._resource_set_status(
                        context, loadbalancer_db.Pool,
                        pool_id, service_constants.ERROR)
            #TODO(linb): Add exception handle if error
            self.vcns_driver.delete_health_monitor(context, id, edge_id)
            self._resource_set_status(
                context, loadbalancer_db.Pool,
                pool_id, service_constants.ACTIVE)
def _vcns_update_ipsec_config(
self, context, vpnservice_id, removed_ipsec_conn_id=None):
sites = []
vpn_service = self._get_vpnservice(context, vpnservice_id)
edge_id = self._get_edge_id_by_vcns_edge_binding(
context, vpn_service.router_id)
if not vpn_service.router.gw_port:
msg = _("Failed to update ipsec vpn configuration on edge, since "
"the router: %s does not have a gateway yet!"
) % vpn_service.router_id
LOG.error(msg)
raise exceptions.VcnsBadRequest(resource='router', msg=msg)
external_ip = vpn_service.router.gw_port['fixed_ips'][0]['ip_address']
subnet = self._make_subnet_dict(vpn_service.subnet)
for ipsec_site_conn in vpn_service.ipsec_site_connections:
if ipsec_site_conn.id != removed_ipsec_conn_id:
site = self._make_ipsec_site_connection_dict(ipsec_site_conn)
ikepolicy = self._make_ikepolicy_dict(
ipsec_site_conn.ikepolicy)
ipsecpolicy = self._make_ipsecpolicy_dict(
ipsec_site_conn.ipsecpolicy)
sites.append({'site': site,
'ikepolicy': ikepolicy,
'ipsecpolicy': ipsecpolicy,
'subnet': subnet,
'external_ip': external_ip})
try:
self.vcns_driver.update_ipsec_config(
edge_id, sites, enabled=vpn_service.admin_state_up)
except exceptions.VcnsBadRequest:
LOG.exception(_("Bad or unsupported Input request!"))
raise
except exceptions.VcnsApiException:
msg = (_("Failed to update ipsec VPN configuration "
"with vpnservice: %(vpnservice_id)s on vShield Edge: "
"%(edge_id)s") % {'vpnservice_id': vpnservice_id,
'edge_id': edge_id})
LOG.exception(msg)
raise
def create_vpnservice(self, context, vpnservice):
LOG.debug(_("create_vpnservice() called"))
router_id = vpnservice['vpnservice'].get('router_id')
if not self._is_advanced_service_router(context, router_id):
msg = _("router_id:%s is not an advanced router!") % router_id
LOG.warning(msg)
raise exceptions.VcnsBadRequest(resource='router', msg=msg)
if self.get_vpnservices(context, filters={'router_id': [router_id]}):
msg = _("a vpnservice is already associated with the router: %s"
) % router_id
LOG.warning(msg)
raise nsx_exc.ServiceOverQuota(
overs='vpnservice', err_msg=msg)
service = super(NsxAdvancedPlugin, self).create_vpnservice(
context, vpnservice)
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
def update_vpnservice(self, context, vpnservice_id, vpnservice):
vpnservice['vpnservice']['status'] = service_constants.PENDING_UPDATE
service = super(NsxAdvancedPlugin, self).update_vpnservice(
context, vpnservice_id, vpnservice)
# Only admin_state_up attribute is configurable on Edge.
if vpnservice['vpnservice'].get('admin_state_up') is None:
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
# Test whether there is one ipsec site connection attached to
# the vpnservice. If not, just return without updating ipsec
# config on edge side.
vpn_service_db = self._get_vpnservice(context, vpnservice_id)
if not vpn_service_db.ipsec_site_connections:
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
try:
self._vcns_update_ipsec_config(context, service['id'])
except Exception:
with excutils.save_and_reraise_exception():
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ERROR, service)
self._resource_set_status(
context, vpn_db.VPNService,
service['id'], service_constants.ACTIVE, service)
return service
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_site_conn = super(
NsxAdvancedPlugin, self).create_ipsec_site_connection(
context, ipsec_site_connection)
try:
self._vcns_update_ipsec_config(
context, ipsec_site_conn['vpnservice_id'])
except Exception:
with excutils.save_and_reraise_exception():
super(NsxAdvancedPlugin, self).delete_ipsec_site_connection(
context, ipsec_site_conn['id'])
self._resource_set_status(
context, vpn_db.IPsecSiteConnection,
ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn)
return ipsec_site_conn
def update_ipsec_site_connection(self, context, ipsec_site_connection_id,
ipsec_site_connection):
ipsec_site_connection['ipsec_site_connection']['status'] = (
service_constants.PENDING_UPDATE)
ipsec_site_conn = super(
NsxAdvancedPlugin, self).update_ipsec_site_connection(
context, ipsec_site_connection_id, ipsec_site_connection)
try:
self._vcns_update_ipsec_config(
context, ipsec_site_conn['vpnservice_id'])
except Exception:
with excutils.save_and_reraise_exception():
self._resource_set_status(
context, vpn_db.IPsecSiteConnection, ipsec_site_conn['id'],
service_constants.ERROR, ipsec_site_conn)
self._resource_set_status(
context, vpn_db.IPsecSiteConnection,
ipsec_site_conn['id'], service_constants.ACTIVE, ipsec_site_conn)
return ipsec_site_conn
def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
self._resource_set_status(
context, vpn_db.IPsecSiteConnection,
ipsec_site_conn_id, service_constants.PENDING_DELETE)
vpnservice_id = self.get_ipsec_site_connection(
context, ipsec_site_conn_id)['vpnservice_id']
try:
self._vcns_update_ipsec_config(
context, vpnservice_id, ipsec_site_conn_id)
except Exception:
with excutils.save_and_reraise_exception():
self._resource_set_status(
context, vpn_db.IPsecSiteConnection, ipsec_site_conn_id,
service_constants.ERROR)
super(NsxAdvancedPlugin, self).delete_ipsec_site_connection(
context, ipsec_site_conn_id)
class VcnsCallbacks(object):
    """Callbacks invoked by the asynchronous vShield Edge task manager.

    Each method translates the start/completion of an edge task into
    router and router-binding status updates in the Neutron database.
    """
    def __init__(self, plugin):
        # Keep the plugin so callbacks can reach its DB helpers.
        self.plugin = plugin
    def edge_deploy_started(self, task):
        """callback when deployment task started."""
        jobdata = task.userdata['jobdata']
        context = jobdata['context']
        edge_id = task.userdata.get('edge_id')
        neutron_router_id = jobdata['neutron_router_id']
        name = task.userdata['router_name']
        if edge_id:
            LOG.debug(_("Start deploying %(edge_id)s for router %(name)s"), {
                'edge_id': edge_id,
                'name': name})
            # Record which edge serves this router as soon as it is known.
            vcns_db.update_vcns_router_binding(
                context.session, neutron_router_id, edge_id=edge_id)
        else:
            # A missing edge_id means the deploy request itself failed.
            LOG.debug(_("Failed to deploy Edge for router %s"), name)
            vcns_db.update_vcns_router_binding(
                context.session, neutron_router_id,
                status=service_constants.ERROR)
    def edge_deploy_result(self, task):
        """callback when deployment task finished."""
        jobdata = task.userdata['jobdata']
        lrouter = jobdata['lrouter']
        context = jobdata['context']
        name = task.userdata['router_name']
        neutron_router_id = jobdata['neutron_router_id']
        router_db = None
        try:
            router_db = self.plugin._get_router(
                context, neutron_router_id)
        except l3.RouterNotFound:
            # Router might have been deleted before deploy finished;
            # keep router_db as None and only update the binding below.
            LOG.exception(_("Router %s not found"), lrouter['uuid'])
        if task.status == TaskStatus.COMPLETED:
            LOG.debug(_("Successfully deployed %(edge_id)s for "
                        "router %(name)s"), {
                'edge_id': task.userdata['edge_id'],
                'name': name})
            if (router_db and
                    router_db['status'] == service_constants.PENDING_CREATE):
                router_db['status'] = service_constants.ACTIVE
            binding = vcns_db.get_vcns_router_binding(
                context.session, neutron_router_id)
            # only update status to active if its status is pending create
            if binding['status'] == service_constants.PENDING_CREATE:
                vcns_db.update_vcns_router_binding(
                    context.session, neutron_router_id,
                    status=service_constants.ACTIVE)
        else:
            LOG.debug(_("Failed to deploy Edge for router %s"), name)
            if router_db:
                router_db['status'] = service_constants.ERROR
            vcns_db.update_vcns_router_binding(
                context.session, neutron_router_id,
                status=service_constants.ERROR)
    def edge_delete_result(self, task):
        # Drop the router<->edge binding once the edge is actually gone.
        jobdata = task.userdata['jobdata']
        router_id = task.userdata['router_id']
        context = jobdata['context']
        if task.status == TaskStatus.COMPLETED:
            vcns_db.delete_vcns_router_binding(context.session,
                                               router_id)
    # The remaining callbacks only log the final task status.
    def interface_update_result(self, task):
        LOG.debug(_("interface_update_result %d"), task.status)
    def snat_create_result(self, task):
        LOG.debug(_("snat_create_result %d"), task.status)
    def snat_delete_result(self, task):
        LOG.debug(_("snat_delete_result %d"), task.status)
    def dnat_create_result(self, task):
        LOG.debug(_("dnat_create_result %d"), task.status)
    def dnat_delete_result(self, task):
        LOG.debug(_("dnat_delete_result %d"), task.status)
    def routes_update_result(self, task):
        LOG.debug(_("routes_update_result %d"), task.status)
    def nat_update_result(self, task):
        LOG.debug(_("nat_update_result %d"), task.status)
def _process_base_create_lswitch_args(*args, **kwargs):
    """Normalize positional/keyword lswitch-creation arguments.

    Expects args[1]=network id, args[2]=tenant id, args[3]=switch name,
    args[4]=transport-zone config; a sixth positional argument (legacy
    calling convention) or shared=True marks the switch shared.
    Returns (switch_name, tz_config, tags).
    """
    net_id = args[1]
    tenant_id = args[2]
    switch_name = args[3]
    tz_config = args[4]
    tags = utils.get_tags()
    tags.append({"tag": net_id, "scope": "quantum_net_id"})
    if tenant_id:
        tags.append({"tag": tenant_id, "scope": "os_tid"})
    # Legacy callers pass "shared" as a sixth positional argument.
    if kwargs.get("shared", False) or len(args) >= 6:
        tags.append({"tag": "true", "scope": "shared"})
    if kwargs.get("tags"):
        tags.extend(kwargs["tags"])
    return switch_name, tz_config, tags
# For backward compatibility: "NVP" was the product's former name; keep
# the old class name as an alias so existing deployments keep working.
NvpAdvancedPlugin = NsxAdvancedPlugin
| apache-2.0 |
alimony/django | django/contrib/gis/gdal/driver.py | 62 | 3262 | from ctypes import c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as vcapi, raster as rcapi
from django.utils.encoding import force_bytes, force_text
class Driver(GDALBase):
    """
    Wrap a GDAL/OGR Data Source Driver.
    For more information, see the C API source code:
    http://www.gdal.org/gdal_8h.html - http://www.gdal.org/ogr__api_8h.html
    """

    # Case-insensitive aliases for some GDAL/OGR Drivers.
    # For a complete list of original driver names see
    # http://www.gdal.org/ogr_formats.html (vector)
    # http://www.gdal.org/formats_list.html (raster)
    _alias = {
        # vector
        'esri': 'ESRI Shapefile',
        'shp': 'ESRI Shapefile',
        'shape': 'ESRI Shapefile',
        'tiger': 'TIGER',
        'tiger/line': 'TIGER',
        # raster
        'tiff': 'GTiff',
        'tif': 'GTiff',
        'jpeg': 'JPEG',
        'jpg': 'JPEG',
    }

    def __init__(self, dr_input):
        """
        Initialize an GDAL/OGR driver on either a string or integer input.
        """
        if isinstance(dr_input, str):
            # A driver name was given: resolve case-insensitive aliases,
            # then ask the vector API and the raster API in turn.
            self.ensure_registered()
            name = self._alias.get(dr_input.lower(), dr_input)
            for api in (vcapi, rcapi):
                driver = c_void_p(api.get_driver_by_name(force_bytes(name)))
                if driver:
                    break
        elif isinstance(dr_input, int):
            # A driver index was given; probe both APIs with it.
            self.ensure_registered()
            for api in (vcapi, rcapi):
                driver = api.get_driver(dr_input)
                if driver:
                    break
        elif isinstance(dr_input, c_void_p):
            # A raw pointer was handed in; trust it as-is.
            driver = dr_input
        else:
            raise GDALException('Unrecognized input type for GDAL/OGR Driver: %s' % type(dr_input))
        # A NULL pointer here means every lookup above failed.
        if not driver:
            raise GDALException('Could not initialize GDAL/OGR Driver on input: %s' % dr_input)
        self.ptr = driver

    def __str__(self):
        return self.name

    @classmethod
    def ensure_registered(cls):
        """
        Attempt to register all the data source drivers.
        """
        # Only register when the driver count is 0, otherwise drivers
        # would be registered over and over again on every call.
        for count_drivers, register_all in (
                (vcapi.get_driver_count, vcapi.register_all),
                (rcapi.get_driver_count, rcapi.register_all)):
            if not count_drivers():
                register_all()

    @classmethod
    def driver_count(cls):
        """
        Return the number of GDAL/OGR data source drivers registered.
        """
        return sum(api.get_driver_count() for api in (vcapi, rcapi))

    @property
    def name(self):
        """
        Return description/name string for this driver.
        """
        return force_text(rcapi.get_driver_description(self.ptr))
| bsd-3-clause |
mSenyor/sl4a | python/src/Lib/plat-sunos5/STROPTS.py | 66 | 36365 | # Generated by h2py from /usr/include/sys/stropts.h
# Included from sys/feature_tests.h
# Included from sys/isa_defs.h
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 8
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_ALIGNMENT_REQUIRED = 1
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 4
_DOUBLE_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 4
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 4
_ALIGNMENT_REQUIRED = 0
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_ALIGNMENT_REQUIRED = 1
_LONG_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 8
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 8
_LONG_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_POSIX_C_SOURCE = 1
_LARGEFILE64_SOURCE = 1
_LARGEFILE_SOURCE = 1
_FILE_OFFSET_BITS = 64
_FILE_OFFSET_BITS = 32
_POSIX_C_SOURCE = 199506L
_POSIX_PTHREAD_SEMANTICS = 1
_XOPEN_VERSION = 500
_XOPEN_VERSION = 4
_XOPEN_VERSION = 3
from TYPES import *
# Included from sys/conf.h
# Included from sys/t_lock.h
# Included from sys/machlock.h
from TYPES import *
LOCK_HELD_VALUE = 0xff
def SPIN_LOCK(pl): return ((pl) > ipltospl(LOCK_LEVEL))
def LOCK_SAMPLE_INTERVAL(i): return (((i) & 0xff) == 0)
CLOCK_LEVEL = 10
LOCK_LEVEL = 10
DISP_LEVEL = (LOCK_LEVEL + 1)
PTR24_LSB = 5
PTR24_MSB = (PTR24_LSB + 24)
PTR24_ALIGN = 32
PTR24_BASE = 0xe0000000
# Included from sys/param.h
from TYPES import *
_POSIX_VDISABLE = 0
MAX_INPUT = 512
MAX_CANON = 256
UID_NOBODY = 60001
GID_NOBODY = UID_NOBODY
UID_NOACCESS = 60002
MAX_TASKID = 999999
MAX_MAXPID = 999999
DEFAULT_MAXPID = 999999
DEFAULT_JUMPPID = 100000
DEFAULT_MAXPID = 30000
DEFAULT_JUMPPID = 0
MAXUID = 2147483647
MAXPROJID = MAXUID
MAXLINK = 32767
NMOUNT = 40
CANBSIZ = 256
NOFILE = 20
NGROUPS_UMIN = 0
NGROUPS_UMAX = 32
NGROUPS_MAX_DEFAULT = 16
NZERO = 20
NULL = 0L
NULL = 0
CMASK = 022
CDLIMIT = (1L<<11)
NBPS = 0x20000
NBPSCTR = 512
UBSIZE = 512
SCTRSHFT = 9
SYSNAME = 9
PREMOTE = 39
MAXPATHLEN = 1024
MAXSYMLINKS = 20
MAXNAMELEN = 256
NADDR = 13
PIPE_BUF = 5120
PIPE_MAX = 5120
NBBY = 8
MAXBSIZE = 8192
DEV_BSIZE = 512
DEV_BSHIFT = 9
MAXFRAG = 8
MAXOFF32_T = 0x7fffffff
MAXOFF_T = 0x7fffffffffffffffl
MAXOFFSET_T = 0x7fffffffffffffffl
MAXOFF_T = 0x7fffffffl
MAXOFFSET_T = 0x7fffffff
def btodb(bytes): return \
def dbtob(db): return \
def lbtodb(bytes): return \
def ldbtob(db): return \
NCARGS32 = 0x100000
NCARGS64 = 0x200000
NCARGS = NCARGS64
NCARGS = NCARGS32
FSHIFT = 8
FSCALE = (1<<FSHIFT)
def DELAY(n): return drv_usecwait(n)
def mmu_ptob(x): return ((x) << MMU_PAGESHIFT)
def mmu_btop(x): return (((x)) >> MMU_PAGESHIFT)
def mmu_btopr(x): return ((((x) + MMU_PAGEOFFSET) >> MMU_PAGESHIFT))
def mmu_ptod(x): return ((x) << (MMU_PAGESHIFT - DEV_BSHIFT))
def ptod(x): return ((x) << (PAGESHIFT - DEV_BSHIFT))
def ptob(x): return ((x) << PAGESHIFT)
def btop(x): return (((x) >> PAGESHIFT))
def btopr(x): return ((((x) + PAGEOFFSET) >> PAGESHIFT))
def dtop(DD): return (((DD) + NDPP - 1) >> (PAGESHIFT - DEV_BSHIFT))
def dtopt(DD): return ((DD) >> (PAGESHIFT - DEV_BSHIFT))
_AIO_LISTIO_MAX = (4096)
_AIO_MAX = (-1)
_MQ_OPEN_MAX = (32)
_MQ_PRIO_MAX = (32)
_SEM_NSEMS_MAX = INT_MAX
_SEM_VALUE_MAX = INT_MAX
# Included from sys/unistd.h
_CS_PATH = 65
_CS_LFS_CFLAGS = 68
_CS_LFS_LDFLAGS = 69
_CS_LFS_LIBS = 70
_CS_LFS_LINTFLAGS = 71
_CS_LFS64_CFLAGS = 72
_CS_LFS64_LDFLAGS = 73
_CS_LFS64_LIBS = 74
_CS_LFS64_LINTFLAGS = 75
_CS_XBS5_ILP32_OFF32_CFLAGS = 700
_CS_XBS5_ILP32_OFF32_LDFLAGS = 701
_CS_XBS5_ILP32_OFF32_LIBS = 702
_CS_XBS5_ILP32_OFF32_LINTFLAGS = 703
_CS_XBS5_ILP32_OFFBIG_CFLAGS = 705
_CS_XBS5_ILP32_OFFBIG_LDFLAGS = 706
_CS_XBS5_ILP32_OFFBIG_LIBS = 707
_CS_XBS5_ILP32_OFFBIG_LINTFLAGS = 708
_CS_XBS5_LP64_OFF64_CFLAGS = 709
_CS_XBS5_LP64_OFF64_LDFLAGS = 710
_CS_XBS5_LP64_OFF64_LIBS = 711
_CS_XBS5_LP64_OFF64_LINTFLAGS = 712
_CS_XBS5_LPBIG_OFFBIG_CFLAGS = 713
_CS_XBS5_LPBIG_OFFBIG_LDFLAGS = 714
_CS_XBS5_LPBIG_OFFBIG_LIBS = 715
_CS_XBS5_LPBIG_OFFBIG_LINTFLAGS = 716
_SC_ARG_MAX = 1
_SC_CHILD_MAX = 2
_SC_CLK_TCK = 3
_SC_NGROUPS_MAX = 4
_SC_OPEN_MAX = 5
_SC_JOB_CONTROL = 6
_SC_SAVED_IDS = 7
_SC_VERSION = 8
_SC_PASS_MAX = 9
_SC_LOGNAME_MAX = 10
_SC_PAGESIZE = 11
_SC_XOPEN_VERSION = 12
_SC_NPROCESSORS_CONF = 14
_SC_NPROCESSORS_ONLN = 15
_SC_STREAM_MAX = 16
_SC_TZNAME_MAX = 17
_SC_AIO_LISTIO_MAX = 18
_SC_AIO_MAX = 19
_SC_AIO_PRIO_DELTA_MAX = 20
_SC_ASYNCHRONOUS_IO = 21
_SC_DELAYTIMER_MAX = 22
_SC_FSYNC = 23
_SC_MAPPED_FILES = 24
_SC_MEMLOCK = 25
_SC_MEMLOCK_RANGE = 26
_SC_MEMORY_PROTECTION = 27
_SC_MESSAGE_PASSING = 28
_SC_MQ_OPEN_MAX = 29
_SC_MQ_PRIO_MAX = 30
_SC_PRIORITIZED_IO = 31
_SC_PRIORITY_SCHEDULING = 32
_SC_REALTIME_SIGNALS = 33
_SC_RTSIG_MAX = 34
_SC_SEMAPHORES = 35
_SC_SEM_NSEMS_MAX = 36
_SC_SEM_VALUE_MAX = 37
_SC_SHARED_MEMORY_OBJECTS = 38
_SC_SIGQUEUE_MAX = 39
_SC_SIGRT_MIN = 40
_SC_SIGRT_MAX = 41
_SC_SYNCHRONIZED_IO = 42
_SC_TIMERS = 43
_SC_TIMER_MAX = 44
_SC_2_C_BIND = 45
_SC_2_C_DEV = 46
_SC_2_C_VERSION = 47
_SC_2_FORT_DEV = 48
_SC_2_FORT_RUN = 49
_SC_2_LOCALEDEF = 50
_SC_2_SW_DEV = 51
_SC_2_UPE = 52
_SC_2_VERSION = 53
_SC_BC_BASE_MAX = 54
_SC_BC_DIM_MAX = 55
_SC_BC_SCALE_MAX = 56
_SC_BC_STRING_MAX = 57
_SC_COLL_WEIGHTS_MAX = 58
_SC_EXPR_NEST_MAX = 59
_SC_LINE_MAX = 60
_SC_RE_DUP_MAX = 61
_SC_XOPEN_CRYPT = 62
_SC_XOPEN_ENH_I18N = 63
_SC_XOPEN_SHM = 64
_SC_2_CHAR_TERM = 66
_SC_XOPEN_XCU_VERSION = 67
_SC_ATEXIT_MAX = 76
_SC_IOV_MAX = 77
_SC_XOPEN_UNIX = 78
_SC_PAGE_SIZE = _SC_PAGESIZE
_SC_T_IOV_MAX = 79
_SC_PHYS_PAGES = 500
_SC_AVPHYS_PAGES = 501
_SC_COHER_BLKSZ = 503
_SC_SPLIT_CACHE = 504
_SC_ICACHE_SZ = 505
_SC_DCACHE_SZ = 506
_SC_ICACHE_LINESZ = 507
_SC_DCACHE_LINESZ = 508
_SC_ICACHE_BLKSZ = 509
_SC_DCACHE_BLKSZ = 510
_SC_DCACHE_TBLKSZ = 511
_SC_ICACHE_ASSOC = 512
_SC_DCACHE_ASSOC = 513
_SC_MAXPID = 514
_SC_STACK_PROT = 515
_SC_THREAD_DESTRUCTOR_ITERATIONS = 568
_SC_GETGR_R_SIZE_MAX = 569
_SC_GETPW_R_SIZE_MAX = 570
_SC_LOGIN_NAME_MAX = 571
_SC_THREAD_KEYS_MAX = 572
_SC_THREAD_STACK_MIN = 573
_SC_THREAD_THREADS_MAX = 574
_SC_TTY_NAME_MAX = 575
_SC_THREADS = 576
_SC_THREAD_ATTR_STACKADDR = 577
_SC_THREAD_ATTR_STACKSIZE = 578
_SC_THREAD_PRIORITY_SCHEDULING = 579
_SC_THREAD_PRIO_INHERIT = 580
_SC_THREAD_PRIO_PROTECT = 581
_SC_THREAD_PROCESS_SHARED = 582
_SC_THREAD_SAFE_FUNCTIONS = 583
_SC_XOPEN_LEGACY = 717
_SC_XOPEN_REALTIME = 718
_SC_XOPEN_REALTIME_THREADS = 719
_SC_XBS5_ILP32_OFF32 = 720
_SC_XBS5_ILP32_OFFBIG = 721
_SC_XBS5_LP64_OFF64 = 722
_SC_XBS5_LPBIG_OFFBIG = 723
_PC_LINK_MAX = 1
_PC_MAX_CANON = 2
_PC_MAX_INPUT = 3
_PC_NAME_MAX = 4
_PC_PATH_MAX = 5
_PC_PIPE_BUF = 6
_PC_NO_TRUNC = 7
_PC_VDISABLE = 8
_PC_CHOWN_RESTRICTED = 9
_PC_ASYNC_IO = 10
_PC_PRIO_IO = 11
_PC_SYNC_IO = 12
_PC_FILESIZEBITS = 67
_PC_LAST = 67
_POSIX_VERSION = 199506L
_POSIX2_VERSION = 199209L
_POSIX2_C_VERSION = 199209L
_XOPEN_XCU_VERSION = 4
_XOPEN_REALTIME = 1
_XOPEN_ENH_I18N = 1
_XOPEN_SHM = 1
_POSIX2_C_BIND = 1
_POSIX2_CHAR_TERM = 1
_POSIX2_LOCALEDEF = 1
_POSIX2_C_DEV = 1
_POSIX2_SW_DEV = 1
_POSIX2_UPE = 1
# Included from sys/mutex.h
from TYPES import *
def MUTEX_HELD(x): return (mutex_owned(x))
# Included from sys/rwlock.h
from TYPES import *
def RW_READ_HELD(x): return (rw_read_held((x)))
def RW_WRITE_HELD(x): return (rw_write_held((x)))
def RW_LOCK_HELD(x): return (rw_lock_held((x)))
def RW_ISWRITER(x): return (rw_iswriter(x))
# Included from sys/semaphore.h
# Included from sys/thread.h
from TYPES import *
# Included from sys/klwp.h
from TYPES import *
# Included from sys/condvar.h
from TYPES import *
# Included from sys/time.h
# Included from sys/types32.h
# Included from sys/int_types.h
TIME32_MAX = INT32_MAX
TIME32_MIN = INT32_MIN
def TIMEVAL_OVERFLOW(tv): return \
from TYPES import *
DST_NONE = 0
DST_USA = 1
DST_AUST = 2
DST_WET = 3
DST_MET = 4
DST_EET = 5
DST_CAN = 6
DST_GB = 7
DST_RUM = 8
DST_TUR = 9
DST_AUSTALT = 10
ITIMER_REAL = 0
ITIMER_VIRTUAL = 1
ITIMER_PROF = 2
ITIMER_REALPROF = 3
def ITIMERVAL_OVERFLOW(itv): return \
SEC = 1
MILLISEC = 1000
MICROSEC = 1000000
NANOSEC = 1000000000
# Included from sys/time_impl.h
def TIMESPEC_OVERFLOW(ts): return \
def ITIMERSPEC_OVERFLOW(it): return \
__CLOCK_REALTIME0 = 0
CLOCK_VIRTUAL = 1
CLOCK_PROF = 2
__CLOCK_REALTIME3 = 3
CLOCK_HIGHRES = 4
CLOCK_MAX = 5
CLOCK_REALTIME = __CLOCK_REALTIME3
CLOCK_REALTIME = __CLOCK_REALTIME0
TIMER_RELTIME = 0x0
TIMER_ABSTIME = 0x1
def TICK_TO_SEC(tick): return ((tick) / hz)
def SEC_TO_TICK(sec): return ((sec) * hz)
def TICK_TO_MSEC(tick): return \
def MSEC_TO_TICK(msec): return \
def MSEC_TO_TICK_ROUNDUP(msec): return \
def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
def USEC_TO_TICK_ROUNDUP(usec): return \
def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
def NSEC_TO_TICK_ROUNDUP(nsec): return \
def TIMEVAL_TO_TICK(tvp): return \
def TIMESTRUC_TO_TICK(tsp): return \
# Included from time.h
from TYPES import *
# Included from iso/time_iso.h
NULL = 0L
NULL = 0
CLOCKS_PER_SEC = 1000000
# Included from sys/select.h
FD_SETSIZE = 65536
FD_SETSIZE = 1024
_NBBY = 8
NBBY = _NBBY
def FD_ZERO(p): return bzero((p), sizeof (*(p)))
# Included from sys/signal.h
# Included from sys/iso/signal_iso.h
SIGHUP = 1
SIGINT = 2
SIGQUIT = 3
SIGILL = 4
SIGTRAP = 5
SIGIOT = 6
SIGABRT = 6
SIGEMT = 7
SIGFPE = 8
SIGKILL = 9
SIGBUS = 10
SIGSEGV = 11
SIGSYS = 12
SIGPIPE = 13
SIGALRM = 14
SIGTERM = 15
SIGUSR1 = 16
SIGUSR2 = 17
SIGCLD = 18
SIGCHLD = 18
SIGPWR = 19
SIGWINCH = 20
SIGURG = 21
SIGPOLL = 22
SIGIO = SIGPOLL
SIGSTOP = 23
SIGTSTP = 24
SIGCONT = 25
SIGTTIN = 26
SIGTTOU = 27
SIGVTALRM = 28
SIGPROF = 29
SIGXCPU = 30
SIGXFSZ = 31
SIGWAITING = 32
SIGLWP = 33
SIGFREEZE = 34
SIGTHAW = 35
SIGCANCEL = 36
SIGLOST = 37
_SIGRTMIN = 38
_SIGRTMAX = 45
SIG_BLOCK = 1
SIG_UNBLOCK = 2
SIG_SETMASK = 3
SIGNO_MASK = 0xFF
SIGDEFER = 0x100
SIGHOLD = 0x200
SIGRELSE = 0x400
SIGIGNORE = 0x800
SIGPAUSE = 0x1000
# Included from sys/siginfo.h
from TYPES import *
SIGEV_NONE = 1
SIGEV_SIGNAL = 2
SIGEV_THREAD = 3
SI_NOINFO = 32767
SI_USER = 0
SI_LWP = (-1)
SI_QUEUE = (-2)
SI_TIMER = (-3)
SI_ASYNCIO = (-4)
SI_MESGQ = (-5)
# Included from sys/machsig.h
ILL_ILLOPC = 1
ILL_ILLOPN = 2
ILL_ILLADR = 3
ILL_ILLTRP = 4
ILL_PRVOPC = 5
ILL_PRVREG = 6
ILL_COPROC = 7
ILL_BADSTK = 8
NSIGILL = 8
EMT_TAGOVF = 1
EMT_CPCOVF = 2
NSIGEMT = 2
FPE_INTDIV = 1
FPE_INTOVF = 2
FPE_FLTDIV = 3
FPE_FLTOVF = 4
FPE_FLTUND = 5
FPE_FLTRES = 6
FPE_FLTINV = 7
FPE_FLTSUB = 8
NSIGFPE = 8
SEGV_MAPERR = 1
SEGV_ACCERR = 2
NSIGSEGV = 2
BUS_ADRALN = 1
BUS_ADRERR = 2
BUS_OBJERR = 3
NSIGBUS = 3
TRAP_BRKPT = 1
TRAP_TRACE = 2
TRAP_RWATCH = 3
TRAP_WWATCH = 4
TRAP_XWATCH = 5
NSIGTRAP = 5
CLD_EXITED = 1
CLD_KILLED = 2
CLD_DUMPED = 3
CLD_TRAPPED = 4
CLD_STOPPED = 5
CLD_CONTINUED = 6
NSIGCLD = 6
POLL_IN = 1
POLL_OUT = 2
POLL_MSG = 3
POLL_ERR = 4
POLL_PRI = 5
POLL_HUP = 6
NSIGPOLL = 6
PROF_SIG = 1
NSIGPROF = 1
SI_MAXSZ = 256
SI_MAXSZ = 128
# Included from sys/time_std_impl.h
from TYPES import *
SI32_MAXSZ = 128
def SI_CANQUEUE(c): return ((c) <= SI_QUEUE)
SA_NOCLDSTOP = 0x00020000
SA_ONSTACK = 0x00000001
SA_RESETHAND = 0x00000002
SA_RESTART = 0x00000004
SA_SIGINFO = 0x00000008
SA_NODEFER = 0x00000010
SA_NOCLDWAIT = 0x00010000
SA_WAITSIG = 0x00010000
NSIG = 46
MAXSIG = 45
S_SIGNAL = 1
S_SIGSET = 2
S_SIGACTION = 3
S_NONE = 4
MINSIGSTKSZ = 2048
SIGSTKSZ = 8192
SS_ONSTACK = 0x00000001
SS_DISABLE = 0x00000002
SN_PROC = 1
SN_CANCEL = 2
SN_SEND = 3
# Included from sys/ucontext.h
from TYPES import *
# Included from sys/regset.h
REG_CCR = (0)
REG_PSR = (0)
REG_PSR = (0)
REG_PC = (1)
REG_nPC = (2)
REG_Y = (3)
REG_G1 = (4)
REG_G2 = (5)
REG_G3 = (6)
REG_G4 = (7)
REG_G5 = (8)
REG_G6 = (9)
REG_G7 = (10)
REG_O0 = (11)
REG_O1 = (12)
REG_O2 = (13)
REG_O3 = (14)
REG_O4 = (15)
REG_O5 = (16)
REG_O6 = (17)
REG_O7 = (18)
REG_ASI = (19)
REG_FPRS = (20)
REG_PS = REG_PSR
REG_SP = REG_O6
REG_R0 = REG_O0
REG_R1 = REG_O1
_NGREG = 21
_NGREG = 19
NGREG = _NGREG
_NGREG32 = 19
_NGREG64 = 21
SPARC_MAXREGWINDOW = 31
MAXFPQ = 16
XRS_ID = 0x78727300
# Included from v7/sys/privregs.h
# Included from v7/sys/psr.h
PSR_CWP = 0x0000001F
PSR_ET = 0x00000020
PSR_PS = 0x00000040
PSR_S = 0x00000080
PSR_PIL = 0x00000F00
PSR_EF = 0x00001000
PSR_EC = 0x00002000
PSR_RSV = 0x000FC000
PSR_ICC = 0x00F00000
PSR_C = 0x00100000
PSR_V = 0x00200000
PSR_Z = 0x00400000
PSR_N = 0x00800000
PSR_VER = 0x0F000000
PSR_IMPL = 0xF0000000
PSL_ALLCC = PSR_ICC
PSL_USER = (PSR_S)
PSL_USERMASK = (PSR_ICC)
PSL_UBITS = (PSR_ICC|PSR_EF)
def USERMODE(ps): return (((ps) & PSR_PS) == 0)
# Included from sys/fsr.h
FSR_CEXC = 0x0000001f
FSR_AEXC = 0x000003e0
FSR_FCC = 0x00000c00
FSR_PR = 0x00001000
FSR_QNE = 0x00002000
FSR_FTT = 0x0001c000
FSR_VER = 0x000e0000
FSR_TEM = 0x0f800000
FSR_RP = 0x30000000
FSR_RD = 0xc0000000
FSR_VER_SHIFT = 17
FSR_FCC1 = 0x00000003
FSR_FCC2 = 0x0000000C
FSR_FCC3 = 0x00000030
FSR_CEXC_NX = 0x00000001
FSR_CEXC_DZ = 0x00000002
FSR_CEXC_UF = 0x00000004
FSR_CEXC_OF = 0x00000008
FSR_CEXC_NV = 0x00000010
FSR_AEXC_NX = (0x1 << 5)
FSR_AEXC_DZ = (0x2 << 5)
FSR_AEXC_UF = (0x4 << 5)
FSR_AEXC_OF = (0x8 << 5)
FSR_AEXC_NV = (0x10 << 5)
FTT_NONE = 0
FTT_IEEE = 1
FTT_UNFIN = 2
FTT_UNIMP = 3
FTT_SEQ = 4
FTT_ALIGN = 5
FTT_DFAULT = 6
FSR_FTT_SHIFT = 14
FSR_FTT_IEEE = (FTT_IEEE << FSR_FTT_SHIFT)
FSR_FTT_UNFIN = (FTT_UNFIN << FSR_FTT_SHIFT)
FSR_FTT_UNIMP = (FTT_UNIMP << FSR_FTT_SHIFT)
FSR_FTT_SEQ = (FTT_SEQ << FSR_FTT_SHIFT)
FSR_FTT_ALIGN = (FTT_ALIGN << FSR_FTT_SHIFT)
FSR_FTT_DFAULT = (FTT_DFAULT << FSR_FTT_SHIFT)
FSR_TEM_NX = (0x1 << 23)
FSR_TEM_DZ = (0x2 << 23)
FSR_TEM_UF = (0x4 << 23)
FSR_TEM_OF = (0x8 << 23)
FSR_TEM_NV = (0x10 << 23)
RP_DBLEXT = 0
RP_SINGLE = 1
RP_DOUBLE = 2
RP_RESERVED = 3
RD_NEAR = 0
RD_ZER0 = 1
RD_POSINF = 2
RD_NEGINF = 3
FPRS_DL = 0x1
FPRS_DU = 0x2
FPRS_FEF = 0x4
PIL_MAX = 0xf
def SAVE_GLOBALS(RP): return \
def RESTORE_GLOBALS(RP): return \
def SAVE_OUTS(RP): return \
def RESTORE_OUTS(RP): return \
def SAVE_WINDOW(SBP): return \
def RESTORE_WINDOW(SBP): return \
def STORE_FPREGS(FP): return \
def LOAD_FPREGS(FP): return \
_SPARC_MAXREGWINDOW = 31
_XRS_ID = 0x78727300
GETCONTEXT = 0
SETCONTEXT = 1
UC_SIGMASK = 001
UC_STACK = 002
UC_CPU = 004
UC_MAU = 010
UC_FPU = UC_MAU
UC_INTR = 020
UC_ASR = 040
UC_MCONTEXT = (UC_CPU|UC_FPU|UC_ASR)
UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
_SIGQUEUE_MAX = 32
_SIGNOTIFY_MAX = 32
# Included from sys/pcb.h
INSTR_VALID = 0x02
NORMAL_STEP = 0x04
WATCH_STEP = 0x08
CPC_OVERFLOW = 0x10
ASYNC_HWERR = 0x20
STEP_NONE = 0
STEP_REQUESTED = 1
STEP_ACTIVE = 2
STEP_WASACTIVE = 3
# Included from sys/msacct.h
LMS_USER = 0
LMS_SYSTEM = 1
LMS_TRAP = 2
LMS_TFAULT = 3
LMS_DFAULT = 4
LMS_KFAULT = 5
LMS_USER_LOCK = 6
LMS_SLEEP = 7
LMS_WAIT_CPU = 8
LMS_STOPPED = 9
NMSTATES = 10
# Included from sys/lwp.h
# Included from sys/synch.h
from TYPES import *
USYNC_THREAD = 0x00
USYNC_PROCESS = 0x01
LOCK_NORMAL = 0x00
LOCK_ERRORCHECK = 0x02
LOCK_RECURSIVE = 0x04
USYNC_PROCESS_ROBUST = 0x08
LOCK_PRIO_NONE = 0x00
LOCK_PRIO_INHERIT = 0x10
LOCK_PRIO_PROTECT = 0x20
LOCK_STALL_NP = 0x00
LOCK_ROBUST_NP = 0x40
LOCK_OWNERDEAD = 0x1
LOCK_NOTRECOVERABLE = 0x2
LOCK_INITED = 0x4
LOCK_UNMAPPED = 0x8
LWP_DETACHED = 0x00000040
LWP_SUSPENDED = 0x00000080
__LWP_ASLWP = 0x00000100
MAXSYSARGS = 8
NORMALRETURN = 0
JUSTRETURN = 1
LWP_USER = 0x01
LWP_SYS = 0x02
TS_FREE = 0x00
TS_SLEEP = 0x01
TS_RUN = 0x02
TS_ONPROC = 0x04
TS_ZOMB = 0x08
TS_STOPPED = 0x10
T_INTR_THREAD = 0x0001
T_WAKEABLE = 0x0002
T_TOMASK = 0x0004
T_TALLOCSTK = 0x0008
T_WOULDBLOCK = 0x0020
T_DONTBLOCK = 0x0040
T_DONTPEND = 0x0080
T_SYS_PROF = 0x0100
T_WAITCVSEM = 0x0200
T_WATCHPT = 0x0400
T_PANIC = 0x0800
TP_HOLDLWP = 0x0002
TP_TWAIT = 0x0004
TP_LWPEXIT = 0x0008
TP_PRSTOP = 0x0010
TP_CHKPT = 0x0020
TP_EXITLWP = 0x0040
TP_PRVSTOP = 0x0080
TP_MSACCT = 0x0100
TP_STOPPING = 0x0200
TP_WATCHPT = 0x0400
TP_PAUSE = 0x0800
TP_CHANGEBIND = 0x1000
TS_LOAD = 0x0001
TS_DONT_SWAP = 0x0002
TS_SWAPENQ = 0x0004
TS_ON_SWAPQ = 0x0008
TS_CSTART = 0x0100
TS_UNPAUSE = 0x0200
TS_XSTART = 0x0400
TS_PSTART = 0x0800
TS_RESUME = 0x1000
TS_CREATE = 0x2000
TS_ALLSTART = \
(TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
def CPR_VSTOPPED(t): return \
def THREAD_TRANSITION(tp): return thread_transition(tp);
def THREAD_STOP(tp): return \
def THREAD_ZOMB(tp): return THREAD_SET_STATE(tp, TS_ZOMB, NULL)
def SEMA_HELD(x): return (sema_held((x)))
NO_LOCKS_HELD = 1
NO_COMPETING_THREADS = 1
FMNAMESZ = 8
# Included from sys/systm.h
from TYPES import *
# Included from sys/proc.h
# Included from sys/cred.h
# Included from sys/user.h
from TYPES import *
# Included from sys/resource.h
from TYPES import *
PRIO_PROCESS = 0
PRIO_PGRP = 1
PRIO_USER = 2
RLIMIT_CPU = 0
RLIMIT_FSIZE = 1
RLIMIT_DATA = 2
RLIMIT_STACK = 3
RLIMIT_CORE = 4
RLIMIT_NOFILE = 5
RLIMIT_VMEM = 6
RLIMIT_AS = RLIMIT_VMEM
RLIM_NLIMITS = 7
RLIM_INFINITY = (-3l)
RLIM_SAVED_MAX = (-2l)
RLIM_SAVED_CUR = (-1l)
RLIM_INFINITY = 0x7fffffff
RLIM_SAVED_MAX = 0x7ffffffe
RLIM_SAVED_CUR = 0x7ffffffd
RLIM32_INFINITY = 0x7fffffff
RLIM32_SAVED_MAX = 0x7ffffffe
RLIM32_SAVED_CUR = 0x7ffffffd
# Included from sys/model.h
# Included from sys/debug.h
def ASSERT64(x): return ASSERT(x)
def ASSERT32(x): return ASSERT(x)
DATAMODEL_MASK = 0x0FF00000
DATAMODEL_ILP32 = 0x00100000
DATAMODEL_LP64 = 0x00200000
DATAMODEL_NONE = 0
DATAMODEL_NATIVE = DATAMODEL_LP64
DATAMODEL_NATIVE = DATAMODEL_ILP32
def STRUCT_SIZE(handle): return \
def STRUCT_BUF(handle): return ((handle).ptr.m64)
def SIZEOF_PTR(umodel): return \
def STRUCT_SIZE(handle): return (sizeof (*(handle).ptr))
def STRUCT_BUF(handle): return ((handle).ptr)
def SIZEOF_PTR(umodel): return sizeof (caddr_t)
def lwp_getdatamodel(t): return DATAMODEL_ILP32
RUSAGE_SELF = 0
RUSAGE_CHILDREN = -1
# Included from sys/auxv.h
AT_NULL = 0
AT_IGNORE = 1
AT_EXECFD = 2
AT_PHDR = 3
AT_PHENT = 4
AT_PHNUM = 5
AT_PAGESZ = 6
AT_BASE = 7
AT_FLAGS = 8
AT_ENTRY = 9
AT_DCACHEBSIZE = 10
AT_ICACHEBSIZE = 11
AT_UCACHEBSIZE = 12
AT_SUN_UID = 2000
AT_SUN_RUID = 2001
AT_SUN_GID = 2002
AT_SUN_RGID = 2003
AT_SUN_LDELF = 2004
AT_SUN_LDSHDR = 2005
AT_SUN_LDNAME = 2006
AT_SUN_LPAGESZ = 2007
AT_SUN_PLATFORM = 2008
AT_SUN_HWCAP = 2009
AT_SUN_IFLUSH = 2010
AT_SUN_CPU = 2011
AT_SUN_EMUL_ENTRY = 2012
AT_SUN_EMUL_EXECFD = 2013
AT_SUN_EXECNAME = 2014
AT_SUN_MMU = 2015
# Included from sys/errno.h
EPERM = 1
ENOENT = 2
ESRCH = 3
EINTR = 4
EIO = 5
ENXIO = 6
E2BIG = 7
ENOEXEC = 8
EBADF = 9
ECHILD = 10
EAGAIN = 11
ENOMEM = 12
EACCES = 13
EFAULT = 14
ENOTBLK = 15
EBUSY = 16
EEXIST = 17
EXDEV = 18
ENODEV = 19
ENOTDIR = 20
EISDIR = 21
EINVAL = 22
ENFILE = 23
EMFILE = 24
ENOTTY = 25
ETXTBSY = 26
EFBIG = 27
ENOSPC = 28
ESPIPE = 29
EROFS = 30
EMLINK = 31
EPIPE = 32
EDOM = 33
ERANGE = 34
ENOMSG = 35
EIDRM = 36
ECHRNG = 37
EL2NSYNC = 38
EL3HLT = 39
EL3RST = 40
ELNRNG = 41
EUNATCH = 42
ENOCSI = 43
EL2HLT = 44
EDEADLK = 45
ENOLCK = 46
ECANCELED = 47
ENOTSUP = 48
EDQUOT = 49
EBADE = 50
EBADR = 51
EXFULL = 52
ENOANO = 53
EBADRQC = 54
EBADSLT = 55
EDEADLOCK = 56
EBFONT = 57
EOWNERDEAD = 58
ENOTRECOVERABLE = 59
ENOSTR = 60
ENODATA = 61
ETIME = 62
ENOSR = 63
ENONET = 64
ENOPKG = 65
EREMOTE = 66
ENOLINK = 67
EADV = 68
ESRMNT = 69
ECOMM = 70
EPROTO = 71
ELOCKUNMAPPED = 72
ENOTACTIVE = 73
EMULTIHOP = 74
EBADMSG = 77
ENAMETOOLONG = 78
EOVERFLOW = 79
ENOTUNIQ = 80
EBADFD = 81
EREMCHG = 82
ELIBACC = 83
ELIBBAD = 84
ELIBSCN = 85
ELIBMAX = 86
ELIBEXEC = 87
EILSEQ = 88
ENOSYS = 89
ELOOP = 90
ERESTART = 91
ESTRPIPE = 92
ENOTEMPTY = 93
EUSERS = 94
ENOTSOCK = 95
EDESTADDRREQ = 96
EMSGSIZE = 97
EPROTOTYPE = 98
ENOPROTOOPT = 99
EPROTONOSUPPORT = 120
ESOCKTNOSUPPORT = 121
EOPNOTSUPP = 122
EPFNOSUPPORT = 123
EAFNOSUPPORT = 124
EADDRINUSE = 125
EADDRNOTAVAIL = 126
ENETDOWN = 127
ENETUNREACH = 128
ENETRESET = 129
ECONNABORTED = 130
ECONNRESET = 131
ENOBUFS = 132
EISCONN = 133
ENOTCONN = 134
ESHUTDOWN = 143
ETOOMANYREFS = 144
ETIMEDOUT = 145
ECONNREFUSED = 146
EHOSTDOWN = 147
EHOSTUNREACH = 148
EWOULDBLOCK = EAGAIN
EALREADY = 149
EINPROGRESS = 150
ESTALE = 151
PSARGSZ = 80
PSCOMSIZ = 14
MAXCOMLEN = 16
__KERN_NAUXV_IMPL = 19
__KERN_NAUXV_IMPL = 21
__KERN_NAUXV_IMPL = 21
PSARGSZ = 80
# Included from sys/watchpoint.h
from TYPES import *
# Included from vm/seg_enum.h
# Included from sys/copyops.h
from TYPES import *
# Included from sys/buf.h
# Included from sys/kstat.h
from TYPES import *
KSTAT_STRLEN = 31
# h2py emitted these empty C macros as dangling 'return \' continuations,
# which spliced into the following lines and produced a SyntaxError at
# import time; make them explicit no-op helpers instead.
def KSTAT_ENTER(k): return None
def KSTAT_EXIT(k): return None
KSTAT_TYPE_RAW = 0
KSTAT_TYPE_NAMED = 1
KSTAT_TYPE_INTR = 2
KSTAT_TYPE_IO = 3
KSTAT_TYPE_TIMER = 4
KSTAT_NUM_TYPES = 5
KSTAT_FLAG_VIRTUAL = 0x01
KSTAT_FLAG_VAR_SIZE = 0x02
KSTAT_FLAG_WRITABLE = 0x04
KSTAT_FLAG_PERSISTENT = 0x08
KSTAT_FLAG_DORMANT = 0x10
KSTAT_FLAG_INVALID = 0x20
KSTAT_READ = 0
KSTAT_WRITE = 1
KSTAT_DATA_CHAR = 0
KSTAT_DATA_INT32 = 1
KSTAT_DATA_UINT32 = 2
KSTAT_DATA_INT64 = 3
KSTAT_DATA_UINT64 = 4
KSTAT_DATA_LONG = KSTAT_DATA_INT32
KSTAT_DATA_ULONG = KSTAT_DATA_UINT32
KSTAT_DATA_LONG = KSTAT_DATA_INT64
KSTAT_DATA_ULONG = KSTAT_DATA_UINT64
KSTAT_DATA_LONG = 7
KSTAT_DATA_ULONG = 8
KSTAT_DATA_LONGLONG = KSTAT_DATA_INT64
KSTAT_DATA_ULONGLONG = KSTAT_DATA_UINT64
KSTAT_DATA_FLOAT = 5
KSTAT_DATA_DOUBLE = 6
KSTAT_INTR_HARD = 0
KSTAT_INTR_SOFT = 1
KSTAT_INTR_WATCHDOG = 2
KSTAT_INTR_SPURIOUS = 3
KSTAT_INTR_MULTSVC = 4
KSTAT_NUM_INTRS = 5
B_BUSY = 0x0001
B_DONE = 0x0002
B_ERROR = 0x0004
B_PAGEIO = 0x0010
B_PHYS = 0x0020
B_READ = 0x0040
B_WRITE = 0x0100
B_KERNBUF = 0x0008
B_WANTED = 0x0080
B_AGE = 0x000200
B_ASYNC = 0x000400
B_DELWRI = 0x000800
B_STALE = 0x001000
B_DONTNEED = 0x002000
B_REMAPPED = 0x004000
B_FREE = 0x008000
B_INVAL = 0x010000
B_FORCE = 0x020000
B_HEAD = 0x040000
B_NOCACHE = 0x080000
B_TRUNC = 0x100000
B_SHADOW = 0x200000
B_RETRYWRI = 0x400000
# h2py emitted these empty C macros as dangling 'return \' continuations,
# which spliced into each other and produced a SyntaxError at import time;
# make them explicit no-op helpers instead.
def notavail(bp): return None
def BWRITE(bp): return None
def BWRITE2(bp): return None
# Included from sys/aio_req.h
# Included from sys/uio.h
from TYPES import *
WP_NOWATCH = 0x01
WP_SETPROT = 0x02
# Included from sys/timer.h
from TYPES import *
_TIMER_MAX = 32
ITLK_LOCKED = 0x01
ITLK_WANTED = 0x02
ITLK_REMOVE = 0x04
IT_PERLWP = 0x01
IT_SIGNAL = 0x02
# Included from sys/utrap.h
UT_INSTRUCTION_DISABLED = 1
UT_INSTRUCTION_ERROR = 2
UT_INSTRUCTION_PROTECTION = 3
UT_ILLTRAP_INSTRUCTION = 4
UT_ILLEGAL_INSTRUCTION = 5
UT_PRIVILEGED_OPCODE = 6
UT_FP_DISABLED = 7
UT_FP_EXCEPTION_IEEE_754 = 8
UT_FP_EXCEPTION_OTHER = 9
UT_TAG_OVERFLOW = 10
UT_DIVISION_BY_ZERO = 11
UT_DATA_EXCEPTION = 12
UT_DATA_ERROR = 13
UT_DATA_PROTECTION = 14
UT_MEM_ADDRESS_NOT_ALIGNED = 15
UT_PRIVILEGED_ACTION = 16
UT_ASYNC_DATA_ERROR = 17
UT_TRAP_INSTRUCTION_16 = 18
UT_TRAP_INSTRUCTION_17 = 19
UT_TRAP_INSTRUCTION_18 = 20
UT_TRAP_INSTRUCTION_19 = 21
UT_TRAP_INSTRUCTION_20 = 22
UT_TRAP_INSTRUCTION_21 = 23
UT_TRAP_INSTRUCTION_22 = 24
UT_TRAP_INSTRUCTION_23 = 25
UT_TRAP_INSTRUCTION_24 = 26
UT_TRAP_INSTRUCTION_25 = 27
UT_TRAP_INSTRUCTION_26 = 28
UT_TRAP_INSTRUCTION_27 = 29
UT_TRAP_INSTRUCTION_28 = 30
UT_TRAP_INSTRUCTION_29 = 31
UT_TRAP_INSTRUCTION_30 = 32
UT_TRAP_INSTRUCTION_31 = 33
UTRAP_V8P_FP_DISABLED = UT_FP_DISABLED
UTRAP_V8P_MEM_ADDRESS_NOT_ALIGNED = UT_MEM_ADDRESS_NOT_ALIGNED
UT_PRECISE_MAXTRAPS = 33
# Included from sys/refstr.h
# Included from sys/task.h
from TYPES import *
TASK_NORMAL = 0x0
TASK_FINAL = 0x1
TASK_FINALITY = 0x1
# Included from sys/id_space.h
from TYPES import *
# Included from sys/vmem.h
from TYPES import *
VM_SLEEP = 0x00000000
VM_NOSLEEP = 0x00000001
VM_PANIC = 0x00000002
VM_KMFLAGS = 0x000000ff
VM_BESTFIT = 0x00000100
VMEM_ALLOC = 0x01
VMEM_FREE = 0x02
VMEM_SPAN = 0x10
ISP_NORMAL = 0x0
ISP_RESERVE = 0x1
# Included from sys/exacct_impl.h
from TYPES import *
# Included from sys/kmem.h
from TYPES import *
KM_SLEEP = 0x0000
KM_NOSLEEP = 0x0001
KM_PANIC = 0x0002
KM_VMFLAGS = 0x00ff
KM_FLAGS = 0xffff
KMC_NOTOUCH = 0x00010000
KMC_NODEBUG = 0x00020000
KMC_NOMAGAZINE = 0x00040000
KMC_NOHASH = 0x00080000
KMC_QCACHE = 0x00100000
_ISA_IA32 = 0
_ISA_IA64 = 1
SSLEEP = 1
SRUN = 2
SZOMB = 3
SSTOP = 4
SIDL = 5
SONPROC = 6
CLDPEND = 0x0001
CLDCONT = 0x0002
SSYS = 0x00000001
STRC = 0x00000002
SLOAD = 0x00000008
SLOCK = 0x00000010
SPREXEC = 0x00000020
SPROCTR = 0x00000040
SPRFORK = 0x00000080
SKILLED = 0x00000100
SULOAD = 0x00000200
SRUNLCL = 0x00000400
SBPTADJ = 0x00000800
SKILLCL = 0x00001000
SOWEUPC = 0x00002000
SEXECED = 0x00004000
SPASYNC = 0x00008000
SJCTL = 0x00010000
SNOWAIT = 0x00020000
SVFORK = 0x00040000
SVFWAIT = 0x00080000
EXITLWPS = 0x00100000
HOLDFORK = 0x00200000
SWAITSIG = 0x00400000
HOLDFORK1 = 0x00800000
COREDUMP = 0x01000000
SMSACCT = 0x02000000
ASLWP = 0x04000000
SPRLOCK = 0x08000000
NOCD = 0x10000000
HOLDWATCH = 0x20000000
SMSFORK = 0x40000000
SDOCORE = 0x80000000
FORREAL = 0
JUSTLOOKING = 1
SUSPEND_NORMAL = 0
SUSPEND_PAUSE = 1
NOCLASS = (-1)
# Included from sys/dditypes.h
DDI_DEVICE_ATTR_V0 = 0x0001
DDI_NEVERSWAP_ACC = 0x00
DDI_STRUCTURE_LE_ACC = 0x01
DDI_STRUCTURE_BE_ACC = 0x02
DDI_STRICTORDER_ACC = 0x00
DDI_UNORDERED_OK_ACC = 0x01
DDI_MERGING_OK_ACC = 0x02
DDI_LOADCACHING_OK_ACC = 0x03
DDI_STORECACHING_OK_ACC = 0x04
DDI_DATA_SZ01_ACC = 1
DDI_DATA_SZ02_ACC = 2
DDI_DATA_SZ04_ACC = 4
DDI_DATA_SZ08_ACC = 8
VERS_ACCHDL = 0x0001
DEVID_NONE = 0
DEVID_SCSI3_WWN = 1
DEVID_SCSI_SERIAL = 2
DEVID_FAB = 3
DEVID_ENCAP = 4
DEVID_MAXTYPE = 4
# Included from sys/varargs.h
# Included from sys/va_list.h
VA_ALIGN = 8
def _ARGSIZEOF(t): return ((sizeof (t) + VA_ALIGN - 1) & ~(VA_ALIGN - 1))
VA_ALIGN = 8
def _ARGSIZEOF(t): return ((sizeof (t) + VA_ALIGN - 1) & ~(VA_ALIGN - 1))
NSYSCALL = 256
SE_32RVAL1 = 0x0
SE_32RVAL2 = 0x1
SE_64RVAL = 0x2
SE_RVAL_MASK = 0x3
SE_LOADABLE = 0x08
SE_LOADED = 0x10
SE_NOUNLOAD = 0x20
SE_ARGC = 0x40
# Included from sys/devops.h
from TYPES import *
# Included from sys/poll.h
POLLIN = 0x0001
POLLPRI = 0x0002
POLLOUT = 0x0004
POLLRDNORM = 0x0040
POLLWRNORM = POLLOUT
POLLRDBAND = 0x0080
POLLWRBAND = 0x0100
POLLNORM = POLLRDNORM
POLLERR = 0x0008
POLLHUP = 0x0010
POLLNVAL = 0x0020
POLLREMOVE = 0x0800
POLLRDDATA = 0x0200
POLLNOERR = 0x0400
POLLCLOSED = 0x8000
# Included from vm/as.h
# Included from vm/seg.h
# Included from sys/vnode.h
from TYPES import *
VROOT = 0x01
VNOCACHE = 0x02
VNOMAP = 0x04
VDUP = 0x08
VNOSWAP = 0x10
VNOMOUNT = 0x20
VISSWAP = 0x40
VSWAPLIKE = 0x80
VVFSLOCK = 0x100
VVFSWAIT = 0x200
VVMLOCK = 0x400
VDIROPEN = 0x800
VVMEXEC = 0x1000
VPXFS = 0x2000
AT_TYPE = 0x0001
AT_MODE = 0x0002
AT_UID = 0x0004
AT_GID = 0x0008
AT_FSID = 0x0010
AT_NODEID = 0x0020
AT_NLINK = 0x0040
AT_SIZE = 0x0080
AT_ATIME = 0x0100
AT_MTIME = 0x0200
AT_CTIME = 0x0400
AT_RDEV = 0x0800
AT_BLKSIZE = 0x1000
AT_NBLOCKS = 0x2000
AT_VCODE = 0x4000
AT_ALL = (AT_TYPE|AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\
AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|\
AT_RDEV|AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
AT_STAT = (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\
AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV)
AT_TIMES = (AT_ATIME|AT_MTIME|AT_CTIME)
AT_NOSET = (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|AT_TYPE|\
AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
VSUID = 04000
VSGID = 02000
VSVTX = 01000
VREAD = 00400
VWRITE = 00200
VEXEC = 00100
MODEMASK = 07777
PERMMASK = 00777
def MANDMODE(mode): return (((mode) & (VSGID|(VEXEC>>3))) == VSGID)
VSA_ACL = 0x0001
VSA_ACLCNT = 0x0002
VSA_DFACL = 0x0004
VSA_DFACLCNT = 0x0008
LOOKUP_DIR = 0x01
DUMP_ALLOC = 0
DUMP_FREE = 1
DUMP_SCAN = 2
ATTR_UTIME = 0x01
ATTR_EXEC = 0x02
ATTR_COMM = 0x04
ATTR_HINT = 0x08
ATTR_REAL = 0x10
# Included from vm/faultcode.h
FC_HWERR = 0x1
FC_ALIGN = 0x2
FC_OBJERR = 0x3
FC_PROT = 0x4
FC_NOMAP = 0x5
FC_NOSUPPORT = 0x6
def FC_MAKE_ERR(e): return (((e) << 8) | FC_OBJERR)
def FC_CODE(fc): return ((fc) & 0xff)
def FC_ERRNO(fc): return ((unsigned)(fc) >> 8)
# Included from vm/hat.h
from TYPES import *
# Included from vm/page.h
PAGE_HASHAVELEN = 4
PAGE_HASHVPSHIFT = 6
PG_EXCL = 0x0001
PG_WAIT = 0x0002
PG_PHYSCONTIG = 0x0004
PG_MATCH_COLOR = 0x0008
PG_NORELOC = 0x0010
PG_FREE_LIST = 1
PG_CACHE_LIST = 2
PG_LIST_TAIL = 0
PG_LIST_HEAD = 1
def page_next_raw(PP): return page_nextn_raw((PP), 1)
PAGE_IO_INUSE = 0x1
PAGE_IO_WANTED = 0x2
PGREL_NOTREL = 0x1
PGREL_CLEAN = 0x2
PGREL_MOD = 0x3
P_FREE = 0x80
P_NORELOC = 0x40
def PP_SETAGED(pp): return ASSERT(PP_ISAGED(pp))
HAT_FLAGS_RESV = 0xFF000000
HAT_LOAD = 0x00
HAT_LOAD_LOCK = 0x01
HAT_LOAD_ADV = 0x04
HAT_LOAD_CONTIG = 0x10
HAT_LOAD_NOCONSIST = 0x20
HAT_LOAD_SHARE = 0x40
HAT_LOAD_REMAP = 0x80
HAT_RELOAD_SHARE = 0x100
HAT_PLAT_ATTR_MASK = 0xF00000
HAT_PROT_MASK = 0x0F
HAT_NOFAULT = 0x10
HAT_NOSYNC = 0x20
HAT_STRICTORDER = 0x0000
HAT_UNORDERED_OK = 0x0100
HAT_MERGING_OK = 0x0200
HAT_LOADCACHING_OK = 0x0300
HAT_STORECACHING_OK = 0x0400
HAT_ORDER_MASK = 0x0700
HAT_NEVERSWAP = 0x0000
HAT_STRUCTURE_BE = 0x1000
HAT_STRUCTURE_LE = 0x2000
HAT_ENDIAN_MASK = 0x3000
HAT_COW = 0x0001
HAT_UNLOAD = 0x00
HAT_UNLOAD_NOSYNC = 0x02
HAT_UNLOAD_UNLOCK = 0x04
HAT_UNLOAD_OTHER = 0x08
HAT_UNLOAD_UNMAP = 0x10
HAT_SYNC_DONTZERO = 0x00
HAT_SYNC_ZERORM = 0x01
HAT_SYNC_STOPON_REF = 0x02
HAT_SYNC_STOPON_MOD = 0x04
HAT_SYNC_STOPON_RM = (HAT_SYNC_STOPON_REF | HAT_SYNC_STOPON_MOD)
HAT_DUP_ALL = 1
HAT_DUP_COW = 2
HAT_MAP = 0x00
HAT_ADV_PGUNLOAD = 0x00
HAT_FORCE_PGUNLOAD = 0x01
P_MOD = 0x1
P_REF = 0x2
P_RO = 0x4
def hat_ismod(pp): return (hat_page_getattr(pp, P_MOD))
def hat_isref(pp): return (hat_page_getattr(pp, P_REF))
def hat_isro(pp): return (hat_page_getattr(pp, P_RO))
def hat_setmod(pp): return (hat_page_setattr(pp, P_MOD))
def hat_setref(pp): return (hat_page_setattr(pp, P_REF))
def hat_setrefmod(pp): return (hat_page_setattr(pp, P_REF|P_MOD))
def hat_clrmod(pp): return (hat_page_clrattr(pp, P_MOD))
def hat_clrref(pp): return (hat_page_clrattr(pp, P_REF))
def hat_clrrefmod(pp): return (hat_page_clrattr(pp, P_REF|P_MOD))
def hat_page_is_mapped(pp): return (hat_page_getshare(pp))
HAT_DONTALLOC = 0
HAT_ALLOC = 1
HRM_SHIFT = 4
HRM_BYTES = (1 << HRM_SHIFT)
HRM_PAGES = ((HRM_BYTES * NBBY) / 2)
HRM_PGPERBYTE = (NBBY/2)
HRM_PGBYTEMASK = (HRM_PGPERBYTE-1)
HRM_HASHSIZE = 0x200
HRM_HASHMASK = (HRM_HASHSIZE - 1)
HRM_BLIST_INCR = 0x200
HRM_SWSMONID = 1
SSL_NLEVELS = 4
SSL_BFACTOR = 4
SSL_LOG2BF = 2
SEGP_ASYNC_FLUSH = 0x1
SEGP_FORCE_WIRED = 0x2
SEGP_SUCCESS = 0
SEGP_FAIL = 1
def seg_pages(seg): return \
IE_NOMEM = -1
AS_PAGLCK = 0x80
AS_CLAIMGAP = 0x40
AS_UNMAPWAIT = 0x20
def AS_TYPE_64BIT(as_): return \
AS_LREP_LINKEDLIST = 0
AS_LREP_SKIPLIST = 1
AS_MUTATION_THRESH = 225
AH_DIR = 0x1
AH_LO = 0x0
AH_HI = 0x1
AH_CONTAIN = 0x2
# Included from sys/ddidmareq.h
DMA_UNIT_8 = 1
DMA_UNIT_16 = 2
DMA_UNIT_32 = 4
DMALIM_VER0 = ((0x86000000) + 0)
DDI_DMA_FORCE_PHYSICAL = 0x0100
DMA_ATTR_V0 = 0
DMA_ATTR_VERSION = DMA_ATTR_V0
DDI_DMA_CALLBACK_RUNOUT = 0
DDI_DMA_CALLBACK_DONE = 1
DDI_DMA_WRITE = 0x0001
DDI_DMA_READ = 0x0002
DDI_DMA_RDWR = (DDI_DMA_READ | DDI_DMA_WRITE)
DDI_DMA_REDZONE = 0x0004
DDI_DMA_PARTIAL = 0x0008
DDI_DMA_CONSISTENT = 0x0010
DDI_DMA_EXCLUSIVE = 0x0020
DDI_DMA_STREAMING = 0x0040
DDI_DMA_SBUS_64BIT = 0x2000
DDI_DMA_MAPPED = 0
DDI_DMA_MAPOK = 0
DDI_DMA_PARTIAL_MAP = 1
DDI_DMA_DONE = 2
DDI_DMA_NORESOURCES = -1
DDI_DMA_NOMAPPING = -2
DDI_DMA_TOOBIG = -3
DDI_DMA_TOOSMALL = -4
DDI_DMA_LOCKED = -5
DDI_DMA_BADLIMITS = -6
DDI_DMA_STALE = -7
DDI_DMA_BADATTR = -8
DDI_DMA_INUSE = -9
DDI_DMA_SYNC_FORDEV = 0x0
DDI_DMA_SYNC_FORCPU = 0x1
DDI_DMA_SYNC_FORKERNEL = 0x2
# Included from sys/ddimapreq.h
# Included from sys/mman.h
PROT_READ = 0x1
PROT_WRITE = 0x2
PROT_EXEC = 0x4
PROT_USER = 0x8
PROT_ZFOD = (PROT_READ | PROT_WRITE | PROT_EXEC | PROT_USER)
PROT_ALL = (PROT_READ | PROT_WRITE | PROT_EXEC | PROT_USER)
PROT_NONE = 0x0
MAP_SHARED = 1
MAP_PRIVATE = 2
MAP_TYPE = 0xf
MAP_FIXED = 0x10
MAP_NORESERVE = 0x40
MAP_ANON = 0x100
MAP_ANONYMOUS = MAP_ANON
MAP_RENAME = 0x20
PROC_TEXT = (PROT_EXEC | PROT_READ)
PROC_DATA = (PROT_READ | PROT_WRITE | PROT_EXEC)
SHARED = 0x10
PRIVATE = 0x20
VALID_ATTR = (PROT_READ|PROT_WRITE|PROT_EXEC|SHARED|PRIVATE)
PROT_EXCL = 0x20
_MAP_LOW32 = 0x80
_MAP_NEW = 0x80000000
from TYPES import *
MADV_NORMAL = 0
MADV_RANDOM = 1
MADV_SEQUENTIAL = 2
MADV_WILLNEED = 3
MADV_DONTNEED = 4
MADV_FREE = 5
MS_OLDSYNC = 0x0
MS_SYNC = 0x4
MS_ASYNC = 0x1
MS_INVALIDATE = 0x2
MC_SYNC = 1
MC_LOCK = 2
MC_UNLOCK = 3
MC_ADVISE = 4
MC_LOCKAS = 5
MC_UNLOCKAS = 6
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
DDI_MAP_VERSION = 0x0001
DDI_MF_USER_MAPPING = 0x1
DDI_MF_KERNEL_MAPPING = 0x2
DDI_MF_DEVICE_MAPPING = 0x4
DDI_ME_GENERIC = (-1)
DDI_ME_UNIMPLEMENTED = (-2)
DDI_ME_NORESOURCES = (-3)
DDI_ME_UNSUPPORTED = (-4)
DDI_ME_REGSPEC_RANGE = (-5)
DDI_ME_RNUMBER_RANGE = (-6)
DDI_ME_INVAL = (-7)
# Included from sys/ddipropdefs.h
def CELLS_1275_TO_BYTES(n): return ((n) * PROP_1275_CELL_SIZE)
def BYTES_TO_1275_CELLS(n): return ((n) / PROP_1275_CELL_SIZE)
PH_FROM_PROM = 0x01
DDI_PROP_SUCCESS = 0
DDI_PROP_NOT_FOUND = 1
DDI_PROP_UNDEFINED = 2
DDI_PROP_NO_MEMORY = 3
DDI_PROP_INVAL_ARG = 4
DDI_PROP_BUF_TOO_SMALL = 5
DDI_PROP_CANNOT_DECODE = 6
DDI_PROP_CANNOT_ENCODE = 7
DDI_PROP_END_OF_DATA = 8
DDI_PROP_FOUND_1275 = 255
PROP_1275_INT_SIZE = 4
DDI_PROP_DONTPASS = 0x0001
DDI_PROP_CANSLEEP = 0x0002
DDI_PROP_SYSTEM_DEF = 0x0004
DDI_PROP_NOTPROM = 0x0008
DDI_PROP_DONTSLEEP = 0x0010
DDI_PROP_STACK_CREATE = 0x0020
DDI_PROP_UNDEF_IT = 0x0040
DDI_PROP_HW_DEF = 0x0080
DDI_PROP_TYPE_INT = 0x0100
DDI_PROP_TYPE_STRING = 0x0200
DDI_PROP_TYPE_BYTE = 0x0400
DDI_PROP_TYPE_COMPOSITE = 0x0800
DDI_PROP_TYPE_ANY = (DDI_PROP_TYPE_INT | \
DDI_PROP_TYPE_STRING | \
DDI_PROP_TYPE_BYTE | \
DDI_PROP_TYPE_COMPOSITE)
DDI_PROP_TYPE_MASK = (DDI_PROP_TYPE_INT | \
DDI_PROP_TYPE_STRING | \
DDI_PROP_TYPE_BYTE | \
DDI_PROP_TYPE_COMPOSITE)
DDI_RELATIVE_ADDRESSING = "relative-addressing"
DDI_GENERIC_ADDRESSING = "generic-addressing"
# Included from sys/ddidevmap.h
KMEM_PAGEABLE = 0x100
KMEM_NON_PAGEABLE = 0x200
UMEM_LOCKED = 0x400
UMEM_TRASH = 0x800
DEVMAP_OPS_REV = 1
DEVMAP_DEFAULTS = 0x00
DEVMAP_MAPPING_INVALID = 0x01
DEVMAP_ALLOW_REMAP = 0x02
DEVMAP_USE_PAGESIZE = 0x04
DEVMAP_SETUP_FLAGS = \
(DEVMAP_MAPPING_INVALID | DEVMAP_ALLOW_REMAP | DEVMAP_USE_PAGESIZE)
DEVMAP_SETUP_DONE = 0x100
DEVMAP_LOCK_INITED = 0x200
DEVMAP_FAULTING = 0x400
DEVMAP_LOCKED = 0x800
DEVMAP_FLAG_LARGE = 0x1000
DDI_UMEM_SLEEP = 0x0
DDI_UMEM_NOSLEEP = 0x01
DDI_UMEM_PAGEABLE = 0x02
DDI_UMEM_TRASH = 0x04
DDI_UMEMLOCK_READ = 0x01
DDI_UMEMLOCK_WRITE = 0x02
# Included from sys/nexusdefs.h
# Included from sys/nexusintr.h
BUSO_REV = 4
BUSO_REV_3 = 3
BUSO_REV_4 = 4
DEVO_REV = 3
CB_REV = 1
DDI_IDENTIFIED = (0)
DDI_NOT_IDENTIFIED = (-1)
DDI_PROBE_FAILURE = ENXIO
DDI_PROBE_DONTCARE = 0
DDI_PROBE_PARTIAL = 1
DDI_PROBE_SUCCESS = 2
MAPDEV_REV = 1
from TYPES import *
D_NEW = 0x00
_D_OLD = 0x01
D_TAPE = 0x08
D_MTSAFE = 0x0020
_D_QNEXTLESS = 0x0040
_D_MTOCSHARED = 0x0080
D_MTOCEXCL = 0x0800
D_MTPUTSHARED = 0x1000
D_MTPERQ = 0x2000
D_MTQPAIR = 0x4000
D_MTPERMOD = 0x6000
D_MTOUTPERIM = 0x8000
_D_MTCBSHARED = 0x10000
D_MTINNER_MOD = (D_MTPUTSHARED|_D_MTOCSHARED|_D_MTCBSHARED)
D_MTOUTER_MOD = (D_MTOCEXCL)
D_MP = D_MTSAFE
D_64BIT = 0x200
D_SYNCSTR = 0x400
D_DEVMAP = 0x100
D_HOTPLUG = 0x4
SNDZERO = 0x001
SNDPIPE = 0x002
RNORM = 0x000
RMSGD = 0x001
RMSGN = 0x002
RMODEMASK = 0x003
RPROTDAT = 0x004
RPROTDIS = 0x008
RPROTNORM = 0x010
RPROTMASK = 0x01c
RFLUSHMASK = 0x020
RFLUSHPCPROT = 0x020
RERRNORM = 0x001
RERRNONPERSIST = 0x002
RERRMASK = (RERRNORM|RERRNONPERSIST)
WERRNORM = 0x004
WERRNONPERSIST = 0x008
WERRMASK = (WERRNORM|WERRNONPERSIST)
FLUSHR = 0x01
FLUSHW = 0x02
FLUSHRW = 0x03
FLUSHBAND = 0x04
MAPINOK = 0x01
NOMAPIN = 0x02
REMAPOK = 0x04
NOREMAP = 0x08
S_INPUT = 0x0001
S_HIPRI = 0x0002
S_OUTPUT = 0x0004
S_MSG = 0x0008
S_ERROR = 0x0010
S_HANGUP = 0x0020
S_RDNORM = 0x0040
S_WRNORM = S_OUTPUT
S_RDBAND = 0x0080
S_WRBAND = 0x0100
S_BANDURG = 0x0200
RS_HIPRI = 0x01
STRUIO_POSTPONE = 0x08
STRUIO_MAPIN = 0x10
MSG_HIPRI = 0x01
MSG_ANY = 0x02
MSG_BAND = 0x04
MSG_XPG4 = 0x08
MSG_IPEEK = 0x10
MSG_DISCARDTAIL = 0x20
MSG_HOLDSIG = 0x40
MSG_IGNERROR = 0x80
MSG_DELAYERROR = 0x100
MSG_IGNFLOW = 0x200
MSG_NOMARK = 0x400
MORECTL = 1
MOREDATA = 2
MUXID_ALL = (-1)
ANYMARK = 0x01
LASTMARK = 0x02
_INFTIM = -1
INFTIM = _INFTIM
| apache-2.0 |
pkariz/nnsearch | nnsearch/approx/Annoy.py | 1 | 6165 | from ..baseindex import Index
import numpy as np
import math
from annoy import AnnoyIndex
class Annoy(Index):
    """
    Approximate nearest-neighbour index backed by ``annoy.AnnoyIndex``.

    Vectors are identified by their insertion order (0-based). Once the
    trees are built with :meth:`build` the index is read-only.
    """
    def __init__(self):
        self.algorithm = "AnnoyIndex"
        self.idx_to_vector = {}
        # numpy dtypes accepted for ndarray datasets passed to build()
        self.valid_types = [np.uint8, np.uint16, np.uint32, np.uint64,
                            np.int8, np.int16, np.int32, np.int64,
                            np.float16, np.float32, np.float64]
    def build(self, data=None, dimensions=None, distance="angular", trees=-1):
        """
        Builds AnnoyIndex on data or creates an empty one. If both dimensions and data are given then their dimensions
        must match. At least one of those two attributes must be given to define number of dimensions which is required
        to create AnnoyIndex. After the trees are built you cannot add additional vectors.
        :param data: Dataset instance representing vectors which are inserted before trees are built (optional, you can
        insert data one by one with insert method before building trees)
        :param dimensions: number of dimensions
        :param distance: can be "angular" (default) or "euclidean"
        :param trees: number of binary trees. Default (-1) means that this parameter is determined automatically in a way,
        that memory usage <= 2 * memory(vectors)
        """
        # dimensionality must come from at least one of data/dimensions,
        # and they must agree when both are supplied
        if data is None and dimensions is None:
            raise ValueError("Number of dimensions is missing!")
        if data is not None and dimensions is not None and \
                dimensions != len(data.data[0]):
            raise ValueError("Dimensions from constructor parameter "
                             "'dimensions' and derived dimensions from "
                             "'data' are different!")
        if data is not None:
            dimensions = len(data.data[0])
        self.index = AnnoyIndex(dimensions, distance)
        self.d = dimensions
        self._size = 0
        # metric mirrors get_dist's 'dist' argument: 0 = angular, 1 = euclidean
        self.metric = 0 if distance == "angular" else 1
        # insert the dataset (if any) before freezing the index with build()
        if data is not None:
            if type(data.data) is np.ndarray and \
                    data.data.dtype not in self.valid_types:
                raise ValueError("Invalid dtype of numpy array, check "
                                 "valid_types parameter of index!")
            for v in data.data:
                self._insert(v)
        self.index.build(trees)
    def _insert(self, vector):
        """
        Inserts vector in AnnoyIndex.
        :param vector: 1d numpy array, list or tuple representing vector
        """
        # annoy expects a plain python list
        if type(vector) is np.ndarray:
            vector = vector.tolist()
        else:
            vector = list(vector)
        self.index.add_item(self._size, vector)
        self._size += 1
    def get_dist(self, v1, v2, dist=None):
        """
        Calculates distance (euclidean or angular) between two vectors. By default distance is set to metric of index.
        :param v1: first vector (list or numpy array)
        :param v2: second vector
        :param dist: distance can be 0 (angular) or 1 (euclidean)
        :return: distance between given vectors
        """
        if dist is None:
            dist = self.metric
        if dist == 0:
            # angular: 2 * (1 - cos(v1, v2)), i.e. the squared chord length.
            # NOTE(review): annoy itself reports sqrt(2 - 2*cos); confirm
            # callers expect this squared form.
            v1_sum, v2_sum, mix_sum = 0.0, 0.0, 0.0
            for i in range(self.d):
                v1_sum += v1[i] * v1[i]
                v2_sum += v2[i] * v2[i]
                mix_sum += v1[i] * v2[i]
            a = v1_sum * v2_sum
            if a > 0.0:
                return 2.0 - (2.0 * mix_sum / (math.sqrt(a)))
            else:
                # at least one zero vector: maximal angular distance
                return 2.0
        else:
            # euclidean
            if self.d != len(v1) or self.d != len(v2):
                raise ValueError("Length of vectors is not the same as d!")
            d = 0.0
            for i in range(self.d):
                d += (v1[i] - v2[i]) * (v1[i] - v2[i])
            return math.sqrt(d)
    def query(self, queries, k=1):
        """
        Returns k nearest neighbors.
        :param queries: 1d or 2d numpy array or list
        :param k: number of nearest neighbors to return
        :return: (a, b) where a is an array with k nearest neighbors per query
        and b is an array of the same shape containing their distances
        """
        # a single query is a 1d ndarray or a flat (non-nested) list
        single_query = (
            (isinstance(queries, np.ndarray) and len(queries.shape) == 1) or
            (isinstance(queries, list) and not isinstance(queries[0], list))
        )
        if single_query:
            # normalize to a plain python list once; the original code called
            # queries.tolist() unconditionally when computing distances, which
            # raised AttributeError for plain-list queries
            q = queries.tolist() if isinstance(queries, np.ndarray) \
                else list(queries)
            neighbors = self.index.get_nns_by_vector(q, k)
            dists = [self.get_dist(q, self.index.get_item_vector(x))
                     for x in neighbors]
        else:
            # multiple queries: answer each one independently
            neighbors = []
            dists = []
            for query in queries:
                vec = query.tolist() if isinstance(query, np.ndarray) \
                    else list(query)
                cur_neighbors = self.index.get_nns_by_vector(vec, k)
                neighbors.append(cur_neighbors)
                # distances from each returned neighbor back to the query
                dists.append([self.get_dist(vec, self.index.get_item_vector(x))
                              for x in cur_neighbors])
        return np.array(neighbors), np.array(dists)
    def save(self, filename):
        """Saves index to file."""
        self.index.save(filename)
    def load(self, filename, dimensions=None, distance=None):
        """
        Loads index from file.
        :param filename: path to file
        :param dimensions: number of dimensions of index (required)
        :param distance: distance used, "angular" or "euclidean" (required)
        """
        # annoy files do not store their own metadata, so the caller must
        # re-supply dimensionality and metric
        if dimensions is None or distance is None:
            raise ValueError("Dimensions and distance are needed!")
        self.index = AnnoyIndex(dimensions, distance)
        self.d = dimensions
        self.metric = 1 if distance == "euclidean" else 0
        self.index.load(filename)
| gpl-3.0 |
Ziqi-Li/bknqgis | pandas/pandas/tests/plotting/test_series.py | 2 | 32812 | # coding: utf-8
""" Test cases for Series.plot """
import itertools
import pytest
from datetime import datetime
import pandas as pd
from pandas import Series, DataFrame, date_range
from pandas.compat import range, lrange
import pandas.util.testing as tm
import numpy as np
from numpy.random import randn
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
_skip_if_no_scipy_gaussian_kde,
_ok_for_gaussian_kde)
tm._skip_if_no_mpl()
class TestSeriesPlots(TestPlotBase):
    def setup_method(self, method):
        # let the shared plotting base class do its matplotlib setup first
        TestPlotBase.setup_method(self, method)
        import matplotlib as mpl
        # reset rcParams so styling from earlier tests cannot leak in
        mpl.rcdefaults()
        # fixtures: datetime-, string- and period-indexed series; their
        # names are asserted against legend/label output in the tests below
        self.ts = tm.makeTimeSeries()
        self.ts.name = 'ts'
        self.series = tm.makeStringSeries()
        self.series.name = 'series'
        self.iseries = tm.makePeriodSeries()
        self.iseries.name = 'iseries'
    @pytest.mark.slow
    def test_plot(self):
        """Smoke-test Series.plot across kwargs, plot kinds and subplots."""
        _check_plot_works(self.ts.plot, label='foo')
        _check_plot_works(self.ts.plot, use_index=False)
        # rot controls x-tick label rotation
        axes = _check_plot_works(self.ts.plot, rot=0)
        self._check_ticks_props(axes, xrot=0)
        # log-scale variants must set the corresponding axis scale
        ax = _check_plot_works(self.ts.plot, style='.', logy=True)
        self._check_ax_scales(ax, yaxis='log')
        ax = _check_plot_works(self.ts.plot, style='.', logx=True)
        self._check_ax_scales(ax, xaxis='log')
        ax = _check_plot_works(self.ts.plot, style='.', loglog=True)
        self._check_ax_scales(ax, xaxis='log', yaxis='log')
        _check_plot_works(self.ts[:10].plot.bar)
        _check_plot_works(self.ts.plot.area, stacked=False)
        _check_plot_works(self.iseries.plot)
        # every supported kind should render; kde is skipped when scipy's
        # gaussian_kde is unavailable
        for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
            if not _ok_for_gaussian_kde(kind):
                continue
            _check_plot_works(self.series[:5].plot, kind=kind)
        _check_plot_works(self.series[:10].plot.barh)
        ax = _check_plot_works(Series(randn(10)).plot.bar, color='black')
        self._check_colors([ax.patches[0]], facecolors=['black'])
        # GH 6951: subplots on a Series yields a single axes, and -1 in
        # layout is inferred from the number of plots
        ax = _check_plot_works(self.ts.plot, subplots=True)
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
        ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
        ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_plot_figsize_and_title(self):
# figsize and title
_, ax = self.plt.subplots()
ax = self.series.plot(title='Test', figsize=(16, 8), ax=ax)
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
def test_dont_modify_rcParams(self):
# GH 8242
if self.mpl_ge_1_5_0:
key = 'axes.prop_cycle'
else:
key = 'axes.color_cycle'
colors = self.plt.rcParams[key]
_, ax = self.plt.subplots()
Series([1, 2, 3]).plot(ax=ax)
assert colors == self.plt.rcParams[key]
    def test_ts_line_lim(self):
        """x-limits of a time-series line plot should exactly span the data."""
        fig, ax = self.plt.subplots()
        ax = self.ts.plot(ax=ax)
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        # orig=False returns the converted (plotted) x coordinates
        assert xmin == lines[0].get_data(orig=False)[0][0]
        assert xmax == lines[0].get_data(orig=False)[0][-1]
        tm.close()
        # same invariant must hold when plotting on a secondary y-axis
        ax = self.ts.plot(secondary_y=True, ax=ax)
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        assert xmin == lines[0].get_data(orig=False)[0][0]
        assert xmax == lines[0].get_data(orig=False)[0][-1]
    def test_ts_area_lim(self):
        """Area plots: x-limits span the plotted data in every x-axis mode."""
        _, ax = self.plt.subplots()
        ax = self.ts.plot.area(stacked=False, ax=ax)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        assert xmin == line[0]
        assert xmax == line[-1]
        tm.close()
        # GH 7471: x_compat=True (matplotlib-native date handling) must keep
        # the same limit behaviour
        _, ax = self.plt.subplots()
        ax = self.ts.plot.area(stacked=False, x_compat=True, ax=ax)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        assert xmin == line[0]
        assert xmax == line[-1]
        tm.close()
        # timezone-aware index: localize then convert to exercise tz paths
        tz_ts = self.ts.copy()
        tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET')
        _, ax = self.plt.subplots()
        ax = tz_ts.plot.area(stacked=False, x_compat=True, ax=ax)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        assert xmin == line[0]
        assert xmax == line[-1]
        tm.close()
        # tz-aware data on a secondary y-axis
        _, ax = self.plt.subplots()
        ax = tz_ts.plot.area(stacked=False, secondary_y=True, ax=ax)
        xmin, xmax = ax.get_xlim()
        line = ax.get_lines()[0].get_data(orig=False)[0]
        assert xmin == line[0]
        assert xmax == line[-1]
    def test_label(self):
        """Legend label precedence: explicit label > series name > 'None'."""
        s = Series([1, 2])
        _, ax = self.plt.subplots()
        ax = s.plot(label='LABEL', legend=True, ax=ax)
        self._check_legend_labels(ax, labels=['LABEL'])
        self.plt.close()
        # unnamed series with no label falls back to the string 'None'
        _, ax = self.plt.subplots()
        ax = s.plot(legend=True, ax=ax)
        self._check_legend_labels(ax, labels=['None'])
        self.plt.close()
        # get name from index
        s.name = 'NAME'
        _, ax = self.plt.subplots()
        ax = s.plot(legend=True, ax=ax)
        self._check_legend_labels(ax, labels=['NAME'])
        self.plt.close()
        # explicit label overrides the series name
        _, ax = self.plt.subplots()
        ax = s.plot(legend=True, label='LABEL', ax=ax)
        self._check_legend_labels(ax, labels=['LABEL'])
        self.plt.close()
        # add label info, but don't draw the legend yet
        _, ax = self.plt.subplots()
        ax = s.plot(legend=False, label='LABEL', ax=ax)
        assert ax.get_legend() is None  # Hasn't been drawn
        ax.legend()  # draw it
        self._check_legend_labels(ax, labels=['LABEL'])
    def test_line_area_nan_series(self):
        """NaNs are masked in line plots and filled with 0 when stacked/area."""
        values = [1, 2, np.nan, 3]
        # check both a default RangeIndex and a DatetimeIndex
        s = Series(values)
        ts = Series(values, index=tm.makeDateIndex(k=4))
        for d in [s, ts]:
            ax = _check_plot_works(d.plot)
            # line plots keep NaN positions as masked entries
            masked = ax.lines[0].get_ydata()
            # remove nan for comparison purpose
            exp = np.array([1, 2, 3], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp)
            tm.assert_numpy_array_equal(
                masked.mask, np.array([False, False, True, False]))
            # stacked/area plots replace NaN with 0 so stacking stays valid
            expected = np.array([1, 2, 0, 3], dtype=np.float64)
            ax = _check_plot_works(d.plot, stacked=True)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
            ax = _check_plot_works(d.plot.area)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
            ax = _check_plot_works(d.plot.area, stacked=False)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
def test_line_use_index_false(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
s.index.name = 'The Index'
_, ax = self.plt.subplots()
ax = s.plot(use_index=False, ax=ax)
label = ax.get_xlabel()
assert label == ''
_, ax = self.plt.subplots()
ax2 = s.plot.bar(use_index=False, ax=ax)
label2 = ax2.get_xlabel()
assert label2 == ''
    @pytest.mark.slow
    def test_bar_log(self):
        """log=True bar/barh plots produce the expected tick locations/limits."""
        expected = np.array([1., 10., 100., 1000.])
        # newer matplotlib pads the log axis with an extra decade on each side
        if not self.mpl_le_1_2_1:
            expected = np.hstack((.1, expected, 1e4))
        _, ax = self.plt.subplots()
        ax = Series([200, 500]).plot.bar(log=True, ax=ax)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
        tm.close()
        # horizontal variant: the log scale is on the x-axis instead
        _, ax = self.plt.subplots()
        ax = Series([200, 500]).plot.barh(log=True, ax=ax)
        tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
        tm.close()
        # GH 9905: values below 1.0 on a log scale
        expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00])
        if not self.mpl_le_1_2_1:
            expected = np.hstack((1.0e-04, expected, 1.0e+01))
        if self.mpl_ge_2_0_0:
            expected = np.hstack((1.0e-05, expected))
        _, ax = self.plt.subplots()
        ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar', ax=ax)
        # matplotlib >= 2.0 uses margin-based limits (10**(log10(v) +- margin))
        # instead of snapping to the data extremes
        ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001
        ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001
        res = ax.get_ylim()
        tm.assert_almost_equal(res[0], ymin)
        tm.assert_almost_equal(res[1], ymax)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
        tm.close()
        # same expectations for the horizontal bar plot, on the x-axis
        _, ax = self.plt.subplots()
        ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh', ax=ax)
        res = ax.get_xlim()
        tm.assert_almost_equal(res[0], ymin)
        tm.assert_almost_equal(res[1], ymax)
        tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_bar_ignore_index(self):
df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
_, ax = self.plt.subplots()
ax = df.plot.bar(use_index=False, ax=ax)
self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
def test_rotation(self):
df = DataFrame(randn(5, 5))
# Default rot 0
_, ax = self.plt.subplots()
axes = df.plot(ax=ax)
self._check_ticks_props(axes, xrot=0)
_, ax = self.plt.subplots()
axes = df.plot(rot=30, ax=ax)
self._check_ticks_props(axes, xrot=30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
_, ax = self.plt.subplots()
ax = ser.plot(ax=ax)
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
assert xp == ax.get_xlim()[0]
    @pytest.mark.slow
    def test_pie_series(self):
        """Series.plot.pie: labels, colors, autopct, and error/NaN handling."""
        # if sum of values is less than 1.0, pie handle them as rate and draw
        # semicircle.
        series = Series(np.random.randint(1, 5),
                        index=['a', 'b', 'c', 'd', 'e'], name='YLABEL')
        ax = _check_plot_works(series.plot.pie)
        # wedge labels default to the index; the series name becomes ylabel
        self._check_text_labels(ax.texts, series.index)
        assert ax.get_ylabel() == 'YLABEL'
        # labels=None suppresses the wedge labels entirely
        ax = _check_plot_works(series.plot.pie, labels=None)
        self._check_text_labels(ax.texts, [''] * 5)
        # fewer colors than wedges: the color list cycles
        color_args = ['r', 'g', 'b']
        ax = _check_plot_works(series.plot.pie, colors=color_args)
        color_expected = ['r', 'g', 'b', 'r', 'g']
        self._check_colors(ax.patches, facecolors=color_expected)
        # explicit labels and a full color list are applied verbatim
        labels = ['A', 'B', 'C', 'D', 'E']
        color_args = ['r', 'g', 'b', 'c', 'm']
        ax = _check_plot_works(series.plot.pie, labels=labels,
                               colors=color_args)
        self._check_text_labels(ax.texts, labels)
        self._check_colors(ax.patches, facecolors=color_args)
        # autopct adds percentage texts interleaved with the labels, and
        # fontsize is applied to all of them
        ax = _check_plot_works(series.plot.pie, colors=color_args,
                               autopct='%.2f', fontsize=7)
        pcts = ['{0:.2f}'.format(s * 100)
                for s in series.values / float(series.sum())]
        iters = [iter(series.index), iter(pcts)]
        expected_texts = list(next(it) for it in itertools.cycle(iters))
        self._check_text_labels(ax.texts, expected_texts)
        for t in ax.texts:
            assert t.get_fontsize() == 7
        # a negative value is rejected
        with pytest.raises(ValueError):
            series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e'])
            series.plot.pie()
        # NaN wedges render with an empty label
        series = Series([1, 2, np.nan, 4], index=['a', 'b', 'c', 'd'],
                        name='YLABEL')
        ax = _check_plot_works(series.plot.pie)
        self._check_text_labels(ax.texts, ['a', 'b', '', 'd'])
def test_pie_nan(self):
s = Series([1, np.nan, 1, 1])
_, ax = self.plt.subplots()
ax = s.plot.pie(legend=True, ax=ax)
expected = ['0', '', '2', '3']
result = [x.get_text() for x in ax.texts]
assert result == expected
@pytest.mark.slow
def test_hist_df_kwargs(self):
df = DataFrame(np.random.randn(10, 2))
_, ax = self.plt.subplots()
ax = df.plot.hist(bins=5, ax=ax)
assert len(ax.patches) == 10
    @pytest.mark.slow
    def test_hist_df_with_nonnumerics(self):
        """Non-numeric columns are dropped from DataFrame.plot.hist (GH 9853)."""
        # fix the RNG seed so the random data is reproducible
        with tm.RNGContext(1):
            df = DataFrame(
                np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
        # the string column must be excluded from the histogram
        df['E'] = ['x', 'y'] * 5
        _, ax = self.plt.subplots()
        ax = df.plot.hist(bins=5, ax=ax)
        # 5 bins x 4 numeric columns
        assert len(ax.patches) == 20
        _, ax = self.plt.subplots()
        ax = df.plot.hist(ax=ax)  # bins=10
        # default 10 bins x 4 numeric columns
        assert len(ax.patches) == 40
    @pytest.mark.slow
    def test_hist_legacy(self):
        """Smoke-test the legacy Series.hist API (grid, by, ax/figure args)."""
        _check_plot_works(self.ts.hist)
        _check_plot_works(self.ts.hist, grid=False)
        _check_plot_works(self.ts.hist, figsize=(8, 10))
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(self.ts.hist,
                              by=self.ts.index.month)
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(self.ts.hist,
                              by=self.ts.index.month, bins=5)
        # explicit ax / figure combinations are all accepted
        fig, ax = self.plt.subplots(1, 1)
        _check_plot_works(self.ts.hist, ax=ax)
        _check_plot_works(self.ts.hist, ax=ax, figure=fig)
        _check_plot_works(self.ts.hist, figure=fig)
        tm.close()
        fig, (ax1, ax2) = self.plt.subplots(1, 2)
        _check_plot_works(self.ts.hist, figure=fig, ax=ax1)
        _check_plot_works(self.ts.hist, figure=fig, ax=ax2)
        # combining 'by' with an explicit figure is invalid
        with pytest.raises(ValueError):
            self.ts.hist(by=self.ts.index, figure=fig)
@pytest.mark.slow
def test_hist_bins_legacy(self):
    """Legacy DataFrame.hist forwards the ``bins`` keyword."""
    df = DataFrame(np.random.randn(10, 2))
    # df.hist returns a 2-D ndarray of axes; take the first one
    ax = df.hist(bins=2)[0][0]
    assert len(ax.patches) == 2
@pytest.mark.slow
def test_hist_layout(self):
    """Series.hist rejects ``layout`` (tuple or list) without ``by``."""
    df = self.hist_df
    with pytest.raises(ValueError):
        df.height.hist(layout=(1, 1))
    with pytest.raises(ValueError):
        df.height.hist(layout=[1, 1])
@pytest.mark.slow
def test_hist_layout_with_by(self):
    """Layout tuples (incl. -1 wildcards) yield the expected grid with by=."""
    df = self.hist_df
    # _check_plot_works adds an ax so catch warning. see GH #13188
    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.gender, layout=(2, 1))
    self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
    with tm.assert_produces_warning(UserWarning):
        # -1 means "infer this dimension from the number of groups"
        axes = _check_plot_works(df.height.hist,
                                 by=df.gender, layout=(3, -1))
    self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.category, layout=(4, 1))
    self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.category, layout=(2, -1))
    self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.category, layout=(3, -1))
    self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.category, layout=(-1, 4))
    self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.height.hist,
                                 by=df.classroom, layout=(2, 2))
    self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
    axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
    self._check_axes_shape(axes, axes_num=4, layout=(4, 2),
                           figsize=(12, 7))
@pytest.mark.slow
def test_hist_no_overlap(self):
    """Two hists on separate subplots stay on their own axes."""
    from matplotlib.pyplot import subplot, gcf
    x = Series(randn(2))
    y = Series(randn(2))
    subplot(121)
    x.hist()
    subplot(122)
    y.hist()
    fig = gcf()
    # mpl >= 1.5 prefers the .axes property over get_axes()
    axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes()
    assert len(axes) == 2
@pytest.mark.slow
def test_hist_secondary_legend(self):
    """Legend labels and axis visibility for hist plots on secondary_y."""
    # GH 9610
    df = DataFrame(np.random.randn(30, 4), columns=list('abcd'))
    # primary -> secondary
    _, ax = self.plt.subplots()
    ax = df['a'].plot.hist(legend=True, ax=ax)
    df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
    # both legends are drawn on left ax
    # left and right axis must be visible
    self._check_legend_labels(ax, labels=['a', 'b (right)'])
    assert ax.get_yaxis().get_visible()
    assert ax.right_ax.get_yaxis().get_visible()
    tm.close()
    # secondary -> secondary
    _, ax = self.plt.subplots()
    ax = df['a'].plot.hist(legend=True, secondary_y=True, ax=ax)
    df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
    # both legends are drawn on left ax
    # left axis must be invisible, right axis must be visible
    self._check_legend_labels(ax.left_ax,
                              labels=['a (right)', 'b (right)'])
    assert not ax.left_ax.get_yaxis().get_visible()
    assert ax.get_yaxis().get_visible()
    tm.close()
    # secondary -> primary
    _, ax = self.plt.subplots()
    ax = df['a'].plot.hist(legend=True, secondary_y=True, ax=ax)
    # right axes is returned
    df['b'].plot.hist(ax=ax, legend=True)
    # both legends are drawn on left ax
    # left and right axis must be visible
    self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b'])
    assert ax.left_ax.get_yaxis().get_visible()
    assert ax.get_yaxis().get_visible()
    tm.close()
@pytest.mark.slow
def test_df_series_secondary_legend(self):
    """Legend labels when a Series is overlaid on a DataFrame plot via secondary_y."""
    # GH 9779
    df = DataFrame(np.random.randn(30, 3), columns=list('abc'))
    s = Series(np.random.randn(30), name='x')
    # primary -> secondary (without passing ax)
    _, ax = self.plt.subplots()
    ax = df.plot(ax=ax)
    s.plot(legend=True, secondary_y=True, ax=ax)
    # both legends are drawn on left ax
    # left and right axis must be visible
    self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
    assert ax.get_yaxis().get_visible()
    assert ax.right_ax.get_yaxis().get_visible()
    tm.close()
    # primary -> secondary (with passing ax)
    _, ax = self.plt.subplots()
    ax = df.plot(ax=ax)
    s.plot(ax=ax, legend=True, secondary_y=True)
    # both legends are drawn on left ax
    # left and right axis must be visible
    self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
    assert ax.get_yaxis().get_visible()
    assert ax.right_ax.get_yaxis().get_visible()
    tm.close()
    # secondary -> secondary (without passing ax)
    _, ax = self.plt.subplots()
    ax = df.plot(secondary_y=True, ax=ax)
    s.plot(legend=True, secondary_y=True, ax=ax)
    # both legends are drawn on left ax
    # left axis must be invisible and right axis must be visible
    expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
    self._check_legend_labels(ax.left_ax, labels=expected)
    assert not ax.left_ax.get_yaxis().get_visible()
    assert ax.get_yaxis().get_visible()
    tm.close()
    # secondary -> secondary (with passing ax)
    _, ax = self.plt.subplots()
    ax = df.plot(secondary_y=True, ax=ax)
    s.plot(ax=ax, legend=True, secondary_y=True)
    # both legends are drawn on left ax
    # left axis must be invisible and right axis must be visible
    expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
    self._check_legend_labels(ax.left_ax, expected)
    assert not ax.left_ax.get_yaxis().get_visible()
    assert ax.get_yaxis().get_visible()
    tm.close()
    # secondary -> secondary (with mark_right=False: no "(right)" suffix)
    _, ax = self.plt.subplots()
    ax = df.plot(secondary_y=True, mark_right=False, ax=ax)
    s.plot(ax=ax, legend=True, secondary_y=True)
    # both legends are drawn on left ax
    # left axis must be invisible and right axis must be visible
    expected = ['a', 'b', 'c', 'x (right)']
    self._check_legend_labels(ax.left_ax, expected)
    assert not ax.left_ax.get_yaxis().get_visible()
    assert ax.get_yaxis().get_visible()
    tm.close()
@pytest.mark.slow
def test_plot_fails_with_dupe_color_and_style(self):
    """Passing a color both via ``style`` and ``color`` raises ValueError."""
    x = Series(randn(2))
    with pytest.raises(ValueError):
        _, ax = self.plt.subplots()
        x.plot(style='k--', color='k', ax=ax)
@pytest.mark.slow
def test_hist_kde(self):
    """hist and kde plots honour ``logy`` and draw blank tick labels."""
    _, ax = self.plt.subplots()
    ax = self.ts.plot.hist(logy=True, ax=ax)
    self._check_ax_scales(ax, yaxis='log')
    xlabels = ax.get_xticklabels()
    # ticks are values, thus ticklabels are blank
    self._check_text_labels(xlabels, [''] * len(xlabels))
    ylabels = ax.get_yticklabels()
    self._check_text_labels(ylabels, [''] * len(ylabels))
    # kde requires scipy's gaussian_kde
    tm._skip_if_no_scipy()
    _skip_if_no_scipy_gaussian_kde()
    _check_plot_works(self.ts.plot.kde)
    _check_plot_works(self.ts.plot.density)
    _, ax = self.plt.subplots()
    ax = self.ts.plot.kde(logy=True, ax=ax)
    self._check_ax_scales(ax, yaxis='log')
    xlabels = ax.get_xticklabels()
    self._check_text_labels(xlabels, [''] * len(xlabels))
    ylabels = ax.get_yticklabels()
    self._check_text_labels(ylabels, [''] * len(ylabels))
@pytest.mark.slow
def test_kde_kwargs(self):
    """``bw_method`` and ``ind`` keywords are forwarded to the kde plot."""
    tm._skip_if_no_scipy()
    _skip_if_no_scipy_gaussian_kde()
    from numpy import linspace
    _check_plot_works(self.ts.plot.kde, bw_method=.5,
                      ind=linspace(-100, 100, 20))
    _check_plot_works(self.ts.plot.density, bw_method=.5,
                      ind=linspace(-100, 100, 20))
    _, ax = self.plt.subplots()
    ax = self.ts.plot.kde(logy=True, bw_method=.5,
                          ind=linspace(-100, 100, 20), ax=ax)
    self._check_ax_scales(ax, yaxis='log')
    # kde plots label the y axis "Density"
    self._check_text_labels(ax.yaxis.get_label(), 'Density')
@pytest.mark.slow
def test_kde_missing_vals(self):
    """NaNs are dropped before the kde is evaluated."""
    tm._skip_if_no_scipy()
    _skip_if_no_scipy_gaussian_kde()
    s = Series(np.random.uniform(size=50))
    s[0] = np.nan
    axes = _check_plot_works(s.plot.kde)
    # gh-14821: check if the values have any missing values
    assert any(~np.isnan(axes.lines[0].get_xdata()))
@pytest.mark.slow
def test_hist_kwargs(self):
    """bins/orientation/align keywords reach matplotlib's hist."""
    _, ax = self.plt.subplots()
    ax = self.ts.plot.hist(bins=5, ax=ax)
    assert len(ax.patches) == 5
    self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
    tm.close()
    # horizontal orientation requires matplotlib >= 1.3.1
    if self.mpl_ge_1_3_1:
        _, ax = self.plt.subplots()
        ax = self.ts.plot.hist(orientation='horizontal', ax=ax)
        # with horizontal bars the frequency label moves to the x axis
        self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
        tm.close()
    _, ax = self.plt.subplots()
    ax = self.ts.plot.hist(align='left', stacked=True, ax=ax)
    tm.close()
@pytest.mark.slow
def test_hist_kde_color(self):
    """``color`` keyword colors all hist patches / the kde line."""
    _, ax = self.plt.subplots()
    ax = self.ts.plot.hist(logy=True, bins=10, color='b', ax=ax)
    self._check_ax_scales(ax, yaxis='log')
    assert len(ax.patches) == 10
    self._check_colors(ax.patches, facecolors=['b'] * 10)
    tm._skip_if_no_scipy()
    _skip_if_no_scipy_gaussian_kde()
    _, ax = self.plt.subplots()
    ax = self.ts.plot.kde(logy=True, color='r', ax=ax)
    self._check_ax_scales(ax, yaxis='log')
    lines = ax.get_lines()
    assert len(lines) == 1
    self._check_colors(lines, ['r'])
@pytest.mark.slow
def test_boxplot_series(self):
    """Series.plot.box honours logy and labels the x tick with the name."""
    _, ax = self.plt.subplots()
    ax = self.ts.plot.box(logy=True, ax=ax)
    self._check_ax_scales(ax, yaxis='log')
    xlabels = ax.get_xticklabels()
    # single box labelled with the series name
    self._check_text_labels(xlabels, [self.ts.name])
    ylabels = ax.get_yticklabels()
    self._check_text_labels(ylabels, [''] * len(ylabels))
@pytest.mark.slow
def test_kind_both_ways(self):
    """Every kind works both as s.plot(kind=...) and s.plot.<kind>()."""
    s = Series(range(3))
    kinds = (plotting._core._common_kinds +
             plotting._core._series_kinds)
    _, ax = self.plt.subplots()
    for kind in kinds:
        # skip kde-family kinds when scipy is unavailable
        if not _ok_for_gaussian_kde(kind):
            continue
        s.plot(kind=kind, ax=ax)
        getattr(s.plot, kind)()
@pytest.mark.slow
def test_invalid_plot_data(self):
    """Plotting a purely non-numeric Series raises TypeError for every kind."""
    s = Series(list('abcd'))
    _, ax = self.plt.subplots()
    for kind in plotting._core._common_kinds:
        if not _ok_for_gaussian_kde(kind):
            continue
        with pytest.raises(TypeError):
            s.plot(kind=kind, ax=ax)
@pytest.mark.slow
def test_valid_object_plot(self):
    """Numeric data held in an object-dtype Series still plots."""
    s = Series(lrange(10), dtype=object)
    for kind in plotting._core._common_kinds:
        if not _ok_for_gaussian_kde(kind):
            continue
        _check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
    """A Series mixing strings and numbers raises TypeError for every kind."""
    s = Series(['a', 'b', 1.0, 2])
    _, ax = self.plt.subplots()
    for kind in plotting._core._common_kinds:
        if not _ok_for_gaussian_kde(kind):
            continue
        with pytest.raises(TypeError):
            s.plot(kind=kind, ax=ax)
def test_invalid_kind(self):
    """An unknown ``kind`` string raises ValueError."""
    s = Series([1, 2])
    with pytest.raises(ValueError):
        s.plot(kind='aasdf')
@pytest.mark.slow
def test_dup_datetime_index_plot(self):
    """A Series with duplicate datetime index entries still plots."""
    dr1 = date_range('1/1/2009', periods=4)
    dr2 = date_range('1/2/2009', periods=4)
    # overlapping ranges -> duplicated timestamps in the index
    index = dr1.append(dr2)
    values = randn(index.size)
    s = Series(values, index=index)
    _check_plot_works(s.plot)
@pytest.mark.slow
def test_errorbar_plot(self):
    """xerr/yerr accept Series, ndarray, list, DataFrame and scalars."""
    s = Series(np.arange(10), name='x')
    s_err = np.random.randn(10)
    d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y'])
    # test line and bar plots
    kinds = ['line', 'bar']
    for kind in kinds:
        ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
        ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
        self._check_has_errorbars(ax, xerr=1, yerr=1)
    ax = _check_plot_works(s.plot, xerr=s_err)
    self._check_has_errorbars(ax, xerr=1, yerr=0)
    # test time series plotting
    ix = date_range('1/1/2000', '1/1/2001', freq='M')
    ts = Series(np.arange(12), index=ix, name='x')
    ts_err = Series(np.random.randn(12), index=ix)
    td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y'])
    ax = _check_plot_works(ts.plot, yerr=ts_err)
    self._check_has_errorbars(ax, xerr=0, yerr=1)
    ax = _check_plot_works(ts.plot, yerr=td_err)
    self._check_has_errorbars(ax, xerr=0, yerr=1)
    # check incorrect lengths and types
    with pytest.raises(ValueError):
        s.plot(yerr=np.arange(11))
    s_err = ['zzz'] * 10
    # in mpl 1.5+ this is a TypeError
    with pytest.raises((ValueError, TypeError)):
        s.plot(yerr=s_err)
def test_table(self):
    """``table`` keyword accepts both a bool and a Series."""
    _check_plot_works(self.series.plot, table=True)
    _check_plot_works(self.series.plot, table=self.series)
@pytest.mark.slow
def test_series_grid_settings(self):
    """Plots default to the rcParams['axes.grid'] setting."""
    # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
    self._check_grid_settings(Series([1, 2, 3]),
                              plotting._core._series_kinds +
                              plotting._core._common_kinds)
@pytest.mark.slow
def test_standard_colors(self):
    """_get_standard_colors cycles a single color to the requested count."""
    from pandas.plotting._style import _get_standard_colors
    for c in ['r', 'red', 'green', '#FF0000']:
        result = _get_standard_colors(1, color=c)
        assert result == [c]
        result = _get_standard_colors(1, color=[c])
        assert result == [c]
        result = _get_standard_colors(3, color=c)
        assert result == [c] * 3
        result = _get_standard_colors(3, color=[c])
        assert result == [c] * 3
@pytest.mark.slow
def test_standard_colors_all(self):
    """_get_standard_colors handles every named and single-letter mpl color."""
    import matplotlib.colors as colors
    from pandas.plotting._style import _get_standard_colors
    # multiple colors like mediumaquamarine
    for c in colors.cnames:
        result = _get_standard_colors(num_colors=1, color=c)
        assert result == [c]
        result = _get_standard_colors(num_colors=1, color=[c])
        assert result == [c]
        result = _get_standard_colors(num_colors=3, color=c)
        assert result == [c] * 3
        result = _get_standard_colors(num_colors=3, color=[c])
        assert result == [c] * 3
    # single letter colors like k
    for c in colors.ColorConverter.colors:
        result = _get_standard_colors(num_colors=1, color=c)
        assert result == [c]
        result = _get_standard_colors(num_colors=1, color=[c])
        assert result == [c]
        result = _get_standard_colors(num_colors=3, color=c)
        assert result == [c] * 3
        result = _get_standard_colors(num_colors=3, color=[c])
        assert result == [c] * 3
def test_series_plot_color_kwargs(self):
    """``color`` keyword sets the line color of a Series plot."""
    # GH1890
    _, ax = self.plt.subplots()
    ax = Series(np.arange(12) + 1).plot(color='green', ax=ax)
    self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_kwargs(self):
    """``color`` keyword also works on datetime-indexed Series plots."""
    # #1890
    _, ax = self.plt.subplots()
    ax = Series(np.arange(12) + 1, index=date_range(
        '1/1/2000', periods=12)).plot(color='green', ax=ax)
    self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_with_empty_kwargs(self):
    """Without a ``color`` keyword, lines follow matplotlib's color cycle."""
    import matplotlib as mpl
    if self.mpl_ge_1_5_0:
        # mpl >= 1.5 stores the cycle as a prop_cycle; unpack it
        def_colors = self._maybe_unpack_cycler(mpl.rcParams)
    else:
        def_colors = mpl.rcParams['axes.color_cycle']
    index = date_range('1/1/2000', periods=12)
    s = Series(np.arange(1, 13), index=index)
    ncolors = 3
    _, ax = self.plt.subplots()
    # plot the same series three times onto one axes
    for i in range(ncolors):
        ax = s.plot(ax=ax)
    self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
def test_xticklabels(self):
    """``xticks`` selects which index labels appear on the x axis."""
    # GH11529
    s = Series(np.arange(10), index=['P%02d' % i for i in range(10)])
    _, ax = self.plt.subplots()
    ax = s.plot(xticks=[0, 3, 5, 9], ax=ax)
    exp = ['P%02d' % i for i in [0, 3, 5, 9]]
    self._check_text_labels(ax.get_xticklabels(), exp)
def test_custom_business_day_freq(self):
    """Series with a CustomBusinessDay frequency (incl. holidays) plots."""
    # GH7222
    from pandas.tseries.offsets import CustomBusinessDay
    s = Series(range(100, 121), index=pd.bdate_range(
        start='2014-05-01', end='2014-06-01',
        freq=CustomBusinessDay(holidays=['2014-05-26'])))
    _check_plot_works(s.plot)
| gpl-2.0 |
hhru/tinydav | test/Mock.py | 4 | 5311 | # Mock object for unittests.
# Copyright (C) 2009 Manuel Hermann <manuel-hermann@gmx.net>
#
# This file is part of tinydav.
#
# tinydav is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Mock object for unittests."""
from collections import defaultdict
from contextlib import contextmanager
from email.mime.multipart import MIMEMultipart
from functools import partial
from StringIO import StringIO
import urllib2
@contextmanager
def injected(func, **kwargs):
    """Inject vars into a function or method while in context mode.

    Temporarily replaces names in the function's global namespace with
    the given keyword values and restores them on exit.  Python 2 only
    (relies on ``im_func`` / ``func_globals``).

    NOTE(review): only names already present in func_globals are saved,
    so a kwarg introducing a brand-new global is not removed on exit --
    it keeps the injected value. Confirm this is intended.
    """
    # recognize methods: unwrap to the underlying function object
    if hasattr(func, "im_func"):
        func = func.im_func
    # save and replace current function globals as to kwargs
    func_globals = func.func_globals
    saved = dict((k, func_globals[k]) for k in kwargs if k in func_globals)
    func_globals.update(kwargs)
    try:
        # context is now ready to be used
        yield
    finally:
        # Bug fix: restore the previous state even when the with-body
        # raises; previously an exception skipped the restore and left
        # the injected globals in place for all later tests.
        func_globals.update(saved)
@contextmanager
def replaced(obj, **attrs):
    """Replace attributes on ``obj`` while in context mode.

    Sets each keyword as an attribute for the duration of the with-block
    and restores the originals afterwards.  Python 2 only (``iteritems``).
    """
    # save and replace current attributes
    saved = dict((k, getattr(obj, k)) for k in attrs)
    for (name, attr) in attrs.iteritems():
        setattr(obj, name, attr)
    try:
        # context is ready
        yield
    finally:
        # Bug fix: restore the previous state even when the with-body
        # raises; previously an exception skipped the restore loop.
        for (name, attr) in saved.iteritems():
            setattr(obj, name, attr)
def omnivore_func(retval=None, exception=None):
    """Return a function accepting any number of args and act accordingly.

    Every call increments the returned function's ``callcount`` attribute.

    retval -- Returned function returns this value on call.
    exception -- If not None, this will be raised by the returned function.
    """
    def omnivore(*args, **kwargs):
        omnivore.callcount = omnivore.callcount + 1
        if exception is None:
            return retval
        raise exception
    omnivore.callcount = 0
    return omnivore
class Omnivore(object):
    """Omnivore class.

    Return pre-defined values or raise predefined exceptions on any method
    that may be called, including __call__.  Every invocation is recorded
    in ``self.called`` keyed by method name.  Python 2 only (``iteritems``,
    generator ``.next()``).
    """
    def __init__(self, **kwargs):
        """Initialize with return values.

        **kwargs -- Key is the method name, value is an iterable of values
                    returned on successive calls. If a value is an instance
                    of Exception, it will be raised instead.
        """
        self.__name__ = "Omnivore"
        self.retvals = dict()
        for (key, value) in kwargs.iteritems():
            self.retvals[key] = iter(value)
        # method name -> list of (args, kwargs) per call
        self.called = defaultdict(list)
    def __enter__(self):
        self.called["__enter__"] = True
        return self
    def __exit__(self, exctype, excvalue, exctb):
        # Bug fix: 'self' was missing from the signature, so using an
        # Omnivore as a context manager raised TypeError on exit.
        self.called["__exit__"] = (exctype, excvalue, exctb)
    def method(self, methodname, *args, **kwargs):
        """Record the call and return/raise the next predefined value."""
        self.called[methodname].append((args, kwargs))
        generator = self.retvals.get(methodname)
        if generator is None:
            return None
        value = generator.next()
        if isinstance(value, Exception):
            raise value
        return value
    def __getattr__(self, name):
        # any unknown attribute behaves as a recording method
        return partial(self.method, name)
    def __call__(self, *args, **kwargs):
        return self.method("__call__", *args, **kwargs)
class FakeMIMEMultipart(object):
    """Factory standing in for MIMEMultipart with a predictable boundary.

    Calling an instance builds a MIMEMultipart whose boundary is the fixed
    one given at construction; the "mixed" subtype gets a "-mixed" suffix
    so nested multiparts stay distinguishable.
    """
    def __init__(self, boundary="foobar"):
        # boundary used for every message produced by this factory
        self.boundary = boundary
    def __call__(self, subtype):
        suffix = "-mixed" if subtype == "mixed" else ""
        return MIMEMultipart(subtype, self.boundary + suffix)
class HTTPConnection(object):
    """Mock httplib.HTTPConnection: records the request, serves a canned
    Response, and tracks whether close() was called."""
    def __init__(self):
        # recorded request (input side)
        self.method, self.path = None, None
        self.body, self.headers = None, None
        # canned response handed out by getresponse() (output side)
        self.response = Response()
        self.closed = False
    def request(self, method, path, body=None, headers=None):
        """Capture the request parameters for later inspection."""
        self.method, self.path = method, path
        self.body, self.headers = body, headers
    def __enter__(self):
        pass
    def __exit__(self, *args):
        pass
    def getresponse(self):
        """Return the canned Response object."""
        return self.response
    def close(self):
        """Mark the connection as closed."""
        self.closed = True
class ModuleProxy(object):
    """Mock module: forwards every attribute lookup to the wrapped module.

    Must be instantiated with the real module to proxy.
    """
    def __init__(self, module):
        # name-mangled so the wrapped module itself never shadows
        # attributes resolved through __getattr__
        self.__module = module
    def __getattr__(self, name):
        # delegate any attribute not found on the proxy itself
        return getattr(self.__module, name)
class Response(urllib2.HTTPError):
    """Mock urllib2 response object.

    Subclasses HTTPError so the same object can serve as both a normal
    response and a raised error in tests.  Python 2 only (urllib2).
    """
    def __init__(self):
        # NOTE: deliberately does not call HTTPError.__init__; attributes
        # are set directly so no real HTTP machinery is involved.
        self.code = None
        self.content = ""
        # HTTP/1.1 as reported by httplib
        self.version = 11
        self.reason = "The reason"
        self.headers = dict()
        self.status = 200
    def getheaders(self):
        # mimic httplib.HTTPResponse.getheaders()
        return self.headers
    def read(self):
        # mimic file-like read(): return the canned body
        return self.content
| gpl-3.0 |
cdr-stats/cdr-stats | cdr_stats/voip_billing/views.py | 2 | 10687 | #
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from voip_billing.models import VoIPRetailRate
from voip_billing.forms import PrefixRetailRateForm, SimulatorForm, BillingReportForm
from voip_billing.function_def import prefix_allowed_to_call
from voip_billing.rate_engine import rate_engine
from voip_billing.constants import RATE_COLUMN_NAME
from aggregator.pandas_cdr import get_report_cdr_per_switch
from aggregator.aggregate_cdr import custom_sql_aggr_top_country
from cdr.decorators import check_user_detail
from cdr.functions_def import get_switch_ip_addr, calculate_act_acd
from cdr.constants import Export_choice
from common.helpers import trunc_date_start, trunc_date_end
from django_lets_go.common_functions import getvar, get_pagination_vars
from datetime import datetime
import logging
import requests
import ast
import tablib
from apirest.view_voip_rate import find_rates
def rest_api_call(request, api_url):
    """Call the VoIP rate REST API and return the parsed list of rates.

    Uses HTTP basic auth with the logged-in user and a 1 second timeout.
    Returns an empty list on timeout, connection error or non-200 status.

    request -- Django request; only request.user is used for auth.
               NOTE(review): requests expects string credentials but a
               User object is passed -- presumably its __str__ (the
               username) is intended; confirm.
    api_url -- full URL of the rate endpoint.
    """
    final_rate_list = []
    response = False
    try:
        response = requests.get(api_url, auth=(request.user, request.user), timeout=1.0)
        # Bug fix: the original concatenated the Response object to a
        # str ('...' + response), raising TypeError on every successful
        # call. Use lazy logging arguments instead.
        logging.debug('API requests.get response: %s', response)
    except requests.exceptions.Timeout:
        # Todo: we may want to deal with error nicely
        logging.debug('API Timeout Error : %s', api_url)
    except Exception:
        # narrowed from a bare except: still best-effort, but no longer
        # swallows SystemExit/KeyboardInterrupt
        logging.debug('API Error : %s', api_url)
    if response and response.status_code == 200:
        # due to string response of API, we need to convert response in to array
        rate_list = response.content.replace('[', '').replace(']', '').replace('}, {', '}|{').split('|')
        for i in rate_list:
            if i:
                # convert string into dict
                final_rate_list.append(ast.literal_eval(i))
    return final_rate_list
@permission_required('user_profile.call_rate', login_url='/')
@login_required
@check_user_detail('voipplan')
def voip_rates(request):
    """List voip call rates according to country prefix

    **Attributes**:

        * ``template`` - voip_billing/rates.html
        * ``form`` - PrefixRetailRateForm

    **Logic Description**:

        get all call rates from voip rate API and list them in template
        with pagination & sorting column
    """
    form = PrefixRetailRateForm(request.POST or None)
    final_rate_list = []
    # Get pagination data
    sort_col_field_list = ['prefix', 'retail_rate', 'destination']
    page_data = get_pagination_vars(request, sort_col_field_list, default_sort_field='prefix')
    sort_order = page_data['sort_order']
    order = 'ASC'
    # a leading "-" in the sort field means descending
    if "-" in sort_order:
        order = 'DESC'
        sort_order = sort_order[1:]
    dialcode = ''
    if form.is_valid():
        # fresh search: remember the prefix in the session for paging
        dialcode = request.POST.get('prefix')
        request.session['dialcode'] = dialcode
    else:
        # pagination with prefix code
        if (request.session.get('dialcode') and (request.GET.get('page') or request.GET.get('sort_by'))):
            dialcode = request.session.get('dialcode')
            form = PrefixRetailRateForm(initial={'prefix': dialcode})
        else:
            # Reset variables
            request.session['dialcode'] = ''
            dialcode = ''
    if hasattr(request.user, 'userprofile'):
        voipplan_id = request.user.userprofile.voipplan_id
        if dialcode:
            final_rate_list = find_rates(voipplan_id, dialcode=dialcode, sort_field=sort_order, order=order)
        else:
            # no prefix filter: list rates for the whole plan
            final_rate_list = find_rates(voipplan_id, dialcode=None, sort_field=sort_order, order=order)
    else:
        # user without a profile has no voip plan -> nothing to show
        final_rate_list = []
    variables = {
        'form': form,
        'rate_list': final_rate_list,
        'rate_list_count': len(final_rate_list),
        'col_name_with_order': page_data['col_name_with_order'],
        'RATE_COLUMN_NAME': RATE_COLUMN_NAME,
        'sort_order': sort_order,
        'up_icon': '<i class="glyphicon glyphicon-chevron-up"></i>',
        'down_icon': '<i class="glyphicon glyphicon-chevron-down"></i>'
    }
    return render_to_response('voip_billing/rates.html',
                              variables,
                              context_instance=RequestContext(request))
@permission_required('user_profile.export_call_rate', login_url='/')
@login_required
def export_rate(request):
    """Export the current call-rate search result as xls/csv/json.

    **Logic Description**:

        get the prefix rates from the voip rate API
        according to the last search parameters & stream them as a file
    """
    format_type = request.GET['format']
    response = HttpResponse(content_type='text/%s' % format_type)
    response['Content-Disposition'] = 'attachment;filename=call_rate.%s' % format_type
    headers = ('prefix', 'destination', 'retail_rate')
    final_result = []
    # re-run the API query saved in the session by the rates view
    if request.session.get('session_api_url'):
        api_url = request.session['session_api_url']
        final_result = rest_api_call(request, api_url)
    list_val = []
    for row in final_result:
        list_val.append((row['prefix'], row['prefix__destination'], row['retail_rate']))
    data = tablib.Dataset(*list_val, headers=headers)
    if format_type == Export_choice.XLS:
        response.write(data.xls)
    elif format_type == Export_choice.CSV:
        response.write(data.csv)
    elif format_type == Export_choice.JSON:
        response.write(data.json)
    return response
@permission_required('user_profile.simulator', login_url='/')
@check_user_detail('voipplan')
@login_required
def simulator(request):
    """Client Simulator
    To view rate according to VoIP Plan & Destination No.

    **Attributes**:

        * ``template`` - voip_billing/simulator.html
        * ``form`` - SimulatorForm

    **Logic Description**:

        get min call rates for destination from rate_engine and display them in template
    """
    data = []
    form = SimulatorForm(request.user, request.POST or None)
    # Get Voip Plan ID according to USER
    if form.is_valid():
        # IS recipient_phone_no/destination no is valid prefix
        # (Not banned Prefix) ?
        destination_no = request.POST.get("destination_no")
        if hasattr(request.user, 'userprofile'):
            voipplan_id = request.user.userprofile.voipplan_id
            allowed = prefix_allowed_to_call(destination_no, voipplan_id)
            if allowed:
                rates = rate_engine(voipplan_id=voipplan_id, dest_number=destination_no)
                for rate in rates:
                    r_r_plan = VoIPRetailRate.objects.get(id=rate.rrid)
                    data.append((voipplan_id,
                                 r_r_plan.voip_retail_plan_id.name,
                                 rate.retail_rate))
    # rebind 'data' to the template context (the rate rows move inside it)
    data = {
        'form': form,
        'data': data,
    }
    return render_to_response('voip_billing/simulator.html',
                              data,
                              context_instance=RequestContext(request))
@permission_required('user_profile.billing_report', login_url='/')
@check_user_detail('accountcode,voipplan')
@login_required
def billing_report(request):
    """CDR billing graph by daily basis

    **Attributes**:

        * ``template`` - voip_billing/billing_report.html
        * ``form`` - BillingReportForm

    **Logic Description**:

        Retrieve call records from PostgreSQL and build the
        daily billing analytics for given date range
    """
    switch_id = 0
    tday = datetime.today()
    total_data = []
    charttype = "lineWithFocusChart"
    hourly_chartdata = {"x": []}
    # default the form to today's full day
    form = BillingReportForm(request.POST or None,
                             initial={'from_date': tday.strftime('%Y-%m-%d 00:00'),
                                      'to_date': tday.strftime('%Y-%m-%d 23:55'),
                                      'switch_id': switch_id})
    start_date = trunc_date_start(tday)
    end_date = trunc_date_end(tday)
    if form.is_valid():
        # override the default range with the submitted one
        from_date = getvar(request, 'from_date')
        to_date = getvar(request, 'to_date')
        start_date = trunc_date_start(from_date)
        end_date = trunc_date_end(to_date)
        switch_id = getvar(request, 'switch_id')
    metrics = ['buy_cost', 'sell_cost']
    hourly_data = get_report_cdr_per_switch(request.user, 'hour', start_date, end_date, switch_id)
    hourly_chartdata['x'] = hourly_data["nbcalls"]["x_timestamp"]
    # build one chart series per (switch, metric) pair; nvd3 expects the
    # keys name1/y1/extra1, name2/y2/extra2, ...
    i = 0
    for metric in metrics:
        extra_serie = {
            "tooltip": {"y_start": "", "y_end": " " + metric},
            "date_format": "%d %b %y %H:%M%p"
        }
        for switch in hourly_data[metric]["columns"]:
            i = i + 1
            hourly_chartdata['name' + str(i)] = get_switch_ip_addr(switch) + "_" + metric
            hourly_chartdata['y' + str(i)] = hourly_data[metric]["values"][str(switch)]
            hourly_chartdata['extra' + str(i)] = extra_serie
    total_calls = hourly_data["nbcalls"]["total"]
    total_duration = hourly_data["duration"]["total"]
    total_billsec = hourly_data["billsec"]["total"]
    total_buy_cost = hourly_data["buy_cost"]["total"]
    total_sell_cost = hourly_data["sell_cost"]["total"]
    # Calculate the Average Time of Call
    metric_aggr = calculate_act_acd(total_calls, total_duration)
    # Get top 10 of country calls
    country_data = custom_sql_aggr_top_country(request.user, switch_id, 10, start_date, end_date)
    data = {
        'form': form,
        'total_data': total_data,
        'start_date': start_date,
        'end_date': end_date,
        'charttype': charttype,
        'chartdata': hourly_chartdata,
        'chartcontainer': 'chart_container',
        'extra': {
            'x_is_date': True,
            'x_axis_format': '%d %b %Y',
            'tag_script_js': True,
            'jquery_on_ready': True,
        },
        'total_calls': total_calls,
        'total_duration': total_duration,
        'total_billsec': total_billsec,
        'total_buy_cost': total_buy_cost,
        'total_sell_cost': total_sell_cost,
        'metric_aggr': metric_aggr,
        'country_data': country_data,
    }
    return render_to_response('voip_billing/billing_report.html',
                              data,
                              context_instance=RequestContext(request))
| mpl-2.0 |
HERA-Team/pyuvdata | pyuvdata/uvbeam/cst_beam.py | 1 | 13336 | # -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Class for reading beam CST files."""
import re
import warnings
import numpy as np
from .uvbeam import UVBeam
from .. import utils as uvutils
__all__ = ["CSTBeam"]
class CSTBeam(UVBeam):
"""
Defines a CST-specific subclass of UVBeam for reading CST text files.
This class should not be interacted with directly, instead use the
read_cst_beam method on the UVBeam class.
"""
def name2freq(self, fname):
    """
    Extract frequency from the filename.

    Assumes the file name contains a substring with the frequency channel
    in MHz that the data represents.
    e.g. "HERA_Sim_120.87MHz.txt" should yield 120.87e6

    Parameters
    ----------
    fname : str
        Filename to parse.

    Returns
    -------
    float
        Frequency extracted from filename in Hz.
    """
    hz_pos = fname.rfind("Hz")
    # the last number appearing before the "Hz" marker is the frequency
    frequency = float(re.findall(r"\d*\.\d+|\d+", fname[:hz_pos])[-1])
    # the character right before "Hz" may be an SI prefix multiplier
    multipliers = {"k": 1e3, "M": 1e6, "G": 1e9}
    return frequency * multipliers.get(fname[hz_pos - 1], 1.0)
def read_cst_beam(
self,
filename,
beam_type="power",
feed_pol="x",
rotate_pol=True,
frequency=None,
telescope_name=None,
feed_name=None,
feed_version=None,
model_name=None,
model_version=None,
history="",
x_orientation=None,
reference_impedance=None,
extra_keywords=None,
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""
Read in data from a cst file.
Parameters
----------
filename : str
The cst file to read from.
beam_type : str
What beam_type to read in ('power' or 'efield').
feed_pol : str
The feed or polarization or list of feeds or polarizations the
files correspond to.
Defaults to 'x' (meaning x for efield or xx for power beams).
rotate_pol : bool
If True, assume the structure in the simulation is symmetric under
90 degree rotations about the z-axis (so that the y polarization can be
constructed by rotating the x polarization or vice versa).
Default: True if feed_pol is a single value or a list with all
the same values in it, False if it is a list with varying values.
frequency : float or list of float
The frequency or list of frequencies corresponding to the filename(s).
This is assumed to be in the same order as the files.
If not passed, the code attempts to parse it from the filenames.
telescope_name : str
The name of the telescope corresponding to the filename(s).
feed_name : str
The name of the feed corresponding to the filename(s).
feed_version : str
The version of the feed corresponding to the filename(s).
model_name : str
The name of the model corresponding to the filename(s).
model_version : str
The version of the model corresponding to the filename(s).
history : str
A string detailing the history of the filename(s).
x_orientation : str, optional
Orientation of the physical dipole corresponding to what is
labelled as the x polarization. Options are "east" (indicating
east/west orientation) and "north" (indicating north/south orientation)
reference_impedance : float, optional
The reference impedance of the model(s).
extra_keywords : dict, optional
A dictionary containing any extra_keywords.
run_check : bool
Option to check for the existence and proper shapes of
required parameters after reading in the file.
check_extra : bool
Option to check optional parameters as well as
required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of
required parameters after reading in the file.
"""
self.telescope_name = telescope_name
self.feed_name = feed_name
self.feed_version = feed_version
self.model_name = model_name
self.model_version = model_version
self.history = history
if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):
self.history += self.pyuvdata_version_str
if x_orientation is not None:
self.x_orientation = x_orientation
if reference_impedance is not None:
self.reference_impedance = float(reference_impedance)
if extra_keywords is not None:
self.extra_keywords = extra_keywords
if beam_type == "power":
self.Naxes_vec = 1
if feed_pol == "x":
feed_pol = "xx"
elif feed_pol == "y":
feed_pol = "yy"
if rotate_pol:
rot_pol_dict = {"xx": "yy", "yy": "xx", "xy": "yx", "yx": "xy"}
pol2 = rot_pol_dict[feed_pol]
self.polarization_array = np.array(
[uvutils.polstr2num(feed_pol), uvutils.polstr2num(pol2)]
)
else:
self.polarization_array = np.array([uvutils.polstr2num(feed_pol)])
self.Npols = len(self.polarization_array)
self._set_power()
else:
self.Naxes_vec = 2
self.Ncomponents_vec = 2
if rotate_pol:
if feed_pol == "x":
self.feed_array = np.array(["x", "y"])
else:
self.feed_array = np.array(["y", "x"])
else:
if feed_pol == "x":
self.feed_array = np.array(["x"])
else:
self.feed_array = np.array(["y"])
self.Nfeeds = self.feed_array.size
self._set_efield()
self.data_normalization = "physical"
self.antenna_type = "simple"
self.Nfreqs = 1
self.Nspws = 1
self.freq_array = np.zeros((self.Nspws, self.Nfreqs))
self.bandpass_array = np.zeros((self.Nspws, self.Nfreqs))
self.spw_array = np.array([0])
self.pixel_coordinate_system = "az_za"
self._set_cs_params()
out_file = open(filename, "r")
line = out_file.readline().strip() # Get the first line
out_file.close()
raw_names = line.split("]")
raw_names = [raw_name for raw_name in raw_names if not raw_name == ""]
column_names = []
units = []
for raw_name in raw_names:
column_name, unit = tuple(raw_name.split("["))
column_names.append("".join(column_name.lower().split(" ")))
units.append(unit.lower().strip())
data = np.loadtxt(filename, skiprows=2)
theta_col = np.where(np.array(column_names) == "theta")[0][0]
phi_col = np.where(np.array(column_names) == "phi")[0][0]
if "deg" in units[theta_col]:
theta_data = np.radians(data[:, theta_col])
else:
theta_data = data[:, theta_col]
if "deg" in units[phi_col]:
phi_data = np.radians(data[:, phi_col])
else:
phi_data = data[:, phi_col]
theta_axis = np.sort(np.unique(theta_data))
phi_axis = np.sort(np.unique(phi_data))
if not theta_axis.size * phi_axis.size == theta_data.size:
raise ValueError("Data does not appear to be on a grid")
theta_data = theta_data.reshape((theta_axis.size, phi_axis.size), order="F")
phi_data = phi_data.reshape((theta_axis.size, phi_axis.size), order="F")
delta_theta = np.diff(theta_axis)
if not np.isclose(np.max(delta_theta), np.min(delta_theta)):
raise ValueError(
"Data does not appear to be regularly gridded in zenith angle"
)
delta_theta = delta_theta[0]
delta_phi = np.diff(phi_axis)
if not np.isclose(np.max(delta_phi), np.min(delta_phi)):
raise ValueError(
"Data does not appear to be regularly gridded in azimuth angle"
)
delta_phi = delta_phi[0]
self.axis1_array = phi_axis
self.Naxes1 = self.axis1_array.size
self.axis2_array = theta_axis
self.Naxes2 = self.axis2_array.size
if self.beam_type == "power":
# type depends on whether cross pols are present
# (if so, complex, else float)
self.data_array = np.zeros(
self._data_array.expected_shape(self),
dtype=self._data_array.expected_type,
)
else:
self.data_array = np.zeros(
self._data_array.expected_shape(self), dtype=np.complex
)
if frequency is not None:
self.freq_array[0] = frequency
else:
self.freq_array[0] = self.name2freq(filename)
if rotate_pol:
# for second polarization, rotate by pi/2
rot_phi = phi_data + np.pi / 2
rot_phi[np.where(rot_phi >= 2 * np.pi)] -= 2 * np.pi
roll_rot_phi = np.roll(rot_phi, int((np.pi / 2) / delta_phi), axis=1)
if not np.allclose(roll_rot_phi, phi_data):
raise ValueError("Rotating by pi/2 failed")
# theta is not affected by the rotation
# get beam
if self.beam_type == "power":
data_col_enum = ["abs(e)", "abs(v)"]
data_col = []
for name in data_col_enum:
this_col = np.where(np.array(column_names) == name)[0]
if this_col.size > 0:
data_col = data_col + this_col.tolist()
if len(data_col) == 0:
raise ValueError("No power column found in file: {}".format(filename))
elif len(data_col) > 1:
raise ValueError(
"Multiple possible power columns found in file: {}".format(filename)
)
data_col = data_col[0]
power_beam1 = (
data[:, data_col].reshape((theta_axis.size, phi_axis.size), order="F")
** 2.0
)
self.data_array[0, 0, 0, 0, :, :] = power_beam1
if rotate_pol:
# rotate by pi/2 for second polarization
power_beam2 = np.roll(power_beam1, int((np.pi / 2) / delta_phi), axis=1)
self.data_array[0, 0, 1, 0, :, :] = power_beam2
else:
self.basis_vector_array = np.zeros(
(self.Naxes_vec, self.Ncomponents_vec, self.Naxes2, self.Naxes1)
)
self.basis_vector_array[0, 0, :, :] = 1.0
self.basis_vector_array[1, 1, :, :] = 1.0
theta_mag_col = np.where(np.array(column_names) == "abs(theta)")[0][0]
theta_phase_col = np.where(np.array(column_names) == "phase(theta)")[0][0]
phi_mag_col = np.where(np.array(column_names) == "abs(phi)")[0][0]
phi_phase_col = np.where(np.array(column_names) == "phase(phi)")[0][0]
theta_mag = data[:, theta_mag_col].reshape(
(theta_axis.size, phi_axis.size), order="F"
)
phi_mag = data[:, phi_mag_col].reshape(
(theta_axis.size, phi_axis.size), order="F"
)
if "deg" in units[theta_phase_col]:
theta_phase = np.radians(data[:, theta_phase_col])
else:
theta_phase = data[:, theta_phase_col]
if "deg" in units[phi_phase_col]:
phi_phase = np.radians(data[:, phi_phase_col])
else:
phi_phase = data[:, phi_phase_col]
theta_phase = theta_phase.reshape(
(theta_axis.size, phi_axis.size), order="F"
)
phi_phase = phi_phase.reshape((theta_axis.size, phi_axis.size), order="F")
theta_beam = theta_mag * np.exp(1j * theta_phase)
phi_beam = phi_mag * np.exp(1j * phi_phase)
self.data_array[0, 0, 0, 0, :, :] = phi_beam
self.data_array[1, 0, 0, 0, :, :] = theta_beam
if rotate_pol:
# rotate by pi/2 for second polarization
theta_beam2 = np.roll(theta_beam, int((np.pi / 2) / delta_phi), axis=1)
phi_beam2 = np.roll(phi_beam, int((np.pi / 2) / delta_phi), axis=1)
self.data_array[0, 0, 1, 0, :, :] = phi_beam2
self.data_array[1, 0, 1, 0, :, :] = theta_beam2
self.bandpass_array[0] = 1
if frequency is None:
warnings.warn(
"No frequency provided. Detected frequency is: "
"{freqs} Hz".format(freqs=self.freq_array)
)
if run_check:
self.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
| bsd-2-clause |
276361270/sqlalchemy | test/sql/test_query.py | 13 | 94964 | from sqlalchemy.testing import eq_, assert_raises_message, assert_raises, \
is_, in_, not_in_
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, engines
from sqlalchemy import util
from sqlalchemy import (
exc, sql, func, select, String, Integer, MetaData, and_, ForeignKey,
union, intersect, except_, union_all, VARCHAR, INT, CHAR, text, Sequence,
bindparam, literal, not_, type_coerce, literal_column, desc, asc,
TypeDecorator, or_, cast, table, column)
from sqlalchemy.engine import default, result as _result
from sqlalchemy.testing.schema import Table, Column
# ongoing - these are old tests. those which are of general use
# to test a dialect are being slowly migrated to
# sqlalchemy.testing.suite

# module-level Table/MetaData handles, populated by QueryTest.setup_class
users = users2 = addresses = metadata = None
class QueryTest(fixtures.TestBase):
    """Round-trip tests for statement execution and result-row access."""

    __backend__ = True
def setup_class(cls):
global users, users2, addresses, metadata
metadata = MetaData(testing.db)
users = Table(
'query_users', metadata,
Column(
'user_id', INT, primary_key=True,
test_needs_autoincrement=True),
Column('user_name', VARCHAR(20)),
test_needs_acid=True
)
addresses = Table(
'query_addresses', metadata,
Column(
'address_id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('user_id', Integer, ForeignKey('query_users.user_id')),
Column('address', String(30)),
test_needs_acid=True
)
users2 = Table(
'u2', metadata,
Column('user_id', INT, primary_key=True),
Column('user_name', VARCHAR(20)),
test_needs_acid=True
)
metadata.create_all()
    @engines.close_first
    def teardown(self):
        """Empty all tables between tests; addresses first for FK ordering."""
        addresses.delete().execute()
        users.delete().execute()
        users2.delete().execute()
    @classmethod
    def teardown_class(cls):
        """Drop all tables created in setup_class."""
        metadata.drop_all()
    @testing.requires.multivalues_inserts
    def test_multivalues_insert(self):
        """Multi-VALUES insert() works with both dict and tuple param sets."""
        users.insert(
            values=[
                {'user_id': 7, 'user_name': 'jack'},
                {'user_id': 8, 'user_name': 'ed'}]).execute()
        rows = users.select().order_by(users.c.user_id).execute().fetchall()
        self.assert_(rows[0] == (7, 'jack'))
        self.assert_(rows[1] == (8, 'ed'))
        # positional tuples are accepted as well as dicts
        users.insert(values=[(9, 'jack'), (10, 'ed')]).execute()
        rows = users.select().order_by(users.c.user_id).execute().fetchall()
        self.assert_(rows[2] == (9, 'jack'))
        self.assert_(rows[3] == (10, 'ed'))
    def test_insert_heterogeneous_params(self):
        """test that executemany parameters are asserted to match the
        parameter set of the first."""
        # the third param set is missing 'user_name', which the first set
        # established as required - this raises at statement-compile time
        assert_raises_message(
            exc.StatementError,
            r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
            "bind parameter 'user_name', in "
            "parameter group 2 "
            r"\[SQL: u?'INSERT INTO query_users",
            users.insert().execute,
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9}
        )
        # this succeeds however. We aren't yet doing
        # a length check on all subsequent parameters.
        users.insert().execute(
            {'user_id': 7},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9}
        )
    def test_lastrow_accessor(self):
        """Tests the inserted_primary_key and lastrow_has_id() functions."""

        def insert_values(engine, table, values):
            """
            Inserts a row into a table, returns the full list of values
            INSERTed including defaults that fired off on the DB side and
            detects rows that had defaults and post-fetches.
            """
            # verify implicit_returning is working
            if engine.dialect.implicit_returning:
                ins = table.insert()
                comp = ins.compile(engine, column_keys=list(values))
                if not set(values).issuperset(
                        c.key for c in table.primary_key):
                    assert comp.returning

            result = engine.execute(table.insert(), **values)
            ret = values.copy()

            # merge server-generated primary key values into the result dict
            for col, id in zip(table.primary_key, result.inserted_primary_key):
                ret[col.key] = id

            if result.lastrow_has_defaults():
                criterion = and_(
                    *[
                        col == id for col, id in
                        zip(table.primary_key, result.inserted_primary_key)])
                row = engine.execute(table.select(criterion)).first()
                for c in table.c:
                    ret[c.key] = row[c]
            return ret

        if testing.against('firebird', 'postgresql', 'oracle', 'mssql'):
            assert testing.db.dialect.implicit_returning

        # run every scenario with implicit RETURNING both disabled and
        # enabled, when the dialect supports it
        if testing.db.dialect.implicit_returning:
            test_engines = [
                engines.testing_engine(options={'implicit_returning': False}),
                engines.testing_engine(options={'implicit_returning': True}),
            ]
        else:
            test_engines = [testing.db]

        for engine in test_engines:
            metadata = MetaData()
            # each tuple: (per-dialect support flags, table definition,
            # values to insert, full expected row after defaults fire)
            for supported, table, values, assertvalues in [
                (
                    {'unsupported': ['sqlite']},
                    Table(
                        "t1", metadata,
                        Column(
                            'id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
                        Column('foo', String(30), primary_key=True)),
                    {'foo': 'hi'},
                    {'id': 1, 'foo': 'hi'}
                ),
                (
                    {'unsupported': ['sqlite']},
                    Table(
                        "t2", metadata,
                        Column(
                            'id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
                        Column('foo', String(30), primary_key=True),
                        Column('bar', String(30), server_default='hi')
                    ),
                    {'foo': 'hi'},
                    {'id': 1, 'foo': 'hi', 'bar': 'hi'}
                ),
                (
                    {'unsupported': []},
                    Table(
                        "t3", metadata,
                        Column("id", String(40), primary_key=True),
                        Column('foo', String(30), primary_key=True),
                        Column("bar", String(30))
                    ),
                    {'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"},
                    {'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"}
                ),
                (
                    {'unsupported': []},
                    Table(
                        "t4", metadata,
                        Column(
                            'id', Integer,
                            Sequence('t4_id_seq', optional=True),
                            primary_key=True),
                        Column('foo', String(30), primary_key=True),
                        Column('bar', String(30), server_default='hi')
                    ),
                    {'foo': 'hi', 'id': 1},
                    {'id': 1, 'foo': 'hi', 'bar': 'hi'}
                ),
                (
                    {'unsupported': []},
                    Table(
                        "t5", metadata,
                        Column('id', String(10), primary_key=True),
                        Column('bar', String(30), server_default='hi')
                    ),
                    {'id': 'id1'},
                    {'id': 'id1', 'bar': 'hi'},
                ),
                (
                    {'unsupported': ['sqlite']},
                    Table(
                        "t6", metadata,
                        Column(
                            'id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
                        Column('bar', Integer, primary_key=True)
                    ),
                    {'bar': 0},
                    {'id': 1, 'bar': 0},
                ),
            ]:
                if testing.db.name in supported['unsupported']:
                    continue
                try:
                    table.create(bind=engine, checkfirst=True)
                    i = insert_values(engine, table, values)
                    assert i == assertvalues, "tablename: %s %r %r" % \
                        (table.name, repr(i), repr(assertvalues))
                finally:
                    table.drop(bind=engine)
    # TODO: why not in the sqlite suite?
    @testing.only_on('sqlite+pysqlite')
    @testing.provide_metadata
    def test_lastrowid_zero(self):
        """A lastrowid of 0 is honored as a real PK value, not as falsy."""
        from sqlalchemy.dialects import sqlite
        eng = engines.testing_engine()

        # force the execution context to report 0 as the last row id
        class ExcCtx(sqlite.base.SQLiteExecutionContext):
            def get_lastrowid(self):
                return 0
        eng.dialect.execution_ctx_cls = ExcCtx
        t = Table(
            't', self.metadata, Column('x', Integer, primary_key=True),
            Column('y', Integer))
        t.create(eng)
        r = eng.execute(t.insert().values(y=5))
        eq_(r.inserted_primary_key, [0])
    @testing.fails_on(
        'sqlite', "sqlite autoincremnt doesn't work with composite pks")
    def test_misordered_lastrow(self):
        """Composite PK where the autoincrement column is not listed first."""
        related = Table(
            'related', metadata,
            Column('id', Integer, primary_key=True),
            mysql_engine='MyISAM'
        )
        # manual_id (an FK) precedes the autoincrementing auto_id in the PK
        t6 = Table(
            "t6", metadata,
            Column(
                'manual_id', Integer, ForeignKey('related.id'),
                primary_key=True),
            Column(
                'auto_id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            mysql_engine='MyISAM'
        )
        metadata.create_all()
        r = related.insert().values(id=12).execute()
        id = r.inserted_primary_key[0]
        assert id == 12
        r = t6.insert().values(manual_id=id).execute()
        # inserted_primary_key preserves PK column order, not insert order
        eq_(r.inserted_primary_key, [12, 1])
def test_implicit_id_insert_select_columns(self):
stmt = users.insert().from_select(
(users.c.user_id, users.c.user_name),
users.select().where(users.c.user_id == 20))
testing.db.execute(stmt)
def test_implicit_id_insert_select_keys(self):
stmt = users.insert().from_select(
["user_id", "user_name"],
users.select().where(users.c.user_id == 20))
testing.db.execute(stmt)
def test_row_iteration(self):
users.insert().execute(
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9, 'user_name': 'fred'},
)
r = users.select().execute()
l = []
for row in r:
l.append(row)
self.assert_(len(l) == 3)
    @testing.requires.subqueries
    def test_anonymous_rows(self):
        """Unlabeled expression columns are reachable via anon_N keys."""
        users.insert().execute(
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9, 'user_name': 'fred'},
        )
        sel = select([users.c.user_id]).where(users.c.user_name == 'jack'). \
            as_scalar()
        # the two unlabeled expressions receive auto-generated anon labels
        for row in select([sel + 1, sel + 3], bind=users.bind).execute():
            assert row['anon_1'] == 8
            assert row['anon_2'] == 10
    @testing.fails_on(
        'firebird', "kinterbasdb doesn't send full type information")
    def test_order_by_label(self):
        """test that a label within an ORDER BY works on each backend.

        This test should be modified to support [ticket:1068] when that ticket
        is implemented. For now, you need to put the actual string in the
        ORDER BY.
        """
        users.insert().execute(
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9, 'user_name': 'fred'},
        )

        concat = ("test: " + users.c.user_name).label('thedata')
        # ascending order on the label name, run twice for caching behavior
        eq_(
            select([concat]).order_by("thedata").execute().fetchall(),
            [("test: ed",), ("test: fred",), ("test: jack",)]
        )
        eq_(
            select([concat]).order_by("thedata").execute().fetchall(),
            [("test: ed",), ("test: fred",), ("test: jack",)]
        )

        # descending order on the label name
        concat = ("test: " + users.c.user_name).label('thedata')
        eq_(
            select([concat]).order_by(desc('thedata')).execute().fetchall(),
            [("test: jack",), ("test: fred",), ("test: ed",)]
        )
    @testing.requires.order_by_label_with_expression
    def test_order_by_label_compound(self):
        """A label used inside a larger ORDER BY expression still resolves."""
        users.insert().execute(
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9, 'user_name': 'fred'},
        )
        concat = ("test: " + users.c.user_name).label('thedata')
        eq_(
            select([concat]).order_by(literal_column('thedata') + "x").
            execute().fetchall(),
            [("test: ed",), ("test: fred",), ("test: jack",)]
        )
    def test_row_comparison(self):
        """A result row compares exactly like the equivalent plain tuple."""
        users.insert().execute(user_id=7, user_name='jack')
        rp = users.select().execute().first()

        self.assert_(rp == rp)
        self.assert_(not(rp != rp))

        equal = (7, 'jack')

        self.assert_(rp == equal)
        self.assert_(equal == rp)
        self.assert_(not (rp != equal))
        self.assert_(not (equal != equal))

        # a generator is never equal to a row
        def endless():
            while True:
                yield 1
        self.assert_(rp != endless())
        self.assert_(endless() != rp)

        # test that everything compares the same
        # as it would against a tuple
        import operator
        for compare in [False, 8, endless(), 'xyz', (7, 'jack')]:
            for op in [
                operator.eq, operator.ne, operator.gt,
                operator.lt, operator.ge, operator.le
            ]:
                try:
                    control = op(equal, compare)
                except TypeError:
                    # Py3K raises TypeError for some invalid comparisons
                    assert_raises(TypeError, op, rp, compare)
                else:
                    eq_(control, op(rp, compare))

                try:
                    control = op(compare, equal)
                except TypeError:
                    # Py3K raises TypeError for some invalid comparisons
                    assert_raises(TypeError, op, compare, rp)
                else:
                    eq_(control, op(compare, rp))
    @testing.provide_metadata
    def test_column_label_overlap_fallback(self):
        """Column objects match result keys by name only as a fallback."""
        content = Table(
            'content', self.metadata,
            Column('type', String(30)),
        )
        bar = Table(
            'bar', self.metadata,
            Column('content_type', String(30))
        )
        self.metadata.create_all(testing.db)
        testing.db.execute(content.insert().values(type="t1"))

        # with use_labels, the key is "content_type" - content.c.type matches,
        # the unrelated bar.c.content_type does not
        row = testing.db.execute(content.select(use_labels=True)).first()
        assert content.c.type in row
        assert bar.c.content_type not in row
        assert sql.column('content_type') in row

        row = testing.db.execute(
            select([content.c.type.label("content_type")])).first()
        assert content.c.type in row
        assert bar.c.content_type not in row
        assert sql.column('content_type') in row

        # a label on an arbitrary function matches neither table column
        row = testing.db.execute(select([func.now().label("content_type")])). \
            first()
        assert content.c.type not in row
        assert bar.c.content_type not in row
        assert sql.column('content_type') in row
    def test_pickled_rows(self):
        """Result rows survive a pickle round trip with key access intact."""
        users.insert().execute(
            {'user_id': 7, 'user_name': 'jack'},
            {'user_id': 8, 'user_name': 'ed'},
            {'user_id': 9, 'user_name': 'fred'},
        )

        # exercise all four combinations of pickling and use_labels
        for pickle in False, True:
            for use_labels in False, True:
                result = users.select(use_labels=use_labels).order_by(
                    users.c.user_id).execute().fetchall()

                if pickle:
                    result = util.pickle.loads(util.pickle.dumps(result))

                eq_(
                    result,
                    [(7, "jack"), (8, "ed"), (9, "fred")]
                )
                if use_labels:
                    eq_(result[0]['query_users_user_id'], 7)
                    eq_(
                        list(result[0].keys()),
                        ["query_users_user_id", "query_users_user_name"])
                else:
                    eq_(result[0]['user_id'], 7)
                    eq_(list(result[0].keys()), ["user_id", "user_name"])

                eq_(result[0][0], 7)
                eq_(result[0][users.c.user_id], 7)
                eq_(result[0][users.c.user_name], 'jack')

                if not pickle or use_labels:
                    assert_raises(
                        exc.NoSuchColumnError,
                        lambda: result[0][addresses.c.user_id])
                else:
                    # test with a different table. name resolution is
                    # causing 'user_id' to match when use_labels wasn't used.
                    eq_(result[0][addresses.c.user_id], 7)

                assert_raises(
                    exc.NoSuchColumnError, lambda: result[0]['fake key'])
                assert_raises(
                    exc.NoSuchColumnError,
                    lambda: result[0][addresses.c.address_id])
    def test_column_error_printing(self):
        """NoSuchColumnError messages render the missing key readably."""
        row = testing.db.execute(select([1])).first()

        # an accessor whose str() raises must still produce a message
        class unprintable(object):
            def __str__(self):
                raise ValueError("nope")

        msg = r"Could not locate column in row for column '%s'"

        for accessor, repl in [
            ("x", "x"),
            (Column("q", Integer), "q"),
            (Column("q", Integer) + 12, r"q \+ :q_1"),
            (unprintable(), "unprintable element.*"),
        ]:
            assert_raises_message(
                exc.NoSuchColumnError,
                msg % repl,
                lambda: row[accessor]
            )
    @testing.requires.boolean_col_expressions
    def test_or_and_as_columns(self):
        """and_/or_/not_ expressions are selectable as boolean columns."""
        true, false = literal(True), literal(False)

        eq_(testing.db.execute(select([and_(true, false)])).scalar(), False)
        eq_(testing.db.execute(select([and_(true, true)])).scalar(), True)
        eq_(testing.db.execute(select([or_(true, false)])).scalar(), True)
        eq_(testing.db.execute(select([or_(false, false)])).scalar(), False)
        eq_(
            testing.db.execute(select([not_(or_(false, false))])).scalar(),
            True)

        # labeled boolean expressions are accessible by attribute
        row = testing.db.execute(
            select(
                [or_(false, false).label("x"),
                 and_(true, false).label("y")])).first()
        assert row.x == False  # noqa
        assert row.y == False  # noqa

        row = testing.db.execute(
            select(
                [or_(true, false).label("x"),
                 and_(true, false).label("y")])).first()
        assert row.x == True  # noqa
        assert row.y == False  # noqa
def test_fetchmany(self):
users.insert().execute(user_id=7, user_name='jack')
users.insert().execute(user_id=8, user_name='ed')
users.insert().execute(user_id=9, user_name='fred')
r = users.select().execute()
l = []
for row in r.fetchmany(size=2):
l.append(row)
self.assert_(len(l) == 2, "fetchmany(size=2) got %s rows" % len(l))
    def test_like_ops(self):
        """startswith/contains/endswith emit correct LIKE, escaping '%'."""
        users.insert().execute(
            {'user_id': 1, 'user_name': 'apples'},
            {'user_id': 2, 'user_name': 'oranges'},
            {'user_id': 3, 'user_name': 'bananas'},
            {'user_id': 4, 'user_name': 'legumes'},
            {'user_id': 5, 'user_name': 'hi % there'},
        )

        for expr, result in (
            (select([users.c.user_id]).
             where(users.c.user_name.startswith('apple')), [(1,)]),
            (select([users.c.user_id]).
             where(users.c.user_name.contains('i % t')), [(5,)]),
            (select([users.c.user_id]).
             where(users.c.user_name.endswith('anas')), [(3,)]),
            # explicit alternate escape character
            (select([users.c.user_id]).
             where(users.c.user_name.contains('i % t', escape='&')),
             [(5,)]),
        ):
            eq_(expr.execute().fetchall(), result)
    @testing.requires.mod_operator_as_percent_sign
    @testing.emits_warning('.*now automatically escapes.*')
    def test_percents_in_text(self):
        """Literal percent signs inside text() statements pass through."""
        for expr, result in (
            (text("select 6 % 10"), 6),
            (text("select 17 % 10"), 7),
            (text("select '%'"), '%'),
            (text("select '%%'"), '%%'),
            (text("select '%%%'"), '%%%'),
            (text("select 'hello % world'"), "hello % world")
        ):
            eq_(testing.db.scalar(expr), result)
    def test_ilike(self):
        """ilike() matches case-insensitively on every backend."""
        users.insert().execute(
            {'user_id': 1, 'user_name': 'one'},
            {'user_id': 2, 'user_name': 'TwO'},
            {'user_id': 3, 'user_name': 'ONE'},
            {'user_id': 4, 'user_name': 'OnE'},
        )

        eq_(
            select([users.c.user_id]).where(users.c.user_name.ilike('one')).
            execute().fetchall(), [(1, ), (3, ), (4, )])

        eq_(
            select([users.c.user_id]).where(users.c.user_name.ilike('TWO')).
            execute().fetchall(), [(2, )])

        # sanity-check against plain LIKE, which is case-sensitive on pg
        if testing.against('postgresql'):
            eq_(
                select([users.c.user_id]).
                where(users.c.user_name.like('one')).execute().fetchall(),
                [(1, )])
            eq_(
                select([users.c.user_id]).
                where(users.c.user_name.like('TWO')).execute().fetchall(), [])
def test_compiled_execute(self):
users.insert().execute(user_id=7, user_name='jack')
s = select([users], users.c.user_id == bindparam('id')).compile()
c = testing.db.connect()
assert c.execute(s, id=7).fetchall()[0]['user_id'] == 7
def test_compiled_insert_execute(self):
users.insert().compile().execute(user_id=7, user_name='jack')
s = select([users], users.c.user_id == bindparam('id')).compile()
c = testing.db.connect()
assert c.execute(s, id=7).fetchall()[0]['user_id'] == 7
    def test_repeated_bindparams(self):
        """Tests that a BindParam can be used more than once.

        This should be run for DB-APIs with both positional and named
        paramstyles.
        """
        users.insert().execute(user_id=7, user_name='jack')
        users.insert().execute(user_id=8, user_name='fred')

        # the same bindparam object appears twice in one statement
        u = bindparam('userid')
        s = users.select(and_(users.c.user_name == u, users.c.user_name == u))
        r = s.execute(userid='fred').fetchall()
        assert len(r) == 1
    def test_bindparam_detection(self):
        """text() parsing: ':name' tokens become placeholders, escaped or
        adjacent colons do not."""
        dialect = default.DefaultDialect(paramstyle='qmark')
        prep = lambda q: str(sql.text(q).compile(dialect=dialect))

        def a_eq(got, wanted):
            if got != wanted:
                print("Wanted %s" % wanted)
                print("Received %s" % got)
            self.assert_(got == wanted, got)

        a_eq(prep('select foo'), 'select foo')
        a_eq(prep("time='12:30:00'"), "time='12:30:00'")
        a_eq(prep("time='12:30:00'"), "time='12:30:00'")
        # back-to-back colons do not form bind params
        a_eq(prep(":this:that"), ":this:that")
        a_eq(prep(":this :that"), "? ?")
        a_eq(prep("(:this),(:that :other)"), "(?),(? ?)")
        a_eq(prep("(:this),(:that:other)"), "(?),(:that:other)")
        a_eq(prep("(:this),(:that,:other)"), "(?),(?,?)")
        a_eq(prep("(:that_:other)"), "(:that_:other)")
        a_eq(prep("(:that_ :other)"), "(? ?)")
        a_eq(prep("(:that_other)"), "(?)")
        a_eq(prep("(:that$other)"), "(?)")
        a_eq(prep("(:that$:other)"), "(:that$:other)")
        a_eq(prep(".:that$ :other."), ".? ?.")

        # backslash escapes a colon so it is passed through literally
        a_eq(prep(r'select \foo'), r'select \foo')
        a_eq(prep(r"time='12\:30:00'"), r"time='12\:30:00'")
        a_eq(prep(":this \:that"), "? :that")
        a_eq(prep(r"(\:that$other)"), "(:that$other)")
        a_eq(prep(r".\:that$ :other."), ".:that$ ?.")
    @testing.requires.standalone_binds
    def test_select_from_bindparam(self):
        """Test result row processing when selecting from a plain bind
        param."""

        # TypeDecorator round trip: strip the prefix on the way in,
        # re-add it on the way out
        class MyInteger(TypeDecorator):
            impl = Integer

            def process_bind_param(self, value, dialect):
                return int(value[4:])

            def process_result_value(self, value, dialect):
                return "INT_%d" % value

        eq_(
            testing.db.scalar(select([cast("INT_5", type_=MyInteger)])),
            "INT_5"
        )
        eq_(
            testing.db.scalar(
                select([cast("INT_5", type_=MyInteger).label('foo')])),
            "INT_5"
        )
    def test_order_by(self):
        """Exercises ORDER BY clause generation.

        Tests simple, compound, aliased and DESC clauses.
        """
        users.insert().execute(user_id=1, user_name='c')
        users.insert().execute(user_id=2, user_name='b')
        users.insert().execute(user_id=3, user_name='a')

        def a_eq(executable, wanted):
            got = list(executable.execute())
            eq_(got, wanted)

        # every combination is checked with and without use_labels
        for labels in False, True:
            a_eq(users.select(order_by=[users.c.user_id],
                              use_labels=labels),
                 [(1, 'c'), (2, 'b'), (3, 'a')])
            a_eq(users.select(order_by=[users.c.user_name, users.c.user_id],
                              use_labels=labels),
                 [(3, 'a'), (2, 'b'), (1, 'c')])
            a_eq(select([users.c.user_id.label('foo')],
                        use_labels=labels,
                        order_by=[users.c.user_id]),
                 [(1,), (2,), (3,)])
            a_eq(select([users.c.user_id.label('foo'), users.c.user_name],
                        use_labels=labels,
                        order_by=[users.c.user_name, users.c.user_id]),
                 [(3, 'a'), (2, 'b'), (1, 'c')])
            a_eq(users.select(distinct=True,
                              use_labels=labels,
                              order_by=[users.c.user_id]),
                 [(1, 'c'), (2, 'b'), (3, 'a')])
            a_eq(select([users.c.user_id.label('foo')],
                        distinct=True,
                        use_labels=labels,
                        order_by=[users.c.user_id]),
                 [(1,), (2,), (3,)])
            a_eq(select([users.c.user_id.label('a'),
                         users.c.user_id.label('b'),
                         users.c.user_name],
                        use_labels=labels,
                        order_by=[users.c.user_id]),
                 [(1, 1, 'c'), (2, 2, 'b'), (3, 3, 'a')])
            a_eq(users.select(distinct=True,
                              use_labels=labels,
                              order_by=[desc(users.c.user_id)]),
                 [(3, 'a'), (2, 'b'), (1, 'c')])
            a_eq(select([users.c.user_id.label('foo')],
                        distinct=True,
                        use_labels=labels,
                        order_by=[users.c.user_id.desc()]),
                 [(3,), (2,), (1,)])
    @testing.requires.nullsordering
    def test_order_by_nulls(self):
        """Exercises ORDER BY clause generation.

        Tests simple, compound, aliased and DESC clauses.
        """
        # user 1 gets a NULL user_name
        users.insert().execute(user_id=1)
        users.insert().execute(user_id=2, user_name='b')
        users.insert().execute(user_id=3, user_name='a')

        def a_eq(executable, wanted):
            got = list(executable.execute())
            eq_(got, wanted)

        for labels in False, True:
            a_eq(users.select(order_by=[users.c.user_name.nullsfirst()],
                              use_labels=labels),
                 [(1, None), (3, 'a'), (2, 'b')])
            a_eq(users.select(order_by=[users.c.user_name.nullslast()],
                              use_labels=labels),
                 [(3, 'a'), (2, 'b'), (1, None)])
            a_eq(users.select(order_by=[asc(users.c.user_name).nullsfirst()],
                              use_labels=labels),
                 [(1, None), (3, 'a'), (2, 'b')])
            a_eq(users.select(order_by=[asc(users.c.user_name).nullslast()],
                              use_labels=labels),
                 [(3, 'a'), (2, 'b'), (1, None)])
            a_eq(users.select(order_by=[users.c.user_name.desc().nullsfirst()],
                              use_labels=labels),
                 [(1, None), (2, 'b'), (3, 'a')])
            a_eq(
                users.select(
                    order_by=[users.c.user_name.desc().nullslast()],
                    use_labels=labels),
                [(2, 'b'), (3, 'a'), (1, None)])
            a_eq(
                users.select(
                    order_by=[desc(users.c.user_name).nullsfirst()],
                    use_labels=labels),
                [(1, None), (2, 'b'), (3, 'a')])
            a_eq(users.select(order_by=[desc(users.c.user_name).nullslast()],
                              use_labels=labels),
                 [(2, 'b'), (3, 'a'), (1, None)])
            a_eq(
                users.select(
                    order_by=[users.c.user_name.nullsfirst(), users.c.user_id],
                    use_labels=labels),
                [(1, None), (3, 'a'), (2, 'b')])
            a_eq(
                users.select(
                    order_by=[users.c.user_name.nullslast(), users.c.user_id],
                    use_labels=labels),
                [(3, 'a'), (2, 'b'), (1, None)])
def test_column_slices(self):
users.insert().execute(user_id=1, user_name='john')
users.insert().execute(user_id=2, user_name='jack')
addresses.insert().execute(
address_id=1, user_id=2, address='foo@bar.com')
r = text(
"select * from query_addresses", bind=testing.db).execute().first()
self.assert_(r[0:1] == (1,))
self.assert_(r[1:] == (2, 'foo@bar.com'))
self.assert_(r[:-1] == (1, 2))
    def test_column_accessor_basic_compiled(self):
        """Attribute, string-key and Column-key access all agree (compiled)."""
        users.insert().execute(
            dict(user_id=1, user_name='john'),
            dict(user_id=2, user_name='jack')
        )

        r = users.select(users.c.user_id == 2).execute().first()
        self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
        self.assert_(
            r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
    def test_column_accessor_basic_text(self):
        """Attribute, string-key and Column-key access all agree (text SQL)."""
        users.insert().execute(
            dict(user_id=1, user_name='john'),
            dict(user_id=2, user_name='jack')
        )
        r = testing.db.execute(
            text("select * from query_users where user_id=2")).first()
        self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
        self.assert_(
            r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
    def test_column_accessor_textual_select(self):
        """column() objects in a textual select resolve against result keys."""
        users.insert().execute(
            dict(user_id=1, user_name='john'),
            dict(user_id=2, user_name='jack')
        )
        # this will create column() objects inside
        # the select(), these need to match on name anyway
        r = testing.db.execute(
            select([
                column('user_id'), column('user_name')
            ]).select_from(table('query_users')).
            where(text('user_id=2'))
        ).first()
        self.assert_(r.user_id == r['user_id'] == r[users.c.user_id] == 2)
        self.assert_(
            r.user_name == r['user_name'] == r[users.c.user_name] == 'jack')
    def test_column_accessor_dotted_union(self):
        # sqlite quirk: with a UNION, cursor.description reports
        # "query_users.user_id"-style dotted names; plain-name keys must
        # still work after translation.
        users.insert().execute(
            dict(user_id=1, user_name='john'),
        )
        # test a little sqlite weirdness - with the UNION,
        # cols come back as "query_users.user_id" in cursor.description
        r = testing.db.execute(
            text(
                "select query_users.user_id, query_users.user_name "
                "from query_users "
                "UNION select query_users.user_id, "
                "query_users.user_name from query_users"
            )
        ).first()
        eq_(r['user_id'], 1)
        eq_(r['user_name'], "john")
        eq_(list(r.keys()), ["user_id", "user_name"])

    @testing.only_on("sqlite", "sqlite specific feature")
    def test_column_accessor_sqlite_raw(self):
        # with sqlite_raw_colnames set, dotted names are left untranslated:
        # only the raw dotted keys are present.
        users.insert().execute(
            dict(user_id=1, user_name='john'),
        )
        r = text(
            "select query_users.user_id, query_users.user_name "
            "from query_users "
            "UNION select query_users.user_id, "
            "query_users.user_name from query_users",
            bind=testing.db).execution_options(sqlite_raw_colnames=True). \
            execute().first()
        assert 'user_id' not in r
        assert 'user_name' not in r
        eq_(r['query_users.user_id'], 1)
        eq_(r['query_users.user_name'], "john")
        eq_(list(r.keys()), ["query_users.user_id", "query_users.user_name"])

    @testing.only_on("sqlite", "sqlite specific feature")
    def test_column_accessor_sqlite_translated(self):
        # default behavior: dotted names are translated to plain names,
        # but the raw dotted key also still resolves.
        users.insert().execute(
            dict(user_id=1, user_name='john'),
        )
        r = text(
            "select query_users.user_id, query_users.user_name "
            "from query_users "
            "UNION select query_users.user_id, "
            "query_users.user_name from query_users",
            bind=testing.db).execute().first()
        eq_(r['user_id'], 1)
        eq_(r['user_name'], "john")
        eq_(r['query_users.user_id'], 1)
        eq_(r['query_users.user_name'], "john")
        eq_(list(r.keys()), ["user_id", "user_name"])
    def test_column_accessor_labels_w_dots(self):
        # explicit dotted labels (tablename.colname) are usable as keys
        # when raw colnames are requested; the bare name must NOT match.
        users.insert().execute(
            dict(user_id=1, user_name='john'),
        )
        # test using literal tablename.colname
        r = text(
            'select query_users.user_id AS "query_users.user_id", '
            'query_users.user_name AS "query_users.user_name" '
            'from query_users', bind=testing.db).\
            execution_options(sqlite_raw_colnames=True).execute().first()
        eq_(r['query_users.user_id'], 1)
        eq_(r['query_users.user_name'], "john")
        assert "user_name" not in r
        eq_(list(r.keys()), ["query_users.user_id", "query_users.user_name"])

    def test_column_accessor_unary(self):
        users.insert().execute(
            dict(user_id=1, user_name='john'),
        )
        # unary expressions (e.g. DISTINCT) still target the underlying
        # column by object and by attribute
        r = select([users.c.user_name.distinct()]).order_by(
            users.c.user_name).execute().first()
        eq_(r[users.c.user_name], 'john')
        eq_(r.user_name, 'john')

    def test_column_accessor_err(self):
        # missing columns raise AttributeError via getattr and KeyError
        # via item access, both with the same message.
        r = testing.db.execute(select([1])).first()
        assert_raises_message(
            AttributeError,
            "Could not locate column in row for column 'foo'",
            getattr, r, "foo"
        )
        assert_raises_message(
            KeyError,
            "Could not locate column in row for column 'foo'",
            lambda: r['foo']
        )
    def test_graceful_fetch_on_non_rows(self):
        """test that calling fetchone() etc. on a result that doesn't
        return rows fails gracefully.
        """
        # these proxies don't work with no cursor.description present.
        # so they don't apply to this test at the moment.
        # result.FullyBufferedResultProxy,
        # result.BufferedRowResultProxy,
        # result.BufferedColumnResultProxy
        conn = testing.db.connect()
        for meth in [
            lambda r: r.fetchone(),
            lambda r: r.fetchall(),
            lambda r: r.first(),
            lambda r: r.scalar(),
            lambda r: r.fetchmany(),
            lambda r: r._getter('user'),
            lambda r: r._has_key('user'),
        ]:
            # each fetch method on an INSERT result raises
            # ResourceClosedError; insert is rolled back each pass
            trans = conn.begin()
            result = conn.execute(users.insert(), user_id=1)
            assert_raises_message(
                exc.ResourceClosedError,
                "This result object does not return rows. "
                "It has been closed automatically.",
                meth, result,
            )
            trans.rollback()

    @testing.requires.empty_inserts
    @testing.requires.returning
    def test_no_inserted_pk_on_returning(self):
        # inserted_primary_key is unavailable once returning() is used
        result = testing.db.execute(users.insert().returning(
            users.c.user_id, users.c.user_name))
        assert_raises_message(
            exc.InvalidRequestError,
            r"Can't call inserted_primary_key when returning\(\) is used.",
            getattr, result, 'inserted_primary_key'
        )

    def test_fetchone_til_end(self):
        # fetchone() keeps returning None on an exhausted (empty) result;
        # after close() it raises ResourceClosedError instead
        result = testing.db.execute("select * from query_users")
        eq_(result.fetchone(), None)
        eq_(result.fetchone(), None)
        eq_(result.fetchone(), None)
        result.close()
        assert_raises_message(
            exc.ResourceClosedError,
            "This result object is closed.",
            result.fetchone
        )
    def test_row_case_sensitive(self):
        # default (case_sensitive=True): only exact-case keys resolve
        row = testing.db.execute(
            select([
                literal_column("1").label("case_insensitive"),
                literal_column("2").label("CaseSensitive")
            ])
        ).first()
        eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
        in_("case_insensitive", row._keymap)
        in_("CaseSensitive", row._keymap)
        not_in_("casesensitive", row._keymap)
        eq_(row["case_insensitive"], 1)
        eq_(row["CaseSensitive"], 2)
        assert_raises(
            KeyError,
            lambda: row["Case_insensitive"]
        )
        assert_raises(
            KeyError,
            lambda: row["casesensitive"]
        )

    def test_row_case_sensitive_unoptimized(self):
        # the extra text() column defeats the "optimized" result-map
        # path; case behavior must be identical on the fallback path
        ins_db = engines.testing_engine(options={"case_sensitive": True})
        row = ins_db.execute(
            select([
                literal_column("1").label("case_insensitive"),
                literal_column("2").label("CaseSensitive"),
                text("3 AS screw_up_the_cols")
            ])
        ).first()
        eq_(
            list(row.keys()),
            ["case_insensitive", "CaseSensitive", "screw_up_the_cols"])
        in_("case_insensitive", row._keymap)
        in_("CaseSensitive", row._keymap)
        not_in_("casesensitive", row._keymap)
        eq_(row["case_insensitive"], 1)
        eq_(row["CaseSensitive"], 2)
        eq_(row["screw_up_the_cols"], 3)
        assert_raises(KeyError, lambda: row["Case_insensitive"])
        assert_raises(KeyError, lambda: row["casesensitive"])
        assert_raises(KeyError, lambda: row["screw_UP_the_cols"])

    def test_row_case_insensitive(self):
        # case_sensitive=False: lower-cased variants also resolve
        ins_db = engines.testing_engine(options={"case_sensitive": False})
        row = ins_db.execute(
            select([
                literal_column("1").label("case_insensitive"),
                literal_column("2").label("CaseSensitive")
            ])
        ).first()
        eq_(list(row.keys()), ["case_insensitive", "CaseSensitive"])
        in_("case_insensitive", row._keymap)
        in_("CaseSensitive", row._keymap)
        in_("casesensitive", row._keymap)
        eq_(row["case_insensitive"], 1)
        eq_(row["CaseSensitive"], 2)
        eq_(row["Case_insensitive"], 1)
        eq_(row["casesensitive"], 2)

    def test_row_case_insensitive_unoptimized(self):
        # case-insensitive matching on the unoptimized result-map path
        ins_db = engines.testing_engine(options={"case_sensitive": False})
        row = ins_db.execute(
            select([
                literal_column("1").label("case_insensitive"),
                literal_column("2").label("CaseSensitive"),
                text("3 AS screw_up_the_cols")
            ])
        ).first()
        eq_(
            list(row.keys()),
            ["case_insensitive", "CaseSensitive", "screw_up_the_cols"])
        in_("case_insensitive", row._keymap)
        in_("CaseSensitive", row._keymap)
        in_("casesensitive", row._keymap)
        eq_(row["case_insensitive"], 1)
        eq_(row["CaseSensitive"], 2)
        eq_(row["screw_up_the_cols"], 3)
        eq_(row["Case_insensitive"], 1)
        eq_(row["casesensitive"], 2)
        eq_(row["screw_UP_the_cols"], 3)
    def test_row_as_args(self):
        # a RowProxy is accepted directly as an insert() parameter source
        users.insert().execute(user_id=1, user_name='john')
        r = users.select(users.c.user_id == 1).execute().first()
        users.delete().execute()
        users.insert().execute(r)
        eq_(users.select().execute().fetchall(), [(1, 'john')])

    def test_result_as_args(self):
        # a list of rows works both as a single list argument and
        # unpacked as positional arguments
        users.insert().execute([
            dict(user_id=1, user_name='john'),
            dict(user_id=2, user_name='ed')])
        r = users.select().execute()
        users2.insert().execute(list(r))
        eq_(
            users2.select().order_by(users2.c.user_id).execute().fetchall(),
            [(1, 'john'), (2, 'ed')]
        )
        users2.delete().execute()
        r = users.select().execute()
        users2.insert().execute(*list(r))
        eq_(
            users2.select().order_by(users2.c.user_id).execute().fetchall(),
            [(1, 'john'), (2, 'ed')]
        )
    @testing.requires.duplicate_names_in_cursor_description
    def test_ambiguous_column(self):
        # duplicate 'user_id' from an outer join: access by string or by
        # either Column raises InvalidRequestError; behavior survives
        # pickling and the BufferedColumn result proxy.
        users.insert().execute(user_id=1, user_name='john')
        result = users.outerjoin(addresses).select().execute()
        r = result.first()
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: r['user_id']
        )
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: r[users.c.user_id]
        )
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: r[addresses.c.user_id]
        )
        # try to trick it - fake_table isn't in the result!
        # we get the correct error
        fake_table = Table('fake', MetaData(), Column('user_id', Integer))
        assert_raises_message(
            exc.InvalidRequestError,
            "Could not locate column in row for column 'fake.user_id'",
            lambda: r[fake_table.c.user_id]
        )
        r = util.pickle.loads(util.pickle.dumps(r))
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: r['user_id']
        )
        result = users.outerjoin(addresses).select().execute()
        result = _result.BufferedColumnResultProxy(result.context)
        r = result.first()
        assert isinstance(r, _result.BufferedColumnRow)
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: r['user_id']
        )

    @testing.requires.duplicate_names_in_cursor_description
    def test_ambiguous_column_by_col(self):
        # same column selected from the table and from an alias of it
        users.insert().execute(user_id=1, user_name='john')
        ua = users.alias()
        u2 = users.alias()
        result = select([users.c.user_id, ua.c.user_id]).execute()
        row = result.first()
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: row[users.c.user_id]
        )
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: row[ua.c.user_id]
        )
        # Unfortunately, this fails -
        # we'd like
        # "Could not locate column in row"
        # to be raised here, but the check for
        # "common column" in _compare_name_for_result()
        # has other requirements to be more liberal.
        # Ultimately the
        # expression system would need a way to determine
        # if given two columns in a "proxy" relationship, if they
        # refer to a different parent table
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name",
            lambda: row[u2.c.user_id]
        )

    @testing.requires.duplicate_names_in_cursor_description
    def test_ambiguous_column_contains(self):
        # ticket 2702. in 0.7 we'd get True, False.
        # in 0.8, both columns are present so it's True;
        # but when they're fetched you'll get the ambiguous error.
        users.insert().execute(user_id=1, user_name='john')
        result = select([users.c.user_id, addresses.c.user_id]).\
            select_from(users.outerjoin(addresses)).execute()
        row = result.first()
        eq_(
            set([users.c.user_id in row, addresses.c.user_id in row]),
            set([True])
        )

    def test_ambiguous_column_by_col_plus_label(self):
        # a label()-ed duplicate disambiguates: the Column targets the
        # unlabeled occurrence
        users.insert().execute(user_id=1, user_name='john')
        result = select(
            [users.c.user_id,
             type_coerce(users.c.user_id, Integer).label('foo')]).execute()
        row = result.first()
        eq_(
            row[users.c.user_id], 1
        )
        eq_(
            row[1], 1
        )
    def test_fetch_partial_result_map(self):
        # text().columns() may describe only a subset of the result columns
        users.insert().execute(user_id=7, user_name='ed')
        t = text("select * from query_users").columns(
            user_name=String()
        )
        eq_(
            testing.db.execute(t).fetchall(), [(7, 'ed')]
        )

    def test_fetch_unordered_result_map(self):
        # columns() entries are matched by name, not position; each
        # TypeDecorator appends a distinct marker so we can see which
        # type processed which column.
        users.insert().execute(user_id=7, user_name='ed')

        class Goofy1(TypeDecorator):
            impl = String

            def process_result_value(self, value, dialect):
                return value + "a"

        class Goofy2(TypeDecorator):
            impl = String

            def process_result_value(self, value, dialect):
                return value + "b"

        class Goofy3(TypeDecorator):
            impl = String

            def process_result_value(self, value, dialect):
                return value + "c"

        t = text(
            "select user_name as a, user_name as b, "
            "user_name as c from query_users").columns(
            a=Goofy1(), b=Goofy2(), c=Goofy3()
        )
        eq_(
            testing.db.execute(t).fetchall(), [
                ('eda', 'edb', 'edc')
            ]
        )

    @testing.requires.subqueries
    def test_column_label_targeting(self):
        # rows from a select-of-select alias are addressable via the
        # alias's .c collection
        users.insert().execute(user_id=7, user_name='ed')
        for s in (
            users.select().alias('foo'),
            users.select().alias(users.name),
        ):
            row = s.select(use_labels=True).execute().first()
            assert row[s.c.user_id] == 7
            assert row[s.c.user_name] == 'ed'
    def test_keys(self):
        # both the result and each row expose keys() in selection order
        users.insert().execute(user_id=1, user_name='foo')
        result = users.select().execute()
        eq_(
            result.keys(),
            ['user_id', 'user_name']
        )
        row = result.first()
        eq_(
            row.keys(),
            ['user_id', 'user_name']
        )

    def test_keys_anon_labels(self):
        """test [ticket:3483]"""
        # anonymous labels (label(None), count()) get deterministic
        # generated names like user_name_1 / count_1
        users.insert().execute(user_id=1, user_name='foo')
        result = testing.db.execute(
            select([
                users.c.user_id,
                users.c.user_name.label(None),
                func.count(literal_column('1'))]).
            group_by(users.c.user_id, users.c.user_name)
        )
        eq_(
            result.keys(),
            ['user_id', 'user_name_1', 'count_1']
        )
        row = result.first()
        eq_(
            row.keys(),
            ['user_id', 'user_name_1', 'count_1']
        )

    def test_items(self):
        # items() yields (key, value) pairs; keys lower-cased here to be
        # dialect-neutral
        users.insert().execute(user_id=1, user_name='foo')
        r = users.select().execute().first()
        eq_(
            [(x[0].lower(), x[1]) for x in list(r.items())],
            [('user_id', 1), ('user_name', 'foo')])

    def test_len(self):
        # len(row) equals the number of selected columns
        users.insert().execute(user_id=1, user_name='foo')
        r = users.select().execute().first()
        eq_(len(r), 2)
        r = testing.db.execute('select user_name, user_id from query_users'). \
            first()
        eq_(len(r), 2)
        r = testing.db.execute('select user_name from query_users').first()
        eq_(len(r), 1)
    def test_sorting_in_python(self):
        # rows compare like tuples, so sorted() works on fetched rows
        users.insert().execute(
            dict(user_id=1, user_name='foo'),
            dict(user_id=2, user_name='bar'),
            dict(user_id=3, user_name='def'),
        )
        rows = users.select().order_by(users.c.user_name).execute().fetchall()
        eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')])
        eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')])

    def test_column_order_with_simple_query(self):
        # should return values in column definition order
        users.insert().execute(user_id=1, user_name='foo')
        r = users.select(users.c.user_id == 1).execute().first()
        eq_(r[0], 1)
        eq_(r[1], 'foo')
        eq_([x.lower() for x in list(r.keys())], ['user_id', 'user_name'])
        eq_(list(r.values()), [1, 'foo'])

    def test_column_order_with_text_query(self):
        # should return values in query order
        users.insert().execute(user_id=1, user_name='foo')
        r = testing.db.execute('select user_name, user_id from query_users'). \
            first()
        eq_(r[0], 'foo')
        eq_(r[1], 1)
        eq_([x.lower() for x in list(r.keys())], ['user_name', 'user_id'])
        eq_(list(r.values()), ['foo', 1])
@testing.crashes('oracle', 'FIXME: unknown, varify not fails_on()')
@testing.crashes('firebird', 'An identifier must begin with a letter')
def test_column_accessor_shadow(self):
meta = MetaData(testing.db)
shadowed = Table(
'test_shadowed', meta,
Column('shadow_id', INT, primary_key=True),
Column('shadow_name', VARCHAR(20)),
Column('parent', VARCHAR(20)),
Column('row', VARCHAR(40)),
Column('_parent', VARCHAR(20)),
Column('_row', VARCHAR(20)),
)
shadowed.create(checkfirst=True)
try:
shadowed.insert().execute(
shadow_id=1, shadow_name='The Shadow', parent='The Light',
row='Without light there is no shadow',
_parent='Hidden parent', _row='Hidden row')
r = shadowed.select(shadowed.c.shadow_id == 1).execute().first()
self.assert_(
r.shadow_id == r['shadow_id'] == r[shadowed.c.shadow_id] == 1)
self.assert_(
r.shadow_name == r['shadow_name'] ==
r[shadowed.c.shadow_name] == 'The Shadow')
self.assert_(
r.parent == r['parent'] == r[shadowed.c.parent] == 'The Light')
self.assert_(
r.row == r['row'] == r[shadowed.c.row] ==
'Without light there is no shadow')
self.assert_(r['_parent'] == 'Hidden parent')
self.assert_(r['_row'] == 'Hidden row')
finally:
shadowed.drop(checkfirst=True)
    @testing.emits_warning('.*empty sequence.*')
    def test_in_filtering(self):
        """test the behavior of the in_() function."""
        users.insert().execute(user_id=7, user_name='jack')
        users.insert().execute(user_id=8, user_name='fred')
        users.insert().execute(user_id=9, user_name=None)
        s = users.select(users.c.user_name.in_([]))
        r = s.execute().fetchall()
        # No username is in empty set
        assert len(r) == 0
        s = users.select(not_(users.c.user_name.in_([])))
        r = s.execute().fetchall()
        # All usernames with a value are outside an empty set
        assert len(r) == 2
        s = users.select(users.c.user_name.in_(['jack', 'fred']))
        r = s.execute().fetchall()
        assert len(r) == 2
        s = users.select(not_(users.c.user_name.in_(['jack', 'fred'])))
        r = s.execute().fetchall()
        # Null values are not outside any set
        assert len(r) == 0

    @testing.emits_warning('.*empty sequence.*')
    @testing.fails_on('firebird', "uses sql-92 rules")
    @testing.fails_on('sybase', "uses sql-92 rules")
    @testing.fails_if(
        lambda: testing.against('mssql+pyodbc') and not
        testing.db.dialect.freetds, "uses sql-92 rules")
    def test_bind_in(self):
        """test calling IN against a bind parameter.
        this isn't allowed on several platforms since we
        generate ? = ?.
        """
        users.insert().execute(user_id=7, user_name='jack')
        users.insert().execute(user_id=8, user_name='fred')
        users.insert().execute(user_id=9, user_name=None)
        u = bindparam('search_key')
        s = users.select(not_(u.in_([])))
        r = s.execute(search_key='john').fetchall()
        assert len(r) == 3
        r = s.execute(search_key=None).fetchall()
        assert len(r) == 0

    @testing.emits_warning('.*empty sequence.*')
    def test_literal_in(self):
        """similar to test_bind_in but use a bind with a value."""
        users.insert().execute(user_id=7, user_name='jack')
        users.insert().execute(user_id=8, user_name='fred')
        users.insert().execute(user_id=9, user_name=None)
        s = users.select(not_(literal("john").in_([])))
        r = s.execute().fetchall()
        assert len(r) == 3

    @testing.emits_warning('.*empty sequence.*')
    @testing.requires.boolean_col_expressions
    def test_in_filtering_advanced(self):
        """test the behavior of the in_() function when
        comparing against an empty collection, specifically
        that a proper boolean value is generated.
        """
        users.insert().execute(user_id=7, user_name='jack')
        users.insert().execute(user_id=8, user_name='fred')
        users.insert().execute(user_id=9, user_name=None)
        s = users.select(users.c.user_name.in_([]) == True)  # noqa
        r = s.execute().fetchall()
        assert len(r) == 0
        s = users.select(users.c.user_name.in_([]) == False)  # noqa
        r = s.execute().fetchall()
        assert len(r) == 2
        s = users.select(users.c.user_name.in_([]) == None)  # noqa
        r = s.execute().fetchall()
        # NULL IN () == NULL matches only the row whose user_name is NULL
        assert len(r) == 1
class RequiredBindTest(fixtures.TablesTest):
    """Executing a statement without supplying a value for a 'required'
    bind parameter must raise StatementError.
    """

    # the table is never created/populated; statements are only compiled
    # and executed far enough to hit the bind-parameter check
    run_create_tables = None
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table(
            'foo', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', String(50)),
            Column('x', Integer)
        )

    def _assert_raises(self, stmt, params):
        # both **kwargs-style and dict-style parameter passing must raise
        assert_raises_message(
            exc.StatementError,
            "A value is required for bind parameter 'x'",
            testing.db.execute, stmt, **params)
        assert_raises_message(
            exc.StatementError,
            "A value is required for bind parameter 'x'",
            testing.db.execute, stmt, params)

    def test_insert(self):
        stmt = self.tables.foo.insert().values(
            x=bindparam('x'), data=bindparam('data'))
        self._assert_raises(stmt, {'data': 'data'})

    def test_select_where(self):
        stmt = select([self.tables.foo]). \
            where(self.tables.foo.c.data == bindparam('data')). \
            where(self.tables.foo.c.x == bindparam('x'))
        self._assert_raises(stmt, {'data': 'data'})

    @testing.requires.standalone_binds
    def test_select_columns(self):
        stmt = select([bindparam('data'), bindparam('x')])
        self._assert_raises(
            stmt, {'data': 'data'}
        )

    def test_text(self):
        stmt = text("select * from foo where x=:x and data=:data1")
        self._assert_raises(
            stmt, {'data1': 'data'}
        )

    def test_required_flag(self):
        # a bindparam with neither a value nor a callable is implicitly
        # required; an explicit required= flag always wins
        is_(bindparam('foo').required, True)
        is_(bindparam('foo', required=False).required, False)
        is_(bindparam('foo', 'bar').required, False)
        is_(bindparam('foo', 'bar', required=True).required, True)

        # PEP 8 (E731): use a def rather than assigning a lambda
        def c():
            return None

        is_(bindparam('foo', callable_=c, required=True).required, True)
        is_(bindparam('foo', callable_=c).required, False)
        is_(bindparam('foo', callable_=c, required=False).required, False)
class TableInsertTest(fixtures.TablesTest):
    """test for consistent insert behavior across dialects
    regarding the inline=True flag, lower-case 't' tables.
    """
    run_create_tables = 'each'
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        # single table with a Sequence-backed integer primary key
        Table(
            'foo', metadata,
            Column('id', Integer, Sequence('t_id_seq'), primary_key=True),
            Column('data', String(50)),
            Column('x', Integer)
        )

    def _fixture(self, types=True):
        # lightweight lower-case sql.table() pointing at the same 'foo';
        # with types=False the columns carry no type information
        if types:
            t = sql.table(
                'foo', sql.column('id', Integer),
                sql.column('data', String),
                sql.column('x', Integer))
        else:
            t = sql.table(
                'foo', sql.column('id'), sql.column('data'), sql.column('x'))
        return t

    def _test(self, stmt, row, returning=None, inserted_primary_key=False):
        # run stmt, then check RETURNING tuple or inserted_primary_key
        # (only if explicitly given), then the row actually in the table
        r = testing.db.execute(stmt)
        if returning:
            returned = r.first()
            eq_(returned, returning)
        elif inserted_primary_key is not False:
            eq_(r.inserted_primary_key, inserted_primary_key)
        eq_(testing.db.execute(self.tables.foo.select()).first(), row)

    def _test_multi(self, stmt, rows, data):
        # executemany-style insert, then compare full ordered contents
        testing.db.execute(stmt, rows)
        eq_(
            testing.db.execute(
                self.tables.foo.select().
                order_by(self.tables.foo.c.id)).fetchall(),
            data)

    @testing.requires.sequences
    def test_expicit_sequence(self):
        # NOTE(review): method name has a typo ("expicit" -> "explicit");
        # left unchanged to avoid altering the test id.
        t = self._fixture()
        self._test(
            t.insert().values(
                id=func.next_value(Sequence('t_id_seq')), data='data', x=5),
            (1, 'data', 5)
        )

    def test_uppercase(self):
        # real Table ("uppercase" style): pk is known after execute
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )

    def test_uppercase_inline(self):
        t = self.tables.foo
        self._test(
            t.insert(inline=True).values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )

    @testing.crashes(
        "mssql+pyodbc",
        "Pyodbc + SQL Server + Py3K, some decimal handling issue")
    def test_uppercase_inline_implicit(self):
        # inline insert without an explicit id: pk not retrievable
        t = self.tables.foo
        self._test(
            t.insert(inline=True).values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[None]
        )

    def test_uppercase_implicit(self):
        t = self.tables.foo
        self._test(
            t.insert().values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )

    def test_uppercase_direct_params(self):
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[1]
        )

    @testing.requires.returning
    def test_uppercase_direct_params_returning(self):
        t = self.tables.foo
        self._test(
            t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
            (1, 'data', 5),
            returning=(1, 5)
        )

    @testing.fails_on(
        'mssql', "lowercase table doesn't support identity insert disable")
    def test_direct_params(self):
        # lower-case table(): no primary-key metadata, so pk list is empty
        t = self._fixture()
        self._test(
            t.insert().values(id=1, data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[]
        )

    @testing.fails_on(
        'mssql', "lowercase table doesn't support identity insert disable")
    @testing.requires.returning
    def test_direct_params_returning(self):
        t = self._fixture()
        self._test(
            t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
            (1, 'data', 5),
            returning=(1, 5)
        )

    @testing.requires.emulated_lastrowid
    def test_implicit_pk(self):
        t = self._fixture()
        self._test(
            t.insert().values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[]
        )

    @testing.requires.emulated_lastrowid
    def test_implicit_pk_multi_rows(self):
        t = self._fixture()
        self._test_multi(
            t.insert(),
            [
                {'data': 'd1', 'x': 5},
                {'data': 'd2', 'x': 6},
                {'data': 'd3', 'x': 7},
            ],
            [
                (1, 'd1', 5),
                (2, 'd2', 6),
                (3, 'd3', 7)
            ],
        )

    @testing.requires.emulated_lastrowid
    def test_implicit_pk_inline(self):
        t = self._fixture()
        self._test(
            t.insert(inline=True).values(data='data', x=5),
            (1, 'data', 5),
            inserted_primary_key=[]
        )
class KeyTargetingTest(fixtures.TablesTest):
    # Tests for how row values are targeted when Column(key=...) differs
    # from the database column name, including labeled and schema cases.
    run_inserts = 'once'
    run_deletes = None
    __backend__ = True

    @classmethod
    def define_tables(cls, metadata):
        # keyed1: db names a/c, keys b/q; keyed2/keyed3/keyed4 overlap
        # with those names and keys in different combinations
        Table(
            'keyed1', metadata, Column("a", CHAR(2), key="b"),
            Column("c", CHAR(2), key="q")
        )
        Table('keyed2', metadata, Column("a", CHAR(2)), Column("b", CHAR(2)))
        Table('keyed3', metadata, Column("a", CHAR(2)), Column("d", CHAR(2)))
        Table('keyed4', metadata, Column("b", CHAR(2)), Column("q", CHAR(2)))
        Table('content', metadata, Column('t', String(30), key="type"))
        Table('bar', metadata, Column('ctype', String(30), key="content_type"))
        if testing.requires.schemas.enabled:
            # same shape as keyed1 but in an alternate schema
            Table(
                'wschema', metadata,
                Column("a", CHAR(2), key="b"),
                Column("c", CHAR(2), key="q"),
                schema=testing.config.test_schema
            )

    @classmethod
    def insert_data(cls):
        cls.tables.keyed1.insert().execute(dict(b="a1", q="c1"))
        cls.tables.keyed2.insert().execute(dict(a="a2", b="b2"))
        cls.tables.keyed3.insert().execute(dict(a="a3", d="d3"))
        cls.tables.keyed4.insert().execute(dict(b="b4", q="q4"))
        cls.tables.content.insert().execute(type="t1")
        if testing.requires.schemas.enabled:
            cls.tables['%s.wschema' % testing.config.test_schema].insert().execute(
                dict(b="a1", q="c1"))

    @testing.requires.schemas
    def test_keyed_accessor_wschema(self):
        # both key (b/q) and db name (a/c) resolve on a schema table
        keyed1 = self.tables['%s.wschema' % testing.config.test_schema]
        row = testing.db.execute(keyed1.select()).first()
        eq_(row.b, "a1")
        eq_(row.q, "c1")
        eq_(row.a, "a1")
        eq_(row.c, "c1")

    def test_keyed_accessor_single(self):
        # both key (b/q) and db name (a/c) resolve for a single table
        keyed1 = self.tables.keyed1
        row = testing.db.execute(keyed1.select()).first()
        eq_(row.b, "a1")
        eq_(row.q, "c1")
        eq_(row.a, "a1")
        eq_(row.c, "c1")

    def test_keyed_accessor_single_labeled(self):
        # apply_labels(): both key-based and name-based labels resolve
        keyed1 = self.tables.keyed1
        row = testing.db.execute(keyed1.select().apply_labels()).first()
        eq_(row.keyed1_b, "a1")
        eq_(row.keyed1_q, "c1")
        eq_(row.keyed1_a, "a1")
        eq_(row.keyed1_c, "c1")

    @testing.requires.duplicate_names_in_cursor_description
    def test_keyed_accessor_composite_conflict_2(self):
        keyed1 = self.tables.keyed1
        keyed2 = self.tables.keyed2
        row = testing.db.execute(select([keyed1, keyed2])).first()
        # row.b is unambiguous
        eq_(row.b, "b2")
        # row.a is ambiguous
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambig",
            getattr, row, "a"
        )

    def test_keyed_accessor_composite_names_precedent(self):
        # keyed4's real names b/q win over keyed1's keys b/q
        keyed1 = self.tables.keyed1
        keyed4 = self.tables.keyed4
        row = testing.db.execute(select([keyed1, keyed4])).first()
        eq_(row.b, "b4")
        eq_(row.q, "q4")
        eq_(row.a, "a1")
        eq_(row.c, "c1")

    @testing.requires.duplicate_names_in_cursor_description
    def test_keyed_accessor_composite_keys_precedent(self):
        # keyed1.q resolves; b and a are ambiguous across the two tables
        keyed1 = self.tables.keyed1
        keyed3 = self.tables.keyed3
        row = testing.db.execute(select([keyed1, keyed3])).first()
        eq_(row.q, "c1")
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name 'b'",
            getattr, row, "b"
        )
        assert_raises_message(
            exc.InvalidRequestError,
            "Ambiguous column name 'a'",
            getattr, row, "a"
        )
        eq_(row.d, "d3")

    def test_keyed_accessor_composite_labeled(self):
        # apply_labels() fully disambiguates; cross combinations of
        # keyed2 with keyed1's keys do not exist
        keyed1 = self.tables.keyed1
        keyed2 = self.tables.keyed2
        row = testing.db.execute(select([keyed1, keyed2]).apply_labels()). \
            first()
        eq_(row.keyed1_b, "a1")
        eq_(row.keyed1_a, "a1")
        eq_(row.keyed1_q, "c1")
        eq_(row.keyed1_c, "c1")
        eq_(row.keyed2_a, "a2")
        eq_(row.keyed2_b, "b2")
        assert_raises(KeyError, lambda: row['keyed2_c'])
        assert_raises(KeyError, lambda: row['keyed2_q'])

    def test_column_label_overlap_fallback(self):
        # a label equal to another table's column key matches only a
        # plain column() of that name, not the Column objects themselves
        content, bar = self.tables.content, self.tables.bar
        row = testing.db.execute(
            select([content.c.type.label("content_type")])).first()
        assert content.c.type not in row
        assert bar.c.content_type not in row
        assert sql.column('content_type') in row
        row = testing.db.execute(select([func.now().label("content_type")])). \
            first()
        assert content.c.type not in row
        assert bar.c.content_type not in row
        assert sql.column('content_type') in row

    def test_column_label_overlap_fallback_2(self):
        # with use_labels the real Column matches and the bare name no
        # longer does
        content, bar = self.tables.content, self.tables.bar
        row = testing.db.execute(content.select(use_labels=True)).first()
        assert content.c.type in row
        assert bar.c.content_type not in row
        assert sql.column('content_type') not in row

    def test_columnclause_schema_column_one(self):
        keyed2 = self.tables.keyed2
        # this is addressed by [ticket:2932]
        # ColumnClause._compare_name_for_result allows the
        # columns which the statement is against to be lightweight
        # cols, which results in a more liberal comparison scheme
        a, b = sql.column('a'), sql.column('b')
        stmt = select([a, b]).select_from(table("keyed2"))
        row = testing.db.execute(stmt).first()
        assert keyed2.c.a in row
        assert keyed2.c.b in row
        assert a in row
        assert b in row

    def test_columnclause_schema_column_two(self):
        # statement built from real Columns; plain column() still matches
        keyed2 = self.tables.keyed2
        a, b = sql.column('a'), sql.column('b')
        stmt = select([keyed2.c.a, keyed2.c.b])
        row = testing.db.execute(stmt).first()
        assert keyed2.c.a in row
        assert keyed2.c.b in row
        assert a in row
        assert b in row

    def test_columnclause_schema_column_three(self):
        keyed2 = self.tables.keyed2
        # this is also addressed by [ticket:2932]
        a, b = sql.column('a'), sql.column('b')
        stmt = text("select a, b from keyed2").columns(a=CHAR, b=CHAR)
        row = testing.db.execute(stmt).first()
        assert keyed2.c.a in row
        assert keyed2.c.b in row
        assert a in row
        assert b in row
        assert stmt.c.a in row
        assert stmt.c.b in row

    def test_columnclause_schema_column_four(self):
        keyed2 = self.tables.keyed2
        # this is also addressed by [ticket:2932]
        a, b = sql.column('keyed2_a'), sql.column('keyed2_b')
        stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
            a, b)
        row = testing.db.execute(stmt).first()
        assert keyed2.c.a in row
        assert keyed2.c.b in row
        assert a in row
        assert b in row
        assert stmt.c.keyed2_a in row
        assert stmt.c.keyed2_b in row

    def test_columnclause_schema_column_five(self):
        keyed2 = self.tables.keyed2
        # this is also addressed by [ticket:2932]
        stmt = text("select a AS keyed2_a, b AS keyed2_b from keyed2").columns(
            keyed2_a=CHAR, keyed2_b=CHAR)
        row = testing.db.execute(stmt).first()
        assert keyed2.c.a in row
        assert keyed2.c.b in row
        assert stmt.c.keyed2_a in row
        assert stmt.c.keyed2_b in row
class LimitTest(fixtures.TestBase):
    """Tests for LIMIT / OFFSET and their interaction with DISTINCT."""
    __backend__ = True

    @classmethod
    def setup_class(cls):
        # seven users; addresses contain duplicate values (addr1 twice,
        # addr5 twice) so DISTINCT has something to collapse
        global users, addresses, metadata
        metadata = MetaData(testing.db)
        users = Table(
            'query_users', metadata,
            Column('user_id', INT, primary_key=True),
            Column('user_name', VARCHAR(20)),
        )
        addresses = Table(
            'query_addresses', metadata,
            Column('address_id', Integer, primary_key=True),
            Column('user_id', Integer, ForeignKey('query_users.user_id')),
            Column('address', String(30)))
        metadata.create_all()
        users.insert().execute(user_id=1, user_name='john')
        addresses.insert().execute(address_id=1, user_id=1, address='addr1')
        users.insert().execute(user_id=2, user_name='jack')
        addresses.insert().execute(address_id=2, user_id=2, address='addr1')
        users.insert().execute(user_id=3, user_name='ed')
        addresses.insert().execute(address_id=3, user_id=3, address='addr2')
        users.insert().execute(user_id=4, user_name='wendy')
        addresses.insert().execute(address_id=4, user_id=4, address='addr3')
        users.insert().execute(user_id=5, user_name='laura')
        addresses.insert().execute(address_id=5, user_id=5, address='addr4')
        users.insert().execute(user_id=6, user_name='ralph')
        addresses.insert().execute(address_id=6, user_id=6, address='addr5')
        users.insert().execute(user_id=7, user_name='fido')
        addresses.insert().execute(address_id=7, user_id=7, address='addr5')

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    def test_select_limit(self):
        r = users.select(limit=3, order_by=[users.c.user_id]).execute(). \
            fetchall()
        self.assert_(r == [(1, 'john'), (2, 'jack'), (3, 'ed')], repr(r))

    @testing.requires.offset
    def test_select_limit_offset(self):
        """Test the interaction between limit and offset"""
        r = users.select(limit=3, offset=2, order_by=[users.c.user_id]). \
            execute().fetchall()
        self.assert_(r == [(3, 'ed'), (4, 'wendy'), (5, 'laura')])
        r = users.select(offset=5, order_by=[users.c.user_id]).execute(). \
            fetchall()
        self.assert_(r == [(6, 'ralph'), (7, 'fido')])

    def test_select_distinct_limit(self):
        """Test the interaction between limit and distinct"""
        r = sorted(
            [x[0] for x in select([addresses.c.address]).distinct().
                limit(3).order_by(addresses.c.address).execute().fetchall()])
        self.assert_(len(r) == 3, repr(r))
        self.assert_(r[0] != r[1] and r[1] != r[2], repr(r))

    @testing.requires.offset
    @testing.fails_on('mssql', 'FIXME: unknown')
    def test_select_distinct_offset(self):
        """Test the interaction between distinct and offset"""
        r = sorted(
            [x[0] for x in select([addresses.c.address]).distinct().
                offset(1).order_by(addresses.c.address).
                execute().fetchall()])
        eq_(len(r), 4)
        # fix: the last comparison read "r[2] != [3]" (a string compared
        # against the literal list [3], which is always True); compare
        # the adjacent sorted rows r[2] and r[3] instead
        self.assert_(r[0] != r[1] and r[1] != r[2] and r[2] != r[3], repr(r))

    @testing.requires.offset
    def test_select_distinct_limit_offset(self):
        """Test the interaction between limit and limit/offset"""
        r = select([addresses.c.address]).order_by(addresses.c.address). \
            distinct().offset(2).limit(3).execute().fetchall()
        self.assert_(len(r) == 3, repr(r))
        self.assert_(r[0] != r[1] and r[1] != r[2], repr(r))
class CompoundTest(fixtures.TestBase):

    """test compound statements like UNION, INTERSECT, particularly their
    ability to nest on different databases."""

    __backend__ = True

    @classmethod
    def setup_class(cls):
        # Three identically-shaped tables so compound SELECTs can be
        # combined and nested freely; rows are laid out so that each
        # compound operator produces a distinctive result set.
        global metadata, t1, t2, t3
        metadata = MetaData(testing.db)
        t1 = Table(
            't1', metadata,
            Column(
                'col1', Integer, test_needs_autoincrement=True,
                primary_key=True),
            Column('col2', String(30)),
            Column('col3', String(40)),
            Column('col4', String(30)))
        t2 = Table(
            't2', metadata,
            Column(
                'col1', Integer, test_needs_autoincrement=True,
                primary_key=True),
            Column('col2', String(30)),
            Column('col3', String(40)),
            Column('col4', String(30)))
        t3 = Table(
            't3', metadata,
            Column(
                'col1', Integer, test_needs_autoincrement=True,
                primary_key=True),
            Column('col2', String(30)),
            Column('col3', String(40)),
            Column('col4', String(30)))
        metadata.create_all()
        t1.insert().execute([
            dict(col2="t1col2r1", col3="aaa", col4="aaa"),
            dict(col2="t1col2r2", col3="bbb", col4="bbb"),
            dict(col2="t1col2r3", col3="ccc", col4="ccc"),
        ])
        t2.insert().execute([
            dict(col2="t2col2r1", col3="aaa", col4="bbb"),
            dict(col2="t2col2r2", col3="bbb", col4="ccc"),
            dict(col2="t2col2r3", col3="ccc", col4="aaa"),
        ])
        t3.insert().execute([
            dict(col2="t3col2r1", col3="aaa", col4="ccc"),
            dict(col2="t3col2r2", col3="bbb", col4="aaa"),
            dict(col2="t3col2r3", col3="ccc", col4="bbb"),
        ])

    @engines.close_first
    def teardown(self):
        pass

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    def _fetchall_sorted(self, executed):
        # Normalize result ordering so assertions don't depend on the
        # backend's (unspecified) row order.
        return sorted([tuple(row) for row in executed.fetchall()])

    @testing.requires.subqueries
    def test_union(self):
        (s1, s2) = (
            select([t1.c.col3.label('col3'), t1.c.col4.label('col4')],
                   t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
            select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
                   t2.c.col2.in_(["t2col2r2", "t2col2r3"]))
        )
        u = union(s1, s2)

        wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'),
                  ('ccc', 'aaa')]
        found1 = self._fetchall_sorted(u.execute())
        eq_(found1, wanted)

        # The union must also work when wrapped in an alias/subquery.
        found2 = self._fetchall_sorted(u.alias('bar').select().execute())
        eq_(found2, wanted)

    @testing.fails_on('firebird', "doesn't like ORDER BY with UNIONs")
    def test_union_ordered(self):
        (s1, s2) = (
            select([t1.c.col3.label('col3'), t1.c.col4.label('col4')],
                   t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
            select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
                   t2.c.col2.in_(["t2col2r2", "t2col2r3"]))
        )
        u = union(s1, s2, order_by=['col3', 'col4'])

        # ORDER BY applies to the union as a whole, so no sorting here.
        wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'),
                  ('ccc', 'aaa')]
        eq_(u.execute().fetchall(), wanted)

    @testing.fails_on('firebird', "doesn't like ORDER BY with UNIONs")
    @testing.requires.subqueries
    def test_union_ordered_alias(self):
        (s1, s2) = (
            select([t1.c.col3.label('col3'), t1.c.col4.label('col4')],
                   t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
            select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
                   t2.c.col2.in_(["t2col2r2", "t2col2r3"]))
        )
        u = union(s1, s2, order_by=['col3', 'col4'])

        wanted = [('aaa', 'aaa'), ('bbb', 'bbb'), ('bbb', 'ccc'),
                  ('ccc', 'aaa')]
        eq_(u.alias('bar').select().execute().fetchall(), wanted)

    @testing.crashes('oracle', 'FIXME: unknown, verify not fails_on')
    @testing.fails_on(
        'firebird',
        "has trouble extracting anonymous column from union subquery")
    @testing.fails_on('mysql', 'FIXME: unknown')
    @testing.fails_on('sqlite', 'FIXME: unknown')
    def test_union_all(self):
        # UNION ALL over a nested UNION: the inner union dedupes to one
        # copy of each col3 value, the outer ALL keeps both copies.
        e = union_all(
            select([t1.c.col3]),
            union(
                select([t1.c.col3]),
                select([t1.c.col3]),
            )
        )

        wanted = [('aaa',), ('aaa',), ('bbb',), ('bbb',), ('ccc',), ('ccc',)]
        found1 = self._fetchall_sorted(e.execute())
        eq_(found1, wanted)

        found2 = self._fetchall_sorted(e.alias('foo').select().execute())
        eq_(found2, wanted)

    def test_union_all_lightweight(self):
        """like test_union_all, but breaks the sub-union into
        a subquery with an explicit column reference on the outside,
        more palatable to a wider variety of engines.

        """

        u = union(
            select([t1.c.col3]),
            select([t1.c.col3]),
        ).alias()

        e = union_all(
            select([t1.c.col3]),
            select([u.c.col3])
        )

        wanted = [('aaa',), ('aaa',), ('bbb',), ('bbb',), ('ccc',), ('ccc',)]
        found1 = self._fetchall_sorted(e.execute())
        eq_(found1, wanted)

        found2 = self._fetchall_sorted(e.alias('foo').select().execute())
        eq_(found2, wanted)

    @testing.requires.intersect
    def test_intersect(self):
        i = intersect(
            select([t2.c.col3, t2.c.col4]),
            select([t2.c.col3, t2.c.col4], t2.c.col4 == t3.c.col3)
        )

        wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]

        found1 = self._fetchall_sorted(i.execute())
        eq_(found1, wanted)

        found2 = self._fetchall_sorted(i.alias('bar').select().execute())
        eq_(found2, wanted)

    @testing.requires.except_
    @testing.fails_on('sqlite', "Can't handle this style of nesting")
    def test_except_style1(self):
        e = except_(union(
            select([t1.c.col3, t1.c.col4]),
            select([t2.c.col3, t2.c.col4]),
            select([t3.c.col3, t3.c.col4]),
        ), select([t2.c.col3, t2.c.col4]))

        wanted = [('aaa', 'aaa'), ('aaa', 'ccc'), ('bbb', 'aaa'),
                  ('bbb', 'bbb'), ('ccc', 'bbb'), ('ccc', 'ccc')]

        found = self._fetchall_sorted(e.alias().select().execute())
        eq_(found, wanted)

    @testing.requires.except_
    def test_except_style2(self):
        # same as style1, but add alias().select() to the except_().
        # sqlite can handle it now.

        e = except_(union(
            select([t1.c.col3, t1.c.col4]),
            select([t2.c.col3, t2.c.col4]),
            select([t3.c.col3, t3.c.col4]),
        ).alias().select(), select([t2.c.col3, t2.c.col4]))

        wanted = [('aaa', 'aaa'), ('aaa', 'ccc'), ('bbb', 'aaa'),
                  ('bbb', 'bbb'), ('ccc', 'bbb'), ('ccc', 'ccc')]

        found1 = self._fetchall_sorted(e.execute())
        eq_(found1, wanted)

        found2 = self._fetchall_sorted(e.alias().select().execute())
        eq_(found2, wanted)

    @testing.fails_on('sqlite', "Can't handle this style of nesting")
    @testing.requires.except_
    def test_except_style3(self):
        # aaa, bbb, ccc - (aaa, bbb, ccc - (ccc)) = ccc
        e = except_(
            select([t1.c.col3]),  # aaa, bbb, ccc
            except_(
                select([t2.c.col3]),  # aaa, bbb, ccc
                select([t3.c.col3], t3.c.col3 == 'ccc'),  # ccc
            )
        )
        eq_(e.execute().fetchall(), [('ccc',)])
        eq_(e.alias('foo').select().execute().fetchall(), [('ccc',)])

    @testing.requires.except_
    def test_except_style4(self):
        # aaa, bbb, ccc - (aaa, bbb, ccc - (ccc)) = ccc
        e = except_(
            select([t1.c.col3]),  # aaa, bbb, ccc
            except_(
                select([t2.c.col3]),  # aaa, bbb, ccc
                select([t3.c.col3], t3.c.col3 == 'ccc'),  # ccc
            ).alias().select()
        )

        eq_(e.execute().fetchall(), [('ccc',)])
        eq_(
            e.alias().select().execute().fetchall(),
            [('ccc',)]
        )

    @testing.requires.intersect
    @testing.fails_on('sqlite', "sqlite can't handle leading parenthesis")
    def test_intersect_unions(self):
        u = intersect(
            union(
                select([t1.c.col3, t1.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ),
            union(
                select([t2.c.col3, t2.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ).alias().select()
        )
        wanted = [('aaa', 'ccc'), ('bbb', 'aaa'), ('ccc', 'bbb')]
        found = self._fetchall_sorted(u.execute())

        eq_(found, wanted)

    @testing.requires.intersect
    def test_intersect_unions_2(self):
        # as above, but both sides of the intersect are wrapped in
        # alias().select() subqueries.
        u = intersect(
            union(
                select([t1.c.col3, t1.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ).alias().select(),
            union(
                select([t2.c.col3, t2.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ).alias().select()
        )
        wanted = [('aaa', 'ccc'), ('bbb', 'aaa'), ('ccc', 'bbb')]
        found = self._fetchall_sorted(u.execute())

        eq_(found, wanted)

    @testing.requires.intersect
    def test_intersect_unions_3(self):
        u = intersect(
            select([t2.c.col3, t2.c.col4]),
            union(
                select([t1.c.col3, t1.c.col4]),
                select([t2.c.col3, t2.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ).alias().select()
        )
        wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
        found = self._fetchall_sorted(u.execute())

        eq_(found, wanted)

    @testing.requires.intersect
    def test_composite_alias(self):
        # The whole compound statement itself is aliased and selected from.
        ua = intersect(
            select([t2.c.col3, t2.c.col4]),
            union(
                select([t1.c.col3, t1.c.col4]),
                select([t2.c.col3, t2.c.col4]),
                select([t3.c.col3, t3.c.col4]),
            ).alias().select()
        ).alias()

        wanted = [('aaa', 'bbb'), ('bbb', 'ccc'), ('ccc', 'aaa')]
        found = self._fetchall_sorted(ua.select().execute())
        eq_(found, wanted)
t1 = t2 = t3 = None
class JoinTest(fixtures.TestBase):

    """Tests join execution.

    The compiled SQL emitted by the dialect might be ANSI joins or
    theta joins ('old oracle style', with (+) for OUTER). This test
    tries to exercise join syntax and uncover any inconsistencies in
    `JOIN rhs ON lhs.col=rhs.col` vs `rhs.col=lhs.col`. At least one
    database seems to be sensitive to this.
    """

    __backend__ = True

    @classmethod
    def setup_class(cls):
        global metadata
        global t1, t2, t3

        # A simple t1 -> t2 -> t3 foreign-key chain.
        metadata = MetaData(testing.db)
        t1 = Table('t1', metadata,
                   Column('t1_id', Integer, primary_key=True),
                   Column('name', String(32)))
        t2 = Table('t2', metadata,
                   Column('t2_id', Integer, primary_key=True),
                   Column('t1_id', Integer, ForeignKey('t1.t1_id')),
                   Column('name', String(32)))
        t3 = Table('t3', metadata,
                   Column('t3_id', Integer, primary_key=True),
                   Column('t2_id', Integer, ForeignKey('t2.t2_id')),
                   Column('name', String(32)))
        metadata.drop_all()
        metadata.create_all()

        # t1.10 -> t2.20 -> t3.30
        # t1.11 -> t2.21
        # t1.12
        t1.insert().execute({'t1_id': 10, 'name': 't1 #10'},
                            {'t1_id': 11, 'name': 't1 #11'},
                            {'t1_id': 12, 'name': 't1 #12'})
        t2.insert().execute({'t2_id': 20, 't1_id': 10, 'name': 't2 #20'},
                            {'t2_id': 21, 't1_id': 11, 'name': 't2 #21'})
        t3.insert().execute({'t3_id': 30, 't2_id': 20, 'name': 't3 #30'})

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    def assertRows(self, statement, expected):
        """Execute a statement and assert that rows returned equal expected."""

        # Sort both sides so row ordering from the backend doesn't matter.
        found = sorted([tuple(row)
                        for row in statement.execute().fetchall()])
        eq_(found, sorted(expected))

    def test_join_x1(self):
        """Joins t1->t2."""

        # Each test runs its ON clause in both operand orders; some
        # backends were historically sensitive to the direction.
        for criteria in (t1.c.t1_id == t2.c.t1_id, t2.c.t1_id == t1.c.t1_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id],
                from_obj=[t1.join(t2, criteria)])
            self.assertRows(expr, [(10, 20), (11, 21)])

    def test_join_x2(self):
        """Joins t1->t2->t3."""
        # NOTE(review): the body is identical to test_join_x1 and never
        # joins t3 -- the docstring overstates what is exercised; confirm
        # upstream intent.

        for criteria in (t1.c.t1_id == t2.c.t1_id, t2.c.t1_id == t1.c.t1_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id],
                from_obj=[t1.join(t2, criteria)])
            self.assertRows(expr, [(10, 20), (11, 21)])

    def test_outerjoin_x1(self):
        """Outer joins t1->t2."""
        # NOTE(review): despite the name, these are INNER joins
        # (t1.join(t2).join(t3, ...)); the single expected row matches
        # inner-join semantics.

        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id],
                from_obj=[t1.join(t2).join(t3, criteria)])
            self.assertRows(expr, [(10, 20)])

    def test_outerjoin_x2(self):
        """Outer joins t1->t2,t3."""

        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                from_obj=[t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                          outerjoin(t3, criteria)])
            # Rows with no t2/t3 match come back padded with NULLs.
            self.assertRows(
                expr, [(10, 20, 30), (11, 21, None), (12, None, None)])

    def test_outerjoin_where_x2_t1(self):
        """Outer joins t1->t2,t3, where on t1."""

        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t1.c.name == 't1 #10',
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t1.c.t1_id < 12,
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30), (11, 21, None)])

    def test_outerjoin_where_x2_t2(self):
        """Outer joins t1->t2,t3, where on t2."""

        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t2.c.name == 't2 #20',
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t2.c.t2_id < 29,
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30), (11, 21, None)])

    def test_outerjoin_where_x2_t3(self):
        """Outer joins t1->t2,t3, where on t3."""

        # A WHERE on t3 columns filters out the NULL-padded rows.
        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t3.c.name == 't3 #30',
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t3.c.t3_id < 39,
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

    def test_outerjoin_where_x2_t1t3(self):
        """Outer joins t1->t2,t3, where on t1 and t3."""

        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.name == 't1 #10', t3.c.name == 't3 #30'),
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.t1_id < 19, t3.c.t3_id < 39),
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

    def test_outerjoin_where_x2_t1t2(self):
        """Outer joins t1->t2,t3, where on t1 and t2."""

        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.name == 't1 #10', t2.c.name == 't2 #20'),
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.t1_id < 12, t2.c.t2_id < 39),
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30), (11, 21, None)])

    def test_outerjoin_where_x2_t1t2t3(self):
        """Outer joins t1->t2,t3, where on t1, t2 and t3."""

        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.name == 't1 #10',
                     t2.c.name == 't2 #20',
                     t3.c.name == 't3 #30'),
                from_obj=[(t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                           outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.t1_id < 19, t2.c.t2_id < 29, t3.c.t3_id < 39),
                from_obj=[
                    (t1.outerjoin(t2, t1.c.t1_id == t2.c.t1_id).
                     outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

    def test_mixed(self):
        """Joins t1->t2, outer t2->t3."""

        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            print(expr)
            self.assertRows(expr, [(10, 20, 30), (11, 21, None)])

    def test_mixed_where(self):
        """Joins t1->t2, outer t2->t3, plus a where on each table in turn."""

        for criteria in (t2.c.t2_id == t3.c.t2_id, t3.c.t2_id == t2.c.t2_id):
            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t1.c.name == 't1 #10',
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t2.c.name == 't2 #20',
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                t3.c.name == 't3 #30',
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.name == 't1 #10', t2.c.name == 't2 #20'),
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t2.c.name == 't2 #20', t3.c.name == 't3 #30'),
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])

            expr = select(
                [t1.c.t1_id, t2.c.t2_id, t3.c.t3_id],
                and_(t1.c.name == 't1 #10',
                     t2.c.name == 't2 #20',
                     t3.c.name == 't3 #30'),
                from_obj=[(t1.join(t2).outerjoin(t3, criteria))])
            self.assertRows(expr, [(10, 20, 30)])
metadata = flds = None
class OperatorTest(fixtures.TestBase):
    """Tests SQL-level operators (modulo, window functions) on a small table."""

    __backend__ = True

    @classmethod
    def setup_class(cls):
        global metadata, flds
        metadata = MetaData(testing.db)
        flds = Table(
            'flds', metadata,
            Column(
                'idcol', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('intcol', Integer),
            Column('strcol', String(50)),
        )
        metadata.create_all()

        flds.insert().execute([
            dict(intcol=5, strcol='foo'),
            dict(intcol=13, strcol='bar')
        ])

    @classmethod
    def teardown_class(cls):
        metadata.drop_all()

    # TODO: seems like more tests warranted for this setup.
    def test_modulo(self):
        # 5 % 3 == 2 and 13 % 3 == 1, in idcol (insertion) order.
        eq_(
            select([flds.c.intcol % 3],
                   order_by=flds.c.idcol).execute().fetchall(),
            [(2,), (1,)]
        )

    @testing.requires.window_functions
    def test_over(self):
        # row_number() ordered by strcol: 'bar' (13) before 'foo' (5).
        eq_(
            select([
                flds.c.intcol, func.row_number().over(order_by=flds.c.strcol)
            ]).execute().fetchall(),
            [(13, 1), (5, 2)]
        )
| mit |
joaquinlpereyra/ludema | ludema/abstract/actions.py | 1 | 11361 | import random
from functools import wraps
from ludema.abstract.utils import Direction
from ludema.exceptions import (PieceIsNotOnATileError,
PieceIsNotOnThisBoardError,
TileIsEmptyError,
NotGrabbableError)
class Action:
    """Base class for everything a piece can *do* (move, attack, grab...).

    An Action owns a list of nullary ``action_functions``; each applies the
    action to some tile relative to ``piece`` and returns a bool saying
    whether it could be performed.  Calls made through attribute access are
    recorded in ``self.history`` (see ``__getattribute__``).
    """

    def __init__(self, piece, action_functions):
        """
        Args:
            piece (Piece): the piece performing the actions.
            action_functions ([nullary functions] or None): functions which
                perform the action; if None, the subclass-provided defaults
                from ``_default_actions`` are used.
        """
        # possible_actions must be assigned first: __getattribute__ reads
        # it on every subsequent attribute access.
        self.possible_actions = []
        self.piece = piece
        if action_functions is None:
            action_functions = self._default_actions()
        self._set_actions(action_functions)
        self.history = []

    def __getattribute__(self, name):
        # Transparently wrap registered action functions so that calling
        # them through attribute access appends their name to the history.
        attr = object.__getattribute__(self, name)
        if attr in object.__getattribute__(self, 'possible_actions'):
            attr = self._history_appender(attr)
        return attr

    @property
    def is_implemented(self):
        """Return True if action is implemented, False if it can't."""
        return True if self.possible_actions else False

    def _history_appender(self, func):
        # Decorator recording each invocation in self.history.
        @wraps(func)
        def history_wrapper(*args, **kwargs):
            self.history.append(func.__name__)
            return func(*args, **kwargs)
        return history_wrapper

    def _normal_default_actions(self):
        """Just a collection of four extremely normal set of default actions.
        The ones who apply the action to the tile up, right, left and down
        of the piece.
        """
        def up():
            return self.do(self.piece.surroundings[Direction.UP])

        def right():
            return self.do(self.piece.surroundings[Direction.RIGHT])

        def down():
            return self.do(self.piece.surroundings[Direction.DOWN])

        def left():
            return self.do(self.piece.surroundings[Direction.LEFT])

        return [up, right, down, left]

    def _set_actions(self, action_functions):
        """Sets the action_funcions as methods of the class
        and append them to the possible_actions list.
        """
        for action_function in action_functions:
            self.possible_actions.append(action_function)
            setattr(self, action_function.__name__, action_function)

    def _default_actions(self):
        """These will be the default action functions. Every action should
        implement them, but the _normal_default_actions method give you
        four extremely common default function actions: the one which
        applies the action to the tiles above, at right, below and at left
        of the piece.
        """
        return self._normal_default_actions()

    def _unsafe_do(self, tile):
        """Intended to actually perform the action. Should check all
        action conditions and raise an appropiate error if they are not met.
        Doesn't need to return anything. Shouldn't be used for I/O, instead
        use the do method for that.

        Note:
            Every action should implement this method.
        """
        raise NotImplementedError("The Action class shouldn't be used directly!")

    def do(self, tile, dont_pass_turn=False):
        """Inteded as a safe wraper for _unsafe_do. Should take a tile
        indicating where the action must be performed. Should return a bolean
        indicating if the action could be performed or not. Should be capable
        of handling I/O without raising any exceptions.

        Note:
            Every action should implement this method.
        """
        raise NotImplementedError("The Action class shouldn't be used directly!")

    def random(self):
        """Call a random function from the possible actions
        list. Keep in mind that the action may or may not be performed,
        depending on the current position of the piece and what the action
        tries to do.

        Returns:
            bool: True if action was performed, False if not
        """
        surprise_action = random.choice(self.possible_actions)
        was_action_valid = surprise_action()
        return was_action_valid

    def random_and_valid(self):
        """Call a random function from the possible actions,
        making sure that the action is actually possible for the piece.
        If no actions from the list of possible actions, it will just return
        False.

        NOTE: sampling is with replacement, so this is probabilistic -- it
        may give up after len(possible_actions) random draws even though a
        valid action exists.

        Returns:
            bool: True if there was a valid action to be made by the piece,
            False if the piece couldn't move anywhere
        """
        tries = 0
        random_action_performed = self.random()
        while not random_action_performed:
            random_action_performed = self.random()
            tries += 1
            if tries >= len(self.possible_actions):
                return False
        return True

    def all(self):
        """Call all possible actions from the list. The actions may or may
        not be performed depending on the action conditions.

        Returns:
            dict: looks like {action_function_name, boolean} key-value pairs,
            indicating which actions where actually performed (True) and which
            not (False).
        """
        successes = {}
        for action_function in self.possible_actions:
            success = action_function()
            successes[action_function.__name__] = success
        return successes

    def until_success(self):
        """Call possible actions in order, stopping at the first success.

        Returns:
            bool: True if there was a valid action performed by the piece,
            False if no valid action was found.
        """
        # BUGFIX: the previous implementation returned False as soon as the
        # FIRST action failed (the `else: return False` sat inside the
        # loop), so later actions were never tried.  Try each in turn and
        # only report failure after exhausting them all.
        for action_function in self.possible_actions:
            if action_function():
                return True
        return False
class Moving(Action):
    """Action that relocates a piece from its current tile to another."""

    def __init__(self, piece, movement_functions):
        """
        Args:
            piece (Piece): the movable piece to which the movements refer
            movement_functions ([nullary functions]): a list of valid
                functions which as a side effect move the piece.
        """
        Action.__init__(self, piece, movement_functions)
        # Readable alias: piece.move.possible_movements.
        self.possible_movements = self.possible_actions

    def _unsafe_do(self, tile):
        """Move the object if it can.

        That means: unlink the piece from its current tile and link it
        to the new tile; unless there's a piece in the destiny tile already.

        Args:
            tile (Tile): the tile to which the piece will try to move

        Returns:
            bool: False if there was a piece on tile and it wasn't walkable,
                True if movement could be completed

        Raises:
            PieceIsNotOnATileError: if the piece hasn't been put on a tile before
                trying to move
            PieceIsNotOnThisBoardError: if the piece you're trying to move
                is in fact on another board
        """
        if not self.piece.home_tile:
            raise PieceIsNotOnATileError
        if self.piece.home_tile.board is not tile.board:
            raise PieceIsNotOnThisBoardError

        if tile.piece is not None:
            # Touching may itself have side effects (e.g. the touched piece
            # could be moved or removed), so re-check afterwards.
            tile.piece.on_touch_do(touching_piece=self.piece)
            # what if tile.piece.on_touch_do actually moved the touched piece?
            # it could have, so we need to check if tile.piece still has
            # a piece...
            if tile.piece and not tile.piece.walkable:
                return False

        # Unlink from the old tile, link to the new one.
        # NOTE(review): assumes assigning tile.piece also updates
        # piece.home_tile (presumably via a Tile property) -- confirm.
        self.piece.home_tile.piece = None
        tile.piece = self.piece
        return True

    def do(self, tile):
        """Move the object, if it can.

        Args:
            tile (Tile): the tile to which the piece will try to move.

        Returns:
            bool: True if piece could be moved, False if not
        """
        # NOTE(review): unlike Action.do, this override drops the
        # dont_pass_turn parameter -- confirm no caller passes it.
        if tile:
            try:
                return self._unsafe_do(tile)
            except (PieceIsNotOnATileError, PieceIsNotOnThisBoardError):
                return False
        else:
            return False
class Attacking(Action):
    """Action that deals the owning piece's attack damage to a target tile."""

    def __init__(self, piece, attack_functions):
        Action.__init__(self, piece, attack_functions)
        # Readable alias: piece.attack.possible_attacks.
        self.possible_attacks = self.possible_actions

    def _unsafe_do(self, tile):
        """Deal this piece's attack damage to the piece standing on *tile*.

        Args:
            tile (Tile): the tile which the piece will try to attack

        Raises:
            TileIsEmptyError: if *tile* holds no piece to attack.
        """
        target = tile.piece
        if target is None:
            raise TileIsEmptyError(self.piece, tile)
        target.health -= self.piece.attack_damage

    def do(self, tile):
        """Attack *tile*; safe for I/O use, never raises.

        Args:
            tile (Tile): the tile which the piece will try to attack

        Returns:
            bool: True if the attack landed, False otherwise (tile was
            None or had no piece on it).
        """
        if not tile:
            return False
        try:
            self._unsafe_do(tile)
        except TileIsEmptyError:
            return False
        return True
class Grabbing(Action):
    """Action that picks a grabbable piece off a tile into the owner's items."""

    def __init__(self, piece, grab_functions):
        Action.__init__(self, piece, grab_functions)
        # Readable alias: piece.grab.possible_grabs.
        self.possible_grabs = self.possible_actions

    def _unsafe_do(self, tile):
        """Grabs from the tile passed as argument.

        Args:
            tile (Tile): the tile which the piece will try to grab from

        Raises:
            TileIsEmptyError: if there is nothing on the tile to grab
            NotGrabbableError: if the piece on the tile can't be grabbed
        """
        # BUGFIX: previously an empty tile fell through to an
        # AttributeError on ``tile.piece.grab`` (None.grab); raise the
        # explicit error the safe wrapper expects instead.
        if tile.piece is None:
            raise TileIsEmptyError(self.piece, tile)
        # getattr with a default also covers pieces that have no ``grab``
        # attribute at all, instead of raising AttributeError.
        if not callable(getattr(tile.piece, 'grab', None)):
            raise NotGrabbableError(tile.piece)
        grabbable = tile.piece
        grabbable.owner = self.piece
        self.piece.items.append(grabbable)
        tile.piece = None  # POPS!

    def do(self, tile):
        """Grabs from the tile passed as argument. Safe to use for I/O, should
        never raise an error.

        Args:
            tile (Tile): the tile which the piece will try to grab from

        Returns:
            bool: True if something could be grabbed, False if grab failed
        """
        if not tile:
            return False
        try:
            self._unsafe_do(tile)
            return True
        # BUGFIX: NotGrabbableError was previously uncaught, so do() could
        # raise despite its "never raises" contract.
        except (TileIsEmptyError, NotGrabbableError):
            return False

    def from_surroundings(self):
        """Grabs an item from the surroundings of the Character.
        Stops at first item grabbed.
        Items look-up goes clockwise.

        Returns:
            bool: True if item found and grabbed, False otherwise.
        """
        # BUGFIX: the previous loop returned False after the FIRST tile
        # that yielded nothing, so only one neighbouring tile was ever
        # inspected.  Try every surrounding tile before giving up.
        for tile in self.piece.surroundings.values():
            if self.do(tile):
                return True
        return False
| gpl-3.0 |
promptworks/horizon | openstack_dashboard/dashboards/admin/images/urls.py | 46 | 1407 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.images import views
# URL routes for the admin Images panel: listing, create, update,
# metadata editing and detail pages.
urlpatterns = patterns(
    'openstack_dashboard.dashboards.admin.images.views',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^create/$', views.CreateView.as_view(), name='create'),
    url(r'^(?P<image_id>[^/]+)/update/$',
        views.UpdateView.as_view(), name='update'),
    # NOTE(review): this route captures ``id`` while the others use
    # ``image_id`` -- presumably UpdateMetadataView expects that kwarg
    # name; confirm before renaming.
    url(r'^(?P<id>[^/]+)/update_metadata/$',
        views.UpdateMetadataView.as_view(), name='update_metadata'),
    url(r'^(?P<image_id>[^/]+)/detail/$',
        views.DetailView.as_view(), name='detail')
)
| apache-2.0 |
legalsylvain/OpenUpgrade | addons/l10n_in_hr_payroll/report/payslip_report.py | 39 | 3996 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class payslip_report(osv.osv):
    """Read-only analysis model backed by the ``payslip_report`` SQL view.

    Aggregates payslip line totals per slip/category/employee for
    reporting; _auto = False means the ORM does not create a table --
    init() (re)builds the database view instead.
    """
    _name = "payslip.report"
    _description = "Payslip Analysis"
    _auto = False
    _columns = {
        'name': fields.char('Name', size=32, readonly=True),
        'date_from': fields.date('Date From', readonly=True,),
        'date_to': fields.date('Date To', readonly=True,),
        'year': fields.char('Year', size=4, readonly=True),
        'month': fields.selection([('01', 'January'), ('02', 'February'), ('03', 'March'), ('04', 'April'),
                                   ('05', 'May'), ('06', 'June'), ('07', 'July'), ('08', 'August'), ('09', 'September'),
                                   ('10', 'October'), ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('done', 'Done'),
            ('cancel', 'Rejected'),
        ], 'Status', readonly=True),
        'employee_id': fields.many2one('hr.employee', 'Employee', readonly=True),
        'nbr': fields.integer('# Payslip lines', readonly=True),
        'number': fields.char('Number', size=16, readonly=True),
        'struct_id': fields.many2one('hr.payroll.structure', 'Structure', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
        'paid': fields.boolean('Made Payment Order ? ', readonly=True),
        'total': fields.float('Total', readonly=True),
        'category_id': fields.many2one('hr.salary.rule.category', 'Category', readonly=True),
    }

    def init(self, cr):
        # Rebuild the SQL view each time the module is initialized.
        # NOTE(review): the view also exposes to_year/to_month/to_day
        # columns that are not declared in _columns -- presumably unused;
        # confirm before removing them from the query.
        tools.drop_view_if_exists(cr, 'payslip_report')
        cr.execute("""
            create or replace view payslip_report as (
                select
                    min(l.id) as id,
                    l.name,
                    p.struct_id,
                    p.state,
                    p.date_from,
                    p.date_to,
                    p.number,
                    p.company_id,
                    p.paid,
                    l.category_id,
                    l.employee_id,
                    sum(l.total) as total,
                    to_char(p.date_from, 'YYYY') as year,
                    to_char(p.date_from, 'MM') as month,
                    to_char(p.date_from, 'YYYY-MM-DD') as day,
                    to_char(p.date_to, 'YYYY') as to_year,
                    to_char(p.date_to, 'MM') as to_month,
                    to_char(p.date_to, 'YYYY-MM-DD') as to_day,
                    1 AS nbr
                from
                    hr_payslip as p
                    left join hr_payslip_line as l on (p.id=l.slip_id)
                where
                    l.employee_id IS NOT NULL
                group by
                    p.number,l.name,p.date_from,p.date_to,p.state,p.company_id,p.paid,
                    l.employee_id,p.struct_id,l.category_id
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hayd/pygments.rb | vendor/pygments-main/pygments/plugin.py | 135 | 1862 | # -*- coding: utf-8 -*-
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments setuptools plugin interface. The methods defined
here also work if setuptools isn't installed but they just
return nothing.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
style plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugins::
[pygments.filters]
yourfilter = yourfilter:YourFilter
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# pkg_resources (setuptools) is optional: without it the find_plugin_*
# helpers below simply yield nothing.
try:
    import pkg_resources
except ImportError:
    pkg_resources = None

# setuptools entry point group names scanned for each plugin type.
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def find_plugin_lexers():
    """Yield every lexer class registered through setuptools entry points."""
    if pkg_resources is not None:
        for entrypoint in pkg_resources.iter_entry_points(LEXER_ENTRY_POINT):
            yield entrypoint.load()
def find_plugin_formatters():
    """Yield ``(name, formatter class)`` pairs for every plugin formatter."""
    if pkg_resources is not None:
        for entrypoint in pkg_resources.iter_entry_points(FORMATTER_ENTRY_POINT):
            yield entrypoint.name, entrypoint.load()
def find_plugin_styles():
    """Yield ``(name, style class)`` pairs for every plugin style."""
    if pkg_resources is not None:
        for entrypoint in pkg_resources.iter_entry_points(STYLE_ENTRY_POINT):
            yield entrypoint.name, entrypoint.load()
def find_plugin_filters():
    """Yield ``(name, filter class)`` pairs for every plugin filter."""
    if pkg_resources is not None:
        for entrypoint in pkg_resources.iter_entry_points(FILTER_ENTRY_POINT):
            yield entrypoint.name, entrypoint.load()
| mit |
RueLaLa/django-haystack | example_project/regular_app/search_indexes.py | 22 | 1337 | # encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from regular_app.models import Dog
from haystack import indexes
# More typical usage involves creating a subclassed `SearchIndex`. This will
# provide more control over how data is indexed, generally resulting in better
# search.
class DogIndex(indexes.SearchIndex, indexes.Indexable):
    """Search index for :class:`Dog`, limited to publicly visible dogs."""

    # Primary document field; its content is rendered from a template.
    text = indexes.CharField(document=True, use_template=True)
    # We can pull data straight out of the model via `model_attr`.
    breed = indexes.CharField(model_attr='breed')
    # Note that callables are also OK to use.
    name = indexes.CharField(model_attr='full_name')
    bio = indexes.CharField(model_attr='name')
    birth_date = indexes.DateField(model_attr='birth_date')
    # No attribute to map from -- populated by prepare_toys() below.
    toys = indexes.MultiValueField()

    def get_model(self):
        """Model class this index covers."""
        return Dog

    def index_queryset(self, using=None):
        """Restrict indexing to public dogs only."""
        model = self.get_model()
        return model.objects.filter(public=True)

    def prepare_toys(self, obj):
        """Collect the ids of the dog's toys so they can be filtered on."""
        toy_ids = []
        for toy in obj.toys.all():
            toy_ids.append(toy.id)
        return toy_ids
        # Alternatively, you could store the names if searching for toy names
        # is more useful.
        # return [toy.name for toy in obj.toys.all()]
| bsd-3-clause |
twilio/twilio-python | tests/integration/preview/sync/service/sync_list/test_sync_list_item.py | 1 | 10556 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base import serialize
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class SyncListItemTestCase(IntegrationTestCase):
    """Integration tests for the Preview Sync list-item resource.

    The HTTP layer is mocked by ``holodeck``.  The ``*_request`` tests mock
    a 500 so the client raises after sending, then assert the request that
    was attempted; the ``*_response`` tests feed a canned payload and assert
    the client parses it into a non-None resource.
    """

    def test_fetch_request(self):
        # Force an error response so fetch() raises after issuing the request.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .sync_list_items(1).fetch()
        # The item is addressed by its index (1) within the list.
        self.holodeck.assert_has_request(Request(
            'get',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items/1',
        ))

    def test_fetch_response(self):
        # A canned 200 payload must deserialize into a resource object.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "created_by": "created_by",
                "data": {},
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "index": 100,
                "list_sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "revision": "revision",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items/100"
            }
            '''
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_list_items(1).fetch()
        self.assertIsNotNone(actual)

    def test_delete_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .sync_list_items(1).delete(if_match="if_match")
        # Conditional delete is expressed via the If-Match header.
        headers = {'If-Match': "if_match", }
        self.holodeck.assert_has_request(Request(
            'delete',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items/1',
            headers=headers,
        ))

    def test_delete_response(self):
        # 204 No Content: delete() reports success as a truthy return value.
        self.holodeck.mock(Response(
            204,
            None,
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_list_items(1).delete()
        self.assertTrue(actual)

    def test_create_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .sync_list_items.create(data={})
        # The item payload is JSON-serialized into the Data form field.
        values = {'Data': serialize.object({}), }
        self.holodeck.assert_has_request(Request(
            'post',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items',
            data=values,
        ))

    def test_create_response(self):
        # 201 Created with a full resource body.
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "created_by": "created_by",
                "data": {},
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "index": 100,
                "list_sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "revision": "revision",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items/100"
            }
            '''
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_list_items.create(data={})
        self.assertIsNotNone(actual)

    def test_list_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .sync_list_items.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items',
        ))

    def test_read_empty_response(self):
        # An empty page of results must still produce a (empty) list.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "items": [],
                "meta": {
                    "first_page_url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items?From=from&Bounds=inclusive&Order=asc&PageSize=50&Page=0",
                    "key": "items",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items?From=from&Bounds=inclusive&Order=asc&PageSize=50&Page=0"
                }
            }
            '''
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_list_items.list()
        self.assertIsNotNone(actual)

    def test_read_full_response(self):
        # One-item page exercises record deserialization during list().
        self.holodeck.mock(Response(
            200,
            '''
            {
                "items": [
                    {
                        "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "created_by": "created_by",
                        "data": {},
                        "date_created": "2015-07-30T20:00:00Z",
                        "date_updated": "2015-07-30T20:00:00Z",
                        "index": 100,
                        "list_sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "revision": "revision",
                        "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items/100"
                    }
                ],
                "meta": {
                    "first_page_url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items?From=from&Bounds=inclusive&Order=asc&PageSize=50&Page=0",
                    "key": "items",
                    "next_page_url": null,
                    "page": 0,
                    "page_size": 50,
                    "previous_page_url": null,
                    "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items?From=from&Bounds=inclusive&Order=asc&PageSize=50&Page=0"
                }
            }
            '''
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_list_items.list()
        self.assertIsNotNone(actual)

    def test_update_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .sync_list_items(1).update(data={}, if_match="if_match")
        values = {'Data': serialize.object({}), }
        headers = {'If-Match': "if_match", }
        # Two separate assertions on the same POST: one matches on the
        # conditional-update header, the other on the form payload.
        self.holodeck.assert_has_request(Request(
            'post',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items/1',
            headers=headers,
        ))
        self.holodeck.assert_has_request(Request(
            'post',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items/1',
            data=values,
        ))

    def test_update_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "created_by": "created_by",
                "data": {},
                "date_created": "2015-07-30T20:00:00Z",
                "date_updated": "2015-07-30T20:00:00Z",
                "index": 100,
                "list_sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "revision": "revision",
                "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items/100"
            }
            '''
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .sync_list_items(1).update(data={})
        self.assertIsNotNone(actual)
| mit |
lycantropos/cetus | cetus/queries/filters.py | 1 | 2007 | from typing import Optional, Tuple, Any
from cetus.types import (FiltersType,
FilterType)
from cetus.utils import join_str
from .utils import normalize_value
# SQL operator vocabulary used to validate and render filter trees.
LOGICAL_OPERATORS = {'AND', 'OR'}
INCLUSION_OPERATORS = {'IN', 'NOT IN'}
RANGE_OPERATORS = {'BETWEEN'}
COMPARISON_OPERATORS = {'=', '!=',
                        '<', '>',
                        '<=', '>=',
                        'IS', 'IS NOT',
                        'LIKE', 'NOT LIKE'}
# Any operator that relates one column to value(s) (i.e. a tree leaf).
PREDICATES = (INCLUSION_OPERATORS
              | RANGE_OPERATORS
              | COMPARISON_OPERATORS)
def add_filters(query: str, *,
                filters: 'Optional[FiltersType]'
                ) -> str:
    """Append a ``WHERE`` clause rendered from *filters* to *query*.

    The annotation is the recursive ``FiltersType`` tree consumed by
    ``filters_to_str`` (the previous ``Optional[Tuple[str, Any]]`` only
    described the tree's root node), kept as a string so it needs no
    runtime evaluation.

    :param query: SQL statement text, expected to end with a space.
    :param filters: an ``(operator, operand)`` filter tree, or any falsy
        value to leave the query unfiltered.
    :returns: *query* with ``'WHERE <condition> '`` appended when filters
        were supplied, otherwise unchanged.
    """
    if filters:
        query += f'WHERE {filters_to_str(filters)} '
    return query
def filters_to_str(filters: FiltersType) -> str:
    """Recursively render a filter tree into a SQL condition string.

    Logical nodes join their parenthesized children with the operator;
    predicate leaves are delegated to ``predicate_to_str``.
    """
    operator, payload = filters
    if operator in LOGICAL_OPERATORS:
        rendered_children = (filters_to_str(child) for child in payload)
        return operator.join('({})'.format(child)
                             for child in rendered_children)
    if operator in PREDICATES:
        return predicate_to_str(predicate_name=operator,
                                filter_=payload)
    raise ValueError(f'Invalid filters operator: '
                     f'"{operator}" is not found '
                     f'in logical operators '
                     f'and predicates lists.')
def predicate_to_str(
        *,
        predicate_name: str,
        filter_: FilterType) -> str:
    """Render one ``column <op> value`` predicate leaf.

    IN/NOT IN get a parenthesized value list, BETWEEN an ``a AND b``
    pair; every other operator compares against one normalized value.
    """
    column_name, value = filter_
    if predicate_name in INCLUSION_OPERATORS:
        rendered = f'({join_str(map(normalize_value, value))})'
    elif predicate_name in RANGE_OPERATORS:
        rendered = ' AND '.join(map(normalize_value, value))
    else:
        rendered = normalize_value(value)
    return f'{column_name} {predicate_name} {rendered}'
| mit |
fengbaicanhe/intellij-community | python/lib/Lib/encodings/iso2022_jp_2.py | 816 | 1061 | #
# iso2022_jp_2.py: Python Unicode Codec for ISO2022_JP_2
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_2')
class Codec(codecs.Codec):
    # Delegate stateless encode/decode directly to the C codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Multibyte machinery comes from _multibytecodec; only bind the codec.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Multibyte machinery comes from _multibytecodec; only bind the codec.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream-oriented decoding wrapper around the same C codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream-oriented encoding wrapper around the same C codec.
    codec = codec
def getregentry():
    """Return the CodecInfo record handed to the codec registry."""
    # encode/decode are plain class attributes, so one instance suffices.
    instance = Codec()
    return codecs.CodecInfo(
        name='iso2022_jp_2',
        encode=instance.encode,
        decode=instance.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
kingvuplus/EGAMI-B | lib/python/Plugins/Extensions/Volume_adjust/plugin.py | 32 | 22224 | # Volume Adjust
# 2009 Black_64
#
# FIXED SERVICELIST GREENSCREEN BY SCOPE34 (AN)
# ADD AC3 SUPPORT BY BLACK_64
from Screens.Screen import Screen
from Screens.ChannelSelection import *
from Components.ActionMap import HelpableActionMap, ActionMap, NumberActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.config import ConfigInteger, ConfigNothing, getConfigListEntry, ConfigNumber, ConfigYesNo
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.Label import Label
from Components.SelectionList import SelectionList
from Components.MenuList import MenuList
from ServiceReference import ServiceReference
from Plugins.Plugin import PluginDescriptor
from xml.etree.cElementTree import parse as ci_parse
from Tools.XMLTools import elementsWithTag, mergeText, stringToXML
from enigma import *
from os import system, path as os_path
from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase
# NOTE: "global" at module level is a no-op; ListChange is set True after a
# save so the background Volume instance reloads /etc/volume.xml on next zap.
global ListChange
ListChange = None
# Persistent settings: a general AC3/Dolby offset used when the playing
# channel has no per-channel offset of its own.
config.Volume = ConfigSubsection()
config.Volume.Enabled = ConfigYesNo(default=False)
config.Volume.AC3_vol = ConfigInteger(default=10, limits=(0, 99))
class Volume_adjust(Screen):
    """Main editor screen: maintains the per-channel volume-offset list.

    Entries are kept in parallel lists: ``self.servicelist`` holds display
    tuples ``(label, ConfigNothing, kind, service_ref)`` and
    ``self.read_volume`` holds the matching offsets as strings.  The list
    is persisted to /etc/volume.xml on exit.
    """
    skin = """
        <screen position="center,center" size="595,456" title="Volume Adjust" >
            <widget name="ServiceList.desc" position="10,30" size="575,22" font="Regular;20" />
            <widget name="ServiceList" position="10,70" size="575,250" scrollbarMode="showOnDemand" />
            <ePixmap position="10,330" size="575,2" pixmap="skin_default/div-h.png" transparent="1" alphatest="on" />
            <ePixmap position="10,400" size="575,2" pixmap="skin_default/div-h.png" transparent="1" alphatest="on" />
            <widget source="press_menu" render="Label" position="10,330" zPosition="1" size="575,70" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <eLabel backgroundColor="red" position="10,447" size="140,3" zPosition="0" />
            <eLabel backgroundColor="green" position="155,447" size="140,3" zPosition="0" />
            <eLabel backgroundColor="yellow" position="300,447" size="140,3" zPosition="0" />
            <eLabel backgroundColor="blue" position="445,447" size="140,3" zPosition="0" />
            <widget source="key_red" render="Label" position="10,425" zPosition="1" size="140,22" font="Regular;18" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget source="key_green" render="Label" position="155,426" zPosition="1" size="140,22" font="Regular;18" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget source="key_yellow" render="Label" position="300,425" zPosition="1" size="140,22" font="Regular;18" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget source="key_blue" render="Label" position="445,406" zPosition="1" size="140,40" font="Regular;18" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
        </screen>"""

    def __init__(self, session):
        self.skin = Volume_adjust.skin
        Screen.__init__(self, session)
        # Path of the config file
        self.filename = "/etc/volume.xml"
        # Module-level scratch value shared with the Change_volume dialog.
        global offset
        offset = 0
        self["key_red"] = StaticText(_("delete"))
        self["key_green"] = StaticText(_("add Service"))
        self["key_yellow"] = StaticText(_("change"))
        self["key_blue"] = StaticText(_("add Current"))
        self["press_menu"] = StaticText(_("press the menu button to set a general AC3/Dolby offset"))
        self["ServiceList.desc"] = Label(_("Channel \t\t\tVolume +"))
        self["actions"] = ActionMap(["ColorActions", "OkCancelActions", "MenuActions"],
        {
            "green": self.greenPressed,
            "red": self.redPressed,
            "yellow": self.yellowPressed,
            "blue": self.bluePressed,
            "menu": self.config_menu,
            "ok": self.okPressed,
            "cancel": self.cancel
        }, -1)
        self.servicelist = []
        self.read_volume = []
        serviceList = ConfigList(self.servicelist)
        serviceList.list = self.servicelist
        serviceList.l.setList(self.servicelist)
        self["ServiceList"] = serviceList
        self.loadXML()  # load the config file
        self.onShown.append(self.setWindowTitle)

    def setWindowTitle(self):
        self.setTitle(_("Volume Adjust"))

    def redPressed(self):
        # remove current line of the list
        self.delete()

    def greenPressed(self):
        # select service (shows the channel list)
        self.session.openWithCallback(self.finishedChannelSelection, mySmallChannelSelection, None)

    def yellowPressed(self):
        self.okPressed()

    def bluePressed(self):
        # Add the channel currently being watched with offset 0, then let the
        # user edit the offset right away.
        self.service = self.session.nav.getCurrentlyPlayingServiceReference()
        if not self.service is None:
            service = self.service.toCompareString()
            service_name = ServiceReference(self.service).getServiceName().replace('\xc2\x87', '').replace('\xc2\x86', '')
            service_name = service_name + '\t\t\t0'
            self.servicelist.append((service_name, ConfigNothing(), 0, service))
            self.read_volume.append('0')
            offset = 0
            self.session.openWithCallback(self.VolumeChanged, Change_volume, service_name, offset)

    def config_menu(self):
        self.session.open(Volume_Config)

    def okPressed(self):
        # change the volume offset
        if len(self.servicelist):
            cur = self["ServiceList"].getCurrentIndex()
            global offset
            offset = int(self.read_volume[cur])
            tmp = self.servicelist[cur]
            # Strip the trailing "\t..<offset>" part from the display label.
            service_name = tmp[0][0:-3].strip()
            self.session.openWithCallback(self.Change_vol_now, Change_volume, service_name, offset)

    def cancel(self):
        # Persist the list on every exit.
        self.saveXML()
        self.close()

    def delete(self):
        # Remove the selected entry from both parallel lists.
        cur = self["ServiceList"].getCurrent()
        cur1 = self["ServiceList"].getCurrentIndex()
        if cur and len(cur) > 2:
            self.servicelist.remove(cur)
            self.read_volume.remove(self.read_volume[cur1])
            self["ServiceList"].l.setList(self.servicelist)

    def finishedChannelSelection(self, *args):
        # update screen
        # Called both from the channel-selection dialog (with a service ref)
        # and from loadXML (with a stored ref string).
        if len(args):
            ref = args[0]
            service_ref = ServiceReference(ref)
            service_name = service_ref.getServiceName()
            if find_in_list(self.servicelist, service_name, 0) == False:
                split_ref = service_ref.ref.toString().split(":")
                if split_ref[0] == "1":  # only real DVB services
                    t = len(self.servicelist)
                    k = len(self.read_volume)
                    if t == k:
                        # New entry (not loaded from XML): ask for an offset.
                        global offset
                        offset = 0
                        self.session.openWithCallback(self.VolumeChanged, Change_volume, service_name, offset)
                        self.read_volume.append(str(offset))
                    service_name = service_name + self.Tabs(service_name) + self.read_volume[t]
                    self.servicelist.append((service_name, ConfigNothing(), 0, service_ref.ref.toString()))
                    self["ServiceList"].l.setList(self.servicelist)

    def VolumeChanged(self, *args):
        # change volume offset after new entry
        global offset
        t = len(self.servicelist)
        tmp = self.servicelist[t - 1]
        tmp0 = tmp[0][0:-3].strip()
        self.read_volume[t - 1] = str(offset)
        service_name = tmp0 + self.Tabs(tmp0) + str(offset)
        self.servicelist[t - 1] = ((service_name, ConfigNothing(), 0, tmp[3]))
        self["ServiceList"].l.setList(self.servicelist)

    def Change_vol_now(self, *args):
        # change volume offset after selection in list
        global offset
        t = self["ServiceList"].getCurrentIndex()
        tmp = self.servicelist[t]
        tmp0 = tmp[0][0:-3].strip()
        self.read_volume[t] = str(offset)
        service_name = tmp0 + self.Tabs(tmp0) + str(offset)
        self.servicelist[t] = ((service_name, ConfigNothing(), 0, tmp[3]))
        self["ServiceList"].l.setList(self.servicelist)

    def Tabs(self, name):
        # remove escape chars and check lenght
        # Returns enough tab characters to roughly align the offset column.
        k = 0
        for let in name:
            if ord(let) > 1 and ord(let) < 128:
                k += 1
        print '[Volume Adjust] length service name = ' + str(k)
        if k > 28:
            return '\t'
        elif k > 18:
            return '\t\t'
        else:
            return '\t\t\t'

    def saveXML(self):
        # save the config file
        # Also raises the module-level ListChange flag so the background
        # Volume watcher reloads the file.
        global ListChange
        ListChange = True
        try:
            fp = file(self.filename, 'w')
            fp.write("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n")
            fp.write("<adjustlist>\n")
            fp.write("\t<channels>\n")
            fp.write("\t\t<id>%s</id>\n" % 'services')
            t = 0
            for item in self.servicelist:
                if len(self.servicelist):
                    # remove the volume offset from service name
                    tmp = item[0]
                    tmp = tmp[0:-3].strip()
                    # write line in the XML file
                    if item[2] == 1:
                        fp.write("\t\t<provider name=\"%s\" dvbnamespace=\"%s\" volume=\"%s\" />\n" % (tmp, item[3], self.read_volume[t]))
                    else:
                        fp.write("\t\t<service name=\"%s\" ref=\"%s\" volume=\"%s\" />\n" % (tmp, item[3], self.read_volume[t]))
                    t += 1
            fp.write("\t</channels>\n")
            fp.write("</adjustlist>\n")
            fp.close()
        except:
            # NOTE(review): broad except deliberately keeps the UI alive on a
            # write failure; the file may be left partially written.
            #os.unlink(self.filename) # gives a GS WHY ???
            print "[Volume Adjust] error writing xml..."

    def loadXML(self):
        # Parse /etc/volume.xml and rebuild the on-screen list from it.
        print "[Volume Adjust] load xml..."
        if not os_path.exists(self.filename):
            return
        self.read_services = []
        self.read_volume = []
        try:
            tree = ci_parse(self.filename).getroot()
            for channels in tree.findall("channels"):
                for service in channels.findall("service"):
                    read_service_name = service.get("name").encode("UTF-8")
                    read_service_ref = service.get("ref").encode("UTF-8")
                    read_service_volume = service.get("volume").encode("UTF-8")
                    self.read_services.append(read_service_ref)
                    self.read_volume.append(read_service_volume)
        except:
            print "[Volume Adjust] error parsing xml..."
        for item in self.read_services:
            if len(item):
                self.finishedChannelSelection(item)
        self["ServiceList"].l.setList(self.servicelist)
class Change_volume(ConfigListScreen, Screen):
    """Small dialog to edit one channel's volume offset.

    The result is passed back through the module-level ``offset`` variable
    (green/OK stores it, yellow flips its sign, red/cancel leaves it).
    """
    skin = """
        <screen position="center,center" size="310,190" title="Change Volume offset" >
            <widget name="config" position="10,10" size="290,210" scrollbarMode="showOnDemand" />
            <ePixmap position="10,130" size="290,2" pixmap="skin_default/div-h.png" transparent="1" alphatest="on" />
            <eLabel backgroundColor="red" position="10,181" size="90,3" zPosition="0" />
            <eLabel backgroundColor="green" position="110,181" size="90,3" zPosition="0" />
            <eLabel backgroundColor="yellow" position="210,181" size="90,3" zPosition="0" />
            <widget source="key_red" render="Label" position="10,158" zPosition="1" size="90,22" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget source="key_green" render="Label" position="110,158" zPosition="1" size="90,22" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget source="key_yellow" render="Label" position="210,158" zPosition="1" size="90,22" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
        </screen>"""

    def __init__(self, session, name, vol):
        self.skin = Change_volume.skin
        Screen.__init__(self, session)
        self.offset = ConfigNumber(default="0")
        # Seed the editor with the current module-level offset.
        global offset
        self.offset.setValue(str(offset))
        self.Clist = []
        self.Clist.append(getConfigListEntry(_(name), self.offset))
        ConfigListScreen.__init__(self, self.Clist)
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("Ok"))
        self["key_yellow"] = StaticText(_("+/-"))
        self["actions"] = ActionMap(["ColorActions", "SetupActions"],
        {
            "ok": self.ok,
            "cancel": self.cancel,
            "green": self.greenPressed,
            "red": self.cancel,
            "yellow": self.yellowPressed,
        }, -2)
        self.onShown.append(self.setWindowTitle)

    def setWindowTitle(self):
        self.setTitle(_("Change Volume offset"))

    def greenPressed(self):
        # Publish the edited value to the caller via the module global.
        global offset
        offset = self.offset.value
        self.close()

    def yellowPressed(self):
        # Toggle the sign of the offset and refresh the config list.
        global offset
        offset = self.offset.value * -1
        self.offset.setValue(str(offset))
        self["config"].list = self.Clist
        self["config"].l.setList(self.Clist)

    def ok(self):
        self.greenPressed()

    def cancel(self):
        self.close()
class mySmallChannelSelection(ChannelSelectionBase):
    """Minimal channel chooser; closes with the picked service reference
    (or None on cancel)."""
    skin = """
        <screen position="center,center" size="560,430" title="Select service to add...">
            <ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" />
            <ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" />
            <ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" />
            <ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" />
            <widget name="key_red" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" />
            <widget name="key_green" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" />
            <widget name="key_yellow" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" />
            <widget name="key_blue" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" />
            <widget name="list" position="00,45" size="560,364" scrollbarMode="showOnDemand" />
        </screen>"""

    def __init__(self, session, title):
        self.skin = mySmallChannelSelection.skin
        ChannelSelectionBase.__init__(self, session)
        self.onShown.append(self.__onExecCallback)
        self.bouquet_mark_edit = OFF
        # Grey out the service currently being played in the list.
        service = self.session.nav.getCurrentService()
        if service:
            info = service.info()
            if info:
                refstr = info.getInfoString(iServiceInformation.sServiceref)
                self.servicelist.setPlayableIgnoreService(eServiceReference(refstr))
        self["actions"] = ActionMap(["OkCancelActions", "TvRadioActions", "ChannelSelectBaseActions"],
        {
            "cancel": self.cancel,
            "ok": self.channelSelected,
            "keyRadio": self.setModeRadio,
            "keyTV": self.setModeTv
        })

    def __onExecCallback(self):
        self.setModeTv()
        self.setTitle(_("Select service to add..."))

    def channelSelected(self):
        # Enter folders/bouquets; close with a real (non-marker) service.
        ref = self.getCurrentSelection()
        if (ref.flags & 7) == 7:
            self.enterPath(ref)
        elif not (ref.flags & eServiceReference.isMarker):
            ref = self.getCurrentSelection()
            self.close(ref)

    def setModeTv(self):
        self.setTvMode()
        self.showFavourites()

    def setModeRadio(self):
        self.setRadioMode()
        self.showFavourites()

    def cancel(self):
        self.close(None)
def find_in_list(list, search, listpos=0):
    """True if *search* equals the name portion (last three characters
    dropped, surrounding whitespace stripped) of any entry's *listpos*
    column -- used to reject duplicate service names."""
    return any(entry[listpos][:-3].strip() == search for entry in list)
class Volume_Config(ConfigListScreen, Screen):
    """Settings dialog for the global AC3/Dolby offset.

    Green saves; red/cancel restores the values captured at entry;
    yellow flips the sign of the offset.
    """
    skin = """
        <screen position="center,center" size="360,210" title="Volume Config" >
            <widget name="config" position="10,10" size="340,75" scrollbarMode="showOnDemand" />
            <ePixmap position="10,80" size="340,2" pixmap="skin_default/div-h.png" transparent="1" alphatest="on" />
            <widget source="infotext" render="Label" position="10,80" zPosition="1" size="340,70" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <ePixmap position="10,150" size="340,2" pixmap="skin_default/div-h.png" transparent="1" alphatest="on" />
            <eLabel backgroundColor="red" position="20,201" size="100,3" zPosition="0" />
            <eLabel backgroundColor="green" position="130,201" size="100,3" zPosition="0" />
            <eLabel backgroundColor="yellow" position="240,201" size="100,3" zPosition="0" />
            <widget source="key_red" render="Label" position="20,168" zPosition="1" size="100,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget source="key_green" render="Label" position="130,168" zPosition="1" size="100,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget source="key_yellow" render="Label" position="240,168" zPosition="1" size="100,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
        </screen>"""

    def __init__(self, session):
        self.skin = Volume_Config.skin
        Screen.__init__(self, session)
        # Remember entry values so cancel can restore them.
        self.oldEnable = config.Volume.Enabled.value
        self.oldOffset = config.Volume.AC3_vol.value
        self.Clist = []
        self.Clist.append(getConfigListEntry(_('Enable AC3/Dolby'), config.Volume.Enabled))
        self.Clist.append(getConfigListEntry(_('AC3/Dolby offset'), config.Volume.AC3_vol))
        ConfigListScreen.__init__(self, self.Clist)
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("Save"))
        self["key_yellow"] = StaticText(_("+/-"))
        self["infotext"] = StaticText(_("this offset will only be used if the channel has not its own volume offset"))
        self["actions"] = ActionMap(["ColorActions", "SetupActions"],
        {
            "ok": self.ok,
            "cancel": self.cancel,
            "green": self.greenPressed,
            "red": self.cancel,
            "yellow": self.yellowPressed,
        }, -2)
        self.onShown.append(self.setWindowTitle)

    def setWindowTitle(self):
        self.setTitle(_("Volume Config"))

    def greenPressed(self):
        config.Volume.save()
        self.close()

    def yellowPressed(self):
        # Flip the sign of the configured offset and refresh the list.
        #config.Volume.Enabled.value = False
        #config.Volume.AC3_vol.value = 10
        config.Volume.AC3_vol.setValue(config.Volume.AC3_vol.value * -1)
        self["config"].list = self.Clist
        self["config"].l.setList(self.Clist)

    def ok(self):
        self.greenPressed()

    def cancel(self):
        # Roll back to the values seen when the dialog opened.
        config.Volume.Enabled.setValue(self.oldEnable)
        config.Volume.AC3_vol.setValue(self.oldOffset)
        config.Volume.save()
        self.close()
class Volume:
    """Background watcher: applies per-channel / AC3 volume offsets on zap.

    ``self.oldvol`` always holds the offset currently applied to the mixer,
    so it can be subtracted again when the next channel needs a different
    (or no) offset.
    """

    def __init__(self, session):
        # autostarting instance, comes active when info is updated (zap)
        self.session = session
        self.service = None
        self.onClose = []
        self.read_services = []
        self.read_volume = []
        # Re-evaluate offsets every time service info updates (i.e. on zap).
        self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
        {
            iPlayableService.evUpdatedInfo: self.__evUpdatedInfo,
        })
        self.volctrl = eDVBVolumecontrol.getInstance()
        self.volchange = None
        self.oldvol = 0       # offset currently applied to the mixer
        self.oldservice = ""  # compare-string of the last seen service
        self.filen = "/etc/volume.xml"
        self.startonce = True

    def loadXML(self):
        # load the list
        # Same file format as Volume_adjust.loadXML; fills the parallel
        # read_services / read_volume lists.
        print "[Volume Adjust] load xml..."
        if not os_path.exists(self.filen):
            return
        self.read_services = []
        self.read_volume = []
        try:
            tree = ci_parse(self.filen).getroot()
            for channels in tree.findall("channels"):
                for service in channels.findall("service"):
                    read_service_name = service.get("name").encode("UTF-8")
                    read_service_ref = service.get("ref").encode("UTF-8")
                    read_service_volume = service.get("volume").encode("UTF-8")
                    self.read_services.append(read_service_ref)
                    self.read_volume.append(read_service_volume)
        except:
            print "[Volume Adjust] error parsing xml..."
        for i in self.read_services:
            print i

    def __evUpdatedInfo(self):
        # here it starts the actual routine to change the volume offset
        print "[Volume Adjust] Update Info"
        if not self.startonce:
            self.setvolume()
        vol = self.volctrl.getVolume()
        print "[Volume Adjust] Volume = " + str(vol)
        global ListChange
        # Check if list is updated (new save) or no list loaded
        if ListChange or len(self.read_services) == 0:
            self.loadXML()
            ListChange = None
        self.service = self.session.nav.getCurrentlyPlayingServiceReference()
        if not self.service is None:
            service = self.service.toCompareString()
            # check for new channel (zap)
            if service <> self.oldservice:
                print '[Volume Adjust] New Channel'
                # store new channel
                self.oldservice = service
                # calculate normal volume (subtract previous offset of the actual volume)
                vol3 = str(self.volctrl.getVolume())
                print '[Volume Adjust] oldvol = ' + str(self.oldvol)
                normalvol = int(vol3) - self.oldvol
                # don't forget to update the actual volume variable
                # don't change the volume if the offset = 0
                if self.oldvol != 0:
                    # change the volume to previous volume
                    self.oldvol = 0
                    self.volctrl.setVolume(normalvol, normalvol)
                found = None
                tel = 0
                # search the new channel in list
                for i in self.read_services:
                    if i == service:
                        # service found
                        print '[Volume Adjust] Found adjust volume channel'
                        found = True
                        break
                    tel += 1
                # if channel found in list, search volume offset and change the volume
                if found:
                    voloffset = self.read_volume[tel]
                    print '[Volume Adjust] offset = ' + voloffset
                    # calculate new volume
                    vol1 = int(voloffset)
                    vol2 = str(self.volctrl.getVolume())
                    newvol = int(vol2) + vol1
                    print '[Volume Adjust] newvol = ' + str(newvol)
                    # set the new volume
                    self.volctrl.setVolume(newvol, newvol)
                    # store the new offset, need to change it back when new channel not in list
                    self.oldvol = int(voloffset)
                else:
                    # No per-channel offset: fall back to the global AC3/Dolby
                    # offset when enabled and the track is AC3/DTS.
                    if config.Volume.Enabled.value:
                        print '[Volume Adjust] Check for AC3/Dolby'
                        if self.isCurrentAudioAC3DTS():
                            vol = self.volctrl.getVolume()
                            newvol = int(vol) + config.Volume.AC3_vol.value
                            print '[Volume Adjust] newvol AC3/Dolby = ' + str(newvol)
                            self.volctrl.setVolume(newvol, newvol)
                            self.oldvol = config.Volume.AC3_vol.value

    def setvolume(self):
        # Restore the user's saved base volume once at startup.
        vol = 50
        vol = config.audio.volume.value
        print '[Setvolume] start with volume ' + str(vol)
        self.volctrl.setVolume(vol, vol)
        self.startonce = True

    def isCurrentAudioAC3DTS(self):
        # True when the selected audio track describes itself as AC3/DTS/
        # Dolby Digital.
        service = self.session.nav.getCurrentService()
        audio = service.audioTracks()
        if audio:
            try:  # uhh, servicemp3 leads sometimes to OverflowError Error
                tracknr = audio.getCurrentTrack()
                i = audio.getTrackInfo(tracknr)
                description = i.getDescription();
                print '[Volume Adjust] description: ' + description
                if "AC3" in description or "DTS" in description or "Dolby Digital" == description:
                    print '[Volume Adjust] AudioAC3Dolby = YES'
                    return True
            except:
                print '[Volume Adjust] Fault AudioAC3Dolby = NO'
                return False
        print '[Volume Adjust] AudioAC3Dolby = NO'
        return False
VolumeInstance = None
def sessionstart(reason, session):
    """Autostart hook: create the background Volume watcher exactly once."""
    global VolumeInstance
    if VolumeInstance is not None:
        return
    VolumeInstance = Volume(session)
def main(session, **kwargs):
    # Menu entry point: open the channel/offset editor screen.
    session.open(Volume_adjust)
def menu(menuid, **kwargs):
    """Offer the plugin only inside the audio settings menu."""
    if menuid != "audio_menu":
        return []
    return [(_("Volume Adjust"), main, "Volume_Adjust", 5)]
def Plugins(**kwargs):
	"""Register the session-autostart hook and the audio-menu entry."""
	return [PluginDescriptor( where = PluginDescriptor.WHERE_SESSIONSTART, fnc = sessionstart ),
		PluginDescriptor( name = "Volume Adjust", description = _("select channels to add a offset to the Volume"), where = PluginDescriptor.WHERE_MENU, fnc = menu )]
| gpl-2.0 |
laiy/Database_Project | third_party/nltk/tokenize/stanford.py | 7 | 3763 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Interface to the Stanford Tokenizer
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Steven Xu <xxu@student.unimelb.edu.au>
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import unicode_literals, print_function
import tempfile
import os
import json
from subprocess import PIPE
from nltk import compat
from nltk.internals import find_jar, config_java, java, _java_options
from nltk.tokenize.api import TokenizerI
_stanford_url = 'http://nlp.stanford.edu/software/lex-parser.shtml'
class StanfordTokenizer(TokenizerI):
    r"""
    Interface to the Stanford Tokenizer

    >>> from nltk.tokenize.stanford import StanfordTokenizer
    >>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\nThanks."
    >>> StanfordTokenizer().tokenize(s)
    ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
    >>> s = "The colour of the wall is blue."
    >>> StanfordTokenizer(options={"americanize": True}).tokenize(s)
    ['The', 'color', 'of', 'the', 'wall', 'is', 'blue', '.']
    """

    _JAR = 'stanford-postagger.jar'

    def __init__(self, path_to_jar=None, encoding='utf8', options=None, verbose=False, java_options='-mx1000m'):
        # Locate the Stanford jar (honours $STANFORD_POSTAGGER when set).
        self._stanford_jar = find_jar(
            self._JAR, path_to_jar,
            env_vars=('STANFORD_POSTAGGER',),
            searchpath=(), url=_stanford_url,
            verbose=verbose
        )

        self._encoding = encoding
        self.java_options = java_options

        # Render the options dict as the comma-separated key=value string
        # expected by PTBTokenizer's -options flag.
        options = {} if options is None else options
        self._options_cmd = ','.join('{0}={1}'.format(key, val) for key, val in options.items())

    @staticmethod
    def _parse_tokenized_output(s):
        # PTBTokenizer emits one token per line.
        return s.splitlines()

    def tokenize(self, s):
        """
        Use stanford tokenizer's PTBTokenizer to tokenize multiple sentences.
        """
        cmd = ['edu.stanford.nlp.process.PTBTokenizer']
        output = self._execute(cmd, s)
        return self._parse_tokenized_output(output)

    def _execute(self, cmd, input_, verbose=False):
        """Run *cmd* through the Stanford jar, feeding *input_* via a
        temporary file, and return the decoded stdout."""
        charset = self._encoding
        cmd.extend(['-charset', charset])

        if self._options_cmd:
            cmd.extend(['-options', self._options_cmd])

        # Remember the current java options so they can be restored below.
        default_options = ' '.join(_java_options)

        # Configure java.
        config_java(options=self.java_options, verbose=verbose)

        # Windows is incompatible with NamedTemporaryFile() without passing in
        # delete=False.
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as input_file:
            # Write the actual sentences to the temporary input file.
            text = input_
            if isinstance(text, compat.text_type) and charset:
                text = text.encode(charset)
            input_file.write(text)
            input_file.flush()

            cmd.append(input_file.name)

            # Run the tokenizer and collect the output.
            stdout, _stderr = java(cmd, classpath=self._stanford_jar,
                                   stdout=PIPE, stderr=PIPE)
            stdout = stdout.decode(charset)

        os.unlink(input_file.name)

        # Return java configurations to their default values.
        config_java(options=default_options, verbose=False)

        return stdout
def setup_module(module):
    # Skip this module's doctests when the Stanford postagger jar cannot be
    # located (constructing StanfordTokenizer raises LookupError in that case).
    from nose import SkipTest
    try:
        StanfordTokenizer()
    except LookupError:
        raise SkipTest('doctests from nltk.tokenize.stanford are skipped because the stanford postagger jar doesn\'t exist')
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)
| apache-2.0 |
jcsp/manila | setup.py | 225 | 1028 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    # Pre-import works around http://bugs.python.org/issue15881#msg170215
    # (lazy pbr loading breaking setuptools on Python < 2.7.4; see above).
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=['pbr>=1.3'],
    pbr=True)
| apache-2.0 |
gangadhar-kadam/nassimlib | webnotes/utils/email_lib/__init__.py | 34 | 1457 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes import conf
def sendmail_md(recipients, sender=None, msg=None, subject=None):
	"""Render *msg* (markdown) to HTML and send it via sendmail."""
	import markdown2
	html = markdown2.markdown(msg)
	sendmail(recipients, sender, html, subject)
def sendmail(recipients, sender='', msg='', subject='[No Subject]'):
	"""send an html email as multipart with attachments and all"""
	from webnotes.utils.email_lib.smtp import get_email
	email = get_email(recipients, sender, msg, subject)
	email.send()
def sendmail_to_system_managers(subject, content):
	"""Email *content* to every enabled System Manager."""
	from webnotes.utils.email_lib.smtp import get_email
	recipients = get_system_managers()
	email = get_email(recipients, None, content, subject)
	email.send()
@webnotes.whitelist()
def get_contact_list():
	"""Returns contacts (from autosuggest)"""
	# SECURITY NOTE(review): `txt`, `where`, `select` and `from` all come
	# straight from the request (webnotes.form_dict) and are interpolated
	# into SQL via string formatting -- an SQL injection vector. This should
	# be rewritten with parameterized values and whitelisted field names;
	# left unchanged here pending confirmation of the db API contract.
	cond = ['`%s` like "%s%%"' % (f,
		webnotes.form_dict.get('txt')) for f in webnotes.form_dict.get('where').split(',')]
	cl = webnotes.conn.sql("select `%s` from `tab%s` where %s" % (
			webnotes.form_dict.get('select')
			,webnotes.form_dict.get('from')
			,' OR '.join(cond)
		)
	)
	# Drop empty values before returning the suggestion list to the client.
	webnotes.response['cl'] = filter(None, [c[0] for c in cl])
def get_system_managers():
	# All enabled, non-Administrator users holding the 'System Manager' role.
	return webnotes.conn.sql_list("""select parent FROM tabUserRole
		WHERE role='System Manager'
		AND parent!='Administrator'
		AND parent IN
		(SELECT email FROM tabProfile WHERE enabled=1)""")
Sorsly/subtle | google-cloud-sdk/lib/surface/kms/keyrings/remove_iam_policy_binding.py | 3 | 1833 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to remove a policy binding from a KeyRing."""
from googlecloudsdk.api_lib.cloudkms import iam
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam import iam_util
from googlecloudsdk.command_lib.kms import flags
class RemoveIamPolicyBinding(base.Command):
  r"""Remove a policy binding from a KeyRing.

  Removes an IAM policy binding from the given KeyRing.

  See https://cloud.google.com/iam/docs/managing-policies for details of
  policy role and member types.

  ## EXAMPLES

  The following command will remove an IAM policy binding for the role of
  'roles/editor' for the user 'test-user@gmail.com' on the KeyRing
  `fellowship` with Location `global`:

    $ {command} fellowship --location global \
        --member='user:test-user@gmail.com' \
        --role='roles/editor'
  """

  @staticmethod
  def Args(parser):
    # Positional KeyRing argument plus the standard --member/--role flags.
    flags.AddKeyRingArgument(parser,
                             'from which to remove an IAM policy binding')
    iam_util.AddArgsForRemoveIamPolicyBinding(parser, 'keyring',
                                              flags.CRYPTO_KEY_COLLECTION)

  def Run(self, args):
    # Returns the updated IAM policy after removing the binding.
    return iam.RemovePolicyBindingFromKeyRing(
        flags.ParseKeyRingName(args), args.member, args.role)
| mit |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.3/django/contrib/gis/gdal/prototypes/srs.py | 321 | 3378 | from ctypes import c_char_p, c_int, c_void_p, POINTER
from django.contrib.gis.gdal.libgdal import lgdal, std_call
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, int_output, \
srs_output, string_output, void_output
## Shortcut generation for routines with known parameters.
def srs_double(f):
    """
    Creates a function prototype for the OSR routines that take
    the OSRSpatialReference object and an error-code out-parameter,
    returning a double value (with errcheck enabled so GDAL errors raise).
    """
    return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True)
def units_func(f):
    """
    Creates a ctypes function prototype for OSR units functions, e.g.,
    OSRGetAngularUnits, OSRGetLinearUnits.  The routine returns the unit
    magnitude as a double and the unit name via the char-pointer argument.
    """
    return double_output(f, [c_void_p, POINTER(c_char_p)], strarg=True)
# Creation & destruction.
clone_srs = srs_output(std_call('OSRClone'), [c_void_p])
new_srs = srs_output(std_call('OSRNewSpatialReference'), [c_char_p])
release_srs = void_output(lgdal.OSRRelease, [c_void_p], errcheck=False)
destroy_srs = void_output(std_call('OSRDestroySpatialReference'), [c_void_p], errcheck=False)
srs_validate = void_output(lgdal.OSRValidate, [c_void_p])

# Getting the semi_major, semi_minor, and flattening functions.
semi_major = srs_double(lgdal.OSRGetSemiMajor)
semi_minor = srs_double(lgdal.OSRGetSemiMinor)
invflattening = srs_double(lgdal.OSRGetInvFlattening)

# WKT, PROJ, EPSG, XML importation routines.
from_wkt = void_output(lgdal.OSRImportFromWkt, [c_void_p, POINTER(c_char_p)])
from_proj = void_output(lgdal.OSRImportFromProj4, [c_void_p, c_char_p])
from_epsg = void_output(std_call('OSRImportFromEPSG'), [c_void_p, c_int])
from_xml = void_output(lgdal.OSRImportFromXML, [c_void_p, c_char_p])
from_user_input = void_output(std_call('OSRSetFromUserInput'), [c_void_p, c_char_p])

# Morphing to/from ESRI WKT.
morph_to_esri = void_output(lgdal.OSRMorphToESRI, [c_void_p])
morph_from_esri = void_output(lgdal.OSRMorphFromESRI, [c_void_p])

# Identifying the EPSG
identify_epsg = void_output(lgdal.OSRAutoIdentifyEPSG, [c_void_p])

# Getting the angular_units, linear_units functions
linear_units = units_func(lgdal.OSRGetLinearUnits)
angular_units = units_func(lgdal.OSRGetAngularUnits)

# For exporting to WKT, PROJ.4, "Pretty" WKT, and XML.
to_wkt = string_output(std_call('OSRExportToWkt'), [c_void_p, POINTER(c_char_p)])
to_proj = string_output(std_call('OSRExportToProj4'), [c_void_p, POINTER(c_char_p)])
to_pretty_wkt = string_output(std_call('OSRExportToPrettyWkt'), [c_void_p, POINTER(c_char_p), c_int], offset=-2)

# Memory leak fixed in GDAL 1.5; still exists in 1.4.
to_xml = string_output(lgdal.OSRExportToXML, [c_void_p, POINTER(c_char_p), c_char_p], offset=-2)

# String attribute retrieval routines.
get_attr_value = const_string_output(std_call('OSRGetAttrValue'), [c_void_p, c_char_p, c_int])
get_auth_name = const_string_output(lgdal.OSRGetAuthorityName, [c_void_p, c_char_p])
get_auth_code = const_string_output(lgdal.OSRGetAuthorityCode, [c_void_p, c_char_p])

# SRS Properties
isgeographic = int_output(lgdal.OSRIsGeographic, [c_void_p])
islocal = int_output(lgdal.OSRIsLocal, [c_void_p])
isprojected = int_output(lgdal.OSRIsProjected, [c_void_p])

# Coordinate transformation
new_ct= srs_output(std_call('OCTNewCoordinateTransformation'), [c_void_p, c_void_p])
destroy_ct = void_output(std_call('OCTDestroyCoordinateTransformation'), [c_void_p], errcheck=False)
| mit |
aleonliao/depot_tools | trychange.py | 29 | 46227 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Client-side script to send a try job to the try server. It communicates to
the try server by either writting to a svn/git repository or by directly
connecting to the server by HTTP.
"""
import contextlib
import datetime
import errno
import getpass
import itertools
import json
import logging
import optparse
import os
import posixpath
import re
import shutil
import sys
import tempfile
import urllib
import urllib2
import urlparse
import breakpad # pylint: disable=W0611
import fix_encoding
import gcl
import gclient_utils
import gerrit_util
import scm
import subprocess2
__version__ = '1.2'
# Constants
HELP_STRING = "Sorry, Tryserver is not available."
USAGE = r"""%prog [options]
Client-side script to send a try job to the try server. It communicates to
the try server by either writting to a svn repository or by directly connecting
to the server by HTTP."""
EPILOG = """
Examples:
Send a patch directly from rietveld:
%(prog)s -R codereview.chromium.org/1337
--email recipient@example.com --root src
Try a change against a particular revision:
%(prog)s -r 123
Try a change including changes to a sub repository:
%(prog)s -s third_party/WebKit
A git patch off a web site (git inserts a/ and b/) and fix the base dir:
%(prog)s --url http://url/to/patch.diff --patchlevel 1 --root src
Use svn to store the try job, specify an alternate email address and use a
premade diff file on the local drive:
%(prog)s --email user@example.com
--svn_repo svn://svn.chromium.org/chrome-try/try --diff foo.diff
Running only on a 'mac' slave with revision 123 and clobber first; specify
manually the 3 source files to use for the try job:
%(prog)s --bot mac --revision 123 --clobber -f src/a.cc -f src/a.h
-f include/b.h
"""
GIT_PATCH_DIR_BASENAME = os.path.join('git-try', 'patches-git')
GIT_BRANCH_FILE = 'ref'
_GIT_PUSH_ATTEMPTS = 3
def DieWithError(message):
  """Prints the message to stderr and exits the process with status 1."""
  print >> sys.stderr, message
  sys.exit(1)
def RunCommand(args, error_ok=False, error_message=None, **kwargs):
  """Runs args and returns its stdout.

  On failure, dies via DieWithError unless error_ok is True, in which case
  the failing command's stdout is returned instead.
  """
  try:
    return subprocess2.check_output(args, shell=False, **kwargs)
  except subprocess2.CalledProcessError, e:
    if not error_ok:
      DieWithError(
          'Command "%s" failed.\n%s' % (
            ' '.join(args), error_message or e.stdout or ''))
    return e.stdout
def RunGit(args, **kwargs):
  """Runs 'git <args>' through RunCommand and returns stdout."""
  return RunCommand(['git'] + args, **kwargs)
class Error(Exception):
  """An error during a try job submission.

  For this error, trychange.py does not display stack trace, only message
  """
class InvalidScript(Error):
  """Raised for an invalid try-related script; message gets HELP_STRING."""
  def __str__(self):
    return self.args[0] + '\n' + HELP_STRING
class NoTryServerAccess(Error):
  """Raised when the try server cannot be reached; message gets HELP_STRING."""
  def __str__(self):
    return self.args[0] + '\n' + HELP_STRING
def Escape(name):
  """Escapes characters that could interfere with the file system or try job
  parsing.
  """
  unsafe = re.compile(r'[^\w#-]')
  return unsafe.sub('_', name)
class SCM(object):
  """Simplistic base class to implement one function: ProcessOptions."""
  def __init__(self, options, path, file_list):
    # 'path' may carry a diff base revision suffix: "checkout_root@revision".
    items = path.split('@')
    assert len(items) <= 2
    self.checkout_root = os.path.abspath(items[0])
    items.append(None)
    self.diff_against = items[1]
    self.options = options
    # Lazy-load file list from the SCM unless files were specified in options.
    self._files = None
    self._file_tuples = None
    if file_list:
      self._files = file_list
      self._file_tuples = [('M', f) for f in self.files]
      self.options.files = None
    self.codereview_settings = None
    self.codereview_settings_file = 'codereview.settings'
    self.toplevel_root = None

  def GetFileNames(self):
    """Return the list of files in the diff."""
    return self.files

  def GetCodeReviewSetting(self, key):
    """Returns a value for the given key for this repository.

    Uses gcl-style settings from the repository.
    """
    # Prefer gcl's cached settings when gcl is importable.
    if gcl:
      gcl_setting = gcl.GetCodeReviewSetting(key)
      if gcl_setting != '':
        return gcl_setting
    # Parse and cache codereview.settings once, on first use.
    if self.codereview_settings is None:
      self.codereview_settings = {}
      settings_file = self.ReadRootFile(self.codereview_settings_file)
      if settings_file:
        for line in settings_file.splitlines():
          if not line or line.lstrip().startswith('#'):
            continue
          k, v = line.split(":", 1)
          self.codereview_settings[k.strip()] = v.strip()
    return self.codereview_settings.get(key, '')

  def _GclStyleSettings(self):
    """Set default settings based on the gcl-style settings from the repository.

    The settings in the self.options object will only be set if no previous
    value exists (i.e. command line flags to the try command will override the
    settings in codereview.settings).
    """
    settings = {
      'port': self.GetCodeReviewSetting('TRYSERVER_HTTP_PORT'),
      'host': self.GetCodeReviewSetting('TRYSERVER_HTTP_HOST'),
      'svn_repo': self.GetCodeReviewSetting('TRYSERVER_SVN_URL'),
      'gerrit_url': self.GetCodeReviewSetting('TRYSERVER_GERRIT_URL'),
      'git_repo': self.GetCodeReviewSetting('TRYSERVER_GIT_URL'),
      'project': self.GetCodeReviewSetting('TRYSERVER_PROJECT'),
      # Primarily for revision=auto
      'revision': self.GetCodeReviewSetting('TRYSERVER_REVISION'),
      'root': self.GetCodeReviewSetting('TRYSERVER_ROOT'),
      'patchlevel': self.GetCodeReviewSetting('TRYSERVER_PATCHLEVEL'),
    }
    logging.info('\n'.join(['%s: %s' % (k, v)
                            for (k, v) in settings.iteritems() if v]))
    for (k, v) in settings.iteritems():
      # Avoid overwriting options already set using command line flags.
      if v and getattr(self.options, k) is None:
        setattr(self.options, k, v)

  def AutomagicalSettings(self):
    """Determines settings based on supported code review and checkout tools.
    """
    # Try to find gclient or repo root first.
    if not self.options.no_search:
      self.toplevel_root = gclient_utils.FindGclientRoot(self.checkout_root)
      if self.toplevel_root:
        logging.info('Found .gclient at %s' % self.toplevel_root)
      else:
        self.toplevel_root = gclient_utils.FindFileUpwards(
            os.path.join('..', '.repo'), self.checkout_root)
        if self.toplevel_root:
          logging.info('Found .repo dir at %s'
                       % os.path.dirname(self.toplevel_root))

      # Parse TRYSERVER_* settings from codereview.settings before falling back
      # on setting self.options.root manually further down. Otherwise
      # TRYSERVER_ROOT would never be used in codereview.settings.
      self._GclStyleSettings()

      if self.toplevel_root and not self.options.root:
        assert os.path.abspath(self.toplevel_root) == self.toplevel_root
        self.options.root = gclient_utils.PathDifference(self.toplevel_root,
                                                         self.checkout_root)
    else:
      self._GclStyleSettings()

  def ReadRootFile(self, filename):
    # Walk upwards from the checkout root to the top-level root, returning
    # the contents of the first 'filename' found, or None.
    cur = self.checkout_root
    root = self.toplevel_root or self.checkout_root
    assert cur.startswith(root), (root, cur)
    while cur.startswith(root):
      filepath = os.path.join(cur, filename)
      if os.path.isfile(filepath):
        logging.info('Found %s at %s' % (filename, cur))
        return gclient_utils.FileRead(filepath)
      cur = os.path.dirname(cur)
    logging.warning('Didn\'t find %s' % filename)
    return None

  def _SetFileTuples(self, file_tuples):
    # Drop files with uninteresting statuses or matching --exclude patterns.
    excluded = ['!', '?', 'X', ' ', '~']
    def Excluded(f):
      if f[0][0] in excluded:
        return True
      for r in self.options.exclude:
        if re.search(r, f[1]):
          logging.info('Ignoring "%s"' % f[1])
          return True
      return False

    self._file_tuples = [f for f in file_tuples if not Excluded(f)]
    self._files = [f[1] for f in self._file_tuples]

  def CaptureStatus(self):
    """Returns the 'svn status' emulated output as an array of (status, file)
    tuples."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  @property
  def files(self):
    if self._files is None:
      self._SetFileTuples(self.CaptureStatus())
    return self._files

  @property
  def file_tuples(self):
    if self._file_tuples is None:
      self._SetFileTuples(self.CaptureStatus())
    return self._file_tuples
class SVN(SCM):
  """Gathers the options and diff for a subversion checkout."""
  def __init__(self, *args, **kwargs):
    SCM.__init__(self, *args, **kwargs)
    self.checkout_root = scm.SVN.GetCheckoutRoot(self.checkout_root)
    if not self.options.email:
      # Assumes the svn credential is an email address.
      self.options.email = scm.SVN.GetEmail(self.checkout_root)
    logging.info("SVN(%s)" % self.checkout_root)

  def ReadRootFile(self, filename):
    # First look on disk (base class behavior), then fall back to the
    # subversion repository cache via gcl.
    data = SCM.ReadRootFile(self, filename)
    if data:
      return data

    # Try to search on the subversion repository for the file.
    if not gcl:
      return None
    data = gcl.GetCachedFile(filename)
    logging.debug('%s:\n%s' % (filename, data))
    return data

  def CaptureStatus(self):
    # (status, file) tuples for the whole checkout.
    return scm.SVN.CaptureStatus(None, self.checkout_root)

  def GenerateDiff(self):
    """Returns a string containing the diff for the given file list.

    The files in the list should either be absolute paths or relative to the
    given root.
    """
    return scm.SVN.GenerateDiff(self.files, self.checkout_root, full_move=True,
                                revision=self.diff_against)
class GIT(SCM):
  """Gathers the options and diff for a git checkout."""
  def __init__(self, *args, **kwargs):
    SCM.__init__(self, *args, **kwargs)
    self.checkout_root = scm.GIT.GetCheckoutRoot(self.checkout_root)
    if not self.options.name:
      self.options.name = scm.GIT.GetPatchName(self.checkout_root)
    if not self.options.email:
      self.options.email = scm.GIT.GetEmail(self.checkout_root)
    if not self.diff_against:
      # Default the diff base to the branch's upstream.
      self.diff_against = scm.GIT.GetUpstreamBranch(self.checkout_root)
      if not self.diff_against:
        raise NoTryServerAccess(
            "Unable to determine default branch to diff against. "
            "Verify this branch is set up to track another"
            "(via the --track argument to \"git checkout -b ...\"")
    logging.info("GIT(%s)" % self.checkout_root)

  def CaptureStatus(self):
    """Returns (status, file) tuples relative to the upstream branch."""
    return scm.GIT.CaptureStatus(
        [],
        self.checkout_root.replace(os.sep, '/'),
        self.diff_against)

  def GenerateDiff(self):
    """Returns the diff against the upstream branch, or None if the tree is
    dirty (uncommitted changes)."""
    if RunGit(['diff-index', 'HEAD']):
      print 'Cannot try with a dirty tree. You must commit locally first.'
      return None
    return scm.GIT.GenerateDiff(
        self.checkout_root,
        files=self.files,
        full_move=True,
        branch=self.diff_against)
def _ParseBotList(botlist, testfilter):
"""Parses bot configurations from a list of strings."""
bots = []
if testfilter:
for bot in itertools.chain.from_iterable(botspec.split(',')
for botspec in botlist):
tests = set()
if ':' in bot:
if bot.endswith(':compile'):
tests |= set(['compile'])
else:
raise ValueError(
'Can\'t use both --testfilter and --bot builder:test formats '
'at the same time')
bots.append((bot, tests))
else:
for botspec in botlist:
botname = botspec.split(':')[0]
tests = set()
if ':' in botspec:
tests |= set(filter(None, botspec.split(':')[1].split(',')))
bots.append((botname, tests))
return bots
def _ApplyTestFilter(testfilter, bot_spec):
"""Applies testfilter from CLI.
Specifying a testfilter strips off any builder-specified tests (except for
compile).
"""
if testfilter:
return [(botname, set(testfilter) | (tests & set(['compile'])))
for botname, tests in bot_spec]
else:
return bot_spec
def _GenTSBotSpec(checkouts, change, changed_files, options):
  """Builds the (bot, tests) spec from PRESUBMIT.py try masters, if any."""
  bot_spec = []
  # Get try slaves from PRESUBMIT.py files if not specified.
  # Even if the diff comes from options.url, use the local checkout for bot
  # selection.
  try:
    import presubmit_support
    root_presubmit = checkouts[0].ReadRootFile('PRESUBMIT.py')
    if not change:
      if not changed_files:
        changed_files = checkouts[0].file_tuples
      change = presubmit_support.Change(options.name,
                                        '',
                                        checkouts[0].checkout_root,
                                        changed_files,
                                        options.issue,
                                        options.patchset,
                                        options.email)
    masters = presubmit_support.DoGetTryMasters(
        change,
        checkouts[0].GetFileNames(),
        checkouts[0].checkout_root,
        root_presubmit,
        options.project,
        options.verbose,
        sys.stdout)
    # Compatibility for old checkouts and bots that were on tryserver.chromium.
    trybots = masters.get('tryserver.chromium', [])
    # Compatibility for checkouts that are not using tryserver.chromium
    # but are stuck with git-try or gcl-try.
    if not trybots and len(masters) == 1:
      trybots = masters.values()[0]
    if trybots:
      # Bots may be plain strings ('bot:test') or (bot, tests) tuples.
      old_style = filter(lambda x: isinstance(x, basestring), trybots)
      new_style = filter(lambda x: isinstance(x, tuple), trybots)
      # _ParseBotList's testfilter is set to None otherwise it will complain.
      bot_spec = _ApplyTestFilter(options.testfilter,
                                  _ParseBotList(old_style, None))
      bot_spec.extend(_ApplyTestFilter(options.testfilter, new_style))
  except ImportError:
    # presubmit_support not available: silently return an empty spec.
    pass
  return bot_spec
def _ParseSendChangeOptions(bot_spec, options):
"""Parse common options passed to _SendChangeHTTP, _SendChangeSVN and
_SendChangeGit.
"""
values = [
('user', options.user),
('name', options.name),
]
# A list of options to copy.
optional_values = (
'email',
'revision',
'root',
'patchlevel',
'issue',
'patchset',
'target',
'project',
)
for option_name in optional_values:
value = getattr(options, option_name)
if value:
values.append((option_name, value))
# Not putting clobber to optional_names
# because it used to have lower-case 'true'.
if options.clobber:
values.append(('clobber', 'true'))
for bot, tests in bot_spec:
values.append(('bot', ('%s:%s' % (bot, ','.join(tests)))))
return values
def _SendChangeHTTP(bot_spec, options):
  """Send a change to the try server using the HTTP protocol."""
  if not options.host:
    raise NoTryServerAccess('Please use the --host option to specify the try '
                            'server host to connect to.')
  if not options.port:
    raise NoTryServerAccess('Please use the --port option to specify the try '
                            'server port to connect to.')

  # Serialize the job parameters plus the diff itself as form values.
  values = _ParseSendChangeOptions(bot_spec, options)
  values.append(('patch', options.diff))

  url = 'http://%s:%s/send_try_patch' % (options.host, options.port)

  logging.info('Sending by HTTP')
  logging.info(''.join("%s=%s\n" % (k, v) for k, v in values))
  logging.info(url)
  logging.info(options.diff)
  if options.dry_run:
    # Dry run: everything has been logged; do not touch the network.
    return

  try:
    logging.info('Opening connection...')
    connection = urllib2.urlopen(url, urllib.urlencode(values))
    logging.info('Done')
  except IOError, e:
    logging.info(str(e))
    # A bad status line commonly means a malformed --bot value was echoed.
    if bot_spec and len(e.args) > 2 and e.args[2] == 'got a bad status line':
      raise NoTryServerAccess('%s is unaccessible. Bad --bot argument?' % url)
    else:
      raise NoTryServerAccess('%s is unaccessible. Reason: %s' % (url,
                                                                  str(e.args)))
  if not connection:
    raise NoTryServerAccess('%s is unaccessible.' % url)

  logging.info('Reading response...')
  response = connection.read()
  logging.info('Done')
  # The try server acknowledges a successful submission with a bare 'OK'.
  if response != 'OK':
    raise NoTryServerAccess('%s is unaccessible. Got:\n%s' % (url, response))

  PrintSuccess(bot_spec, options)
@contextlib.contextmanager
def _TempFilename(name, contents=None):
"""Create a temporary directory, append the specified name and yield.
In contrast to NamedTemporaryFile, does not keep the file open.
Deletes the file on __exit__.
"""
temp_dir = tempfile.mkdtemp(prefix=name)
try:
path = os.path.join(temp_dir, name)
if contents is not None:
with open(path, 'wb') as f:
f.write(contents)
yield path
finally:
shutil.rmtree(temp_dir, True)
@contextlib.contextmanager
def _PrepareDescriptionAndPatchFiles(description, options):
  """Creates temporary files with description and patch.

  __enter__ called on the return value returns a tuple of patch_filename and
  description_filename.

  Args:
    description: contents of description file.
    options: patchset options object. Must have attributes: user,
        name (of patch) and diff (contents of patch).
  """
  # Timestamp with ':' replaced, so the name is safe on every file system.
  timestamp = str(datetime.datetime.now()).replace(':', '.')
  patch_basename = '%s.%s.%s.diff' % (
      Escape(options.user), Escape(options.name), timestamp)
  with _TempFilename('description', description) as description_filename:
    with _TempFilename(patch_basename, options.diff) as patch_filename:
      yield patch_filename, description_filename
def _SendChangeSVN(bot_spec, options):
  """Send a change to the try server by committing a diff file on a subversion
  server."""
  if not options.svn_repo:
    raise NoTryServerAccess('Please use the --svn_repo option to specify the'
                            ' try server svn repository to connect to.')

  # The job parameters are encoded as key=value lines in the commit message.
  values = _ParseSendChangeOptions(bot_spec, options)
  description = ''.join("%s=%s\n" % (k, v) for k, v in values)
  logging.info('Sending by SVN')
  logging.info(description)
  logging.info(options.svn_repo)
  logging.info(options.diff)
  if options.dry_run:
    return

  with _PrepareDescriptionAndPatchFiles(description, options) as (
      patch_filename, description_filename):
    if sys.platform == "cygwin":
      # Small chromium-specific issue here:
      # git-try uses /usr/bin/python on cygwin but svn.bat will be used
      # instead of /usr/bin/svn by default. That causes bad things(tm) since
      # Windows' svn.exe has no clue about cygwin paths. Hence force to use
      # the cygwin version in this particular context.
      exe = "/usr/bin/svn"
    else:
      exe = "svn"
    # 'svn import' of the directory containing the patch file.
    patch_dir = os.path.dirname(patch_filename)
    command = [exe, 'import', '-q', patch_dir, options.svn_repo, '--file',
               description_filename]
    if scm.SVN.AssertVersion("1.5")[0]:
      command.append('--no-ignore')

    try:
      subprocess2.check_call(command)
    except subprocess2.CalledProcessError, e:
      raise NoTryServerAccess(str(e))

  PrintSuccess(bot_spec, options)
def _GetPatchGitRepo(git_url):
  """Gets a path to a Git repo with patches.

  Stores patches in .git/git-try/patches-git directory, a git repo. If it
  doesn't exist yet or its origin URL is different, cleans up and clones it.
  If it existed before, then pulls changes.

  Does not support SVN repo.

  Returns a path to the directory with patches.
  """
  git_dir = scm.GIT.GetGitDir(os.getcwd())
  patch_dir = os.path.join(git_dir, GIT_PATCH_DIR_BASENAME)

  logging.info('Looking for git repo for patches')
  # Is there already a repo with the expected url or should we clone?
  clone = True
  if os.path.exists(patch_dir) and scm.GIT.IsInsideWorkTree(patch_dir):
    existing_url = scm.GIT.Capture(
        ['config', '--local', 'remote.origin.url'],
        cwd=patch_dir)
    clone = existing_url != git_url

  if clone:
    if os.path.exists(patch_dir):
      logging.info('Cleaning up')
      shutil.rmtree(patch_dir, True)
    logging.info('Cloning patch repo')
    scm.GIT.Capture(['clone', git_url, GIT_PATCH_DIR_BASENAME], cwd=git_dir)
    # Commits in the patch repo should carry the local user's email.
    email = scm.GIT.GetEmail(cwd=os.getcwd())
    scm.GIT.Capture(['config', '--local', 'user.email', email], cwd=patch_dir)
  else:
    if scm.GIT.IsWorkTreeDirty(patch_dir):
      logging.info('Work dir is dirty: hard reset!')
      scm.GIT.Capture(['reset', '--hard'], cwd=patch_dir)
    logging.info('Updating patch repo')
    scm.GIT.Capture(['pull', 'origin', 'master'], cwd=patch_dir)

  return os.path.abspath(patch_dir)
def _SendChangeGit(bot_spec, options):
  """Sends a change to the try server by committing a diff file to a GIT repo.

  Creates a temp orphan branch, commits patch.diff, creates a ref pointing to
  that commit, deletes the temp branch, checks master out, adds 'ref' file
  containing the name of the new ref, pushes master and the ref to the origin.

  TODO: instead of creating a temp branch, use git-commit-tree.
  """
  if not options.git_repo:
    raise NoTryServerAccess('Please use the --git_repo option to specify the '
                            'try server git repository to connect to.')

  # The job parameters are encoded as key=value lines in the commit message.
  values = _ParseSendChangeOptions(bot_spec, options)
  comment_subject = '%s.%s' % (options.user, options.name)
  comment_body = ''.join("%s=%s\n" % (k, v) for k, v in values)
  description = '%s\n\n%s' % (comment_subject, comment_body)
  logging.info('Sending by GIT')
  logging.info(description)
  logging.info(options.git_repo)
  logging.info(options.diff)
  if options.dry_run:
    return

  patch_dir = _GetPatchGitRepo(options.git_repo)
  def patch_git(*args):
    # Run a git command inside the patch repository.
    return scm.GIT.Capture(list(args), cwd=patch_dir)
  def add_and_commit(filename, comment_filename):
    patch_git('add', filename)
    patch_git('commit', '-F', comment_filename)

  assert scm.GIT.IsInsideWorkTree(patch_dir)
  assert not scm.GIT.IsWorkTreeDirty(patch_dir)

  with _PrepareDescriptionAndPatchFiles(description, options) as (
      patch_filename, description_filename):
    logging.info('Committing patch')

    temp_branch = 'tmp_patch'
    target_ref = 'refs/patches/%s/%s' % (
        Escape(options.user),
        os.path.basename(patch_filename).replace(' ','_'))
    target_filename = os.path.join(patch_dir, 'patch.diff')
    branch_file = os.path.join(patch_dir, GIT_BRANCH_FILE)

    patch_git('checkout', 'master')
    try:
      # Try deleting an existing temp branch, if any.
      try:
        patch_git('branch', '-D', temp_branch)
        logging.debug('Deleted an existing temp branch.')
      except subprocess2.CalledProcessError:
        pass
      # Create a new branch and put the patch there.
      patch_git('checkout', '--orphan', temp_branch)
      patch_git('reset')
      patch_git('clean', '-f')
      shutil.copyfile(patch_filename, target_filename)
      add_and_commit(target_filename, description_filename)
      assert not scm.GIT.IsWorkTreeDirty(patch_dir)

      # Create a ref and point it to the commit referenced by temp_branch.
      patch_git('update-ref', target_ref, temp_branch)

      # Delete the temp ref.
      patch_git('checkout', 'master')
      patch_git('branch', '-D', temp_branch)

      # Update the branch file in the master.
      def update_branch():
        with open(branch_file, 'w') as f:
          f.write(target_ref)
        add_and_commit(branch_file, description_filename)

      update_branch()

      # Push master and target_ref to origin.
      logging.info('Pushing patch')
      for attempt in xrange(_GIT_PUSH_ATTEMPTS):
        try:
          patch_git('push', 'origin', 'master', target_ref)
          # BUGFIX: without this break, a successful push was redundantly
          # repeated on every remaining attempt of the retry loop.
          break
        except subprocess2.CalledProcessError as e:
          is_last = attempt == _GIT_PUSH_ATTEMPTS - 1
          if is_last:
            raise NoTryServerAccess(str(e))
          # A rejected push usually means someone else pushed first:
          # fetch, reset onto the new origin/master and redo the branch file.
          patch_git('fetch', 'origin')
          patch_git('reset', '--hard', 'origin/master')
          update_branch()
    except subprocess2.CalledProcessError, e:
      # Restore state: delete the failed commit attempt from the local repo.
      patch_git('checkout', 'master')
      patch_git('reset', '--hard', 'origin/master')
      raise

  PrintSuccess(bot_spec, options)
def _SendChangeGerrit(bot_spec, options):
  """Posts a try job to a Gerrit change.

  Reads Change-Id from the HEAD commit, resolves the current revision, checks
  that local revision matches the uploaded one, posts a try job in form of a
  message, sets Tryjob-Request label to 1.

  Gerrit message format: starts with !tryjob, optionally followed by a tryjob
  definition in JSON format:
      buildNames: list of strings specifying build names.
      build_properties: a dict of build properties.

  Raises:
    NoTryServerAccess: --gerrit_url missing, or Gerrit auth failed.
    Error: no Change-Id in HEAD, change not uploaded, local/remote revision
        mismatch, or Gerrit rejected the review message (HTTP 400).
  """
  logging.info('Sending by Gerrit')
  if not options.gerrit_url:
    raise NoTryServerAccess('Please use --gerrit_url option to specify the '
                            'Gerrit instance url to connect to')
  gerrit_host = urlparse.urlparse(options.gerrit_url).hostname
  logging.debug('Gerrit host: %s' % gerrit_host)

  def GetChangeId(commmitish):
    """Finds Change-ID of the HEAD commit."""
    CHANGE_ID_RGX = '^Change-Id: (I[a-f0-9]{10,})'
    # %b = commit body only; the Change-Id trailer lives there.
    comment = scm.GIT.Capture(['log', '-1', commmitish, '--format=%b'],
                              cwd=os.getcwd())
    change_id_match = re.search(CHANGE_ID_RGX, comment, re.I | re.M)
    if not change_id_match:
      raise Error('Change-Id was not found in the HEAD commit. Make sure you '
                  'have a Git hook installed that generates and inserts a '
                  'Change-Id into a commit message automatically.')
    change_id = change_id_match.group(1)
    return change_id

  def FormatMessage():
    """Builds the '!tryjob {json}' review message from bot_spec/options."""
    # Build job definition.
    job_def = {}
    build_properties = {}
    if options.testfilter:
      build_properties['testfilter'] = options.testfilter
    builderNames = [builder for builder, _ in bot_spec]
    if builderNames:
      job_def['builderNames'] = builderNames
    if build_properties:
      job_def['build_properties'] = build_properties
    # Format message.
    msg = '!tryjob'
    if job_def:
      # sort_keys keeps the message deterministic for identical requests.
      msg = '%s %s' % (msg, json.dumps(job_def, sort_keys=True))
    return msg

  def PostTryjob(message):
    """Posts the message on the change and sets Tryjob-Request=1."""
    logging.info('Posting gerrit message: %s' % message)
    if not options.dry_run:
      # Post a message and set TryJob=1 label.
      try:
        gerrit_util.SetReview(gerrit_host, change_id, msg=message,
                              labels={'Tryjob-Request': 1})
      except gerrit_util.GerritError, e:
        if e.http_status == 400:
          # 400 means the request itself was malformed; surface as user error.
          raise Error(e.message)
        else:
          raise

  head_sha = scm.GIT.Capture(['log', '-1', '--format=%H'], cwd=os.getcwd())
  change_id = GetChangeId(head_sha)

  try:
    # Check that the uploaded revision matches the local one.
    changes = gerrit_util.GetChangeCurrentRevision(gerrit_host, change_id)
  except gerrit_util.GerritAuthenticationError, e:
    raise NoTryServerAccess(e.message)

  assert len(changes) <= 1, 'Multiple changes with id %s' % change_id
  if not changes:
    raise Error('A change %s was not found on the server. Was it uploaded?' %
                change_id)

  logging.debug('Found Gerrit change: %s' % changes[0])
  if changes[0]['current_revision'] != head_sha:
    raise Error('Please upload your latest local changes to Gerrit.')

  # Post a try job.
  message = FormatMessage()
  PostTryjob(message)

  change_url = urlparse.urljoin(options.gerrit_url,
                                '/#/c/%s' % changes[0]['_number'])
  print('A tryjob was posted on change %s' % change_url)
def PrintSuccess(bot_spec, options):
  """Prints a confirmation that the patch was sent to the try server.

  Silent on --dry_run. When bots were selected, each is listed as
  'builder:test1,test2'.
  """
  if options.dry_run:
    return
  message = 'Patch \'%s\' sent to try server' % options.name
  if bot_spec:
    descriptions = ['%s:%s' % (entry[0], ','.join(entry[1]))
                    for entry in bot_spec]
    message += ': %s' % ', '.join(descriptions)
  print(message)
def GuessVCS(options, path, file_list):
  """Helper to guess the version control system.

  NOTE: Very similar to upload.GuessVCS. Doesn't look for hg since we don't
  support it yet.

  This examines the path directory, guesses which SCM we're using, and
  returns an instance of the appropriate class. Exit with an error if we can't
  figure it out.

  Args:
    options: parsed command line options, forwarded to the SCM wrapper.
    path: checkout path, optionally suffixed with '@rev' (stripped before
        any filesystem check).
    file_list: list of files to limit the change to, or None.

  Returns:
    A SCM instance. Exits if the SCM can't be guessed.
  """
  __pychecker__ = 'no-returnvalues'
  # path may carry an '@revision' suffix; only the real path is probed.
  real_path = path.split('@')[0]
  logging.info("GuessVCS(%s)" % path)
  # Subversion has a .svn in all working directories.
  if os.path.isdir(os.path.join(real_path, '.svn')):
    return SVN(options, path, file_list)

  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    subprocess2.check_output(
        ['git', 'rev-parse', '--is-inside-work-tree'], cwd=real_path,
        stderr=subprocess2.VOID)
    return GIT(options, path, file_list)
  except OSError, e:
    # Re-raise anything but "git binary not found".
    if e.errno != errno.ENOENT:
      raise
  except subprocess2.CalledProcessError, e:
    if e.returncode != errno.ENOENT and e.returncode != 128:
      # ENOENT == 2 = they don't have git installed.
      # 128 = git error code when not in a repo.
      logging.warning('Unexpected error code: %s' % e.returncode)
      raise
  raise NoTryServerAccess(
      ( 'Could not guess version control system for %s.\n'
        'Are you in a working copy directory?') % path)
def GetMungedDiff(path_diff, diff):
  """Rewrites '--- '/'+++ ' diff headers so their paths match svn layout.

  Mutates ``diff`` (a list of lines) in place: each header path is joined
  with ``path_diff`` and backslashes are normalized to forward slashes.

  Returns:
    Tuple (diff, changed_files) where changed_files is a list of
    ('M', path) pairs taken from the '--- ' headers, with any leading
    git-style 'a/' prefix stripped.
  """
  changed_files = []
  for index, line in enumerate(diff):
    is_old_header = line.startswith('--- ')
    if not (is_old_header or line.startswith('+++ ')):
      continue
    munged = posixpath.join(path_diff, line[4:]).replace('\\', '/')
    if is_old_header:
      # Keep only the bare path: drop any '\t<metadata>' suffix and the
      # newline, then git's 'a/' source prefix.
      path_only = munged.split('\t')[0].strip()
      if path_only.startswith('a/'):
        path_only = path_only[2:]
      changed_files.append(('M', path_only))
    diff[index] = line[:4] + munged
  return (diff, changed_files)
class OptionParser(optparse.OptionParser):
  """optparse.OptionParser that prints its epilog verbatim.

  The stock parser re-wraps the epilog text; this variant returns it
  unchanged (or an empty string when no epilog is set).
  """

  def format_epilog(self, _):
    """Removes epilog formatting."""
    if self.epilog:
      return self.epilog
    return ''
def gen_parser(prog):
  """Builds the OptionParser with every try-job command line flag.

  Args:
    prog: program name shown in --help / --version output.

  Returns:
    An OptionParser with all option groups registered. The --use_* flags
    store the matching _SendChange* callable into options.send_patch.
  """
  # Parse argv
  parser = OptionParser(usage=USAGE, version=__version__, prog=prog)
  parser.add_option("-v", "--verbose", action="count", default=0,
                    help="Prints debugging infos")

  group = optparse.OptionGroup(parser, "Result and status")
  group.add_option("-u", "--user", default=getpass.getuser(),
                   help="Owner user name [default: %default]")
  group.add_option("-e", "--email",
                   default=os.environ.get('TRYBOT_RESULTS_EMAIL_ADDRESS',
                                          os.environ.get('EMAIL_ADDRESS')),
                   help="Email address where to send the results. Use either "
                        "the TRYBOT_RESULTS_EMAIL_ADDRESS environment "
                        "variable or EMAIL_ADDRESS to set the email address "
                        "the try bots report results to [default: %default]")
  group.add_option("-n", "--name",
                   help="Descriptive name of the try job")
  group.add_option("--issue", type='int',
                   help="Update rietveld issue try job status")
  group.add_option("--patchset", type='int',
                   help="Update rietveld issue try job status. This is "
                        "optional if --issue is used, In that case, the "
                        "latest patchset will be used.")
  group.add_option("--dry_run", action='store_true',
                   help="Don't send the try job. This implies --verbose, so "
                        "it will print the diff.")
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, "Try job options")
  group.add_option(
      "-b", "--bot", action="append",
      help=("IMPORTANT: specify ONE builder per --bot flag. Use it multiple "
            "times to specify multiple builders. ex: "
            "'-bwin_rel:ui_tests,webkit_unit_tests -bwin_layout'. See "
            "the try server waterfall for the builders name and the tests "
            "available. Can also be used to specify gtest_filter, e.g. "
            "-bwin_rel:base_unittests:ValuesTest.*Value"))
  group.add_option("-B", "--print_bots", action="store_true",
                   help="Print bots we would use (e.g. from PRESUBMIT.py)"
                        " and exit. Do not send patch. Like --dry_run"
                        " but less verbose.")
  group.add_option("-r", "--revision",
                   help="Revision to use for the try job. If 'auto' is "
                        "specified, it is resolved to the revision a patch is "
                        "generated against (Git only). Default: the "
                        "revision will be determined by the try server; see "
                        "its waterfall for more info")
  group.add_option("-c", "--clobber", action="store_true",
                   help="Force a clobber before building; e.g. don't do an "
                        "incremental build")
  # TODO(maruel): help="Select a specific configuration, usually 'debug' or "
  #               "'release'"
  group.add_option("--target", help=optparse.SUPPRESS_HELP)
  group.add_option("--project",
                   help="Override which project to use. Projects are defined "
                        "server-side to define what default bot set to use")
  group.add_option(
      "-t", "--testfilter", action="append", default=[],
      help=("Apply a testfilter to all the selected builders. Unless the "
            "builders configurations are similar, use multiple "
            "--bot <builder>:<test> arguments."))
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, "Patch to run")
  group.add_option("-f", "--file", default=[], dest="files",
                   metavar="FILE", action="append",
                   help="Use many times to list the files to include in the "
                        "try, relative to the repository root")
  group.add_option("--diff",
                   help="File containing the diff to try")
  group.add_option("--url",
                   help="Url where to grab a patch, e.g. "
                        "http://example.com/x.diff")
  group.add_option("-R", "--rietveld_url", default="codereview.appspot.com",
                   metavar="URL",
                   help="Has 2 usages, both refer to the rietveld instance: "
                        "Specify which code review patch to use as the try job "
                        "or rietveld instance to update the try job results "
                        "Default:%default")
  group.add_option("--root",
                   help="Root to use for the patch; base subdirectory for "
                        "patch created in a subdirectory")
  group.add_option("-p", "--patchlevel", type='int', metavar="LEVEL",
                   help="Used as -pN parameter to patch")
  group.add_option("-s", "--sub_rep", action="append", default=[],
                   help="Subcheckout to use in addition. This is mainly "
                        "useful for gclient-style checkouts. In git, checkout "
                        "the branch with changes first. Use @rev or "
                        "@branch to specify the "
                        "revision/branch to diff against. If no @branch is "
                        "given the diff will be against the upstream branch. "
                        "If @branch then the diff is branch..HEAD. "
                        "All edits must be checked in.")
  group.add_option("--no_search", action="store_true",
                   help=("Disable automatic search for gclient or repo "
                         "checkout root."))
  group.add_option("-E", "--exclude", action="append",
                   default=['ChangeLog'], metavar='REGEXP',
                   help="Regexp patterns to exclude files. Default: %default")
  group.add_option("--upstream_branch", action="store",
                   help="Specify the upstream branch to diff against in the "
                        "main checkout")
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, "Access the try server by HTTP")
  group.add_option("--use_http",
                   action="store_const",
                   const=_SendChangeHTTP,
                   dest="send_patch",
                   help="Use HTTP to talk to the try server [default]")
  group.add_option("-H", "--host",
                   help="Host address")
  group.add_option("-P", "--port", type="int",
                   help="HTTP port")
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, "Access the try server with SVN")
  group.add_option("--use_svn",
                   action="store_const",
                   const=_SendChangeSVN,
                   dest="send_patch",
                   help="Use SVN to talk to the try server")
  group.add_option("-S", "--svn_repo",
                   metavar="SVN_URL",
                   help="SVN url to use to write the changes in; --use_svn is "
                        "implied when using --svn_repo")
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, "Access the try server with Git")
  group.add_option("--use_git",
                   action="store_const",
                   const=_SendChangeGit,
                   dest="send_patch",
                   help="Use GIT to talk to the try server")
  group.add_option("-G", "--git_repo",
                   metavar="GIT_URL",
                   help="GIT url to use to write the changes in; --use_git is "
                        "implied when using --git_repo")
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, "Access the try server with Gerrit")
  group.add_option("--use_gerrit",
                   action="store_const",
                   const=_SendChangeGerrit,
                   dest="send_patch",
                   help="Use Gerrit to talk to the try server")
  group.add_option("--gerrit_url",
                   metavar="GERRIT_URL",
                   help="Gerrit url to post a tryjob to; --use_gerrit is "
                        "implied when using --gerrit_url")
  parser.add_option_group(group)
  return parser
def TryChange(argv,
              change,
              swallow_exception,
              prog=None,
              extra_epilog=None):
  """Sends a try job using the transport selected by the command line flags.

  Args:
    argv: Arguments and options.
    change: Change instance corresponding to the CL.
    swallow_exception: Whether we raise or swallow exceptions.
    prog: program name for the option parser, passed to gen_parser().
    extra_epilog: extra text appended to the --help epilog.

  Returns:
    0 on success, 1 on error (Error is re-raised instead when
    swallow_exception is False).
  """
  parser = gen_parser(prog)
  epilog = EPILOG % { 'prog': prog }
  if extra_epilog:
    epilog += extra_epilog
  parser.epilog = epilog

  options, args = parser.parse_args(argv)

  # If they've asked for help, give it to them
  if len(args) == 1 and args[0] == 'help':
    parser.print_help()
    return 0

  # If they've said something confusing, don't spawn a try job until you
  # understand what they want.
  if args:
    parser.error('Extra argument(s) "%s" not understood' % ' '.join(args))

  if options.dry_run:
    options.verbose += 1

  LOG_FORMAT = '%(levelname)s %(filename)s(%(lineno)d): %(message)s'
  # Only configure logging when running standalone; an embedding caller
  # (swallow_exception=True) keeps its own logging setup.
  if not swallow_exception:
    if options.verbose == 0:
      logging.basicConfig(level=logging.WARNING, format=LOG_FORMAT)
    elif options.verbose == 1:
      logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
    elif options.verbose > 1:
      logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)

  logging.debug(argv)

  # Guard against -p being confused with -P (the HTTP port).
  if (options.patchlevel is not None and
      (options.patchlevel < 0 or options.patchlevel > 10)):
    parser.error(
        'Have you tried --port instead? You probably confused -p and -P.')

  # Strip off any @ in the user, otherwise svn gets confused.
  options.user = options.user.split('@', 1)[0]

  if options.rietveld_url:
    # Try to extract the review number if possible and fix the protocol.
    if not '://' in options.rietveld_url:
      options.rietveld_url = 'http://' + options.rietveld_url
    match = re.match(r'^(.*)/(\d+)/?$', options.rietveld_url)
    if match:
      if options.issue or options.patchset:
        parser.error('Cannot use both --issue and use a review number url')
      options.issue = int(match.group(2))
      options.rietveld_url = match.group(1)

  try:
    changed_files = None
    # Always include os.getcwd() in the checkout settings.
    path = os.getcwd()

    file_list = []
    if options.files:
      file_list = options.files
    elif change:
      file_list = [f.LocalPath() for f in change.AffectedFiles()]

    if options.upstream_branch:
      path += '@' + options.upstream_branch
      # Clear file list so that the correct list will be retrieved from the
      # upstream branch.
      file_list = []

    current_vcs = GuessVCS(options, path, file_list)
    current_vcs.AutomagicalSettings()
    # AutomagicalSettings may have filled in defaults; use its options copy.
    options = current_vcs.options
    vcs_is_git = type(current_vcs) is GIT

    # So far, git_repo doesn't work with SVN
    if options.git_repo and not vcs_is_git:
      parser.error('--git_repo option is supported only for GIT repositories')

    # If revision==auto, resolve it
    if options.revision and options.revision.lower() == 'auto':
      if not vcs_is_git:
        parser.error('--revision=auto is supported only for GIT repositories')
      options.revision = scm.GIT.Capture(
          ['rev-parse', current_vcs.diff_against],
          cwd=path)

    checkouts = [current_vcs]
    for item in options.sub_rep:
      # Pass file_list=None because we don't know the sub repo's file list.
      checkout = GuessVCS(options,
                          os.path.join(current_vcs.checkout_root, item),
                          None)
      if checkout.checkout_root in [c.checkout_root for c in checkouts]:
        parser.error('Specified the root %s two times.' %
                     checkout.checkout_root)
      checkouts.append(checkout)

    can_http = options.port and options.host
    can_svn = options.svn_repo
    can_git = options.git_repo
    can_gerrit = options.gerrit_url
    can_something = can_http or can_svn or can_git or can_gerrit
    # If there was no transport selected yet, now we must have enough data to
    # select one.
    if not options.send_patch and not can_something:
      parser.error('Please specify an access method.')

    # Convert options.diff into the content of the diff.
    if options.url:
      if options.files:
        parser.error('You cannot specify files and --url at the same time.')
      options.diff = urllib2.urlopen(options.url).read()
    elif options.diff:
      if options.files:
        parser.error('You cannot specify files and --diff at the same time.')
      options.diff = gclient_utils.FileRead(options.diff, 'rb')
    elif options.issue and options.patchset is None:
      # Retrieve the patch from rietveld when the diff is not specified.
      # When patchset is specified, it's because it's done by gcl/git-try.
      api_url = '%s/api/%d' % (options.rietveld_url, options.issue)
      logging.debug(api_url)
      contents = json.loads(urllib2.urlopen(api_url).read())
      options.patchset = contents['patchsets'][-1]
      diff_url = ('%s/download/issue%d_%d.diff' %
                  (options.rietveld_url, options.issue, options.patchset))
      diff = GetMungedDiff('', urllib2.urlopen(diff_url).readlines())
      options.diff = ''.join(diff[0])
      changed_files = diff[1]
    else:
      # Use this as the base.
      root = checkouts[0].checkout_root
      diffs = []
      for checkout in checkouts:
        raw_diff = checkout.GenerateDiff()
        if not raw_diff:
          continue
        diff = raw_diff.splitlines(True)
        path_diff = gclient_utils.PathDifference(root, checkout.checkout_root)
        # Munge it.
        diffs.extend(GetMungedDiff(path_diff, diff)[0])
      if not diffs:
        logging.error('Empty or non-existant diff, exiting.')
        return 1
      options.diff = ''.join(diffs)

    if not options.name:
      if options.issue:
        options.name = 'Issue %s' % options.issue
      else:
        options.name = 'Unnamed'
        print('Note: use --name NAME to change the try job name.')

    if not options.email:
      parser.error('Using an anonymous checkout. Please use --email or set '
                   'the TRYBOT_RESULTS_EMAIL_ADDRESS environment variable.')
    print('Results will be emailed to: ' + options.email)

    if options.bot:
      bot_spec = _ApplyTestFilter(
          options.testfilter, _ParseBotList(options.bot, options.testfilter))
    else:
      bot_spec = _GenTSBotSpec(checkouts, change, changed_files, options)

    if options.testfilter:
      bot_spec = _ApplyTestFilter(options.testfilter, bot_spec)

    if any('triggered' in b[0] for b in bot_spec):
      print >> sys.stderr, (
          'ERROR You are trying to send a job to a triggered bot. This type of'
          ' bot requires an\ninitial job from a parent (usually a builder). '
          'Instead send your job to the parent.\nBot list: %s' % bot_spec)
      return 1

    if options.print_bots:
      print 'Bots which would be used:'
      for bot in bot_spec:
        if bot[1]:
          print ' %s:%s' % (bot[0], ','.join(bot[1]))
        else:
          print ' %s' % (bot[0])
      return 0

    # Determine sending protocol
    if options.send_patch:
      # If forced.
      senders = [options.send_patch]
    else:
      # Try sending patch using avaialble protocols
      all_senders = [
        (_SendChangeHTTP, can_http),
        (_SendChangeSVN, can_svn),
        (_SendChangeGerrit, can_gerrit),
        (_SendChangeGit, can_git),
      ]
      senders = [sender for sender, can in all_senders if can]

    # Send the patch. Fall through to the next transport only when the
    # current one reports NoTryServerAccess.
    for sender in senders:
      try:
        sender(bot_spec, options)
        return 0
      except NoTryServerAccess:
        is_last = sender == senders[-1]
        if is_last:
          raise
    assert False, "Unreachable code"
  except Error, e:
    if swallow_exception:
      return 1
    print >> sys.stderr, e
    return 1
  except (gclient_utils.Error, subprocess2.CalledProcessError), e:
    print >> sys.stderr, e
    return 1
  return 0
if __name__ == "__main__":
  # Standalone invocation: parse sys.argv (argv=None), no Change instance,
  # and do not swallow exceptions so errors reach the console.
  fix_encoding.fix_encoding()
  sys.exit(TryChange(None, None, False))
| bsd-3-clause |
janocat/odoo | addons/account/report/account_entries_report.py | 223 | 7809 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
import openerp.addons.decimal_precision as dp
class account_entries_report(osv.osv):
    """Read-only journal-items analysis model.

    Backed by the SQL view created in init() below (_auto=False), so every
    column is readonly and there is one row per account.move.line in a
    non-draft state.
    """
    _name = "account.entries.report"
    _description = "Journal Items Analysis"
    _auto = False
    _rec_name = 'date'
    _columns = {
        'date': fields.date('Effective Date', readonly=True),  # TDE FIXME master: rename into date_effective
        'date_created': fields.date('Date Created', readonly=True),
        'date_maturity': fields.date('Date Maturity', readonly=True),
        'ref': fields.char('Reference', readonly=True),
        'nbr': fields.integer('# of Items', readonly=True),
        'debit': fields.float('Debit', readonly=True),
        'credit': fields.float('Credit', readonly=True),
        'balance': fields.float('Balance', readonly=True),
        'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
        'amount_currency': fields.float('Amount Currency', digits_compute=dp.get_precision('Account'), readonly=True),
        'period_id': fields.many2one('account.period', 'Period', readonly=True),
        'account_id': fields.many2one('account.account', 'Account', readonly=True),
        'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
        'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', readonly=True),
        'product_id': fields.many2one('product.product', 'Product', readonly=True),
        'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', readonly=True),
        'move_state': fields.selection([('draft','Unposted'), ('posted','Posted')], 'Status', readonly=True),
        'move_line_state': fields.selection([('draft','Unbalanced'), ('valid','Valid')], 'State of Move Line', readonly=True),
        'reconcile_id': fields.many2one('account.move.reconcile', 'Reconciliation number', readonly=True),
        'partner_id': fields.many2one('res.partner','Partner', readonly=True),
        'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
        'quantity': fields.float('Products Quantity', digits=(16,2), readonly=True),  # TDE FIXME master: rename into product_quantity
        'user_type': fields.many2one('account.account.type', 'Account Type', readonly=True),
        'type': fields.selection([
            ('receivable', 'Receivable'),
            ('payable', 'Payable'),
            ('cash', 'Cash'),
            ('view', 'View'),
            ('consolidation', 'Consolidation'),
            ('other', 'Regular'),
            ('closed', 'Closed'),
            ], 'Internal Type', readonly=True, help="This type is used to differentiate types with "\
            "special effects in Odoo: view can not have entries, consolidation are accounts that "\
            "can have children accounts for multi-company consolidations, payable/receivable are for "\
            "partners accounts (for debit/credit computations), closed for depreciated accounts."),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
    }
    _order = 'date desc'

    def search(self, cr, uid, args, offset=0, limit=None, order=None,
            context=None, count=False):
        """Expands the pseudo domain leaves ('period_id', ?, 'current_period')
        and ('period_id', ?, 'current_year') into real period-id domains
        before delegating to the regular search.
        """
        fiscalyear_obj = self.pool.get('account.fiscalyear')
        period_obj = self.pool.get('account.period')
        # NOTE(review): args is appended to while being iterated; harmless
        # here because the appended leaves never match the pseudo values.
        for arg in args:
            if arg[0] == 'period_id' and arg[2] == 'current_period':
                current_period = period_obj.find(cr, uid, context=context)[0]
                args.append(['period_id','in',[current_period]])
                break
            elif arg[0] == 'period_id' and arg[2] == 'current_year':
                current_year = fiscalyear_obj.find(cr, uid)
                ids = fiscalyear_obj.read(cr, uid, [current_year], ['period_ids'])[0]['period_ids']
                args.append(['period_id','in',ids])
        # Drop the pseudo leaves now that their expansions were appended.
        for a in [['period_id','in','current_year'], ['period_id','in','current_period']]:
            if a in args:
                args.remove(a)
        return super(account_entries_report, self).search(cr, uid, args=args, offset=offset, limit=limit, order=order,
            context=context, count=count)

    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False,lazy=True):
        """Injects the current period (context['period']) or the periods of
        the current fiscal year (context['year']) into the domain before
        delegating to the regular read_group.
        """
        if context is None:
            context = {}
        fiscalyear_obj = self.pool.get('account.fiscalyear')
        period_obj = self.pool.get('account.period')
        if context.get('period', False) == 'current_period':
            current_period = period_obj.find(cr, uid, context=context)[0]
            domain.append(['period_id','in',[current_period]])
        elif context.get('year', False) == 'current_year':
            current_year = fiscalyear_obj.find(cr, uid)
            ids = fiscalyear_obj.read(cr, uid, [current_year], ['period_ids'])[0]['period_ids']
            domain.append(['period_id','in',ids])
        # (Dead 'else: domain = domain' branch removed: it was a no-op.)
        return super(account_entries_report, self).read_group(cr, uid, domain, fields, groupby, offset, limit, context, orderby,lazy)

    def init(self, cr):
        """(Re)creates the SQL view backing this model: one row per
        non-draft account.move.line joined with its move, account and period.
        """
        tools.drop_view_if_exists(cr, 'account_entries_report')
        cr.execute("""
            create or replace view account_entries_report as (
            select
                l.id as id,
                am.date as date,
                l.date_maturity as date_maturity,
                l.date_created as date_created,
                am.ref as ref,
                am.state as move_state,
                l.state as move_line_state,
                l.reconcile_id as reconcile_id,
                l.partner_id as partner_id,
                l.product_id as product_id,
                l.product_uom_id as product_uom_id,
                am.company_id as company_id,
                am.journal_id as journal_id,
                p.fiscalyear_id as fiscalyear_id,
                am.period_id as period_id,
                l.account_id as account_id,
                l.analytic_account_id as analytic_account_id,
                a.type as type,
                a.user_type as user_type,
                1 as nbr,
                l.quantity as quantity,
                l.currency_id as currency_id,
                l.amount_currency as amount_currency,
                l.debit as debit,
                l.credit as credit,
                coalesce(l.debit, 0.0) - coalesce(l.credit, 0.0) as balance
            from
                account_move_line l
                left join account_account a on (l.account_id = a.id)
                left join account_move am on (am.id=l.move_id)
                left join account_period p on (am.period_id=p.id)
                where l.state != 'draft'
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mancoast/CPythonPyc_test | fail/323_test_ioctl.py | 87 | 3327 | import array
import unittest
from test.support import run_unittest, import_module, get_attribute
import os, struct
# Skip the whole module when fcntl/termios are unavailable on this platform.
fcntl = import_module('fcntl')
termios = import_module('termios')
get_attribute(termios, 'TIOCGPGRP')  #Can't run tests without this feature

try:
    tty = open("/dev/tty", "rb")
except IOError:
    raise unittest.SkipTest("Unable to open /dev/tty")
else:
    # Skip if another process is in foreground
    # 4-byte buffer: TIOCGPGRP fills it with an int ("i") process-group id.
    r = fcntl.ioctl(tty, termios.TIOCGPGRP, "    ")
    tty.close()
    rpgrp = struct.unpack("i", r)[0]
    if rpgrp not in (os.getpgrp(), os.getsid(0)):
        raise unittest.SkipTest("Neither the process group nor the session "
                                "are attached to /dev/tty")
    del tty, r, rpgrp

# pty is optional; tests needing it skip themselves when it is None.
try:
    import pty
except ImportError:
    pty = None
class IoctlTests(unittest.TestCase):
    """Exercises fcntl.ioctl() against the controlling terminal (/dev/tty)."""

    def test_ioctl(self):
        # If this process has been put into the background, TIOCGPGRP returns
        # the session ID instead of the process group id.
        ids = (os.getpgrp(), os.getsid(0))
        with open("/dev/tty", "rb") as tty:
            # Immutable 4-byte buffer: ioctl returns the filled result.
            r = fcntl.ioctl(tty, termios.TIOCGPGRP, "    ")
            rpgrp = struct.unpack("i", r)[0]
            self.assertIn(rpgrp, ids)

    def _check_ioctl_mutate_len(self, nbytes=None):
        """Checks ioctl with a mutable buffer of the given byte length.

        With mutate_flag=1 ioctl writes the result back into the passed
        array in place and returns 0.
        """
        buf = array.array('i')
        intsize = buf.itemsize
        ids = (os.getpgrp(), os.getsid(0))
        # A fill value unlikely to be in `ids`
        fill = -12345
        if nbytes is not None:
            # Extend the buffer so that it is exactly `nbytes` bytes long
            buf.extend([fill] * (nbytes // intsize))
            self.assertEqual(len(buf) * intsize, nbytes)  # sanity check
        else:
            buf.append(fill)
        with open("/dev/tty", "rb") as tty:
            r = fcntl.ioctl(tty, termios.TIOCGPGRP, buf, 1)
            rpgrp = buf[0]
            self.assertEqual(r, 0)
            self.assertIn(rpgrp, ids)

    def test_ioctl_mutate(self):
        self._check_ioctl_mutate_len()

    def test_ioctl_mutate_1024(self):
        # Issue #9758: a mutable buffer of exactly 1024 bytes wouldn't be
        # copied back after the system call.
        self._check_ioctl_mutate_len(1024)

    def test_ioctl_mutate_2048(self):
        # Test with a larger buffer, just for the record.
        self._check_ioctl_mutate_len(2048)

    def test_ioctl_signed_unsigned_code_param(self):
        if not pty:
            raise unittest.SkipTest('pty module required')
        mfd, sfd = pty.openpty()
        try:
            # Compute both the sign-preserving and the always-positive form
            # of the TIOCSWINSZ opcode; ioctl must accept either.
            if termios.TIOCSWINSZ < 0:
                set_winsz_opcode_maybe_neg = termios.TIOCSWINSZ
                set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffff
            else:
                set_winsz_opcode_pos = termios.TIOCSWINSZ
                # Reinterpret the unsigned opcode as a signed 32-bit int.
                set_winsz_opcode_maybe_neg, = struct.unpack("i",
                        struct.pack("I", termios.TIOCSWINSZ))

            our_winsz = struct.pack("HHHH",80,25,0,0)
            # test both with a positive and potentially negative ioctl code
            new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_pos, our_winsz)
            new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_maybe_neg, our_winsz)
        finally:
            os.close(mfd)
            os.close(sfd)
def test_main():
    """Runs the IoctlTests suite (regrtest entry point)."""
    run_unittest(IoctlTests)

if __name__ == "__main__":
    test_main()
| gpl-3.0 |
shubhdev/openedx | lms/djangoapps/licenses/migrations/0001_initial.py | 188 | 8260 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Creates the licenses_coursesoftware and licenses_userlicense
        tables (South schema migration; generated code, do not restyle)."""
        # Adding model 'CourseSoftware'
        db.create_table('licenses_coursesoftware', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('full_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('course_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('licenses', ['CourseSoftware'])

        # Adding model 'UserLicense'
        db.create_table('licenses_userlicense', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('software', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['licenses.CourseSoftware'])),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
            ('serial', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('licenses', ['UserLicense'])
def backwards(self, orm):
# Deleting model 'CourseSoftware'
db.delete_table('licenses_coursesoftware')
# Deleting model 'UserLicense'
db.delete_table('licenses_userlicense')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'licenses.coursesoftware': {
'Meta': {'object_name': 'CourseSoftware'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'licenses.userlicense': {
'Meta': {'object_name': 'UserLicense'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'software': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['licenses.CourseSoftware']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
}
}
complete_apps = ['licenses']
| agpl-3.0 |
Teamxrtc/webrtc-streaming-node | third_party/depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/third_party/boto/tests/unit/ec2/cloudwatch/test_connection.py | 135 | 3787 | #!/usr/bin/env python
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import datetime
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.ec2.cloudwatch import CloudWatchConnection
class TestCloudWatchConnection(AWSMockServiceTestCase):
    """Unit tests for CloudWatchConnection's request-parameter building."""
    connection_class = CloudWatchConnection

    def test_build_put_params_multiple_everything(self):
        """build_put_params must expand parallel lists of metric attributes
        into MetricData.member.N.* entries without overwriting each other.
        """
        # This dictionary gets modified by the method call.
        # Check to make sure all updates happen appropriately.
        params = {}

        # Again, these are rubbish parameters. Pay them no mind, we care more
        # about the functionality of the method
        name = ['whatever', 'goeshere']
        value = None
        timestamp = [
            datetime.datetime(2013, 5, 13, 9, 2, 35),
            datetime.datetime(2013, 5, 12, 9, 2, 35),
        ]
        unit = ['lbs', 'ft']
        dimensions = None
        statistics = [
            {
                'maximum': 5,
                'minimum': 1,
                'samplecount': 3,
                'sum': 7,
            },
            {
                'maximum': 6,
                'minimum': 2,
                'samplecount': 4,
                'sum': 5,
            },
        ]

        # The important part is that this shouldn't generate a warning (due
        # to overwriting a variable) & should have the correct number of
        # Metrics (2).
        self.service_connection.build_put_params(
            params,
            name=name,
            value=value,
            timestamp=timestamp,
            unit=unit,
            dimensions=dimensions,
            statistics=statistics
        )

        self.assertEqual(params, {
            'MetricData.member.1.MetricName': 'whatever',
            'MetricData.member.1.StatisticValues.Maximum': 5,
            'MetricData.member.1.StatisticValues.Minimum': 1,
            'MetricData.member.1.StatisticValues.SampleCount': 3,
            'MetricData.member.1.StatisticValues.Sum': 7,
            'MetricData.member.1.Timestamp': '2013-05-13T09:02:35',
            'MetricData.member.1.Unit': 'lbs',
            'MetricData.member.2.MetricName': 'goeshere',
            'MetricData.member.2.StatisticValues.Maximum': 6,
            'MetricData.member.2.StatisticValues.Minimum': 2,
            'MetricData.member.2.StatisticValues.SampleCount': 4,
            'MetricData.member.2.StatisticValues.Sum': 5,
            'MetricData.member.2.Timestamp': '2013-05-12T09:02:35',
            # If needed, comment this next line to cause a test failure & see
            # the logging warning.
            'MetricData.member.2.Unit': 'ft',
        })
# Allow running this test module directly (outside the test runner).
if __name__ == '__main__':
    unittest.main()
| mit |
gorgorom/p2pool-gor-alts | p2pool/bitcoin/stratum.py | 191 | 3756 | import random
import sys
from twisted.internet import protocol, reactor
from twisted.python import log
from p2pool.bitcoin import data as bitcoin_data, getwork
from p2pool.util import expiring_dict, jsonrpc, pack
class StratumRPCMiningProvider(object):
    """Server side of the Stratum mining protocol for one miner connection.

    Exposes the `mining.*` RPC methods (subscribe/authorize/submit) to the
    remote miner and pushes new work via `mining.set_difficulty` and
    `mining.notify` whenever the work source fires its new-work event.

    NOTE(review): `wb` appears to be p2pool's work broadcaster (it provides
    get_work/preprocess_request/new_work_event) -- confirm against callers.
    This module is Python 2 code (`print >>`, str.encode('hex')).
    """

    def __init__(self, wb, other, transport):
        # wb: work source; other: JSON-RPC peer proxy for calling the miner;
        # transport: twisted transport, used only to drop bad connections.
        self.wb = wb
        self.other = other
        self.transport = transport

        self.username = None
        # jobid -> (work, response callback); entries expire after 300s so
        # stale submissions are rejected rather than accumulating forever.
        self.handler_map = expiring_dict.ExpiringDict(300)

        # Push fresh work to this miner every time new work appears.
        self.watch_id = self.wb.new_work_event.watch(self._send_work)

    def rpc_subscribe(self, miner_version=None, session_id=None):
        """Handle mining.subscribe; schedules an initial work push."""
        reactor.callLater(0, self._send_work)

        return [
            ["mining.notify", "ae6812eb4cd7735a302a8a9dd95cf71f"], # subscription details
            "", # extranonce1
            self.wb.COINBASE_NONCE_LENGTH, # extranonce2_size
        ]

    def rpc_authorize(self, username, password):
        """Handle mining.authorize; password is ignored, any user accepted."""
        self.username = username

        reactor.callLater(0, self._send_work)

    def _send_work(self):
        """Fetch work from the work source and notify the miner of it."""
        try:
            x, got_response = self.wb.get_work(*self.wb.preprocess_request('' if self.username is None else self.username))
        except:
            # Any failure to produce work is fatal for this connection.
            log.err()
            self.transport.loseConnection()
            return
        jobid = str(random.randrange(2**128))
        # Errbacks are swallowed: the miner may have disconnected mid-push.
        self.other.svc_mining.rpc_set_difficulty(bitcoin_data.target_to_difficulty(x['share_target'])*self.wb.net.DUMB_SCRYPT_DIFF).addErrback(lambda err: None)
        self.other.svc_mining.rpc_notify(
            jobid, # jobid
            getwork._swap4(pack.IntType(256).pack(x['previous_block'])).encode('hex'), # prevhash
            x['coinb1'].encode('hex'), # coinb1
            x['coinb2'].encode('hex'), # coinb2
            [pack.IntType(256).pack(s).encode('hex') for s in x['merkle_link']['branch']], # merkle_branch
            getwork._swap4(pack.IntType(32).pack(x['version'])).encode('hex'), # version
            getwork._swap4(pack.IntType(32).pack(x['bits'].bits)).encode('hex'), # nbits
            getwork._swap4(pack.IntType(32).pack(x['timestamp'])).encode('hex'), # ntime
            True, # clean_jobs
        ).addErrback(lambda err: None)
        # Remember the work so rpc_submit can validate the miner's answer.
        self.handler_map[jobid] = x, got_response

    def rpc_submit(self, worker_name, job_id, extranonce2, ntime, nonce):
        """Handle mining.submit: rebuild the block header from the miner's
        solution and hand it to the stored response callback.

        Returns False when the job id is unknown (expired or pre-restart).
        """
        if job_id not in self.handler_map:
            print >>sys.stderr, '''Couldn't link returned work's job id with its handler. This should only happen if this process was recently restarted!'''
            return False
        x, got_response = self.handler_map[job_id]
        coinb_nonce = extranonce2.decode('hex')
        assert len(coinb_nonce) == self.wb.COINBASE_NONCE_LENGTH
        # Reassemble the generation tx with the miner-chosen nonce spliced in.
        new_packed_gentx = x['coinb1'] + coinb_nonce + x['coinb2']
        header = dict(
            version=x['version'],
            previous_block=x['previous_block'],
            merkle_root=bitcoin_data.check_merkle_link(bitcoin_data.hash256(new_packed_gentx), x['merkle_link']),
            timestamp=pack.IntType(32).unpack(getwork._swap4(ntime.decode('hex'))),
            bits=x['bits'],
            nonce=pack.IntType(32).unpack(getwork._swap4(nonce.decode('hex'))),
        )
        return got_response(header, worker_name, coinb_nonce)

    def close(self):
        """Stop watching for new work; called when the connection drops."""
        self.wb.new_work_event.unwatch(self.watch_id)
class StratumProtocol(jsonrpc.LineBasedPeer):
    """Line-delimited JSON-RPC protocol speaking Stratum to one miner."""

    def connectionMade(self):
        # Expose the mining RPC service for the lifetime of this connection.
        self.svc_mining = StratumRPCMiningProvider(self.factory.wb, self.other, self.transport)

    def connectionLost(self, reason):
        # Unhook the new-work watcher so the provider can be collected.
        self.svc_mining.close()
class StratumServerFactory(protocol.ServerFactory):
    """Twisted server factory producing one StratumProtocol per connection."""
    protocol = StratumProtocol

    def __init__(self, wb):
        # wb: shared work source, handed to each protocol's mining provider.
        self.wb = wb
| gpl-3.0 |
kawamon/hue | desktop/core/ext-py/Django-1.11.29/django/utils/lru_cache.py | 335 | 7647 | try:
from functools import lru_cache
except ImportError:
# backport of Python's 3.3 lru_cache, written by Raymond Hettinger and
# licensed under MIT license, from:
# <http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/>
# Should be removed when Django only supports Python 3.2 and above.
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = {int, str, frozenset, type(None)},
sorted=sorted, tuple=tuple, type=type, len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
    def lru_cache(maxsize=100, typed=False):
        """Least-recently-used cache decorator.

        If *maxsize* is set to None, the LRU features are disabled and the cache
        can grow without bound.

        If *typed* is True, arguments of different types will be cached separately.
        For example, f(3.0) and f(3) will be treated as distinct calls with
        distinct results.

        Arguments to the cached function must be hashable.

        View the cache statistics named tuple (hits, misses, maxsize, currsize)
        with f.cache_info().  Clear the cache and statistics with f.cache_clear().
        Access the underlying function with f.__wrapped__.

        See: https://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
        """

        # Users should only access the lru_cache through its public API:
        #     cache_info, cache_clear, and f.__wrapped__
        # The internals of the lru_cache are encapsulated for thread safety and
        # to allow the implementation to change (including a possible C version).

        def decorating_function(user_function):
            cache = dict()
            stats = [0, 0]                  # make statistics updateable non-locally
            HITS, MISSES = 0, 1             # names for the stats fields
            make_key = _make_key
            cache_get = cache.get           # bound method to lookup key or return None
            _len = len                      # localize the global len() function
            lock = RLock()                  # because linkedlist updates aren't threadsafe
            root = []                       # root of the circular doubly linked list
            root[:] = [root, root, None, None]      # initialize by pointing to self
            nonlocal_root = [root]                  # make updateable non-locally
            PREV, NEXT, KEY, RESULT = 0, 1, 2, 3    # names for the link fields

            # One of three wrapper strategies is chosen at decoration time:
            # maxsize == 0 -> no caching at all; maxsize is None -> unbounded
            # dict cache; otherwise -> bounded cache with LRU eviction backed
            # by a circular doubly linked list of [PREV, NEXT, KEY, RESULT].
            if maxsize == 0:

                def wrapper(*args, **kwds):
                    # no caching, just do a statistics update after a successful call
                    result = user_function(*args, **kwds)
                    stats[MISSES] += 1
                    return result

            elif maxsize is None:

                def wrapper(*args, **kwds):
                    # simple caching without ordering or size limit
                    key = make_key(args, kwds, typed)
                    result = cache_get(key, root)   # root used here as a unique not-found sentinel
                    if result is not root:
                        stats[HITS] += 1
                        return result
                    result = user_function(*args, **kwds)
                    cache[key] = result
                    stats[MISSES] += 1
                    return result

            else:

                def wrapper(*args, **kwds):
                    # size limited caching that tracks accesses by recency
                    key = make_key(args, kwds, typed) if kwds or typed else args
                    with lock:
                        link = cache_get(key)
                        if link is not None:
                            # record recent use of the key by moving it to the front of the list
                            root, = nonlocal_root
                            link_prev, link_next, key, result = link
                            link_prev[NEXT] = link_next
                            link_next[PREV] = link_prev
                            last = root[PREV]
                            last[NEXT] = root[PREV] = link
                            link[PREV] = last
                            link[NEXT] = root
                            stats[HITS] += 1
                            return result
                    # The lock is deliberately released while user_function
                    # runs, so another thread may insert the same key first.
                    result = user_function(*args, **kwds)
                    with lock:
                        root, = nonlocal_root
                        if key in cache:
                            # getting here means that this same key was added to the
                            # cache while the lock was released.  since the link
                            # update is already done, we need only return the
                            # computed result and update the count of misses.
                            pass
                        elif _len(cache) >= maxsize:
                            # use the old root to store the new key and result
                            oldroot = root
                            oldroot[KEY] = key
                            oldroot[RESULT] = result
                            # empty the oldest link and make it the new root
                            root = nonlocal_root[0] = oldroot[NEXT]
                            oldkey = root[KEY]
                            oldvalue = root[RESULT]
                            root[KEY] = root[RESULT] = None
                            # now update the cache dictionary for the new links
                            del cache[oldkey]
                            cache[key] = oldroot
                        else:
                            # put result in a new link at the front of the list
                            last = root[PREV]
                            link = [last, root, key, result]
                            last[NEXT] = root[PREV] = cache[key] = link
                        stats[MISSES] += 1
                    return result

            def cache_info():
                """Report cache statistics"""
                with lock:
                    return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))

            def cache_clear():
                """Clear the cache and cache statistics"""
                with lock:
                    cache.clear()
                    root = nonlocal_root[0]
                    root[:] = [root, root, None, None]
                    stats[:] = [0, 0]

            wrapper.__wrapped__ = user_function
            wrapper.cache_info = cache_info
            wrapper.cache_clear = cache_clear
            return update_wrapper(wrapper, user_function)

        return decorating_function
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.