repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
takeshineshiro/wagtail | wagtail/wagtailcore/whitelist.py | 10 | 5168 | """
A generic HTML whitelisting engine, designed to accommodate subclassing to override
specific rules.
"""
import re
from bs4 import BeautifulSoup, NavigableString, Tag
ALLOWED_URL_SCHEMES = ['http', 'https', 'ftp', 'mailto', 'tel']

PROTOCOL_RE = re.compile("^[a-z0-9][-+.a-z0-9]*:")


def check_url(url_string):
    """Validate a URL for use in a whitelisted attribute (e.g. a@href).

    Returns the original ``url_string`` unchanged if its scheme is either
    absent (relative URL) or in ``ALLOWED_URL_SCHEMES``; returns ``None``
    if the scheme is disallowed (suitable for use as an attribute_rule
    callable, where ``None`` means "drop the attribute").
    """
    # Remove control characters and other disallowed characters
    # Browsers sometimes ignore these, so that 'jav\tascript:alert("XSS")'
    # is treated as a valid javascript: link
    unescaped = url_string.lower()
    # Undo HTML entity escaping first, so that obfuscations such as
    # 'javascript&#colon;' variants spelled with entities are caught.
    # (The dump of this file had garbled these into no-op replaces;
    # restored to the intended entity -> character substitutions.)
    unescaped = unescaped.replace("&lt;", "<")
    unescaped = unescaped.replace("&gt;", ">")
    unescaped = unescaped.replace("&amp;", "&")
    # Strip backticks, ASCII control chars, C1 control chars / NBSP range,
    # and all whitespace, which browsers may silently ignore inside schemes.
    unescaped = re.sub("[`\000-\040\177-\240\s]+", '', unescaped)
    # Remove the Unicode replacement character too.
    unescaped = unescaped.replace("\ufffd", "")
    if PROTOCOL_RE.match(unescaped):
        protocol = unescaped.split(':', 1)[0]
        if protocol not in ALLOWED_URL_SCHEMES:
            return None
    return url_string
def attribute_rule(allowed_attrs):
    """
    Generator for functions that can be used as entries in Whitelister.element_rules.
    These functions accept a tag, and modify its attributes by looking each attribute
    up in the 'allowed_attrs' dict defined here:
    * if the lookup fails, drop the attribute
    * if the lookup returns a callable, replace the attribute with the result of calling
      it - e.g. {'title': uppercase} will replace 'title' with the result of uppercasing
      the title. If the callable returns None, the attribute is dropped
    * if the lookup returns a truthy value, keep the attribute; if falsy, drop it
    """
    def fn(tag):
        # Snapshot the attribute items: we mutate tag's attributes mid-loop.
        for attr_name, attr_value in list(tag.attrs.items()):
            rule = allowed_attrs.get(attr_name)
            if not rule:
                # Falsy or missing rule - the attribute is not allowed at all.
                del tag[attr_name]
            elif callable(rule):
                replacement = rule(attr_value)
                if replacement is None:
                    # The transformer rejected the value - drop the attribute.
                    del tag[attr_name]
                else:
                    tag[attr_name] = replacement
            # Otherwise the rule is a truthy non-callable: keep the attribute
            # exactly as it is.
    return fn


allow_without_attributes = attribute_rule({})
class Whitelister(object):
    """HTML whitelisting engine.

    Walks a BeautifulSoup document tree and strips out every tag and
    attribute that is not explicitly permitted by ``element_rules``.
    Subclasses may override ``element_rules`` (or the ``clean_*`` hooks)
    to allow additional elements or attribute handling.
    """

    # Maps a tag name to a callable that sanitises that tag's attributes.
    # Tags whose names are absent from this dict get unwrapped entirely
    # (their children survive, the tag itself is removed).
    element_rules = {
        '[document]': allow_without_attributes,
        'a': attribute_rule({'href': check_url}),
        'b': allow_without_attributes,
        'br': allow_without_attributes,
        'div': allow_without_attributes,
        'em': allow_without_attributes,
        'h1': allow_without_attributes,
        'h2': allow_without_attributes,
        'h3': allow_without_attributes,
        'h4': allow_without_attributes,
        'h5': allow_without_attributes,
        'h6': allow_without_attributes,
        'hr': allow_without_attributes,
        'i': allow_without_attributes,
        'img': attribute_rule({'src': check_url, 'width': True, 'height': True,
                               'alt': True}),
        'li': allow_without_attributes,
        'ol': allow_without_attributes,
        'p': allow_without_attributes,
        'strong': allow_without_attributes,
        'sub': allow_without_attributes,
        'sup': allow_without_attributes,
        'ul': allow_without_attributes,
    }

    @classmethod
    def clean(cls, html):
        """Clean up an HTML string to contain just the allowed elements /
        attributes, returning the sanitised markup as a string."""
        doc = BeautifulSoup(html, 'html5lib')
        cls.clean_node(doc, doc)
        return doc.decode()

    @classmethod
    def clean_node(cls, doc, node):
        """Clean a BeautifulSoup document in-place.

        Dispatches on the node's type: strings, tags, and (defensively)
        anything else.
        """
        if isinstance(node, NavigableString):
            cls.clean_string_node(doc, node)
        elif isinstance(node, Tag):
            cls.clean_tag_node(doc, node)
        # This branch is here in case node is a BeautifulSoup object that does
        # not inherit from NavigableString or Tag. I can't find any examples
        # of such a thing at the moment, so this branch is untested.
        else:  # pragma: no cover
            cls.clean_unknown_node(doc, node)

    @classmethod
    def clean_string_node(cls, doc, str):
        # by default, nothing needs to be done to whitelist string nodes
        # NOTE: the parameter name shadows the builtin `str`; kept as-is to
        # preserve the public signature.
        pass

    @classmethod
    def clean_tag_node(cls, doc, tag):
        """Whitelist a single tag: first its children, then the tag itself."""
        # first, whitelist the contents of this tag
        # NB tag.contents will change while this iteration is running, so we need
        # to capture the initial state into a static list() and iterate over that
        # to avoid losing our place in the sequence.
        for child in list(tag.contents):
            cls.clean_node(doc, child)

        # see if there is a rule in element_rules for this tag type
        try:
            rule = cls.element_rules[tag.name]
        except KeyError:
            # don't recognise this tag name, so KILL IT WITH FIRE
            tag.unwrap()
            return

        # apply the rule
        rule(tag)

    @classmethod
    def clean_unknown_node(cls, doc, node):
        # don't know what type of object this is, so KILL IT WITH FIRE
        node.decompose()
| bsd-3-clause |
javiercantero/streamlink | src/streamlink/plugins/olympicchannel.py | 1 | 2405 | import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream, HTTPStream
class OlympicChannel(Plugin):
    """Streamlink plugin for olympicchannel.com live TV and VOD playback."""

    # Matches e.g. https://www.olympicchannel.com/en/tv/livestream-1/ or
    # .../en/playback/<slug>/ ; the 'type' group selects live vs. VOD handling.
    _url_re = re.compile(r"http(?:s)?://(\w+)\.?olympicchannel.com/../(?P<type>tv|playback)/(livestream-.\d|.*)/")
    _live_api_url = "https://www.olympicchannel.com{0}api/v2/metadata/{1}"
    _stream_get_url = "https://www.olympicchannel.com/en/proxy/viewings/"
    # Expected shape of the viewings API response (declared but not applied
    # in the methods below - presumably kept for future validation).
    _stream_api_schema = validate.Schema({
        u'status': u'ok',
        u'primary': validate.url(),
        validate.optional(u'backup'): validate.url()
    })

    @classmethod
    def can_handle_url(cls, url):
        # Truthy match object doubles as the boolean answer.
        return cls._url_re.match(url)

    def _get_vod_streams(self):
        """Resolve the HLS variant playlist for a VOD (playback) page."""
        page = http.get(self.url)
        # Scrape the 32-char asset id (prefixed 'asse_') out of the page body.
        asset = re.search(r'asse_.{32}', str(page._content)).group(0)
        post_data = '{"asset_url":"/api/assets/%s/"}' % asset
        stream_data = http.json(http.post(self._stream_get_url, data=post_data))['objects'][0]['level3']['streaming_url']
        return HLSStream.parse_variant_playlist(self.session, stream_data)

    def _get_live_streams(self, lang, path):
        """
        Get the live stream in a particular language
        :param lang: language path segment, e.g. '/en/'
        :param path: metadata path, e.g. 'tv/livestream-1/'
        :return: variant HLS streams
        """
        res = http.get(self._live_api_url.format(lang, path))
        live_res = http.json(res)['default']['uid']
        post_data = '{"channel_url":"/api/channels/%s/"}' % live_res
        # The viewings API answers with either key depending on the channel;
        # fall back to 'channel_url' when 'stream_url' is absent.
        try:
            stream_data = http.json(http.post(self._stream_get_url, data=post_data))['stream_url']
        except BaseException:
            stream_data = http.json(http.post(self._stream_get_url, data=post_data))['channel_url']
        return HLSStream.parse_variant_playlist(self.session, stream_data)

    def _get_streams(self):
        """
        Find the streams for OlympicChannel
        :return: variant HLS streams, or None for unrecognised URLs
        """
        match = self._url_re.match(self.url)
        type_of_stream = match.group('type')
        # The two-letter language segment, including surrounding slashes.
        lang = re.search(r"/../", self.url).group(0)

        if type_of_stream == 'tv':
            path = re.search(r"tv/.*-\d/$", self.url).group(0)
            return self._get_live_streams(lang, path)
        elif type_of_stream == 'playback':
            # NOTE(review): 'path' is computed but unused here -
            # _get_vod_streams() rescrapes from self.url instead.
            path = re.search(r"/playback/.*/$", self.url).group(0)
            return self._get_vod_streams()


__plugin__ = OlympicChannel
| bsd-2-clause |
johnkit/vtk-dev | ThirdParty/ZopeInterface/zope/interface/tests/test_exceptions.py | 57 | 2722 | ##############################################################################
#
# Copyright (c) 2010 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" zope.interface.exceptions unit tests
"""
import unittest
def _makeIface():
    """Return a fresh dummy ``Interface`` subclass for use as a test fixture.

    The class name 'IDummy' is load-bearing: the __str__ tests below assert
    on the interface's dotted name, so it must not change.
    """
    from zope.interface import Interface

    class IDummy(Interface):
        pass
    return IDummy
class DoesNotImplementTests(unittest.TestCase):
    """Tests for zope.interface.exceptions.DoesNotImplement."""

    def _getTargetClass(self):
        # Imported lazily so collection doesn't fail if zope.interface is absent.
        from zope.interface.exceptions import DoesNotImplement
        return DoesNotImplement

    def _makeOne(self, iface=None):
        # Build an exception instance around a (default: dummy) interface.
        if iface is None:
            iface = _makeIface()
        return self._getTargetClass()(iface)

    def test___str__(self):
        dni = self._makeOne()
        # XXX The trailing newlines and blank spaces are a stupid artifact.
        self.assertEqual(str(dni),
            'An object does not implement interface <InterfaceClass '
            'zope.interface.tests.test_exceptions.IDummy>\n\n ')
class BrokenImplementationTests(unittest.TestCase):
    """Tests for zope.interface.exceptions.BrokenImplementation."""

    def _getTargetClass(self):
        # Imported lazily so collection doesn't fail if zope.interface is absent.
        from zope.interface.exceptions import BrokenImplementation
        return BrokenImplementation

    def _makeOne(self, iface=None, name='missing'):
        # Build an exception naming the attribute the implementation lacks.
        if iface is None:
            iface = _makeIface()
        return self._getTargetClass()(iface, name)

    def test___str__(self):
        dni = self._makeOne()
        # XXX The trailing newlines and blank spaces are a stupid artifact.
        self.assertEqual(str(dni),
            'An object has failed to implement interface <InterfaceClass '
            'zope.interface.tests.test_exceptions.IDummy>\n\n'
            '        The missing attribute was not provided.\n        ')
class BrokenMethodImplementationTests(unittest.TestCase):
    """Tests for zope.interface.exceptions.BrokenMethodImplementation."""

    def _getTargetClass(self):
        # Imported lazily so collection doesn't fail if zope.interface is absent.
        from zope.interface.exceptions import BrokenMethodImplementation
        return BrokenMethodImplementation

    def _makeOne(self, method='aMethod', mess='I said so'):
        # Build an exception for a method whose implementation violates
        # its declared contract, with a human-readable reason.
        return self._getTargetClass()(method, mess)

    def test___str__(self):
        dni = self._makeOne()
        self.assertEqual(str(dni),
            'The implementation of aMethod violates its contract\n'
            '        because I said so.\n        ')
| bsd-3-clause |
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/ext-py/Django-1.6.10/django/middleware/gzip.py | 225 | 2140 | import re
from django.utils.text import compress_sequence, compress_string
from django.utils.cache import patch_vary_headers
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
    """
    This middleware compresses content if the browser allows gzip compression.
    It sets the Vary header accordingly, so that caches will base their storage
    on the Accept-Encoding header.
    """
    def process_response(self, request, response):
        """Gzip-encode ``response`` in place when safe and worthwhile.

        Returns the (possibly modified) response object; early returns leave
        the response untouched.
        """
        # It's not worth attempting to compress really short responses.
        if not response.streaming and len(response.content) < 200:
            return response

        # Vary must be patched even when we bail out below, so caches key on
        # Accept-Encoding for this URL.
        patch_vary_headers(response, ('Accept-Encoding',))

        # Avoid gzipping if we've already got a content-encoding.
        if response.has_header('Content-Encoding'):
            return response

        # MSIE have issues with gzipped response of various content types.
        if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
            ctype = response.get('Content-Type', '').lower()
            if not ctype.startswith("text/") or "javascript" in ctype:
                return response

        ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(ae):
            return response

        if response.streaming:
            # Delete the `Content-Length` header for streaming content, because
            # we won't know the compressed size until we stream it.
            response.streaming_content = compress_sequence(response.streaming_content)
            del response['Content-Length']
        else:
            # Return the compressed content only if it's actually shorter.
            compressed_content = compress_string(response.content)
            if len(compressed_content) >= len(response.content):
                return response
            response.content = compressed_content
            response['Content-Length'] = str(len(response.content))

        # The gzipped representation must not share the identity ETag.
        if response.has_header('ETag'):
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
        response['Content-Encoding'] = 'gzip'

        return response
| gpl-2.0 |
EmreAtes/spack | var/spack/repos/builtin/packages/qbank/package.py | 5 | 3277 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Qbank(Package):
    """QBank is a unique dynamic reservation-based allocation management system
    that manages the utilization of computational resources in a multi-project
    environment. It is used in conjunction with a resource management system
    allowing an organization to guarantee greater fairness and enforce mission
    priorities by associating a charge with the use of computational resources
    and allocating resource credits which limit how much of the resources may
    be used at what time and by whom. It tracks resource utilization and allows
    for insightful planning."""

    # QBank is so old that it no longer has (never had?) a homepage
    # but it was developed at Pacific Northwest National Laboratory
    # by Scott Jackson <Scott.Jackson@pnl.gov>
    homepage = "http://www.pnnl.gov/"
    # No public download exists; the tarball must be placed in the current
    # working directory by the user before installing.
    url = "file://{0}/qbank-2.10.4.tar.gz".format(os.getcwd())

    version('2.10.4', '0820587353e63d32ddb49689dd4289e7')

    variant('doc', default=False, description='Build documentation')

    depends_on('openssl')
    # QBank's Perl code is not compatible with newer Perls.
    depends_on('perl@5.6:5.16', type=('build', 'run'))
    depends_on('perl-dbi@1.00:', type=('build', 'run'))

    phases = ['configure', 'build', 'install']

    def configure_args(self):
        # Arguments passed to QBank's own Perl 'configure' script.
        prefix = self.prefix

        config_args = [
            '--prefix', prefix,
            '--logdir', join_path(prefix, 'var', 'log', 'qbank')
        ]

        return config_args

    def configure(self, spec, prefix):
        # 'configure' here is a Perl script, not an autotools one, so it is
        # invoked through the perl interpreter explicitly.
        perl = which('perl')
        perl('configure', *self.configure_args())

    def build(self, spec, prefix):
        make()

        if '+doc' in spec:
            make('docs')

    def install(self, spec, prefix):
        make('install')

        if '+doc' in spec:
            install_tree('doc', join_path(prefix, 'doc'))

    def setup_environment(self, spack_env, run_env):
        # Expose the installed docs as man pages when the +doc variant is on.
        spec = self.spec
        prefix = self.prefix

        if '+doc' in spec:
            run_env.prepend_path('MANPATH', join_path(prefix, 'doc'))
| lgpl-2.1 |
kustodian/ansible | lib/ansible/modules/network/f5/bigip_data_group.py | 23 | 48738 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_data_group
short_description: Manage data groups on a BIG-IP
description:
- Allows for managing data groups on a BIG-IP. Data groups provide a way to store collections
of values on a BIG-IP for later use in things such as LTM rules, iRules, and ASM policies.
version_added: 2.6
options:
name:
description:
- Specifies the name of the data group.
type: str
required: True
description:
description:
- The description of the data group.
type: str
version_added: 2.8
type:
description:
- The type of records in this data group.
- This parameter is especially important because it causes BIG-IP to store your data
in different ways so-as to optimize access to it. For example, it would be wrong
to specify a list of records containing IP addresses, but label them as a C(string)
type.
- This value cannot be changed once the data group is created.
type: str
choices:
- address
- addr
- ip
- string
- str
- integer
- int
default: string
internal:
description:
- The type of this data group.
- You should only consider setting this value in cases where you know exactly what
you're doing, B(or), you are working with a pre-existing internal data group.
- Be aware that if you deliberately force this parameter to C(yes), and you have a
either a large number of records or a large total records size, this large amount
of data will be reflected in your BIG-IP configuration. This can lead to B(long)
system configuration load times due to needing to parse and verify the large
configuration.
- There is a limit of either 4 megabytes or 65,000 records (whichever is more restrictive)
for uploads when this parameter is C(yes).
- This value cannot be changed once the data group is created.
type: bool
default: no
external_file_name:
description:
- When creating a new data group, this specifies the file name that you want to give an
external data group file on the BIG-IP.
- This parameter is ignored when C(internal) is C(yes).
- This parameter can be used to select an existing data group file to use with an
existing external data group.
- If this value is not provided, it will be given the value specified in C(name) and,
therefore, match the name of the data group.
- This value may only contain letters, numbers, underscores, dashes, or a period.
type: str
records:
description:
- Specifies the records that you want to add to a data group.
- If you have a large number of records, it is recommended that you use C(records_content)
instead of typing all those records here.
- The technical limit of either 1. the number of records, or 2. the total size of all
records, varies with the size of the total resources on your system; in particular,
RAM.
- When C(internal) is C(no), at least one record must be specified in either C(records)
or C(records_content).
- "When C(type) is: C(ip), C(address), C(addr) if the addresses use non default route domain,
they must be explicit about it that is they must contain a route domain notation C(%) eg. 10.10.1.1%11.
This is true regardless if the data group resides in a partition or not."
type: list
suboptions:
key:
description:
- The key describing the record in the data group.
- Your key will be used for validation of the C(type) parameter to this module.
type: str
required: True
value:
description:
- The value of the key describing the record in the data group.
type: raw
records_src:
description:
- Path to a file with records in it.
- The file should be well-formed. This means that it includes records, one per line,
that resemble the following format "key separator value". For example, C(foo := bar).
- BIG-IP is strict about this format, but this module is a bit more lax. It will allow
you to include arbitrary amounts (including none) of empty space on either side of
the separator. For an illustration of this, see the Examples section.
- Record keys are limited in length to no more than 65520 characters.
- Values of record keys are limited in length to no more than 65520 characters.
- The total number of records you can have in your BIG-IP is limited by the memory
of the BIG-IP.
- The format of this content is slightly different depending on whether you specify
a C(type) of C(address), C(integer), or C(string). See the examples section for
examples of the different types of payload formats that are expected in your data
group file.
- When C(internal) is C(no), at least one record must be specified in either C(records)
or C(records_content).
type: path
separator:
description:
- When specifying C(records_content), this is the string of characters that will
be used to break apart entries in the C(records_content) into key/value pairs.
- By default, this parameter's value is C(:=).
- This value cannot be changed once it is set.
- This parameter is only relevant when C(internal) is C(no). It will be ignored
otherwise.
type: str
default: ":="
delete_data_group_file:
description:
- When C(yes), will ensure that the remote data group file is deleted.
- This parameter is only relevant when C(state) is C(absent) and C(internal) is C(no).
type: bool
default: no
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(state) is C(present), ensures the data group exists.
- When C(state) is C(absent), ensures that the data group is removed.
- The use of state in this module refers to the entire data group, not its members.
type: str
choices:
- present
- absent
default: present
notes:
- This module does NOT support atomic updates of data group members in a type C(internal) data group.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
- Greg Crosby (@crosbygw)
'''
EXAMPLES = r'''
- name: Create a data group of addresses
bigip_data_group:
name: foo
internal: yes
records:
- key: 0.0.0.0/32
value: External_NAT
- key: 10.10.10.10
value: No_NAT
type: address
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create a data group of strings
bigip_data_group:
name: foo
internal: yes
records:
- key: caddy
value: ""
- key: cafeteria
value: ""
- key: cactus
value: ""
type: str
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create a data group of IP addresses from a file
bigip_data_group:
name: foo
records_src: /path/to/dg-file
type: address
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Update an existing internal data group of strings
bigip_data_group:
name: foo
internal: yes
records:
- key: caddy
value: ""
- key: cafeteria
value: ""
- key: cactus
value: ""
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Show the data format expected for records_content - address 1
copy:
dest: /path/to/addresses.txt
content: |
network 10.0.0.0 prefixlen 8 := "Network1",
network 172.16.0.0 prefixlen 12 := "Network2",
network 192.168.0.0 prefixlen 16 := "Network3",
network 2402:9400:1000:0:: prefixlen 64 := "Network4",
host 192.168.20.1 := "Host1",
host 172.16.1.1 := "Host2",
host 172.16.1.1/32 := "Host3",
host 2001:0db8:85a3:0000:0000:8a2e:0370:7334 := "Host4",
host 2001:0db8:85a3:0000:0000:8a2e:0370:7334/128 := "Host5"
- name: Show the data format expected for records_content - address 2
copy:
dest: /path/to/addresses.txt
content: |
10.0.0.0/8 := "Network1",
172.16.0.0/12 := "Network2",
192.168.0.0/16 := "Network3",
2402:9400:1000:0::/64 := "Network4",
192.168.20.1 := "Host1",
172.16.1.1 := "Host2",
172.16.1.1/32 := "Host3",
2001:0db8:85a3:0000:0000:8a2e:0370:7334 := "Host4",
2001:0db8:85a3:0000:0000:8a2e:0370:7334/128 := "Host5"
- name: Show the data format expected for records_content - string
copy:
dest: /path/to/strings.txt
content: |
a := alpha,
b := bravo,
c := charlie,
x := x-ray,
y := yankee,
z := zulu,
- name: Show the data format expected for records_content - integer
copy:
dest: /path/to/integers.txt
content: |
1 := bar,
2 := baz,
3,
4,
'''
RETURN = r'''
# only common fields returned
'''
import hashlib
import os
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.compare import compare_complex_list
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.ipaddress import is_valid_ip_interface
from library.module_utils.compat.ipaddress import ip_network
from library.module_utils.compat.ipaddress import ip_interface
from library.module_utils.network.f5.icontrol import upload_file
from library.module_utils.network.f5.compare import cmp_str_with_none
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.compare import compare_complex_list
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.ipaddress import is_valid_ip_interface
from ansible.module_utils.compat.ipaddress import ip_network
from ansible.module_utils.compat.ipaddress import ip_interface
from ansible.module_utils.network.f5.icontrol import upload_file
from ansible.module_utils.network.f5.compare import cmp_str_with_none
# Hard limits BIG-IP imposes on uploaded internal data group content.
LINE_LIMIT = 65000
SIZE_LIMIT_BYTES = 4000000


def zero_length(content):
    """Return True if the file-like ``content`` is empty.

    The stream is rewound to the start before returning.
    """
    content.seek(0, os.SEEK_END)
    is_empty = content.tell() == 0
    content.seek(0)
    return is_empty


def size_exceeded(content):
    """Return True if ``content`` is larger than SIZE_LIMIT_BYTES.

    The stream is rewound to the start before returning.
    """
    content.seek(0, os.SEEK_END)
    total_bytes = content.tell()
    content.seek(0)
    return total_bytes > SIZE_LIMIT_BYTES


def lines_exceeded(content):
    """Return True if ``content`` contains more than LINE_LIMIT lines.

    The whole stream is consumed (no early break, matching the original
    behaviour) and then rewound to the start.
    """
    too_many = False
    for line_number, _ in enumerate(content):
        if line_number > LINE_LIMIT:
            too_many = True
    content.seek(0)
    return too_many
class RecordsEncoder(object):
    """Serialises user-supplied records (dicts or raw strings) into the
    "key separator value" line format BIG-IP expects in data group files.

    Address-type keys are normalised into explicit ``host ...`` /
    ``network ... prefixlen ...`` forms, preserving any ``%rd`` route
    domain suffix.
    """

    def __init__(self, record_type=None, separator=None):
        # record_type is one of 'ip', 'integer', or a string type;
        # separator is typically ':='.
        self._record_type = record_type
        self._separator = separator
        # Already-normalised address records, e.g. "network 10.0.0.0 prefixlen 8 ...".
        self._network_pattern = re.compile(r'^network\s+(?P<addr>[^ ]+)\s+prefixlen\s+(?P<prefix>\d+)\s+.*')
        self._host_pattern = re.compile(r'^host\s+(?P<addr>[^ ]+)\s+.*')
        # Bare addresses carrying a %route-domain, with and without a prefix.
        self._rd_net_pattern = re.compile(r'(?P<addr>[^%]+)%(?P<rd>[0-9]+)/(?P<prefix>[0-9]+)')
        self._rd_host_pattern = re.compile(r'(?P<addr>[^%]+)%(?P<rd>[0-9]+)')

    def encode(self, record):
        """Encode one record; dispatches on dict vs. raw string input."""
        if isinstance(record, dict):
            return self.encode_dict(record)
        else:
            return self.encode_string(record)

    def encode_dict(self, record):
        """Encode a {'key': ..., 'value': ...} record per the data group type."""
        if self._record_type == 'ip':
            return self.encode_address_from_dict(record)
        elif self._record_type == 'integer':
            return self.encode_integer_from_dict(record)
        else:
            return self.encode_string_from_dict(record)

    def encode_rd_address(self, record, match, host=False):
        """Encode an address record whose key includes a %route-domain.

        ``match`` is a match of _rd_host_pattern (host=True) or
        _rd_net_pattern (host=False). Raises F5ModuleError for non-IP keys.
        """
        if host:
            if is_valid_ip_interface(match.group('addr')):
                key = ip_interface(u"{0}".format(match.group('addr')))
            else:
                raise F5ModuleError(
                    "When specifying an 'address' type, the value to the left of the separator must be an IP."
                )
        else:
            if is_valid_ip_interface(match.group('addr')):
                key = ip_interface(u"{0}/{1}".format(match.group('addr'), match.group('prefix')))
            else:
                raise F5ModuleError(
                    "When specifying an 'address' type, the value to the left of the separator must be an IP."
                )
        if key and 'value' in record:
            # /32 and /128 are single hosts; anything else is a network.
            if key.network.prefixlen in [32, 128]:
                return self.encode_host(str(key.ip) + '%' + match.group('rd'), record['value'])
            return self.encode_network(
                str(key.network.network_address) + '%' + match.group('rd'), key.network.prefixlen, record['value']
            )
        elif key:
            # No explicit value: the address itself doubles as the value.
            if key.network.prefixlen in [32, 128]:
                return self.encode_host(str(key.ip) + '%' + match.group('rd'), str(key.ip) + '%' + match.group('rd'))
            return self.encode_network(
                str(key.network.network_address) + '%' + match.group('rd'), key.network.prefixlen,
                str(key.network.network_address) + '%' + match.group('rd')
            )

    def encode_address_from_dict(self, record):
        """Encode a dict record whose key is an IP host or network."""
        # Route-domain forms are checked first; the net pattern (with /prefix)
        # must be tried before the host pattern, which would also match it.
        rd_match = re.match(self._rd_net_pattern, record['key'])
        if rd_match:
            return self.encode_rd_address(record, rd_match)
        rd_match = re.match(self._rd_host_pattern, record['key'])
        if rd_match:
            return self.encode_rd_address(record, rd_match, host=True)
        if is_valid_ip_interface(record['key']):
            key = ip_interface(u"{0}".format(str(record['key'])))
        else:
            raise F5ModuleError(
                "When specifying an 'address' type, the value to the left of the separator must be an IP."
            )
        if key and 'value' in record:
            if key.network.prefixlen in [32, 128]:
                return self.encode_host(str(key.ip), record['value'])
            return self.encode_network(
                str(key.network.network_address), key.network.prefixlen, record['value']
            )
        elif key:
            # No explicit value: the address itself doubles as the value.
            if key.network.prefixlen in [32, 128]:
                return self.encode_host(str(key.ip), str(key.ip))
            return self.encode_network(
                str(key.network.network_address), key.network.prefixlen, str(key.network.network_address)
            )

    def encode_integer_from_dict(self, record):
        """Encode a dict record for an integer-type data group."""
        try:
            int(record['key'])
        except ValueError:
            raise F5ModuleError(
                "When specifying an 'integer' type, the value to the left of the separator must be a number."
            )
        if 'key' in record and 'value' in record:
            return '{0} {1} {2}'.format(record['key'], self._separator, record['value'])
        elif 'key' in record:
            return str(record['key'])

    def encode_string_from_dict(self, record):
        """Encode a dict record for a string-type data group.

        A key without a value is emitted with an empty quoted value.
        """
        if 'key' in record and 'value' in record:
            return '{0} {1} {2}'.format(record['key'], self._separator, record['value'])
        elif 'key' in record:
            return '{0} {1} ""'.format(record['key'], self._separator)

    def encode_string(self, record):
        """Encode a raw text line (e.g. from records_src) per the group type."""
        record = record.strip().strip(',')
        if self._record_type == 'ip':
            return self.encode_address_from_string(record)
        elif self._record_type == 'integer':
            return self.encode_integer_from_string(record)
        else:
            return self.encode_string_from_string(record)

    def encode_address_from_string(self, record):
        """Encode a raw text line as an address record.

        Lines already in host/network form are passed through untouched;
        bare "addr[/prefix] separator value" lines are normalised.
        Returns None for empty lines.
        """
        if self._network_pattern.match(record):
            # network 192.168.0.0 prefixlen 16 := "Network3",
            # network 2402:9400:1000:0:: prefixlen 64 := "Network4",
            return record
        elif self._host_pattern.match(record):
            # host 172.16.1.1/32 := "Host3"
            # host 2001:0db8:85a3:0000:0000:8a2e:0370:7334 := "Host4"
            return record
        elif self._rd_net_pattern.match(record) or self._rd_host_pattern.match(record):
            # 192.168.0.0%11/16 := "Network3",
            # 2402:9400:1000:0::%11/64 := "Network4",
            # 192.168.1.1%11/32 := "Host3",
            # 2001:0db8:85a3:0000:0000:8a2e:0370:7334%11 := "Host4"
            return record
        else:
            # 192.168.0.0/16 := "Network3",
            # 2402:9400:1000:0::/64 := "Network4",
            parts = record.split(self._separator)
            if parts[0] == '':
                return
            if not is_valid_ip_interface(parts[0]):
                raise F5ModuleError(
                    "When specifying an 'address' type, the value to the left of the separator must be an IP."
                )
            key = ip_interface(u"{0}".format(str(parts[0])))

            if len(parts) == 2:
                if key.network.prefixlen in [32, 128]:
                    return self.encode_host(str(key.ip), parts[1])
                return self.encode_network(
                    str(key.network.network_address), key.network.prefixlen, parts[1]
                )
            elif len(parts) == 1 and parts[0] != '':
                if key.network.prefixlen in [32, 128]:
                    return self.encode_host(str(key.ip), str(key.ip))
                return self.encode_network(
                    str(key.network.network_address), key.network.prefixlen, str(key.network.network_address)
                )

    def encode_host(self, key, value):
        """Format a single-host record line."""
        return 'host {0} {1} {2}'.format(str(key), self._separator, str(value))

    def encode_network(self, key, prefixlen, value):
        """Format a network record line."""
        return 'network {0} prefixlen {1} {2} {3}'.format(
            str(key), str(prefixlen), self._separator, str(value)
        )

    def encode_integer_from_string(self, record):
        """Encode a raw text line as an integer record; None for empty lines."""
        parts = record.split(self._separator)
        if len(parts) == 1 and parts[0] == '':
            return None
        try:
            int(parts[0])
        except ValueError:
            raise F5ModuleError(
                "When specifying an 'integer' type, the value to the left of the separator must be a number."
            )
        if len(parts) == 2:
            return '{0} {1} {2}'.format(parts[0], self._separator, parts[1])
        elif len(parts) == 1:
            return str(parts[0])

    def encode_string_from_string(self, record):
        """Encode a raw text line as a string record.

        A key without a value gets an empty quoted value appended.
        """
        parts = record.split(self._separator)
        if len(parts) == 2:
            return '{0} {1} {2}'.format(parts[0], self._separator, parts[1])
        elif len(parts) == 1 and parts[0] != '':
            return '{0} {1} ""'.format(parts[0], self._separator)
class RecordsDecoder(object):
    """Parses data-group record lines read back from BIG-IP into
    ``dict(name=..., data=...)`` structures for comparison.
    """

    def __init__(self, record_type=None, separator=None):
        # record_type is one of 'ip', 'integer', or a string type;
        # separator is typically ':='.
        self._record_type = record_type
        self._separator = separator
        self._network_pattern = re.compile(r'^network\s+(?P<addr>[^ ]+)\s+prefixlen\s+(?P<prefix>\d+)\s+.*')
        self._host_pattern = re.compile(r'^host\s+(?P<addr>[^ ]+)\s+.*')
        # Route-domain (%rd) variants of the two patterns above.
        self._rd_net_ptrn = re.compile(r'^network\s+(?P<addr>[^%]+)%(?P<rd>[0-9]+)\s+prefixlen\s+(?P<prefix>\d+)\s+.*')
        self._rd_host_ptrn = re.compile(r'^host\s+(?P<addr>[^%]+)%(?P<rd>[0-9]+)\s+.*')

    def decode(self, record):
        """Decode one record line; address types get dedicated parsing."""
        record = record.strip().strip(',')
        if self._record_type == 'ip':
            return self.decode_address_from_string(record)
        else:
            return self.decode_from_string(record)

    def decode_address_from_string(self, record):
        """Parse a host/network record line into dict(name=..., data=...).

        Route-domain patterns are tried before the plain ones (which would
        otherwise also match). Raises F5ModuleError for non-address lines.
        """
        matches = self._rd_net_ptrn.match(record)
        if matches:
            # network 192.168.0.0%11 prefixlen 16 := "Network3",
            # network 2402:9400:1000:0::%11 prefixlen 64 := "Network4",
            value = record.split(self._separator)[1].strip().strip('"')
            addr = "{0}%{1}/{2}".format(matches.group('addr'), matches.group('rd'), matches.group('prefix'))
            result = dict(name=addr, data=value)
            return result
        matches = self._network_pattern.match(record)
        if matches:
            # network 192.168.0.0 prefixlen 16 := "Network3",
            # network 2402:9400:1000:0:: prefixlen 64 := "Network4",
            key = u"{0}/{1}".format(matches.group('addr'), matches.group('prefix'))
            addr = ip_network(key)
            value = record.split(self._separator)[1].strip().strip('"')
            result = dict(name=str(addr), data=value)
            return result
        matches = self._rd_host_ptrn.match(record)
        if matches:
            # host 172.16.1.1%11/32 := "Host3"
            # host 2001:0db8:85a3:0000:0000:8a2e:0370:7334%11 := "Host4"
            # The prefix length is recovered from the interface itself.
            host = ip_interface(u"{0}".format(matches.group('addr')))
            addr = "{0}%{1}/{2}".format(matches.group('addr'), matches.group('rd'), str(host.network.prefixlen))
            value = record.split(self._separator)[1].strip().strip('"')
            result = dict(name=addr, data=value)
            return result
        matches = self._host_pattern.match(record)
        if matches:
            # host 172.16.1.1/32 := "Host3"
            # host 2001:0db8:85a3:0000:0000:8a2e:0370:7334 := "Host4"
            key = matches.group('addr')
            addr = ip_interface(u"{0}".format(str(key)))
            value = record.split(self._separator)[1].strip().strip('"')
            result = dict(name=str(addr), data=value)
            return result
        raise F5ModuleError(
            'The value "{0}" is not an address'.format(record)
        )

    def decode_from_string(self, record):
        """Parse a "key separator value" line; value defaults to empty."""
        parts = record.split(self._separator)
        if len(parts) == 2:
            return dict(name=parts[0].strip(), data=parts[1].strip('"').strip())
        else:
            return dict(name=parts[0].strip(), data="")
class Parameters(AnsibleF5Parameters):
    """Shared parameter adapter for the data group module."""
    api_map = {
        'externalFileName': 'external_file_name',
    }
    api_attributes = [
        'records',
        'type',
        'description',
    ]
    returnables = [
        'type',
        'records',
        'description',
    ]
    updatables = [
        'records',
        'checksum',
        'description',
    ]
    @property
    def type(self):
        """Normalize user-facing type aliases to the canonical API value."""
        if self._values['type'] in ['address', 'addr', 'ip']:
            return 'ip'
        elif self._values['type'] in ['integer', 'int']:
            return 'integer'
        elif self._values['type'] in ['string']:
            return 'string'
    @property
    def records_src(self):
        """Return a seekable stream of encoded records, or None if there are none.

        On the first call the records (from the 'records_src' file path or the
        'records' list) are encoded into an in-memory buffer; subsequent calls
        rewind and reuse that buffer.
        """
        try:
            self._values['records_src'].seek(0)
            return self._values['records_src']
        except AttributeError:
            pass
        # There is a 98% chance that the user will supply a data group that is < 1MB.
        # 99.917% chance it is less than 10 MB. This is well within the range of typical
        # memory available on a system.
        #
        # If this changes, this may need to be changed to use temporary files instead.
        if self._values['records_src']:
            # 'records_src' is a path on disk. Close the handle once its
            # contents have been copied into the in-memory buffer (the file
            # object was previously left open and leaked).
            with open(self._values['records_src']) as records:
                self._values['records_src'] = StringIO()
                self._write_records_to_file(records)
        else:
            records = self._values['records']
            if records is None:
                return None
            self._values['records_src'] = StringIO()
            self._write_records_to_file(records)
        return self._values['records_src']
    def _write_records_to_file(self, records):
        """Encode each record and write it to the in-memory buffer in batches."""
        bucket_size = 1000000
        bucket = []
        encoder = RecordsEncoder(record_type=self.type, separator=self.separator)
        for record in records:
            result = encoder.encode(record)
            if result:
                bucket.append(to_text(result + ",\n"))
                if len(bucket) == bucket_size:
                    self._values['records_src'].writelines(bucket)
                    bucket = []
        self._values['records_src'].writelines(bucket)
        self._values['records_src'].seek(0)
class ApiParameters(Parameters):
    """Adapts values as they are returned by the BIG-IP REST API."""
    @property
    def checksum(self):
        # The API reports a colon-delimited value; only the third field (the
        # hash itself) is kept for comparison.
        value = self._values['checksum']
        if value is None:
            return None
        return value.split(':')[2]
    @property
    def records_list(self):
        # The unmodified 'records' value as stored by the API.
        return self._values['records']
    @property
    def description(self):
        # The API reports the string 'none' for an unset description.
        value = self._values['description']
        return None if value in [None, 'none'] else value
class ModuleParameters(Parameters):
    """Adapts values supplied by the user via module parameters."""
    @property
    def description(self):
        # 'none' and '' are sentinels meaning "clear the description";
        # an unset description stays None.
        value = self._values['description']
        if value is None:
            return None
        if value in ['none', '']:
            return ''
        return value
    @property
    def checksum(self):
        """SHA1 hex digest of the encoded records, computed once and cached."""
        if self._values['checksum']:
            return self._values['checksum']
        if self.records_src is None:
            return None
        digest = hashlib.sha1()
        stream = self.records_src
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            digest.update(chunk.encode('utf-8'))
        result = digest.hexdigest()
        self._values['checksum'] = result
        return result
    @property
    def external_file_name(self):
        """Name of the file object on the device; defaults to the DG name."""
        name = self._values['external_file_name']
        if name is None:
            name = self.name
        if re.search(r'[^a-zA-Z0-9-_.]', name):
            raise F5ModuleError(
                "'external_file_name' may only contain letters, numbers, underscores, dashes, or a period."
            )
        return name
    @property
    def records(self):
        """Decoded record dicts built from the encoded records stream."""
        if self.records_src is None:
            return None
        decoder = RecordsDecoder(record_type=self.type, separator=self.separator)
        return [r for r in (decoder.decode(rec) for rec in self.records_src) if r]
    @property
    def records_list(self):
        if self._values['records'] is None:
            return None
        return self.records
class Changes(Parameters):
    """Filters the changed values down to the reportable set."""
    def to_return(self):
        result = {}
        try:
            for key in self.returnables:
                result[key] = getattr(self, key)
            result = self._filter_params(result)
        except Exception:
            # Best-effort: a property that cannot be computed is simply
            # left out of (or partially populates) the result.
            pass
        return result
class UsableChanges(Changes):
    # Changes in the shape sent to the BIG-IP API; currently identical to Changes.
    pass
class ReportableChanges(Changes):
    # Changes in the shape reported back to Ansible; currently identical to Changes.
    pass
class Difference(object):
    """Computes which attributes differ between desired and current state."""
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        # A dedicated property on this class wins over the generic comparison.
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)
    def __default(self, param):
        wanted = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
        except AttributeError:
            return wanted
        if wanted != current:
            return wanted
    @property
    def records(self):
        # External data groups are compared by their checksum, not their records. This
        # is because the BIG-IP does not store the actual records in the API. It instead
        # stores the checksum of the file. External DGs have the possibility of being huge
        # and we would never want to do a comparison of such huge files.
        #
        # Therefore, comparison is no-op if the DG being worked with is an external DG.
        if self.want.internal is False:
            return None
        if self.have.records is None and self.want.records == []:
            return None
        if self.have.records is None:
            return self.want.records
        return compare_complex_list(self.want.records, self.have.records)
    @property
    def type(self):
        # The type of an existing data group is never updated in place.
        return None
    @property
    def checksum(self):
        # Only meaningful for external data groups (see 'records' above).
        if self.want.internal:
            return None
        if self.want.checksum is None:
            return None
        if self.want.checksum != self.have.checksum:
            return True
class BaseManager(object):
    """Behavior shared by the internal and external data group managers."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def should_update(self):
        # True when at least one updatable attribute differs.
        return bool(self._update_changed_options())
    def exec_module(self):
        """Run the requested state transition and report what changed."""
        changed = False
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        result = dict()
        result.update(**reportable.to_return())
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        # Surface deprecation warnings collected during parameter handling.
        for warning in result.pop('__warnings', []):
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def _set_changed_options(self):
        # Seed self.changes with every supplied (non-None) 'want' value.
        changed = dict(
            (key, getattr(self.want, key))
            for key in ApiParameters.returnables
            if getattr(self.want, key) is not None
        )
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        # Diff 'want' against 'have' over the updatable attributes.
        diff = Difference(self.want, self.have)
        changed = dict()
        for key in ApiParameters.updatables:
            change = diff.compare(key)
            if change is None:
                continue
            if isinstance(change, dict):
                changed.update(change)
            else:
                changed[key] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def present(self):
        return self.update() if self.exists() else self.create()
    def absent(self):
        return self.remove() if self.exists() else False
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
class InternalManager(BaseManager):
    """Manages 'internal' data groups, whose records live in the LTM config."""
    def _record_uri(self, named=True):
        # All internal data group endpoints share this prefix.
        base = "https://{0}:{1}/mgmt/tm/ltm/data-group/internal/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        if named:
            return base + transform_name(self.want.partition, self.want.name)
        return base
    def create(self):
        self._set_changed_options()
        # Internal data groups are size-limited; external ones are not.
        if size_exceeded(self.want.records_src) or lines_exceeded(self.want.records_src):
            raise F5ModuleError(
                "The size of the provided data (or file) is too large for an internal data group."
            )
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True
    def exists(self):
        resp = self.client.api.get(self._record_uri())
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def create_on_device(self):
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        resp = self.client.api.post(self._record_uri(named=False), json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403, 409]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            raise F5ModuleError(resp.content)
    def update_on_device(self):
        params = self.changes.api_params()
        resp = self.client.api.patch(self._record_uri(), json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            raise F5ModuleError(resp.content)
    def remove_from_device(self):
        resp = self.client.api.delete(self._record_uri())
        if resp.status == 200:
            return True
    def read_current_from_device(self):
        resp = self.client.api.get(self._record_uri())
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ExternalManager(BaseManager):
    """Manages 'external' data groups.

    The records of an external data group live in a separate file object
    (/mgmt/tm/sys/file/data-group) that the LTM data group references, so
    most operations here also upload or patch that file.
    """
    def absent(self):
        # Delete the data group and, when requested, its backing file object.
        result = False
        if self.exists():
            result = self.remove()
        if self.external_file_exists() and self.want.delete_data_group_file:
            result = self.remove_data_group_file_from_device()
        return result
    def create(self):
        # The device cannot create an external data group from an empty file.
        if zero_length(self.want.records_src):
            raise F5ModuleError(
                "An external data group cannot be empty."
            )
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # NOTE(review): 'records_src' is not among Changes.returnables;
        # presumably this resolves via the Parameters property — confirm.
        if self.changes.records_src and zero_length(self.want.records_src):
            raise F5ModuleError(
                "An external data group cannot be empty."
            )
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True
    def exists(self):
        # Existence of the LTM data group object itself.
        uri = "https://{0}:{1}/mgmt/tm/ltm/data-group/external/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def external_file_exists(self):
        # Existence of the backing sys/file/data-group file object.
        uri = "https://{0}:{1}/mgmt/tm/sys/file/data-group/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.external_file_name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def upload_file_to_device(self, content, name):
        """Upload raw content to the device's file-transfer endpoint."""
        url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        try:
            upload_file(self.client, url, content, name)
        except F5ModuleError:
            raise F5ModuleError(
                "Failed to upload the file."
            )
    def _upload_to_file(self, name, type, remote_path, update=False):
        """Upload the records and create/patch the file object from them.

        :param name: name of the sys/file/data-group object
        :param type: data group type for a newly created file object
        :param remote_path: path the upload lands at on the device
        :param update: patch an existing file object instead of creating one
        :return: the file object's name as reported by the API
        """
        self.upload_file_to_device(self.want.records_src, name)
        if update:
            uri = "https://{0}:{1}/mgmt/tm/sys/file/data-group/{2}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.want.partition, name)
            )
            params = {'sourcePath': 'file:{0}'.format(remote_path)}
            resp = self.client.api.patch(uri, json=params)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if 'code' in response and response['code'] == 400:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)
        else:
            uri = "https://{0}:{1}/mgmt/tm/sys/file/data-group/".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
            )
            params = dict(
                name=name,
                type=type,
                sourcePath='file:{0}'.format(remote_path)
            )
            resp = self.client.api.post(uri, json=params)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if 'code' in response and response['code'] in [400, 403]:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)
        return response['name']
    def remove_file_on_device(self, remote_path):
        """Delete the uploaded temp file from the device via unix-rm."""
        uri = "https://{0}:{1}/mgmt/tm/util/unix-rm/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        args = dict(
            command='run',
            utilCmdArgs=remote_path
        )
        resp = self.client.api.post(uri, json=args)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def create_on_device(self):
        """Upload the records file, then create the data group referencing it."""
        name = self.want.external_file_name
        remote_path = '/var/config/rest/downloads/{0}'.format(name)
        external_file = self._upload_to_file(name, self.want.type, remote_path, update=False)
        params = dict(
            name=self.want.name,
            partition=self.want.partition,
            externalFileName=external_file,
        )
        if self.want.description:
            params['description'] = self.want.description
        uri = "https://{0}:{1}/mgmt/tm/ltm/data-group/external/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        # Clean up the uploaded temp file once the file object exists.
        self.remove_file_on_device(remote_path)
    def update_on_device(self):
        """Patch the data group with a refreshed file and/or new description."""
        params = {}
        if self.want.records_src is not None:
            name = self.want.external_file_name
            remote_path = '/var/config/rest/downloads/{0}'.format(name)
            external_file = self._upload_to_file(name, self.have.type, remote_path, update=True)
            params['externalFileName'] = external_file
        if self.changes.description is not None:
            params['description'] = self.changes.description
        if not params:
            return
        uri = "https://{0}:{1}/mgmt/tm/ltm/data-group/external/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def remove_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/ltm/data-group/external/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.delete(uri)
        # Remove the remote data group file if asked to
        if self.want.delete_data_group_file:
            self.remove_data_group_file_from_device()
        if resp.status == 200:
            return True
    def remove_data_group_file_from_device(self):
        """Delete the backing sys/file/data-group object."""
        uri = "https://{0}:{1}/mgmt/tm/sys/file/data-group/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.external_file_name)
        )
        resp = self.client.api.delete(uri)
        if resp.status == 200:
            return True
        else:
            return False
    def read_current_from_device(self):
        """Reads the current configuration from the device

        For an external data group, we are interested in two things from the
        current configuration

        * ``checksum``
        * ``type``

        The ``checksum`` will allow us to compare the data group value we have
        with the data group value being provided.

        The ``type`` will allow us to do validation on the data group value being
        provided (if any).

        Returns:
             ExternalApiParameters: Attributes of the remote resource.
        """
        uri = "https://{0}:{1}/mgmt/tm/ltm/data-group/external/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp_dg = self.client.api.get(uri)
        try:
            response_dg = resp_dg.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response_dg and response_dg['code'] == 400:
            if 'message' in response_dg:
                raise F5ModuleError(response_dg['message'])
            else:
                raise F5ModuleError(resp_dg.content)
        # The checksum/type live on the referenced file object, so a second
        # read is required; the file may live in a different partition.
        external_file = os.path.basename(response_dg['externalFileName'])
        external_file_partition = os.path.dirname(response_dg['externalFileName']).strip('/')
        uri = "https://{0}:{1}/mgmt/tm/sys/file/data-group/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(external_file_partition, external_file)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        result = ApiParameters(params=response)
        result.update({'description': response_dg.get('description', None)})
        return result
class ModuleManager(object):
    """Chooses the concrete manager based on the 'internal' module parameter."""
    def __init__(self, *args, **kwargs):
        self.kwargs = kwargs
        self.module = kwargs.get('module')
    def exec_module(self):
        kind = 'internal' if self.module.params['internal'] else 'external'
        return self.get_manager(kind).exec_module()
    def get_manager(self, type):
        if type == 'internal':
            return InternalManager(**self.kwargs)
        elif type == 'external':
            return ExternalManager(**self.kwargs)
class ArgumentSpec(object):
    """Declares the Ansible argument spec for the data group module."""
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(required=True),
            type=dict(
                choices=['address', 'addr', 'ip', 'string', 'str', 'integer', 'int'],
                default='string'
            ),
            delete_data_group_file=dict(type='bool'),
            internal=dict(type='bool', default='no'),
            records=dict(
                type='list',
                suboptions=dict(
                    key=dict(required=True),
                    value=dict(type='raw')
                )
            ),
            records_src=dict(type='path'),
            external_file_name=dict(),
            separator=dict(default=':='),
            description=dict(),
            state=dict(choices=['absent', 'present'], default='present'),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
        # BUG FIX: this list previously named 'records_content', an option
        # that does not exist in the spec above, so the intended exclusivity
        # between 'records' and 'records_src' was never enforced.
        self.mutually_exclusive = [
            ['records', 'records_src', 'external_file_name']
        ]
def main():
    """Module entry point: build the AnsibleModule and run the manager."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        mutually_exclusive=spec.mutually_exclusive
    )
    try:
        results = ModuleManager(module=module).exec_module()
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
    else:
        module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
x111ong/odoo | addons/account/account_bank_statement.py | 65 | 56688 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools import float_is_zero
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.report import report_sxw
from openerp.tools import float_compare, float_round
import time
class account_bank_statement(osv.osv):
def create(self, cr, uid, vals, context=None):
if vals.get('name', '/') == '/':
journal_id = vals.get('journal_id', self._default_journal_id(cr, uid, context=context))
vals['name'] = self._compute_default_statement_name(cr, uid, journal_id, context=context)
if 'line_ids' in vals:
for idx, line in enumerate(vals['line_ids']):
line[2]['sequence'] = idx + 1
return super(account_bank_statement, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
res = super(account_bank_statement, self).write(cr, uid, ids, vals, context=context)
account_bank_statement_line_obj = self.pool.get('account.bank.statement.line')
for statement in self.browse(cr, uid, ids, context):
for idx, line in enumerate(statement.line_ids):
account_bank_statement_line_obj.write(cr, uid, [line.id], {'sequence': idx + 1}, context=context)
return res
def _default_journal_id(self, cr, uid, context=None):
if context is None:
context = {}
journal_pool = self.pool.get('account.journal')
journal_type = context.get('journal_type', False)
company_id = self.pool.get('res.company')._company_default_get(cr, uid, 'account.bank.statement',context=context)
if journal_type:
ids = journal_pool.search(cr, uid, [('type', '=', journal_type),('company_id','=',company_id)])
if ids:
return ids[0]
return False
def _end_balance(self, cursor, user, ids, name, attr, context=None):
res = {}
for statement in self.browse(cursor, user, ids, context=context):
res[statement.id] = statement.balance_start
for line in statement.line_ids:
res[statement.id] += line.amount
return res
def _get_period(self, cr, uid, context=None):
periods = self.pool.get('account.period').find(cr, uid, context=context)
if periods:
return periods[0]
return False
def _compute_default_statement_name(self, cr, uid, journal_id, context=None):
context = dict(context or {})
obj_seq = self.pool.get('ir.sequence')
period = self.pool.get('account.period').browse(cr, uid, self._get_period(cr, uid, context=context), context=context)
context['fiscalyear_id'] = period.fiscalyear_id.id
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, None)
return obj_seq.next_by_id(cr, uid, journal.sequence_id.id, context=context)
def _currency(self, cursor, user, ids, name, args, context=None):
res = {}
res_currency_obj = self.pool.get('res.currency')
res_users_obj = self.pool.get('res.users')
default_currency = res_users_obj.browse(cursor, user,
user, context=context).company_id.currency_id
for statement in self.browse(cursor, user, ids, context=context):
currency = statement.journal_id.currency
if not currency:
currency = default_currency
res[statement.id] = currency.id
currency_names = {}
for currency_id, currency_name in res_currency_obj.name_get(cursor,
user, [x for x in res.values()], context=context):
currency_names[currency_id] = currency_name
for statement_id in res.keys():
currency_id = res[statement_id]
res[statement_id] = (currency_id, currency_names[currency_id])
return res
def _get_statement(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.bank.statement.line').browse(cr, uid, ids, context=context):
result[line.statement_id.id] = True
return result.keys()
def _all_lines_reconciled(self, cr, uid, ids, name, args, context=None):
res = {}
for statement in self.browse(cr, uid, ids, context=context):
res[statement.id] = all([line.journal_entry_id.id or line.account_id.id for line in statement.line_ids])
return res
_order = "date desc, id desc"
_name = "account.bank.statement"
_description = "Bank Statement"
_inherit = ['mail.thread']
_columns = {
'name': fields.char(
'Reference', states={'draft': [('readonly', False)]},
readonly=True, # readonly for account_cash_statement
copy=False,
help='if you give the Name other then /, its created Accounting Entries Move '
'will be with same name as statement name. '
'This allows the statement entries to have the same references than the '
'statement itself'),
'date': fields.date('Date', required=True, states={'confirm': [('readonly', True)]},
select=True, copy=False),
'journal_id': fields.many2one('account.journal', 'Journal', required=True,
readonly=True, states={'draft':[('readonly',False)]}),
'period_id': fields.many2one('account.period', 'Period', required=True,
states={'confirm':[('readonly', True)]}),
'balance_start': fields.float('Starting Balance', digits_compute=dp.get_precision('Account'),
states={'confirm':[('readonly',True)]}),
'balance_end_real': fields.float('Ending Balance', digits_compute=dp.get_precision('Account'),
states={'confirm': [('readonly', True)]}, help="Computed using the cash control lines"),
'balance_end': fields.function(_end_balance,
store = {
'account.bank.statement': (lambda self, cr, uid, ids, c={}: ids, ['line_ids','move_line_ids','balance_start'], 10),
'account.bank.statement.line': (_get_statement, ['amount'], 10),
},
string="Computed Balance", help='Balance as calculated based on Opening Balance and transaction lines'),
'company_id': fields.related('journal_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'line_ids': fields.one2many('account.bank.statement.line',
'statement_id', 'Statement lines',
states={'confirm':[('readonly', True)]}, copy=True),
'move_line_ids': fields.one2many('account.move.line', 'statement_id',
'Entry lines', states={'confirm':[('readonly',True)]}),
'state': fields.selection([('draft', 'New'),
('open','Open'), # used by cash statements
('confirm', 'Closed')],
'Status', required=True, readonly="1",
copy=False,
help='When new statement is created the status will be \'Draft\'.\n'
'And after getting confirmation from the bank it will be in \'Confirmed\' status.'),
'currency': fields.function(_currency, string='Currency',
type='many2one', relation='res.currency'),
'account_id': fields.related('journal_id', 'default_debit_account_id', type='many2one', relation='account.account', string='Account used in this journal', readonly=True, help='used in statement reconciliation domain, but shouldn\'t be used elswhere.'),
'cash_control': fields.related('journal_id', 'cash_control' , type='boolean', relation='account.journal',string='Cash control'),
'all_lines_reconciled': fields.function(_all_lines_reconciled, string='All lines reconciled', type='boolean'),
}
_defaults = {
'name': '/',
'date': fields.date.context_today,
'state': 'draft',
'journal_id': _default_journal_id,
'period_id': _get_period,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.bank.statement',context=c),
}
def _check_company_id(self, cr, uid, ids, context=None):
for statement in self.browse(cr, uid, ids, context=context):
if statement.company_id.id != statement.period_id.company_id.id:
return False
return True
_constraints = [
(_check_company_id, 'The journal and period chosen have to belong to the same company.', ['journal_id','period_id']),
]
def onchange_date(self, cr, uid, ids, date, company_id, context=None):
"""
Find the correct period to use for the given date and company_id, return it and set it in the context
"""
res = {}
period_pool = self.pool.get('account.period')
if context is None:
context = {}
ctx = context.copy()
ctx.update({'company_id': company_id})
pids = period_pool.find(cr, uid, dt=date, context=ctx)
if pids:
res.update({'period_id': pids[0]})
context = dict(context, period_id=pids[0])
return {
'value':res,
'context':context,
}
    def button_dummy(self, cr, uid, ids, context=None):
        # No-op write; presumably wired to a UI button so pending client
        # changes get persisted/refreshed — confirm against the view XML.
        return self.write(cr, uid, ids, {}, context=context)
def _prepare_move(self, cr, uid, st_line, st_line_number, context=None):
"""Prepare the dict of values to create the move from a
statement line. This method may be overridden to implement custom
move generation (making sure to call super() to establish
a clean extension chain).
:param browse_record st_line: account.bank.statement.line record to
create the move from.
:param char st_line_number: will be used as the name of the generated account move
:return: dict of value to create() the account.move
"""
return {
'journal_id': st_line.statement_id.journal_id.id,
'period_id': st_line.statement_id.period_id.id,
'date': st_line.date,
'name': st_line_number,
'ref': st_line.ref,
}
def _get_counter_part_account(self, cr, uid, st_line, context=None):
"""Retrieve the account to use in the counterpart move.
:param browse_record st_line: account.bank.statement.line record to create the move from.
:return: int/long of the account.account to use as counterpart
"""
if st_line.amount >= 0:
return st_line.statement_id.journal_id.default_credit_account_id.id
return st_line.statement_id.journal_id.default_debit_account_id.id
def _get_counter_part_partner(self, cr, uid, st_line, context=None):
"""Retrieve the partner to use in the counterpart move.
:param browse_record st_line: account.bank.statement.line record to create the move from.
:return: int/long of the res.partner to use as counterpart
"""
return st_line.partner_id and st_line.partner_id.id or False
def _prepare_bank_move_line(self, cr, uid, st_line, move_id, amount, company_currency_id, context=None):
"""Compute the args to build the dict of values to create the counter part move line from a
statement line by calling the _prepare_move_line_vals.
:param browse_record st_line: account.bank.statement.line record to create the move from.
:param int/long move_id: ID of the account.move to link the move line
:param float amount: amount of the move line
:param int/long company_currency_id: ID of currency of the concerned company
:return: dict of value to create() the bank account.move.line
"""
account_id = self._get_counter_part_account(cr, uid, st_line, context=context)
partner_id = self._get_counter_part_partner(cr, uid, st_line, context=context)
debit = ((amount > 0) and amount) or 0.0
credit = ((amount < 0) and -amount) or 0.0
cur_id = False
amt_cur = False
if st_line.statement_id.currency.id != company_currency_id:
amt_cur = st_line.amount
cur_id = st_line.statement_id.currency.id
elif st_line.currency_id and st_line.amount_currency:
amt_cur = st_line.amount_currency
cur_id = st_line.currency_id.id
return self._prepare_move_line_vals(cr, uid, st_line, move_id, debit, credit,
amount_currency=amt_cur, currency_id=cur_id, account_id=account_id,
partner_id=partner_id, context=context)
def _prepare_move_line_vals(self, cr, uid, st_line, move_id, debit, credit, currency_id=False,
amount_currency=False, account_id=False, partner_id=False, context=None):
"""Prepare the dict of values to create the move line from a
statement line.
:param browse_record st_line: account.bank.statement.line record to
create the move from.
:param int/long move_id: ID of the account.move to link the move line
:param float debit: debit amount of the move line
:param float credit: credit amount of the move line
:param int/long currency_id: ID of currency of the move line to create
:param float amount_currency: amount of the debit/credit expressed in the currency_id
:param int/long account_id: ID of the account to use in the move line if different
from the statement line account ID
:param int/long partner_id: ID of the partner to put on the move line
:return: dict of value to create() the account.move.line
"""
acc_id = account_id or st_line.account_id.id
cur_id = currency_id or st_line.statement_id.currency.id
par_id = partner_id or (((st_line.partner_id) and st_line.partner_id.id) or False)
return {
'name': st_line.name,
'date': st_line.date,
'ref': st_line.ref,
'move_id': move_id,
'partner_id': par_id,
'account_id': acc_id,
'credit': credit,
'debit': debit,
'statement_id': st_line.statement_id.id,
'journal_id': st_line.statement_id.journal_id.id,
'period_id': st_line.statement_id.period_id.id,
'currency_id': amount_currency and cur_id,
'amount_currency': amount_currency,
}
def balance_check(self, cr, uid, st_id, journal_type='bank', context=None):
st = self.browse(cr, uid, st_id, context=context)
if not ((abs((st.balance_end or 0.0) - st.balance_end_real) < 0.0001) or (abs((st.balance_end or 0.0) - st.balance_end_real) < 0.0001)):
raise osv.except_osv(_('Error!'),
_('The statement balance is incorrect !\nThe expected balance (%.2f) is different than the computed one. (%.2f)') % (st.balance_end_real, st.balance_end))
return True
def statement_close(self, cr, uid, ids, journal_type='bank', context=None):
return self.write(cr, uid, ids, {'state':'confirm'}, context=context)
def check_status_condition(self, cr, uid, state, journal_type='bank'):
return state in ('draft','open')
    def button_confirm_bank(self, cr, uid, ids, context=None):
        """Validate bank statements: check the balance and the journal
        configuration, make sure every line has been processed into a journal
        entry, post those entries and set the statements to 'confirm'.

        :param list ids: ids of the account.bank.statement records to confirm
        :return: result of the final write() setting state and closing_date
        """
        if context is None:
            context = {}
        for st in self.browse(cr, uid, ids, context=context):
            j_type = st.journal_id.type
            # Skip statements that are already confirmed/closed.
            if not self.check_status_condition(cr, uid, st.state, journal_type=j_type):
                continue
            # Raises if the computed ending balance differs from the one entered.
            self.balance_check(cr, uid, st.id, journal_type=j_type, context=context)
            if (not st.journal_id.default_credit_account_id) \
                    or (not st.journal_id.default_debit_account_id):
                raise osv.except_osv(_('Configuration Error!'), _('Please verify that an account is defined in the journal.'))
            for line in st.move_line_ids:
                if line.state != 'valid':
                    raise osv.except_osv(_('Error!'), _('The account entries lines are not in valid state.'))
            move_ids = []
            for st_line in st.line_ids:
                # Zero-amount lines never generate an entry.
                if not st_line.amount:
                    continue
                if st_line.account_id and not st_line.journal_entry_id.id:
                    # A line with a preset account bypasses the reconciliation
                    # widget: create its counterpart move directly.
                    #make an account move as before
                    vals = {
                        'debit': st_line.amount < 0 and -st_line.amount or 0.0,
                        'credit': st_line.amount > 0 and st_line.amount or 0.0,
                        'account_id': st_line.account_id.id,
                        'name': st_line.name
                    }
                    self.pool.get('account.bank.statement.line').process_reconciliation(cr, uid, st_line.id, [vals], context=context)
                elif not st_line.journal_entry_id.id:
                    raise osv.except_osv(_('Error!'), _('All the account entries lines must be processed in order to close the statement.'))
                move_ids.append(st_line.journal_entry_id.id)
            if move_ids:
                self.pool.get('account.move').post(cr, uid, move_ids, context=context)
            self.message_post(cr, uid, [st.id], body=_('Statement %s confirmed, journal items were created.') % (st.name,), context=context)
        # Propagate partners to the lines' bank accounts before closing.
        self.link_bank_to_partner(cr, uid, ids, context=context)
        return self.write(cr, uid, ids, {'state': 'confirm', 'closing_date': time.strftime("%Y-%m-%d %H:%M:%S")}, context=context)
def button_cancel(self, cr, uid, ids, context=None):
bnk_st_line_ids = []
for st in self.browse(cr, uid, ids, context=context):
bnk_st_line_ids += [line.id for line in st.line_ids]
self.pool.get('account.bank.statement.line').cancel(cr, uid, bnk_st_line_ids, context=context)
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def _compute_balance_end_real(self, cr, uid, journal_id, context=None):
res = False
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
if journal.with_last_closing_balance:
cr.execute('SELECT balance_end_real \
FROM account_bank_statement \
WHERE journal_id = %s AND NOT state = %s \
ORDER BY date DESC,id DESC LIMIT 1', (journal_id, 'draft'))
res = cr.fetchone()
return res and res[0] or 0.0
def onchange_journal_id(self, cr, uid, statement_id, journal_id, context=None):
if not journal_id:
return {}
balance_start = self._compute_balance_end_real(cr, uid, journal_id, context=context)
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
currency = journal.currency or journal.company_id.currency_id
res = {'balance_start': balance_start, 'company_id': journal.company_id.id, 'currency': currency.id}
if journal.type == 'cash':
res['cash_control'] = journal.cash_control
return {'value': res}
def unlink(self, cr, uid, ids, context=None):
statement_line_obj = self.pool['account.bank.statement.line']
for item in self.browse(cr, uid, ids, context=context):
if item.state != 'draft':
raise osv.except_osv(
_('Invalid Action!'),
_('In order to delete a bank statement, you must first cancel it to delete related journal items.')
)
# Explicitly unlink bank statement lines
# so it will check that the related journal entries have
# been deleted first
statement_line_obj.unlink(cr, uid, [line.id for line in item.line_ids], context=context)
return super(account_bank_statement, self).unlink(cr, uid, ids, context=context)
def button_journal_entries(self, cr, uid, ids, context=None):
ctx = (context or {}).copy()
ctx['journal_id'] = self.browse(cr, uid, ids[0], context=context).journal_id.id
return {
'name': _('Journal Items'),
'view_type':'form',
'view_mode':'tree',
'res_model':'account.move.line',
'view_id':False,
'type':'ir.actions.act_window',
'domain':[('statement_id','in',ids)],
'context':ctx,
}
def number_of_lines_reconciled(self, cr, uid, ids, context=None):
bsl_obj = self.pool.get('account.bank.statement.line')
return bsl_obj.search_count(cr, uid, [('statement_id', 'in', ids), ('journal_entry_id', '!=', False)], context=context)
def link_bank_to_partner(self, cr, uid, ids, context=None):
for statement in self.browse(cr, uid, ids, context=context):
for st_line in statement.line_ids:
if st_line.bank_account_id and st_line.partner_id and st_line.bank_account_id.partner_id.id != st_line.partner_id.id:
# Update the partner informations of the bank account, possibly overriding existing ones
bank_obj = self.pool.get('res.partner.bank')
bank_vals = bank_obj.onchange_partner_id(cr, uid, [st_line.bank_account_id.id], st_line.partner_id.id, context=context)['value']
bank_vals.update({'partner_id': st_line.partner_id.id})
bank_obj.write(cr, uid, [st_line.bank_account_id.id], bank_vals, context=context)
class account_bank_statement_line(osv.osv):
def create(self, cr, uid, vals, context=None):
if vals.get('amount_currency', 0) and not vals.get('amount', 0):
raise osv.except_osv(_('Error!'), _('If "Amount Currency" is specified, then "Amount" must be as well.'))
return super(account_bank_statement_line, self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
for item in self.browse(cr, uid, ids, context=context):
if item.journal_entry_id:
raise osv.except_osv(
_('Invalid Action!'),
_('In order to delete a bank statement line, you must first cancel it to delete related journal items.')
)
return super(account_bank_statement_line, self).unlink(cr, uid, ids, context=context)
def cancel(self, cr, uid, ids, context=None):
account_move_obj = self.pool.get('account.move')
move_ids = []
for line in self.browse(cr, uid, ids, context=context):
if line.journal_entry_id:
move_ids.append(line.journal_entry_id.id)
for aml in line.journal_entry_id.line_id:
if aml.reconcile_id:
move_lines = [l.id for l in aml.reconcile_id.line_id]
move_lines.remove(aml.id)
self.pool.get('account.move.reconcile').unlink(cr, uid, [aml.reconcile_id.id], context=context)
if len(move_lines) >= 2:
self.pool.get('account.move.line').reconcile_partial(cr, uid, move_lines, 'auto', context=context)
if move_ids:
account_move_obj.button_cancel(cr, uid, move_ids, context=context)
account_move_obj.unlink(cr, uid, move_ids, context)
def get_data_for_reconciliations(self, cr, uid, ids, excluded_ids=None, search_reconciliation_proposition=True, context=None):
""" Returns the data required to display a reconciliation, for each statement line id in ids """
ret = []
if excluded_ids is None:
excluded_ids = []
for st_line in self.browse(cr, uid, ids, context=context):
reconciliation_data = {}
if search_reconciliation_proposition:
reconciliation_proposition = self.get_reconciliation_proposition(cr, uid, st_line, excluded_ids=excluded_ids, context=context)
for mv_line in reconciliation_proposition:
excluded_ids.append(mv_line['id'])
reconciliation_data['reconciliation_proposition'] = reconciliation_proposition
else:
reconciliation_data['reconciliation_proposition'] = []
st_line = self.get_statement_line_for_reconciliation(cr, uid, st_line, context=context)
reconciliation_data['st_line'] = st_line
ret.append(reconciliation_data)
return ret
    def get_statement_line_for_reconciliation(self, cr, uid, st_line, context=None):
        """ Returns the data required by the bank statement reconciliation widget to display a statement line

        :param browse_record st_line: statement line to serialize
        :return: dict of widget-ready values (amounts formatted with the
                 report formatter, partner/account info, open-balance account)
        """
        if context is None:
            context = {}
        statement_currency = st_line.journal_id.currency or st_line.journal_id.company_id.currency_id
        # The rml parser is only used here for its language-aware
        # currency/number formatting.
        rml_parser = report_sxw.rml_parse(cr, uid, 'reconciliation_widget_asl', context=context)
        if st_line.amount_currency and st_line.currency_id:
            # Foreign-currency line: display the foreign amount as the main
            # amount, and the company amount as the secondary one.
            amount = st_line.amount_currency
            amount_currency = st_line.amount
            amount_currency_str = amount_currency > 0 and amount_currency or -amount_currency
            amount_currency_str = rml_parser.formatLang(amount_currency_str, currency_obj=statement_currency)
        else:
            amount = st_line.amount
            amount_currency_str = ""
        # Amounts are displayed unsigned; the widget handles direction.
        amount_str = amount > 0 and amount or -amount
        amount_str = rml_parser.formatLang(amount_str, currency_obj=st_line.currency_id or statement_currency)
        data = {
            'id': st_line.id,
            'ref': st_line.ref,
            'note': st_line.note or "",
            'name': st_line.name,
            'date': st_line.date,
            'amount': amount,
            'amount_str': amount_str, # Amount in the statement line currency
            'currency_id': st_line.currency_id.id or statement_currency.id,
            'partner_id': st_line.partner_id.id,
            'statement_id': st_line.statement_id.id,
            'account_code': st_line.journal_id.default_debit_account_id.code,
            'account_name': st_line.journal_id.default_debit_account_id.name,
            'partner_name': st_line.partner_id.name,
            'communication_partner_name': st_line.partner_name,
            'amount_currency_str': amount_currency_str, # Amount in the statement currency
            'has_no_partner': not st_line.partner_id.id,
        }
        if st_line.partner_id.id:
            # Account used if the user leaves part of the amount open:
            # receivable for money in, payable for money out.
            if amount > 0:
                data['open_balance_account_id'] = st_line.partner_id.property_account_receivable.id
            else:
                data['open_balance_account_id'] = st_line.partner_id.property_account_payable.id
        return data
def _domain_reconciliation_proposition(self, cr, uid, st_line, excluded_ids=None, context=None):
if excluded_ids is None:
excluded_ids = []
domain = [('ref', '=', st_line.name),
('reconcile_id', '=', False),
('state', '=', 'valid'),
('account_id.reconcile', '=', True),
('id', 'not in', excluded_ids),
('partner_id', 'in', (False, st_line.partner_id.id))]
return domain
    def get_reconciliation_proposition(self, cr, uid, st_line, excluded_ids=None, context=None):
        """ Returns move lines that constitute the best guess to reconcile a statement line.

        Matching strategy, in order:
        1. structured communication (move line ref == statement line name);
        2. exact amount with matching ref, then exact amount alone;
        3. a set of receivable/payable lines of the partner whose cumulated
           amount stays within the statement line's amount.
        """
        mv_line_pool = self.pool.get('account.move.line')
        # Look for structured communication
        if st_line.name:
            domain = self._domain_reconciliation_proposition(cr, uid, st_line, excluded_ids=excluded_ids, context=context)
            # limit=2: we only accept the match when it is unique.
            match_id = mv_line_pool.search(cr, uid, domain, offset=0, limit=2, context=context)
            if match_id and len(match_id) == 1:
                mv_line_br = mv_line_pool.browse(cr, uid, match_id, context=context)
                target_currency = st_line.currency_id or st_line.journal_id.currency or st_line.journal_id.company_id.currency_id
                mv_line = mv_line_pool.prepare_move_lines_for_reconciliation_widget(cr, uid, mv_line_br, target_currency=target_currency, target_date=st_line.date, context=context)[0]
                mv_line['has_no_partner'] = not bool(st_line.partner_id.id)
                # If the structured communication matches a move line that is associated with a partner, we can safely associate the statement line with the partner
                if (mv_line['partner_id']):
                    self.write(cr, uid, st_line.id, {'partner_id': mv_line['partner_id']}, context=context)
                    mv_line['has_no_partner'] = False
                return [mv_line]
        # How to compare statement line amount and move lines amount
        precision_digits = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
        currency_id = st_line.currency_id.id or st_line.journal_id.currency.id
        # NB : amount can't be == 0 ; so float precision is not an issue for amount > 0 or amount < 0
        amount = st_line.amount_currency or st_line.amount
        domain = [('reconcile_partial_id', '=', False)]
        if currency_id:
            domain += [('currency_id', '=', currency_id)]
        sign = 1 # correct the fact that st_line.amount is signed and debit/credit is not
        amount_field = 'debit'
        if currency_id == False:
            # Company-currency comparison: match against debit or credit
            # depending on the sign of the statement amount.
            if amount < 0:
                amount_field = 'credit'
                sign = -1
        else:
            amount_field = 'amount_currency'
        # Look for a matching amount
        domain_exact_amount = domain + [(amount_field, '=', float_round(sign * amount, precision_digits=precision_digits))]
        # Prefer an exact amount that also matches the reference.
        domain_exact_amount_ref = domain_exact_amount + [('ref', '=', st_line.ref)]
        match_id = self.get_move_lines_for_reconciliation(cr, uid, st_line, excluded_ids=excluded_ids, offset=0, limit=2, additional_domain=domain_exact_amount_ref)
        if not match_id:
            match_id = self.get_move_lines_for_reconciliation(cr, uid, st_line, excluded_ids=excluded_ids, offset=0, limit=2, additional_domain=domain_exact_amount)
        if match_id and len(match_id) == 1:
            return match_id
        if not st_line.partner_id.id:
            return []
        # Look for a set of move line whose amount is <= to the line's amount
        if amount > 0: # Make sure we can't mix receivable and payable
            domain += [('account_id.type', '=', 'receivable')]
        else:
            domain += [('account_id.type', '=', 'payable')]
        if amount_field == 'amount_currency' and amount < 0:
            domain += [(amount_field, '<', 0), (amount_field, '>', (sign * amount))]
        else:
            domain += [(amount_field, '>', 0), (amount_field, '<', (sign * amount))]
        mv_lines = self.get_move_lines_for_reconciliation(cr, uid, st_line, excluded_ids=excluded_ids, limit=5, additional_domain=domain, context=context)
        ret = []
        total = 0
        # Keep accumulating candidate lines while the running total does not
        # exceed the statement line's amount.
        for line in mv_lines:
            total += abs(line['debit'] - line['credit'])
            if float_compare(total, abs(amount), precision_digits=precision_digits) != 1:
                ret.append(line)
            else:
                break
        return ret
def get_move_lines_for_reconciliation_by_statement_line_id(self, cr, uid, st_line_id, excluded_ids=None, str=False, offset=0, limit=None, count=False, additional_domain=None, context=None):
""" Bridge between the web client reconciliation widget and get_move_lines_for_reconciliation (which expects a browse record) """
if excluded_ids is None:
excluded_ids = []
if additional_domain is None:
additional_domain = []
st_line = self.browse(cr, uid, st_line_id, context=context)
return self.get_move_lines_for_reconciliation(cr, uid, st_line, excluded_ids, str, offset, limit, count, additional_domain, context=context)
    def _domain_move_lines_for_reconciliation(self, cr, uid, st_line, excluded_ids=None, str=False, additional_domain=None, context=None):
        """Build the search domain for move lines reconcilable with
        ``st_line``, optionally filtered by the search string ``str``.

        NOTE(review): the parameter name ``str`` shadows the builtin; it is
        kept as-is because the web client calls it by keyword.
        """
        if excluded_ids is None:
            excluded_ids = []
        if additional_domain is None:
            additional_domain = []
        # Make domain
        domain = additional_domain + [
            ('reconcile_id', '=', False),
            ('state', '=', 'valid'),
            ('account_id.reconcile', '=', True)
        ]
        if st_line.partner_id.id:
            domain += [('partner_id', '=', st_line.partner_id.id)]
        if excluded_ids:
            domain.append(('id', 'not in', excluded_ids))
        if str:
            # Domains use prefix notation: each '|' joins the next two terms,
            # so this block matches move name OR move ref OR maturity date.
            domain += [
                '|', ('move_id.name', 'ilike', str),
                '|', ('move_id.ref', 'ilike', str),
                ('date_maturity', 'like', str),
            ]
            if not st_line.partner_id.id:
                # No partner filter above: also search on partner names.
                # insert(-1, '|') ORs the appended term with the last one.
                domain.insert(-1, '|', )
                domain.append(('partner_id.name', 'ilike', str))
            if str != '/':
                # '/' is the placeholder communication; don't match it
                # against move line names.
                domain.insert(-1, '|', )
                domain.append(('name', 'ilike', str))
        return domain
    def get_move_lines_for_reconciliation(self, cr, uid, st_line, excluded_ids=None, str=False, offset=0, limit=None, count=False, additional_domain=None, context=None):
        """ Find the move lines that could be used to reconcile a statement line. If count is true, only returns the count.

            :param st_line: the browse record of the statement line
            :param integers list excluded_ids: ids of move lines that should not be fetched
            :param boolean count: just return the number of records
            :param tuples list additional_domain: additional domain restrictions
        """
        mv_line_pool = self.pool.get('account.move.line')
        domain = self._domain_move_lines_for_reconciliation(cr, uid, st_line, excluded_ids=excluded_ids, str=str, additional_domain=additional_domain, context=context)
        # Get move lines ; in case of a partial reconciliation, only keep one line (the first whose amount is greater than
        # the residual amount because it is presumably the invoice, which is the relevant item in this situation)
        filtered_lines = []
        reconcile_partial_ids = []
        actual_offset = offset
        # Filtering can remove lines from a fetched page, so keep fetching
        # further pages until the requested page size is filled (or the
        # search is exhausted).
        while True:
            line_ids = mv_line_pool.search(cr, uid, domain, offset=actual_offset, limit=limit, order="date_maturity asc, id asc", context=context)
            lines = mv_line_pool.browse(cr, uid, line_ids, context=context)
            make_one_more_loop = False
            for line in lines:
                if line.reconcile_partial_id and \
                        (line.reconcile_partial_id.id in reconcile_partial_ids or \
                        abs(line.debit - line.credit) < abs(line.amount_residual)):
                    #if we filtered a line because it is partially reconciled with an already selected line, we must do one more loop
                    #in order to get the right number of items in the pager
                    make_one_more_loop = True
                    continue
                filtered_lines.append(line)
                if line.reconcile_partial_id:
                    reconcile_partial_ids.append(line.reconcile_partial_id.id)
            if not limit or not make_one_more_loop or len(filtered_lines) >= limit:
                break
            actual_offset = actual_offset + limit
        # Trim any extra lines collected by the last loop iteration.
        lines = limit and filtered_lines[:limit] or filtered_lines
        # Either return number of lines
        if count:
            return len(lines)
        # Or return list of dicts representing the formatted move lines
        else:
            target_currency = st_line.currency_id or st_line.journal_id.currency or st_line.journal_id.company_id.currency_id
            mv_lines = mv_line_pool.prepare_move_lines_for_reconciliation_widget(cr, uid, lines, target_currency=target_currency, target_date=st_line.date, context=context)
            has_no_partner = not bool(st_line.partner_id.id)
            for line in mv_lines:
                line['has_no_partner'] = has_no_partner
            return mv_lines
def get_currency_rate_line(self, cr, uid, st_line, currency_diff, move_id, context=None):
if currency_diff < 0:
account_id = st_line.company_id.expense_currency_exchange_account_id.id
if not account_id:
raise osv.except_osv(_('Insufficient Configuration!'), _("You should configure the 'Loss Exchange Rate Account' in the accounting settings, to manage automatically the booking of accounting entries related to differences between exchange rates."))
else:
account_id = st_line.company_id.income_currency_exchange_account_id.id
if not account_id:
raise osv.except_osv(_('Insufficient Configuration!'), _("You should configure the 'Gain Exchange Rate Account' in the accounting settings, to manage automatically the booking of accounting entries related to differences between exchange rates."))
return {
'move_id': move_id,
'name': _('change') + ': ' + (st_line.name or '/'),
'period_id': st_line.statement_id.period_id.id,
'journal_id': st_line.journal_id.id,
'partner_id': st_line.partner_id.id,
'company_id': st_line.company_id.id,
'statement_id': st_line.statement_id.id,
'debit': currency_diff < 0 and -currency_diff or 0,
'credit': currency_diff > 0 and currency_diff or 0,
'amount_currency': 0.0,
'date': st_line.date,
'account_id': account_id
}
def _get_exchange_lines(self, cr, uid, st_line, mv_line, currency_diff, currency_id, move_id, context=None):
'''
Prepare the two lines in company currency due to currency rate difference.
:param line: browse record of the voucher.line for which we want to create currency rate difference accounting
entries
:param move_id: Account move wher the move lines will be.
:param currency_diff: Amount to be posted.
:param company_currency: id of currency of the company to which the voucher belong
:param current_currency: id of currency of the voucher
:return: the account move line and its counterpart to create, depicted as mapping between fieldname and value
:rtype: tuple of dict
'''
if currency_diff > 0:
exchange_account_id = st_line.company_id.expense_currency_exchange_account_id.id
else:
exchange_account_id = st_line.company_id.income_currency_exchange_account_id.id
# Even if the amount_currency is never filled, we need to pass the foreign currency because otherwise
# the receivable/payable account may have a secondary currency, which render this field mandatory
if mv_line.account_id.currency_id:
account_currency_id = mv_line.account_id.currency_id.id
else:
account_currency_id = st_line.company_id.currency_id.id != currency_id and currency_id or False
move_line = {
'journal_id': st_line.journal_id.id,
'period_id': st_line.statement_id.period_id.id,
'name': _('change') + ': ' + (st_line.name or '/'),
'account_id': mv_line.account_id.id,
'move_id': move_id,
'partner_id': st_line.partner_id.id,
'currency_id': account_currency_id,
'amount_currency': 0.0,
'quantity': 1,
'credit': currency_diff > 0 and currency_diff or 0.0,
'debit': currency_diff < 0 and -currency_diff or 0.0,
'date': st_line.date,
'counterpart_move_line_id': mv_line.id,
}
move_line_counterpart = {
'journal_id': st_line.journal_id.id,
'period_id': st_line.statement_id.period_id.id,
'name': _('change') + ': ' + (st_line.name or '/'),
'account_id': exchange_account_id,
'move_id': move_id,
'amount_currency': 0.0,
'partner_id': st_line.partner_id.id,
'currency_id': account_currency_id,
'quantity': 1,
'debit': currency_diff > 0 and currency_diff or 0.0,
'credit': currency_diff < 0 and -currency_diff or 0.0,
'date': st_line.date,
}
return (move_line, move_line_counterpart)
def process_reconciliations(self, cr, uid, data, context=None):
for datum in data:
self.process_reconciliation(cr, uid, datum[0], datum[1], context=context)
    def process_reconciliation(self, cr, uid, id, mv_line_dicts, context=None):
        """ Creates a move line for each item of mv_line_dicts and for the statement line. Reconcile a new move line with its counterpart_move_line_id if specified. Finally, mark the statement line as reconciled by putting the newly created move id in the column journal_entry_id.

            :param int id: id of the bank statement line
            :param list of dicts mv_line_dicts: move lines to create. If counterpart_move_line_id is specified, reconcile with it
        """
        if context is None:
            context = {}
        st_line = self.browse(cr, uid, id, context=context)
        company_currency = st_line.journal_id.company_id.currency_id
        statement_currency = st_line.journal_id.currency or company_currency
        bs_obj = self.pool.get('account.bank.statement')
        am_obj = self.pool.get('account.move')
        aml_obj = self.pool.get('account.move.line')
        currency_obj = self.pool.get('res.currency')
        # Checks
        if st_line.journal_entry_id.id:
            raise osv.except_osv(_('Error!'), _('The bank statement line was already reconciled.'))
        for mv_line_dict in mv_line_dicts:
            # Default missing monetary fields so later arithmetic is safe.
            for field in ['debit', 'credit', 'amount_currency']:
                if field not in mv_line_dict:
                    mv_line_dict[field] = 0.0
            if mv_line_dict.get('counterpart_move_line_id'):
                mv_line = aml_obj.browse(cr, uid, mv_line_dict.get('counterpart_move_line_id'), context=context)
                if mv_line.reconcile_id:
                    raise osv.except_osv(_('Error!'), _('A selected move line was already reconciled.'))
        # Create the move
        move_name = (st_line.statement_id.name or st_line.name) + "/" + str(st_line.sequence)
        move_vals = bs_obj._prepare_move(cr, uid, st_line, move_name, context=context)
        move_id = am_obj.create(cr, uid, move_vals, context=context)
        # Create the move line for the statement line
        if st_line.statement_id.currency.id != company_currency.id:
            # Statement in a foreign currency: convert (or reuse) the amount
            # in company currency for the bank move line.
            if st_line.currency_id == company_currency:
                amount = st_line.amount_currency
            else:
                ctx = context.copy()
                ctx['date'] = st_line.date
                amount = currency_obj.compute(cr, uid, st_line.statement_id.currency.id, company_currency.id, st_line.amount, context=ctx)
        else:
            amount = st_line.amount
        bank_st_move_vals = bs_obj._prepare_bank_move_line(cr, uid, st_line, move_id, amount, company_currency.id, context=context)
        aml_obj.create(cr, uid, bank_st_move_vals, context=context)
        # Complete the dicts
        st_line_currency = st_line.currency_id or statement_currency
        # Implicit rate between the line's foreign amount and its company
        # amount (False when the line has no secondary currency).
        st_line_currency_rate = st_line.currency_id and (st_line.amount_currency / st_line.amount) or False
        to_create = []
        for mv_line_dict in mv_line_dicts:
            if mv_line_dict.get('is_tax_line'):
                continue
            mv_line_dict['ref'] = move_name
            mv_line_dict['move_id'] = move_id
            mv_line_dict['period_id'] = st_line.statement_id.period_id.id
            mv_line_dict['journal_id'] = st_line.journal_id.id
            mv_line_dict['company_id'] = st_line.company_id.id
            mv_line_dict['statement_id'] = st_line.statement_id.id
            if mv_line_dict.get('counterpart_move_line_id'):
                mv_line = aml_obj.browse(cr, uid, mv_line_dict['counterpart_move_line_id'], context=context)
                mv_line_dict['partner_id'] = mv_line.partner_id.id or st_line.partner_id.id
                mv_line_dict['account_id'] = mv_line.account_id.id
            if st_line_currency.id != company_currency.id:
                # Reconciliation performed in a foreign currency: the widget
                # amounts are in st_line_currency, convert them to company
                # currency at today's (statement date) rate.
                ctx = context.copy()
                ctx['date'] = st_line.date
                mv_line_dict['amount_currency'] = mv_line_dict['debit'] - mv_line_dict['credit']
                mv_line_dict['currency_id'] = st_line_currency.id
                if st_line.currency_id and statement_currency.id == company_currency.id and st_line_currency_rate:
                    debit_at_current_rate = self.pool.get('res.currency').round(cr, uid, company_currency, mv_line_dict['debit'] / st_line_currency_rate)
                    credit_at_current_rate = self.pool.get('res.currency').round(cr, uid, company_currency, mv_line_dict['credit'] / st_line_currency_rate)
                elif st_line.currency_id and st_line_currency_rate:
                    debit_at_current_rate = currency_obj.compute(cr, uid, statement_currency.id, company_currency.id, mv_line_dict['debit'] / st_line_currency_rate, context=ctx)
                    credit_at_current_rate = currency_obj.compute(cr, uid, statement_currency.id, company_currency.id, mv_line_dict['credit'] / st_line_currency_rate, context=ctx)
                else:
                    debit_at_current_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['debit'], context=ctx)
                    credit_at_current_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['credit'], context=ctx)
                if mv_line_dict.get('counterpart_move_line_id'):
                    #post an account line that use the same currency rate than the counterpart (to balance the account) and post the difference in another line
                    ctx['date'] = mv_line.date
                    if mv_line.currency_id.id == mv_line_dict['currency_id'] \
                            and float_is_zero(abs(mv_line.amount_currency) - abs(mv_line_dict['amount_currency']), precision_rounding=mv_line.currency_id.rounding):
                        # Full reconciliation in the same currency: mirror the
                        # counterpart's historical company-currency amounts.
                        debit_at_old_rate = mv_line.credit
                        credit_at_old_rate = mv_line.debit
                    else:
                        debit_at_old_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['debit'], context=ctx)
                        credit_at_old_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['credit'], context=ctx)
                    mv_line_dict['credit'] = credit_at_old_rate
                    mv_line_dict['debit'] = debit_at_old_rate
                    # Book any difference between old and current rates as an
                    # exchange-rate gain/loss line.
                    if debit_at_old_rate - debit_at_current_rate:
                        currency_diff = debit_at_current_rate - debit_at_old_rate
                        to_create.append(self.get_currency_rate_line(cr, uid, st_line, -currency_diff, move_id, context=context))
                    if credit_at_old_rate - credit_at_current_rate:
                        currency_diff = credit_at_current_rate - credit_at_old_rate
                        to_create.append(self.get_currency_rate_line(cr, uid, st_line, currency_diff, move_id, context=context))
                    if mv_line.currency_id and mv_line_dict['currency_id'] == mv_line.currency_id.id:
                        amount_unreconciled = mv_line.amount_residual_currency
                    else:
                        amount_unreconciled = currency_obj.compute(cr, uid, company_currency.id, mv_line_dict['currency_id'] , mv_line.amount_residual, context=ctx)
                    if float_is_zero(mv_line_dict['amount_currency'] + amount_unreconciled, precision_rounding=mv_line.currency_id.rounding):
                        # Fully paid in foreign currency: any residual in
                        # company currency is a pure rate difference.
                        amount = mv_line_dict['debit'] or mv_line_dict['credit']
                        sign = -1 if mv_line_dict['debit'] else 1
                        currency_rate_difference = sign * (mv_line.amount_residual - amount)
                        if not company_currency.is_zero(currency_rate_difference):
                            exchange_lines = self._get_exchange_lines(cr, uid, st_line, mv_line, currency_rate_difference, mv_line_dict['currency_id'], move_id, context=context)
                            for exchange_line in exchange_lines:
                                to_create.append(exchange_line)
                else:
                    mv_line_dict['debit'] = debit_at_current_rate
                    mv_line_dict['credit'] = credit_at_current_rate
            elif statement_currency.id != company_currency.id:
                #statement is in foreign currency but the transaction is in company currency
                prorata_factor = (mv_line_dict['debit'] - mv_line_dict['credit']) / st_line.amount_currency
                mv_line_dict['amount_currency'] = prorata_factor * st_line.amount
            to_create.append(mv_line_dict)
        # If the reconciliation is performed in another currency than the company currency, the amounts are converted to get the right debit/credit.
        # If there is more than 1 debit and 1 credit, this can induce a rounding error, which we put in the foreign exchange gain/loss account.
        if st_line_currency.id != company_currency.id:
            diff_amount = bank_st_move_vals['debit'] - bank_st_move_vals['credit'] \
                + sum(aml['debit'] for aml in to_create) - sum(aml['credit'] for aml in to_create)
            if not company_currency.is_zero(diff_amount):
                diff_aml = self.get_currency_rate_line(cr, uid, st_line, diff_amount, move_id, context=context)
                diff_aml['name'] = _('Rounding error from currency conversion')
                to_create.append(diff_aml)
        # Create move lines
        move_line_pairs_to_reconcile = []
        for mv_line_dict in to_create:
            counterpart_move_line_id = None # NB : this attribute is irrelevant for aml_obj.create() and needs to be removed from the dict
            if mv_line_dict.get('counterpart_move_line_id'):
                counterpart_move_line_id = mv_line_dict['counterpart_move_line_id']
                del mv_line_dict['counterpart_move_line_id']
            new_aml_id = aml_obj.create(cr, uid, mv_line_dict, context=context)
            if counterpart_move_line_id != None:
                move_line_pairs_to_reconcile.append([new_aml_id, counterpart_move_line_id])
        # Reconcile
        for pair in move_line_pairs_to_reconcile:
            aml_obj.reconcile_partial(cr, uid, pair, context=context)
        # Mark the statement line as reconciled
        self.write(cr, uid, id, {'journal_entry_id': move_id}, context=context)
# FIXME : if it wasn't for the multicompany security settings in account_security.xml, the method would just
# return [('journal_entry_id', '=', False)]
# Unfortunately, that spawns a "no access rights" error ; it shouldn't.
def _needaction_domain_get(self, cr, uid, context=None):
user = self.pool.get("res.users").browse(cr, uid, uid)
return ['|', ('company_id', '=', False), ('company_id', 'child_of', [user.company_id.id]), ('journal_entry_id', '=', False), ('account_id', '=', False)]
_order = "statement_id desc, sequence"
_name = "account.bank.statement.line"
_description = "Bank Statement Line"
_inherit = ['ir.needaction_mixin']
_columns = {
'name': fields.char('Communication', required=True),
'date': fields.date('Date', required=True),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
'partner_id': fields.many2one('res.partner', 'Partner'),
'bank_account_id': fields.many2one('res.partner.bank','Bank Account'),
'account_id': fields.many2one('account.account', 'Account', help="This technical field can be used at the statement line creation/import time in order to avoid the reconciliation process on it later on. The statement line will simply create a counterpart on this account"),
'statement_id': fields.many2one('account.bank.statement', 'Statement', select=True, required=True, ondelete='restrict'),
'journal_id': fields.related('statement_id', 'journal_id', type='many2one', relation='account.journal', string='Journal', store=True, readonly=True),
'partner_name': fields.char('Partner Name', help="This field is used to record the third party name when importing bank statement in electronic format, when the partner doesn't exist yet in the database (or cannot be found)."),
'ref': fields.char('Reference'),
'note': fields.text('Notes'),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of bank statement lines."),
'company_id': fields.related('statement_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'journal_entry_id': fields.many2one('account.move', 'Journal Entry', copy=False),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
}
_defaults = {
'name': lambda self,cr,uid,context={}: self.pool.get('ir.sequence').get(cr, uid, 'account.bank.statement.line', context=context),
'date': lambda self,cr,uid,context={}: context.get('date', fields.date.context_today(self,cr,uid,context=context)),
}
class account_statement_operation_template(osv.osv):
_name = "account.statement.operation.template"
_description = "Preset for the lines that can be created in a bank statement reconciliation"
_columns = {
'name': fields.char('Button Label', required=True),
'account_id': fields.many2one('account.account', 'Account', ondelete='cascade', domain=[('type', 'not in', ('view', 'closed', 'consolidation'))]),
'label': fields.char('Label'),
'amount_type': fields.selection([('fixed', 'Fixed'),('percentage_of_total','Percentage of total amount'),('percentage_of_balance', 'Percentage of open balance')],
'Amount type', required=True),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account'), help="The amount will count as a debit if it is negative, as a credit if it is positive (except if amount type is 'Percentage of open balance').", required=True),
'tax_id': fields.many2one('account.tax', 'Tax', ondelete='restrict', domain=[('type_tax_use', 'in', ['purchase', 'all']), ('parent_id', '=', False)]),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', ondelete='set null', domain=[('type','!=','view'), ('state','not in',('close','cancelled'))]),
}
_defaults = {
'amount_type': 'percentage_of_balance',
'amount': 100.0
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_3/tests/regressiontests/generic_views/views.py | 49 | 4506 | from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views import generic
from regressiontests.generic_views.models import Artist, Author, Book, Page
from regressiontests.generic_views.forms import AuthorForm
class CustomTemplateView(generic.TemplateView):
template_name = 'generic_views/about.html'
def get_context_data(self, **kwargs):
return {
'params': kwargs,
'key': 'value'
}
class ObjectDetail(generic.DetailView):
template_name = 'generic_views/detail.html'
def get_object(self):
return {'foo': 'bar'}
class ArtistDetail(generic.DetailView):
queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
queryset = Author.objects.all()
class PageDetail(generic.DetailView):
queryset = Page.objects.all()
template_name_field = 'template'
class DictList(generic.ListView):
"""A ListView that doesn't use a model."""
queryset = [
{'first': 'John', 'last': 'Lennon'},
{'last': 'Yoko', 'last': 'Ono'}
]
template_name = 'generic_views/list.html'
class ArtistList(generic.ListView):
template_name = 'generic_views/list.html'
queryset = Artist.objects.all()
class AuthorList(generic.ListView):
queryset = Author.objects.all()
class CustomPaginator(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super(CustomPaginator, self).__init__(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class AuthorListCustomPaginator(AuthorList):
paginate_by = 5;
def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
return super(AuthorListCustomPaginator, self).get_paginator(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page)
class ArtistCreate(generic.CreateView):
model = Artist
class NaiveAuthorCreate(generic.CreateView):
queryset = Author.objects.all()
class AuthorCreate(generic.CreateView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorCreate(generic.CreateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class AuthorCreateRestricted(AuthorCreate):
post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
model = Artist
class NaiveAuthorUpdate(generic.UpdateView):
queryset = Author.objects.all()
class AuthorUpdate(generic.UpdateView):
model = Author
success_url = '/list/authors/'
class OneAuthorUpdate(generic.UpdateView):
success_url = '/list/authors/'
def get_object(self):
return Author.objects.get(pk=1)
class SpecializedAuthorUpdate(generic.UpdateView):
model = Author
form_class = AuthorForm
template_name = 'generic_views/form.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('author_detail', args=[self.object.id,])
class NaiveAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
model = Author
success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
queryset = Author.objects.all()
template_name = 'generic_views/confirm_delete.html'
context_object_name = 'thingy'
def get_success_url(self):
return reverse('authors_list')
class BookConfig(object):
queryset = Book.objects.all()
date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
pass
class BookDetail(BookConfig, generic.DateDetailView):
pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
def get_queryset(self):
return Author.objects.all()
| mit |
krasin/omim | 3party/Alohalytics/tests/googletest/test/gtest_xml_output_unittest.py | 1815 | 14580 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
re.match,
'XML datettime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is avalable only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| apache-2.0 |
ZettelGeist/zettelgeist | sphinx-docs/source/conf.py | 1 | 2464 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'ZettelGeist'
copyright = '2020, George K. Thiruvathukal and David B. Dennis'
author = 'George K. Thiruvathukal and David B. Dennis'
# The full version, including alpha/beta/rc tags
version = os.environ.get("BOOK_VERSION", "beta")
release = version
rst_epilog = """
.. |site-version| replace:: **version %(version)s**
""" % vars()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_theme_options = {
'logo': 'peace.png',
'github_user': 'zettelgeist',
'github_repo': 'zettelgeist',
#'analytics_id' : 'UA-23507985-1',
'extra_nav_links' : {
'Site Index' : 'genindex.html',
'Software Systems Laboratory' : 'https://ssl.cs.luc.edu',
}
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| apache-2.0 |
emonty/vhd-util | tools/python/logging/logging-0.4.9.2/test/log_test10.py | 42 | 2847 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""Test harness for the logging module. Shows use of a user-defined Logger subclass.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import sys
import locale
locale.setlocale(locale.LC_ALL, '')
from logging import *
LOG_FORMAT = "%(asctime)s %(levelname)-5s %(message)s"
DATE_FORMAT = "%x %X"
class MyLogger(Logger):
"""
A simple example of a logger extension.
"""
def debug(self, msg, *args, **kwargs):
"""
This overridden method passes exception information for DEBUG level calls
"""
if self.manager.disable >= DEBUG:
return
if DEBUG >= self.getEffectiveLevel():
exc_info = kwargs.get("exc_info", 0)
ei = None
if exc_info:
ei = sys.exc_info()
if not ei[1]:
ei = None
self._log(DEBUG, msg, args, ei)
del ei
class NotALogger:
pass
def config():
try:
setLoggerClass(NotALogger)
except Exception, e:
sys.stderr.write("%s\n" % e)
setLoggerClass(MyLogger)
if __name__ == "__main__":
basicConfig()
if __name__ == "__main__":
getLogger("").handlers[0].setFormatter(Formatter(LOG_FORMAT, DATE_FORMAT))
def run():
getLogger("").setLevel(DEBUG)
logger = getLogger("mylogger")
logger.info("Starting...")
logger.debug("Debug message not in exception handler (no traceback)")
logger.info("About to throw exception...")
try:
print "7" + 4
except Exception, e:
logger.debug("Debug message inside exception handler (traceback)",exc_info=1)
logger.info("Done.")
def main():
config()
run()
if __name__ == "__main__":
main()
| gpl-2.0 |
genome-vendor/cython | debian/cython/usr/share/pyshared/Cython/Compiler/UtilityCode.py | 98 | 6918 | from TreeFragment import parse_from_strings, StringParseContext
import Symtab
import Naming
import Code
class NonManglingModuleScope(Symtab.ModuleScope):
def __init__(self, prefix, *args, **kw):
self.prefix = prefix
self.cython_scope = None
Symtab.ModuleScope.__init__(self, *args, **kw)
def add_imported_entry(self, name, entry, pos):
entry.used = True
return super(NonManglingModuleScope, self).add_imported_entry(
name, entry, pos)
def mangle(self, prefix, name=None):
if name:
if prefix in (Naming.typeobj_prefix, Naming.func_prefix, Naming.var_prefix, Naming.pyfunc_prefix):
# Functions, classes etc. gets a manually defined prefix easily
# manually callable instead (the one passed to CythonUtilityCode)
prefix = self.prefix
return "%s%s" % (prefix, name)
else:
return Symtab.ModuleScope.mangle(self, prefix)
class CythonUtilityCodeContext(StringParseContext):
scope = None
def find_module(self, module_name, relative_to = None, pos = None,
need_pxd = 1):
if module_name != self.module_name:
if module_name not in self.modules:
raise AssertionError("Only the cython cimport is supported.")
else:
return self.modules[module_name]
if self.scope is None:
self.scope = NonManglingModuleScope(self.prefix,
module_name,
parent_module=None,
context=self)
return self.scope
class CythonUtilityCode(Code.UtilityCodeBase):
"""
Utility code written in the Cython language itself.
The @cname decorator can set the cname for a function, method of cdef class.
Functions decorated with @cname('c_func_name') get the given cname.
For cdef classes the rules are as follows:
obj struct -> <cname>_obj
obj type ptr -> <cname>_type
methods -> <class_cname>_<method_cname>
For methods the cname decorator is optional, but without the decorator the
methods will not be prototyped. See Cython.Compiler.CythonScope and
tests/run/cythonscope.pyx for examples.
"""
is_cython_utility = True
def __init__(self, impl, name="__pyxutil", prefix="", requires=None,
file=None, from_scope=None, context=None):
# 1) We need to delay the parsing/processing, so that all modules can be
# imported without import loops
# 2) The same utility code object can be used for multiple source files;
# while the generated node trees can be altered in the compilation of a
# single file.
# Hence, delay any processing until later.
if context is not None:
impl = Code.sub_tempita(impl, context, file, name)
self.impl = impl
self.name = name
self.file = file
self.prefix = prefix
self.requires = requires or []
self.from_scope = from_scope
def get_tree(self, entries_only=False, cython_scope=None):
from AnalysedTreeTransforms import AutoTestDictTransform
# The AutoTestDictTransform creates the statement "__test__ = {}",
# which when copied into the main ModuleNode overwrites
# any __test__ in user code; not desired
excludes = [AutoTestDictTransform]
import Pipeline, ParseTreeTransforms
context = CythonUtilityCodeContext(self.name)
context.prefix = self.prefix
context.cython_scope = cython_scope
#context = StringParseContext(self.name)
tree = parse_from_strings(self.name, self.impl, context=context,
allow_struct_enum_decorator=True)
pipeline = Pipeline.create_pipeline(context, 'pyx', exclude_classes=excludes)
if entries_only:
p = []
for t in pipeline:
p.append(t)
if isinstance(p, ParseTreeTransforms.AnalyseDeclarationsTransform):
break
pipeline = p
transform = ParseTreeTransforms.CnameDirectivesTransform(context)
# InterpretCompilerDirectives already does a cdef declarator check
#before = ParseTreeTransforms.DecoratorTransform
before = ParseTreeTransforms.InterpretCompilerDirectives
pipeline = Pipeline.insert_into_pipeline(pipeline, transform,
before=before)
if self.from_scope:
def scope_transform(module_node):
module_node.scope.merge_in(self.from_scope)
return module_node
transform = ParseTreeTransforms.AnalyseDeclarationsTransform
pipeline = Pipeline.insert_into_pipeline(pipeline, scope_transform,
before=transform)
(err, tree) = Pipeline.run_pipeline(pipeline, tree, printtree=False)
assert not err, err
return tree
def put_code(self, output):
pass
@classmethod
def load_as_string(cls, util_code_name, from_file=None, **kwargs):
"""
Load a utility code as a string. Returns (proto, implementation)
"""
util = cls.load(util_code_name, from_file, **kwargs)
return util.proto, util.impl # keep line numbers => no lstrip()
def declare_in_scope(self, dest_scope, used=False, cython_scope=None,
whitelist=None):
"""
Declare all entries from the utility code in dest_scope. Code will only
be included for used entries. If module_name is given, declare the
type entries with that name.
"""
tree = self.get_tree(entries_only=True, cython_scope=cython_scope)
entries = tree.scope.entries
entries.pop('__name__')
entries.pop('__file__')
entries.pop('__builtins__')
entries.pop('__doc__')
for name, entry in entries.iteritems():
entry.utility_code_definition = self
entry.used = used
original_scope = tree.scope
dest_scope.merge_in(original_scope, merge_unused=True,
whitelist=whitelist)
tree.scope = dest_scope
for dep in self.requires:
if dep.is_cython_utility:
dep.declare_in_scope(dest_scope)
return original_scope
def declare_declarations_in_scope(declaration_string, env, private_type=True,
*args, **kwargs):
"""
Declare some declarations given as Cython code in declaration_string
in scope env.
"""
CythonUtilityCode(declaration_string, *args, **kwargs).declare_in_scope(env)
| apache-2.0 |
r24mille/think_stats | chapter_one/first.py | 1 | 1783 | '''
Created on Apr 15, 2014
@author: r24mille
'''
# Initial code recommended in Exercise 1.3 of Think Stats.
# NOTE: Python 2 script (print statements; 'survey' is the book's module).
import survey

table = survey.Pregnancies()
table.ReadRecords()
print 'Number of pregnancies', len(table.records)

# Count the number of live births, outcome=1 is a live birth
numbirths = 0;
for record in table.records:
    if record.outcome == 1:
        numbirths += 1
print 'Number of live births', numbirths

# Modify the loop (though I'm creating a new loop) to partition live birth
# records into two groups. One for first babies and one for others. The
# birthord code for a first child is 1.
firstbirths = []
otherbirths = []
for record in table.records:
    if record.outcome == 1:
        if record.birthord == 1:
            firstbirths.append(record)
        else:
            otherbirths.append(record)
print 'Number of first births', len(firstbirths)
print 'Number of non-first births', len(otherbirths)

# Compute the average pregnancy length (in weeks) for first births and others.
# prglength is the integer duration of the pregnancy in weeks.
total_first_preg_len = 0
total_other_preg_len = 0
for record in firstbirths:
    total_first_preg_len += record.prglength
# float() cast forces true division under Python 2 integer semantics.
avg_first_preg_len = float(total_first_preg_len) / len(firstbirths)
print 'Average pregnancy length for first births', avg_first_preg_len
for record in otherbirths:
    total_other_preg_len += record.prglength
avg_other_preg_len = float(total_other_preg_len) / len(otherbirths)
print 'Average pregnancy length for other births', avg_other_preg_len
print 'Difference between pregnancy length of first births to others', \
    (avg_first_preg_len - avg_other_preg_len), 'weeks, aka', \
    ((avg_first_preg_len - avg_other_preg_len) * 7), 'days.'
| gpl-3.0 |
bhamlin/discord-pip-boy | test-db.py | 1 | 1210 | #!env/bin/python3
# Scratch script exercising pydblite as a character store for a Discord bot:
# create the table, insert one character keyed by owner id, level it up,
# then read it back.
import json
import os

from pydblite import Base

db = Base('test.pdl')
# mode='override' recreates the table from scratch on every run.
db.create('owner', 'name', 'game', 'char', mode='override')
db.create_index('owner')
db.create_index('game')

# Sample d20-style character sheet stored as a nested dict in the 'char'
# field (pydblite stores arbitrary Python objects per column).
c = {
    'name': 'Covfefe',
    'race': 'dwarf',
    'level': [1],
    'class': ['wizard#d20srd'],
    'class_features': [{'specialization': 'conjuration'}],
    'stats': {
        'str': 10,
        'dex': 15,
        'con': 15,
        'int': 16,
        'wis': 11,
        'cha': 10 },
    'skills': {
        'spellcraft': {
            'class': True,
            'ranks': 1,
            'check': 'int'
        }
    },
    'feats': {
        'spell penetration': 1,
    }
}

# Insert only if no record exists yet for this owner (index lookup).
if not db._owner['bhamlin#6283']:
    db.insert(owner='bhamlin#6283', name='Covfefe', game=None, char=c)
    db.commit()

# Bump the character's level in place and persist the change.
for rec in db._owner['bhamlin#6283']:
    q = rec['char']
    q['level'][0] += 1
    db.update(rec, char=q)
db.commit()

for rec in (db('owner') == 'bhamlin#6283'):
    #print(rec)
    #print(json.dumps(rec['char']))
    print(rec['char']['name'], rec['char']['level'])
print()

# for rec in [rec for rec in db if rec['owner'] == 'bhamlin#6283']:
#     print(rec)
#     print(rec['char'].__dict__)
| gpl-3.0 |
JioCloud/neutron | neutron/tests/unit/test_context.py | 14 | 6529 | # Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_context import context as oslo_context
from testtools import matchers
from neutron import context
from neutron.tests import base
class TestNeutronContext(base.BaseTestCase):
    """Unit tests for neutron.context.Context creation and serialization."""

    def setUp(self):
        super(TestNeutronContext, self).setUp()
        # Stub out DB session creation so no real database is touched.
        db_api = 'neutron.db.api.get_session'
        self._db_api_session_patcher = mock.patch(db_api)
        self.db_api_session = self._db_api_session_patcher.start()

    def test_neutron_context_create(self):
        """A fresh context exposes ids via both new and legacy attributes."""
        ctx = context.Context('user_id', 'tenant_id')
        self.assertEqual('user_id', ctx.user_id)
        self.assertEqual('tenant_id', ctx.project_id)
        self.assertEqual('tenant_id', ctx.tenant_id)
        # request_id is auto-generated with the oslo 'req-' prefix.
        self.assertThat(ctx.request_id, matchers.StartsWith('req-'))
        self.assertEqual('user_id', ctx.user)
        self.assertEqual('tenant_id', ctx.tenant)
        self.assertIsNone(ctx.user_name)
        self.assertIsNone(ctx.tenant_name)
        self.assertIsNone(ctx.auth_token)

    def test_neutron_context_create_with_name(self):
        """user/tenant names are stored separately from the ids."""
        ctx = context.Context('user_id', 'tenant_id',
                              tenant_name='tenant_name', user_name='user_name')
        # Check name is set
        self.assertEqual('user_name', ctx.user_name)
        self.assertEqual('tenant_name', ctx.tenant_name)
        # Check user/tenant contains its ID even if user/tenant_name is passed
        self.assertEqual('user_id', ctx.user)
        self.assertEqual('tenant_id', ctx.tenant)

    def test_neutron_context_create_with_request_id(self):
        """An explicit request_id overrides the auto-generated one."""
        ctx = context.Context('user_id', 'tenant_id', request_id='req_id_xxx')
        self.assertEqual('req_id_xxx', ctx.request_id)

    def test_neutron_context_create_with_auth_token(self):
        ctx = context.Context('user_id', 'tenant_id',
                              auth_token='auth_token_xxx')
        self.assertEqual('auth_token_xxx', ctx.auth_token)

    def test_neutron_context_to_dict(self):
        """to_dict() must carry both canonical and legacy keys."""
        ctx = context.Context('user_id', 'tenant_id')
        ctx_dict = ctx.to_dict()
        self.assertEqual('user_id', ctx_dict['user_id'])
        self.assertEqual('tenant_id', ctx_dict['project_id'])
        self.assertEqual(ctx.request_id, ctx_dict['request_id'])
        self.assertEqual('user_id', ctx_dict['user'])
        self.assertEqual('tenant_id', ctx_dict['tenant'])
        self.assertIsNone(ctx_dict['user_name'])
        self.assertIsNone(ctx_dict['tenant_name'])
        self.assertIsNone(ctx_dict['project_name'])
        self.assertIsNone(ctx_dict['auth_token'])

    def test_neutron_context_to_dict_with_name(self):
        ctx = context.Context('user_id', 'tenant_id',
                              tenant_name='tenant_name', user_name='user_name')
        ctx_dict = ctx.to_dict()
        self.assertEqual('user_name', ctx_dict['user_name'])
        self.assertEqual('tenant_name', ctx_dict['tenant_name'])
        self.assertEqual('tenant_name', ctx_dict['project_name'])

    def test_neutron_context_to_dict_with_auth_token(self):
        ctx = context.Context('user_id', 'tenant_id',
                              auth_token='auth_token_xxx')
        ctx_dict = ctx.to_dict()
        self.assertEqual('auth_token_xxx', ctx_dict['auth_token'])

    def test_neutron_context_admin_to_dict(self):
        """Admin context serializes without leaking the DB session."""
        self.db_api_session.return_value = 'fakesession'
        ctx = context.get_admin_context()
        ctx_dict = ctx.to_dict()
        self.assertIsNone(ctx_dict['user_id'])
        self.assertIsNone(ctx_dict['tenant_id'])
        self.assertIsNone(ctx_dict['auth_token'])
        self.assertIsNotNone(ctx.session)
        # The live session object must never appear in the serialized form.
        self.assertNotIn('session', ctx_dict)

    def test_neutron_context_admin_without_session_to_dict(self):
        ctx = context.get_admin_context_without_session()
        ctx_dict = ctx.to_dict()
        self.assertIsNone(ctx_dict['user_id'])
        self.assertIsNone(ctx_dict['tenant_id'])
        self.assertIsNone(ctx_dict['auth_token'])
        self.assertFalse(hasattr(ctx, 'session'))

    def test_neutron_context_with_load_roles_true(self):
        ctx = context.get_admin_context()
        self.assertIn('admin', ctx.roles)

    def test_neutron_context_with_load_roles_false(self):
        ctx = context.get_admin_context(load_admin_roles=False)
        self.assertFalse(ctx.roles)

    def test_neutron_context_elevated_retains_request_id(self):
        """elevated() grants admin but must not mint a new request_id."""
        ctx = context.Context('user_id', 'tenant_id')
        self.assertFalse(ctx.is_admin)
        req_id_before = ctx.request_id
        elevated_ctx = ctx.elevated()
        self.assertTrue(elevated_ctx.is_admin)
        self.assertEqual(req_id_before, elevated_ctx.request_id)

    def test_neutron_context_overwrite(self):
        """overwrite=False must keep the thread-local context untouched."""
        ctx1 = context.Context('user_id', 'tenant_id')
        self.assertEqual(ctx1.request_id,
                         oslo_context.get_current().request_id)
        # If overwrite is not specified, request_id should be updated.
        ctx2 = context.Context('user_id', 'tenant_id')
        self.assertNotEqual(ctx2.request_id, ctx1.request_id)
        self.assertEqual(ctx2.request_id,
                         oslo_context.get_current().request_id)
        # If overwrite is specified, request_id should be kept.
        ctx3 = context.Context('user_id', 'tenant_id', overwrite=False)
        self.assertNotEqual(ctx3.request_id, ctx2.request_id)
        self.assertEqual(ctx2.request_id,
                         oslo_context.get_current().request_id)

    def test_neutron_context_get_admin_context_not_update_local_store(self):
        """get_admin_context() must not clobber the thread-local context."""
        ctx = context.Context('user_id', 'tenant_id')
        req_id_before = oslo_context.get_current().request_id
        self.assertEqual(ctx.request_id, req_id_before)
        ctx_admin = context.get_admin_context()
        self.assertEqual(req_id_before, oslo_context.get_current().request_id)
        self.assertNotEqual(req_id_before, ctx_admin.request_id)
| apache-2.0 |
shssoichiro/servo | tests/wpt/css-tests/tools/py/testing/process/test_forkedfunc.py | 162 | 4839 | import pytest
import py, sys, os
pytestmark = py.test.mark.skipif("not hasattr(os, 'fork')")
def test_waitfinish_removes_tempdir():
    """waitfinish() must clean up the temporary result directory."""
    ff = py.process.ForkedFunc(boxf1)
    assert ff.tempdir.check()
    ff.waitfinish()
    assert not ff.tempdir.check()
def test_tempdir_gets_gc_collected(monkeypatch):
    """The tempdir is removed on finalization even when no child was forked."""
    # Make fork() a no-op returning our own pid so no real child is created.
    monkeypatch.setattr(os, 'fork', lambda: os.getpid())
    ff = py.process.ForkedFunc(boxf1)
    assert ff.tempdir.check()
    # Invoke the finalizer directly instead of relying on GC timing.
    ff.__del__()
    assert not ff.tempdir.check()
def test_basic_forkedfunc():
    """Happy path: captured stdout/stderr, clean exit, return value relayed."""
    result = py.process.ForkedFunc(boxf1).waitfinish()
    assert result.out == "some out\n"
    assert result.err == "some err\n"
    assert result.exitstatus == 0
    assert result.signal == 0
    assert result.retval == 1
def test_exitstatus():
    """An os._exit() code in the child is surfaced as result.exitstatus."""
    def func():
        os._exit(4)
    result = py.process.ForkedFunc(func).waitfinish()
    assert result.exitstatus == 4
    assert result.signal == 0
    assert not result.out
    assert not result.err
# (sic: 'execption' typo is historical; renaming would break -k selections.)
def test_execption_in_func():
    """An exception in the child maps to EXITSTATUS_EXCEPTION + traceback."""
    def fun():
        raise ValueError(42)
    ff = py.process.ForkedFunc(fun)
    result = ff.waitfinish()
    assert result.exitstatus == ff.EXITSTATUS_EXCEPTION
    # The child's traceback is relayed on stderr.
    assert result.err.find("ValueError: 42") != -1
    assert result.signal == 0
    assert not result.retval
def test_forkedfunc_on_fds():
    """Raw os.write() to fds 1/2 is captured just like stream writes."""
    result = py.process.ForkedFunc(boxf2).waitfinish()
    assert result.out == "someout"
    assert result.err == "someerr"
    assert result.exitstatus == 0
    assert result.signal == 0
    assert result.retval == 2
def test_forkedfunc_on_fds_output():
    """Output written before a SIGSEGV death is still captured."""
    result = py.process.ForkedFunc(boxf3).waitfinish()
    assert result.signal == 11
    assert result.out == "s"
def test_forkedfunc_on_stdout():
    """sys.stdout writes survive a subsequent signal death of the child."""
    def boxf3():
        import sys
        sys.stdout.write("hello\n")
        os.kill(os.getpid(), 11)
    result = py.process.ForkedFunc(boxf3).waitfinish()
    assert result.signal == 11
    assert result.out == "hello\n"
def test_forkedfunc_signal():
    """A child killed by SIGSEGV reports signal 11 and no return value."""
    result = py.process.ForkedFunc(boxseg).waitfinish()
    assert result.retval is None
    # Signal reporting relies on os.WIFSIGNALED handling added in 2.4.
    if sys.version_info < (2,4):
        py.test.skip("signal detection does not work with python prior 2.4")
    assert result.signal == 11
def test_forkedfunc_huge_data():
    """Large child output must not deadlock the parent's result collection."""
    result = py.process.ForkedFunc(boxhuge).waitfinish()
    assert result.out
    assert result.exitstatus == 0
    assert result.signal == 0
    assert result.retval == 3
def test_box_seq():
    # we run many boxes with huge data, just one after another
    # (guards against fd/tempdir leaks accumulating across runs)
    for i in range(50):
        result = py.process.ForkedFunc(boxhuge).waitfinish()
        assert result.out
        assert result.exitstatus == 0
        assert result.signal == 0
        assert result.retval == 3
def test_box_in_a_box():
    """Nesting ForkedFunc inside a forked child works end to end."""
    def boxfun():
        result = py.process.ForkedFunc(boxf2).waitfinish()
        # Relay the inner child's captured output to our own streams.
        print (result.out)
        sys.stderr.write(result.err + "\n")
        return result.retval
    result = py.process.ForkedFunc(boxfun).waitfinish()
    assert result.out == "someout\n"
    assert result.err == "someerr\n"
    assert result.exitstatus == 0
    assert result.signal == 0
    assert result.retval == 2
def test_kill_func_forked():
    """Killing the child with SIGTERM (15) is reported via result.signal."""
    class A:
        pass
    info = A()
    import time
    def box_fun():
        time.sleep(10) # we don't want to last forever here
    ff = py.process.ForkedFunc(box_fun)
    os.kill(ff.pid, 15)
    result = ff.waitfinish()
    if py.std.sys.version_info < (2,4):
        py.test.skip("signal detection does not work with python prior 2.4")
    assert result.signal == 15
def test_hooks(monkeypatch):
    """child_on_start/child_on_exit hooks run in the child with live streams."""
    def _boxed():
        return 1
    def _on_start():
        sys.stdout.write("some out\n")
        sys.stdout.flush()
    def _on_exit():
        sys.stderr.write("some err\n")
        sys.stderr.flush()
    result = py.process.ForkedFunc(_boxed, child_on_start=_on_start,
                                   child_on_exit=_on_exit).waitfinish()
    assert result.out == "some out\n"
    assert result.err == "some err\n"
    assert result.exitstatus == 0
    assert result.signal == 0
    assert result.retval == 1
# ======================================================================
# examples
# ======================================================================
#
def boxf1():
    """Emit one line on stdout and one on stderr, then return 1."""
    for stream, text in ((sys.stdout, "some out\n"),
                         (sys.stderr, "some err\n")):
        stream.write(text)
    return 1
def boxf2():
    """Write raw bytes straight to fds 1 and 2, then return 2."""
    for fd, text in ((1, "someout"), (2, "someerr")):
        os.write(fd, text.encode('ascii'))
    return 2
def boxf3():
    # Write a single byte, then die from SIGSEGV (signal 11); used to check
    # that output written before a signal death is still captured.
    os.write(1, "s".encode('ascii'))
    os.kill(os.getpid(), 11)
def boxseg():
    # Kill ourselves with SIGSEGV (signal 11); used to test signal reporting.
    os.kill(os.getpid(), 11)
def boxhuge():
    """Flood both stdout and stderr with bulk data, then return 3."""
    chunk = " ".encode('ascii') * 10000
    # Same interleaved write sequence as before: fds 1,2,1,1,2,2,1.
    for fd in (1, 2, 1, 1, 2, 2, 1):
        os.write(fd, chunk)
    return 3
| mpl-2.0 |
supergis/QGIS | python/plugins/processing/algs/saga/versioncheck.py | 9 | 2449 | import os
import subprocess
def getAlgParams(f):
    """Parse a SAGA algorithm description file.

    Expected layout:
      line 1: display name, optionally "Display Name|cmdname"
      line 2: SAGA library (group) name
      line 3 until the first blank line: one parameter declaration per line

    Returns a tuple ``(cmdname, group, params, booleanparams, numparams)``
    where every parameter name is prefixed with '-' as expected by saga_cmd.
    """
    params = []
    booleanparams = []
    numparams = []
    # 'with' guarantees the handle is closed even if a malformed line raises
    # mid-parse; the original open()/close() pair leaked it on exceptions.
    with open(f) as lines:
        name = lines.readline().strip('\n').strip()
        if '|' in name:
            # "Display Name|cmdname" -> the actual saga_cmd tool name.
            cmdname = name.split('|')[1]
        else:
            cmdname = name
        group = lines.readline().strip('\n').strip()
        line = lines.readline().strip('\n').strip()
        while line != '':
            if line.startswith('Hardcoded'):
                pass
            elif line.startswith('AllowUnmatching'):
                pass
            elif line.startswith('Extent'):
                # "Extent A B C D" declares several extent parameters at once.
                extentParamNames = line[6:].strip().split(' ')
                params.extend(["-" + p for p in extentParamNames])
            else:
                tokens = line.split("|")
                if tokens[0] == "ParameterBoolean":
                    booleanparams.append("-" + tokens[1].strip())
                elif tokens[0] == "ParameterNumber":
                    numparams.append("-" + tokens[1].strip())
                else:
                    # NOTE: intentionally NOT stripped, matching the original
                    # parser (only boolean/number names were stripped).
                    params.append("-" + tokens[1])
            line = lines.readline().strip('\n').strip()
    return cmdname, group, params, booleanparams, numparams
def testDescriptionFile(f):
    """Run saga_cmd with dummy values for every parameter declared in a
    description file and report tools whose 'Usage' output suggests the
    description no longer matches the installed SAGA version.

    NOTE: Python 2 only (print statements), and the saga_cmd path below is
    hard-coded to a local Windows install.
    """
    usage = ""
    cmdname, group, params, booleanparams, numparams = getAlgParams(f)
    command = [r'd:\saga2.1.2\saga_cmd.exe', group, cmdname]
    # Feed placeholder values: "dummy" for generic params, 0 for numbers,
    # bare flags for booleans.
    for p in params:
        command.append(p)
        command.append("dummy")
    for p in numparams:
        command.append(p)
        command.append("0")
    command.extend(booleanparams)
    proc = subprocess.Popen(
        command,
        shell=True,
        stdout=subprocess.PIPE,
        stdin=open(os.devnull),
        stderr=subprocess.STDOUT,
        universal_newlines=True,
    ).stdout
    lines = []
    for line in iter(proc.readline, ''):
        lines.append(line)
        if "Usage" in line:
            usage = line
    # saga_cmd prints a 'Usage:' help block when arguments don't match;
    # a leading underscore line marks deprecated/internal tools we ignore.
    if usage and not lines[0].startswith("_"):
        print "-" * 50
        print f + " [ERROR]"
        print lines
        print usage
        print "Name in description:" + cmdname
        print "Parameters in description:" + unicode(params)
        print "-" * 50
        print
if __name__ == '__main__':
    # Check every SAGA description file shipped alongside this script.
    folder = os.path.join(os.path.dirname(__file__), "description")
    for descriptionFile in os.listdir(folder):
        if descriptionFile.endswith('txt'):
            testDescriptionFile(os.path.join(folder, descriptionFile))
| gpl-2.0 |
carragom/modoboa | modoboa/admin/tests/test_api.py | 1 | 23672 | # coding: utf-8
"""Admin API related tests."""
import copy
import json
from django.core.urlresolvers import reverse
from rest_framework.authtoken.models import Token
from modoboa.admin import models as admin_models
from modoboa.core import models as core_models
from modoboa.lib.tests import ModoAPITestCase
from .. import factories
from .. import models
class DomainAPITestCase(ModoAPITestCase):
    """Check Domain REST API (list/create/update/delete + permissions)."""

    @classmethod
    def setUpTestData(cls):
        """Create test data."""
        super(DomainAPITestCase, cls).setUpTestData()
        factories.populate_database()
        # Token for a DomainAdmins-level user, used to verify that domain
        # admins cannot perform SuperAdmin-only operations.
        cls.da_token = Token.objects.create(
            user=core_models.User.objects.get(username="admin@test.com"))

    def test_get_domains(self):
        """Retrieve a list of domains."""
        url = reverse("external_api:domain-list")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 2)
        domain = response.data[0]
        url = reverse(
            "external_api:domain-detail", args=[domain["pk"]])
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["name"], domain["name"])

    def test_create_domain(self):
        """Check domain creation."""
        url = reverse("external_api:domain-list")
        response = self.client.post(url, {"name": "test3.com", "quota": 10})
        self.assertEqual(response.status_code, 201)
        self.assertTrue(
            models.Domain.objects.filter(name="test3.com").exists())
        # Missing fields -> validation errors for both name and quota.
        response = self.client.post(url, {})
        self.assertEqual(response.status_code, 400)
        self.assertIn("name", response.data)
        self.assertIn("quota", response.data)
        # A domain admin is not allowed to create domains.
        self.client.credentials(
            HTTP_AUTHORIZATION='Token ' + self.da_token.key)
        response = self.client.post(url, {"name": "test4.com", "quota": 10})
        self.assertEqual(response.status_code, 403)

    def test_update_domain(self):
        """Check domain update."""
        domain = models.Domain.objects.get(name="test.com")
        models.Mailbox.objects.filter(
            domain__name="test.com", address="user").update(
                use_domain_quota=True)
        url = reverse("external_api:domain-detail", args=[domain.pk])
        response = self.client.put(url, {"name": "test.com", "quota": 1000})
        self.assertEqual(response.status_code, 200)
        domain.refresh_from_db()
        self.assertEqual(domain.quota, 1000)
        # Mailboxes bound to the domain quota must follow the new value.
        mb = models.Mailbox.objects.get(
            domain__name="test.com", address="user")
        self.assertEqual(mb.quota, 1000)
        # Renaming the domain must rename its mailboxes' domain too.
        response = self.client.put(url, {"name": "test42.com", "quota": 1000})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            models.Mailbox.objects.filter(
                address="user", domain__name="test42.com").exists())

    def test_delete_domain(self):
        """Try to delete a domain."""
        domain = models.Domain.objects.get(name="test.com")
        url = reverse("external_api:domain-detail", args=[domain.pk])
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 204)
        self.assertFalse(models.Domain.objects.filter(pk=domain.pk).exists())
class DomainAliasAPITestCase(ModoAPITestCase):
    """Check DomainAlias REST API (list/create/update/delete + permissions)."""

    @classmethod
    def setUpTestData(cls):
        """Create test data."""
        super(DomainAliasAPITestCase, cls).setUpTestData()
        factories.populate_database()
        cls.dom_alias1 = factories.DomainAliasFactory(
            name="dalias1.com", target__name="test.com")
        cls.dom_alias2 = factories.DomainAliasFactory(
            name="dalias2.com", target__name="test2.com")
        # Token for a DomainAdmins-level user (admin of test.com only).
        cls.da_token = Token.objects.create(
            user=core_models.User.objects.get(username="admin@test.com"))

    def test_get(self):
        """Retrieve a list of domain aliases."""
        url = reverse("external_api:domain_alias-list")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 2)
        url = reverse(
            "external_api:domain_alias-detail", args=[response.data[0]["pk"]])
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["name"], "dalias1.com")
        # Filtering by target domain name narrows the list.
        url = reverse("external_api:domain_alias-list")
        response = self.client.get("{}?domain=test.com".format(url))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)
        # A domain admin sees no aliases (not the owner of these targets).
        self.client.credentials(
            HTTP_AUTHORIZATION='Token ' + self.da_token.key)
        response = self.client.get(reverse("external_api:domain_alias-list"))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 0)

    def test_post(self):
        """Try to create a new domain alias."""
        url = reverse("external_api:domain_alias-list")
        target = models.Domain.objects.get(name="test.com")
        data = {
            "name": "dalias3.com",
            "target": target.pk
        }
        response = self.client.post(url, data, format="json")
        self.assertEqual(response.status_code, 201)
        dalias = json.loads(response.content)
        dalias = models.DomainAlias.objects.filter(
            pk=dalias["pk"]).first()
        self.assertEqual(dalias.target, target)
        # A domain admin is not allowed to create domain aliases.
        self.client.credentials(
            HTTP_AUTHORIZATION='Token ' + self.da_token.key)
        response = self.client.post(
            url, {"name": "dalias4.com", "target": target.pk}, format="json")
        self.assertEqual(response.status_code, 403)

    def test_put(self):
        """Try to update a domain alias."""
        dalias = models.DomainAlias.objects.get(name="dalias1.com")
        url = reverse("external_api:domain_alias-detail", args=[dalias.pk])
        data = {
            "name": "dalias3.com", "target": dalias.target.pk
        }
        response = self.client.put(url, data, format="json")
        self.assertEqual(response.status_code, 200)
        dalias.refresh_from_db()
        self.assertEqual(dalias.name, "dalias3.com")
        # Update must not disable the alias.
        self.assertTrue(dalias.enabled)

    def test_delete(self):
        """Try to delete an existing domain alias."""
        dalias = models.DomainAlias.objects.get(name="dalias1.com")
        url = reverse("external_api:domain_alias-detail", args=[dalias.pk])
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 204)
        self.assertFalse(
            models.DomainAlias.objects.filter(pk=dalias.pk).exists())
class AccountAPITestCase(ModoAPITestCase):
    """Check Account REST API (CRUD, permissions, password service)."""

    # Baseline payload used (deep-copied) by most creation tests.
    ACCOUNT_DATA = {
        "username": "fromapi@test.com",
        "role": "SimpleUsers",
        "password": "Toto1234",
        "mailbox": {
            "full_address": "fromapi@test.com",
            "quota": 10
        }
    }

    @classmethod
    def setUpTestData(cls):
        """Create test data."""
        super(AccountAPITestCase, cls).setUpTestData()
        factories.populate_database()
        # Token for a DomainAdmins-level user (admin of test.com).
        cls.da_token = Token.objects.create(
            user=core_models.User.objects.get(username="admin@test.com"))

    def setUp(self):
        """Test setup."""
        super(AccountAPITestCase, self).setUp()
        # Disable limits so account creation is not rejected by quotas.
        self.set_global_parameters({
            "enable_admin_limits": False,
            "enable_domain_limits": False
        }, app="limits")

    def test_get_accounts(self):
        """Retrieve a list of accounts."""
        url = reverse("external_api:account-list")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        response = json.loads(response.content)
        self.assertEqual(len(response), 5)
        # Filtering by domain narrows the list; unknown domain -> empty.
        response = self.client.get("{}?domain=test.com".format(url))
        self.assertEqual(response.status_code, 200)
        response = json.loads(response.content)
        self.assertEqual(len(response), 2)
        response = self.client.get("{}?domain=pouet.com".format(url))
        self.assertEqual(response.status_code, 200)
        response = json.loads(response.content)
        self.assertEqual(len(response), 0)

    def test_create_account(self):
        """Try to create a new account."""
        url = reverse("external_api:account-list")
        response = self.client.post(url, self.ACCOUNT_DATA, format="json")
        self.assertEqual(response.status_code, 201)
        account = json.loads(response.content)
        user = core_models.User.objects.filter(pk=account["pk"]).first()
        self.assertIsNot(user, None)
        self.assertIsNot(user.mailbox, None)
        # The domain admin automatically gains access to the new account.
        domadmin = core_models.User.objects.get(username="admin@test.com")
        self.assertTrue(domadmin.can_access(user))
        # Unicode local parts must be accepted too.
        data = copy.deepcopy(self.ACCOUNT_DATA)
        data["username"] = "fromapi_ééé@test.com"
        data["mailbox"]["full_address"] = data["username"]
        url = reverse("external_api:account-list")
        response = self.client.post(url, data, format="json")
        self.assertEqual(response.status_code, 201)

    def test_create_domainadmin_account(self):
        """Try to create a domain admin."""
        data = copy.deepcopy(self.ACCOUNT_DATA)
        data["domains"] = ["test.com"]
        data["role"] = "DomainAdmins"
        url = reverse("external_api:account-list")
        response = self.client.post(url, data, format="json")
        self.assertEqual(response.status_code, 201)
        domain = admin_models.Domain.objects.get(name="test.com")
        admin = core_models.User.objects.get(
            pk=json.loads(response.content)["pk"])
        self.assertIn(admin, domain.admins)
        # A domain admin without a mailbox is valid as well.
        data["username"] = "domain_admin"
        del data["mailbox"]
        response = self.client.post(url, data, format="json")
        self.assertEqual(response.status_code, 201)

    def test_create_account_with_no_mailbox(self):
        """Try to create a new account."""
        data = copy.deepcopy(self.ACCOUNT_DATA)
        del data["mailbox"]
        url = reverse("external_api:account-list")
        response = self.client.post(url, data, format="json")
        self.assertEqual(response.status_code, 201)
        account = json.loads(response.content)
        user = core_models.User.objects.filter(pk=account["pk"]).first()
        self.assertIsNot(user, None)
        # A mailbox is auto-created, inheriting the domain's quota.
        self.assertIsNot(user.mailbox, None)
        self.assertEqual(user.mailbox.quota, user.mailbox.domain.quota)

    def test_create_existing_account(self):
        """Check if unicity is respected."""
        data = copy.deepcopy(self.ACCOUNT_DATA)
        data["username"] = "user@test.com"
        url = reverse("external_api:account-list")
        response = self.client.post(url, data, format="json")
        self.assertEqual(response.status_code, 400)
        # Duplicate mailbox address -> 409 Conflict (not a field error).
        data.update({"username": "domainadmin", "role": "DomainAdmins"})
        data["mailbox"]["full_address"] = "admin@test.com"
        response = self.client.post(url, data, format="json")
        self.assertEqual(response.status_code, 409)

    def test_create_account_bad_password(self):
        """Try to create a new account."""
        data = copy.deepcopy(self.ACCOUNT_DATA)
        # Too weak: fails the password validation rules.
        data["password"] = "toto"
        url = reverse("external_api:account-list")
        response = self.client.post(url, data, format="json")
        self.assertEqual(response.status_code, 400)
        errors = json.loads(response.content)
        self.assertIn("password", errors)

    def test_create_account_as_domadmin(self):
        """As DomainAdmin, try to create a new account."""
        self.client.credentials(
            HTTP_AUTHORIZATION='Token ' + self.da_token.key)
        data = copy.deepcopy(self.ACCOUNT_DATA)
        # Requesting a quota above what the admin may grant -> rejected.
        data["mailbox"]["quota"] = 20
        url = reverse("external_api:account-list")
        response = self.client.post(url, data, format="json")
        self.assertEqual(response.status_code, 400)
        # Creating an account in a domain the admin does not manage.
        data["username"] = "fromapi@test2.com"
        data["mailbox"].update({"full_address": "fromapi@test2.com",
                                "quota": 10})
        url = reverse("external_api:account-list")
        response = self.client.post(url, data, format="json")
        self.assertEqual(response.status_code, 400)
        errors = json.loads(response.content)
        self.assertIn("domain", errors)

    def test_create_account_bad_master_user(self):
        """Try to create a new account."""
        data = copy.deepcopy(self.ACCOUNT_DATA)
        # master_user is reserved for SuperAdmins role.
        data["master_user"] = True
        url = reverse("external_api:account-list")
        response = self.client.post(url, data, format="json")
        self.assertEqual(response.status_code, 400)
        errors = json.loads(response.content)
        self.assertIn("master_user", errors)

    def test_update_account(self):
        """Try to update an account."""
        account = core_models.User.objects.get(username="user@test.com")
        url = reverse("external_api:account-detail", args=[account.pk])
        data = {
            "username": "fromapi@test.com",
            "role": account.role,
            "password": "Toto1234",
            "mailbox": {
                "full_address": "fromapi@test.com",
                "quota": account.mailbox.quota
            }
        }
        response = self.client.put(url, data, format="json")
        self.assertEqual(response.status_code, 200)
        account.refresh_from_db()
        self.assertEqual(account.email, account.mailbox.full_address)
        self.assertTrue(account.check_password("Toto1234"))
        # Omitting the password must keep the current one.
        del data["password"]
        response = self.client.put(url, data, format="json")
        self.assertEqual(response.status_code, 200)
        account.refresh_from_db()
        self.assertTrue(account.check_password("Toto1234"))

    def test_patch_account(self):
        """Try to patch an account."""
        account = core_models.User.objects.get(username="user@test.com")
        url = reverse("external_api:account-detail", args=[account.pk])
        data = {
            "username": "fromapi@test.com",
            "mailbox": {
                "full_address": "fromapi@test.com",
            }
        }
        # PATCH is not supported by this endpoint -> 405.
        response = self.client.patch(url, data, format="json")
        self.assertEqual(response.status_code, 405)

    def test_update_domain_admin_account(self):
        """Try to change administered domains."""
        account = core_models.User.objects.get(username="admin@test.com")
        url = reverse("external_api:account-detail", args=[account.pk])
        data = {
            "username": account.username,
            "role": account.role,
            "password": "Toto1234",
            "mailbox": {
                "full_address": account.mailbox.full_address,
                "quota": account.mailbox.quota
            },
            "domains": ["test.com", "test2.com"]
        }
        response = self.client.put(url, data, format="json")
        self.assertEqual(response.status_code, 200)
        domains = admin_models.Domain.objects.get_for_admin(account)
        self.assertEqual(domains.count(), 2)
        self.assertTrue(domains.filter(name="test2.com").exists())
        # Shrinking the list must revoke the removed domain.
        data["domains"] = ["test2.com"]
        response = self.client.put(url, data, format="json")
        self.assertEqual(response.status_code, 200)
        domains = admin_models.Domain.objects.get_for_admin(account)
        self.assertEqual(domains.count(), 1)
        self.assertTrue(domains.filter(name="test2.com").exists())

    def test_update_account_wrong_address(self):
        """Try to update an account."""
        account = core_models.User.objects.get(username="user@test.com")
        url = reverse("external_api:account-detail", args=[account.pk])
        data = {
            "username": "fromapi@test3.com",
            "role": account.role,
            "password": "Toto1234",
            "mailbox": {
                "full_address": "fromapi@test3.com",
                "quota": account.mailbox.quota
            }
        }
        # test3.com does not exist -> 404.
        response = self.client.put(url, data, format="json")
        self.assertEqual(response.status_code, 404)

    def test_delete_account(self):
        """Try to delete an account."""
        account = core_models.User.objects.get(username="user@test.com")
        domadmin = core_models.User.objects.get(username="admin@test.com")
        self.assertTrue(domadmin.can_access(account))
        url = reverse("external_api:account-detail", args=[account.pk])
        response = self.client.delete(url)
        self.assertEqual(response.status_code, 204)
        self.assertFalse(
            core_models.User.objects.filter(pk=account.pk).exists())
        # Access grants must be cleaned up alongside the account.
        self.assertFalse(domadmin.can_access(account))

    def test_account_exists(self):
        """Validate /exists/ service."""
        url = reverse("external_api:account-exists")
        response = self.client.get(
            "{}?email={}".format(url, "user@test.com"))
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertTrue(content["exists"])
        response = self.client.get(
            "{}?email={}".format(url, "pipo@test.com"))
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertFalse(content["exists"])
        # Missing email parameter -> 400.
        response = self.client.get(url)
        self.assertEqual(response.status_code, 400)

    def test_change_password(self):
        """Check the change password service."""
        account = core_models.User.objects.get(username="user@test.com")
        url = reverse(
            "external_api:account-password", args=[account.pk])
        response = self.client.put(
            url, {"password": "toto", "new_password": "pass"},
            format="json")
        # must fail because password is too weak
        self.assertEqual(response.status_code, 400)
        response = self.client.put(
            url, {"password": "toto", "new_password": "Toto1234"},
            format="json")
        self.assertEqual(response.status_code, 200)
        account.refresh_from_db()
        self.assertTrue(account.check_password("Toto1234"))
class AliasAPITestCase(ModoAPITestCase):
"""Check Alias API."""
ALIAS_DATA = {
"address": "alias_fromapi@test.com",
"recipients": [
"user@test.com", "postmaster@test.com", "user_éé@nonlocal.com"
]
}
@classmethod
def setUpTestData(cls):
"""Create test data."""
super(AliasAPITestCase, cls).setUpTestData()
cls.localconfig.parameters.set_value(
"enable_admin_limits", False, app="limits")
cls.localconfig.save()
factories.populate_database()
cls.da_token = Token.objects.create(
user=core_models.User.objects.get(username="admin@test.com"))
def test_get_aliases(self):
"""Retrieve a list of aliases."""
url = reverse("external_api:alias-list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = json.loads(response.content)
self.assertEqual(len(response), 3)
response = self.client.get("{}?domain=test.com".format(url))
self.assertEqual(response.status_code, 200)
response = json.loads(response.content)
self.assertEqual(len(response), 3)
def test_get_alias(self):
"""Retrieve an alias."""
al = models.Alias.objects.get(address="alias@test.com")
url = reverse("external_api:alias-detail", args=[al.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = json.loads(response.content)
self.assertEqual(response["recipients"], ["user@test.com"])
def test_create_alias(self):
"""Try to create a new alias."""
url = reverse("external_api:alias-list")
response = self.client.post(url, self.ALIAS_DATA, format="json")
self.assertEqual(response.status_code, 201)
alias = json.loads(response.content)
alias = models.Alias.objects.filter(pk=alias["pk"]).first()
domadmin = core_models.User.objects.get(username="admin@test.com")
self.assertTrue(domadmin.can_access(alias))
self.assertEqual(alias.aliasrecipient_set.count(), 3)
self.assertTrue(alias.aliasrecipient_set.filter(
address="user@test.com", r_mailbox__isnull=False).exists())
self.assertTrue(alias.aliasrecipient_set.filter(
address="postmaster@test.com", r_alias__isnull=False).exists())
self.assertTrue(alias.aliasrecipient_set.filter(
address="user_éé@nonlocal.com",
r_mailbox__isnull=True, r_alias__isnull=True).exists())
# Create catchall alias
data = copy.deepcopy(self.ALIAS_DATA)
data["address"] = "@test.com"
response = self.client.post(url, data, format="json")
self.assertEqual(response.status_code, 201)
def test_create_alias_as_domadmin(self):
    """A DomainAdmin may only create aliases inside their own domain."""
    # Authenticate as the domain administrator.
    self.client.credentials(
        HTTP_AUTHORIZATION='Token ' + self.da_token.key)
    create_url = reverse("external_api:alias-list")
    # Creating inside the administered domain succeeds.
    resp = self.client.post(create_url, self.ALIAS_DATA, format="json")
    self.assertEqual(resp.status_code, 201)
    # Creating in another domain is rejected with an address error.
    foreign_data = copy.deepcopy(self.ALIAS_DATA)
    foreign_data["address"] = "alias_fromapi@test2.com"
    resp = self.client.post(create_url, foreign_data, format="json")
    self.assertEqual(resp.status_code, 400)
    errors = json.loads(resp.content)
    self.assertIn("address", errors)
def test_update_alias(self):
    """Try to update an alias.

    Successively replaces the recipient list and checks the stored
    AliasRecipient rows follow; an empty recipient list is rejected.
    """
    alias = models.Alias.objects.get(address="alias@test.com")
    url = reverse("external_api:alias-detail", args=[alias.pk])
    # One local + one external recipient -> two rows.
    data = {
        "address": "alias@test.com",
        "recipients": ["user@test.com", "user@nonlocal.com"]
    }
    response = self.client.put(url, data, format="json")
    self.assertEqual(response.status_code, 200)
    alias.refresh_from_db()
    self.assertEqual(alias.aliasrecipient_set.count(), 2)
    # Shrink to a single external recipient.
    data = {
        "address": "alias@test.com",
        "recipients": ["user@nonlocal.com"]
    }
    response = self.client.put(url, data, format="json")
    self.assertEqual(response.status_code, 200)
    alias.refresh_from_db()
    self.assertEqual(alias.aliasrecipient_set.count(), 1)
    # An empty recipient list must be refused.
    data = {
        "address": "alias@test.com",
        "recipients": []
    }
    response = self.client.put(url, data, format="json")
    self.assertEqual(response.status_code, 400)
def test_delete_alias(self):
    """Deleting an alias removes it, its permissions and its recipients."""
    alias = models.Alias.objects.get(address="alias@test.com")
    domadmin = core_models.User.objects.get(username="admin@test.com")
    # Sanity check: the admin could access the alias before deletion.
    self.assertTrue(domadmin.can_access(alias))
    resp = self.client.delete(
        reverse("external_api:alias-detail", args=[alias.pk]))
    self.assertEqual(resp.status_code, 204)
    self.assertFalse(
        models.Alias.objects.filter(pk=alias.pk).exists())
    self.assertFalse(domadmin.can_access(alias))
    leftover = models.AliasRecipient.objects.filter(
        address="user@test.com", alias__address="alias@test.com")
    self.assertFalse(leftover.exists())
| isc |
MaxVanDeursen/tribler | Tribler/Core/Utilities/torrent_utils.py | 2 | 4348 | import logging
import os
import libtorrent
logger = logging.getLogger(__name__)
def commonprefix(l):
    """Return the common path prefix of *l*, compared component-wise.

    Unlike ``os.path.commonprefix``, which compares character by character
    and can return a partial path component, this splits each path on '/'
    and only keeps whole components shared by every path, joined with
    ``os.path.sep``.

    :param l: iterable of path strings using '/' separators.
    :return: the shared prefix, or '' when there is none (including the
        previously-crashing case of an empty input).
    """
    split_paths = [p.split('/') for p in l]
    if not split_paths:
        # The original code raised ValueError here (min() of an empty
        # sequence); an empty input simply has no common prefix.
        return ''
    cp = []
    # zip(*...) stops at the shortest path, so only candidate components
    # that exist in every path are compared.
    for components in zip(*split_paths):
        if len(set(components)) != 1:
            break
        cp.append(components[0])
    return os.path.sep.join(cp)
def create_torrent_file(file_path_list, params):
    """Create a .torrent file describing the given files (Python 2 / libtorrent).

    :param file_path_list: paths to include; directories are silently
        skipped, but every path must exist.
    :param params: dict of optional metadata keys: 'piece length',
        'comment', 'created by', 'announce', 'announce-list', 'nodes',
        'httpseeds', 'urllist'.
    :return: dict with 'success', 'base_path', 'base_dir' and
        'torrent_file_path' keys; the .torrent is written under base_path.
    :raises IOError: if one of the paths does not exist.
    """
    fs = libtorrent.file_storage()
    # filter all non-files
    file_path_list_filtered = []
    for path in file_path_list:
        if not os.path.exists(path):
            raise IOError('Path does not exist: %s' % path)
        elif os.path.isfile(path):
            file_path_list_filtered.append(path)
    # get the directory where these files are in. If there are multiple files, take the common directory they are in
    if len(file_path_list_filtered) == 1:
        base_path = os.path.split(file_path_list_filtered[0])[0]
    else:
        base_path = os.path.abspath(commonprefix(file_path_list_filtered))
    # the base_dir directory is the parent directory of the base_path and is passed to the set_piece_hash method
    base_dir = os.path.split(base_path)[0]
    if len(file_path_list_filtered) == 1:
        filename = os.path.basename(file_path_list_filtered[0])
        fs.add_file(filename, os.path.getsize(file_path_list_filtered[0]))
    else:
        for full_file_path in file_path_list_filtered:
            # NOTE(review): joins two overlapping slices relative to base_dir;
            # presumably meant to produce the path of the file relative to
            # base_dir -- confirm against callers before touching.
            filename = os.path.join(base_path[len(base_dir) + 1:], full_file_path[len(base_dir):])[1:]
            fs.add_file(filename, os.path.getsize(full_file_path))
    if params.get('piece length'):
        piece_size = params['piece length']
    else:
        # 0 lets libtorrent choose a piece size automatically.
        piece_size = 0
    flags = libtorrent.create_torrent_flags_t.optimize
    # This flag doesn't exist anymore in libtorrent V1.1.0
    if hasattr(libtorrent.create_torrent_flags_t, 'calculate_file_hashes'):
        flags |= libtorrent.create_torrent_flags_t.calculate_file_hashes
    torrent = libtorrent.create_torrent(fs, piece_size=piece_size, flags=flags)
    if params.get('comment'):
        torrent.set_comment(params['comment'])
    if params.get('created by'):
        torrent.set_creator(params['created by'])
    # main tracker
    if params.get('announce'):
        torrent.add_tracker(params['announce'])
    # tracker list
    if params.get('announce-list'):
        tier = 1
        for tracker in params['announce-list']:
            torrent.add_tracker(tracker, tier=tier)
            tier += 1
    # DHT nodes
    # http://www.bittorrent.org/beps/bep_0005.html
    if params.get('nodes'):
        for node in params['nodes']:
            torrent.add_node(*node)
    # HTTP seeding
    # http://www.bittorrent.org/beps/bep_0017.html
    if params.get('httpseeds'):
        # NOTE(review): a single value is expected here; a list would be
        # passed through as-is -- confirm what callers provide.
        torrent.add_http_seed(params['httpseeds'])
    # Web seeding
    # http://www.bittorrent.org/beps/bep_0019.html
    if len(file_path_list) == 1:
        if params.get('urllist', False):
            torrent.add_url_seed(params['urllist'])
    # read the files and calculate the hashes
    if len(file_path_list) == 1:
        libtorrent.set_piece_hashes(torrent, base_path)
    else:
        libtorrent.set_piece_hashes(torrent, base_dir)
    t1 = torrent.generate()
    torrent = libtorrent.bencode(t1)
    postfix = u'.torrent'
    # Python 2: the generated name is a byte string; decode to unicode
    # before joining so non-ASCII torrent names work.
    torrent_file_name = os.path.join(base_path, unicode(t1['info']['name'], 'utf-8') + postfix)
    with open(torrent_file_name, 'wb') as f:
        f.write(torrent)
    return {'success': True,
            'base_path': base_path,
            'base_dir': base_dir,
            'torrent_file_path': torrent_file_name}
def get_info_from_handle(handle):
    """Return the torrent info object for *handle*, or None if it is invalid.

    libtorrent 0.16.18 does not provide torrent_handle.torrent_file();
    prefer it when present and fall back to the deprecated
    get_torrent_info() otherwise.
    """
    try:
        getter = getattr(handle, 'torrent_file', None)
        if getter is None:
            getter = handle.get_torrent_info
        return getter()
    except RuntimeError as e:
        # Raised when the underlying torrent handle is no longer valid.
        logger.warning("Got exception when fetching info from handle: %s", str(e))
        return None
| lgpl-3.0 |
kaleidos/intranet | backend/autoreports/urls.py | 2 | 1677 | # Copyright (c) 2010 by Yaco Sistemas <pmartin@yaco.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this programe. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url
# URL routes for the autoreports application.
# NOTE(review): ``patterns()`` with string view names was deprecated in
# Django 1.8 and removed in 1.10; newer Django requires a plain list of
# url()/path() entries with imported view callables.
urlpatterns = patterns('autoreports.views',
    # AJAX endpoints used by the report designer widgets.
    url(r'^ajax/fields/tree/$', 'reports_ajax_fields', name='reports_ajax_fields'),
    url(r'^ajax/fields/options/$', 'reports_ajax_fields_options', name='reports_ajax_fields_options'),
    # Report listing, optionally filtered by category.
    url(r'^(category/(?P<category_key>[\w-]+)/)?$', 'reports_list', name='reports_list'),
    # Registry-keyed report API (create/edit by optional report_id).
    url(r'^(?P<registry_key>[\w-]+)/$', 'reports_api', name='reports_api'),
    url(r'^(?P<registry_key>[\w-]+)/(?P<report_id>\d+)/$', 'reports_api', name='reports_api'),
    url(r'^(?P<registry_key>[\w-]+)/reports/$', 'reports_api_list', name='reports_api_list'),
    url(r'^(?P<registry_key>[\w-]+)/wizard/$', 'reports_api_wizard', name='reports_api_wizard'),
    url(r'^(?P<registry_key>[\w-]+)/wizard/(?P<report_id>\d+)/$', 'reports_api_wizard', name='reports_api_wizard'),
    # Generic per-model report view.
    url(r'^(?P<app_name>[\w-]+)/(?P<model_name>[\w-]+)/$', 'reports_view', name='reports_view'),
)
| apache-2.0 |
elijah513/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)

from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs

##############################################################################
# Generate sample data: three Gaussian blobs with known labels.
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
                            random_state=0)

##############################################################################
# Compute Affinity Propagation
# A negative preference biases the algorithm toward fewer exemplars.
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_

n_clusters_ = len(cluster_centers_indices)

print('Estimated number of clusters: %d' % n_clusters_)
# Supervised metrics below compare against the ground-truth labels;
# the silhouette score is unsupervised.
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
      % metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
      % metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, labels, metric='sqeuclidean'))

##############################################################################
# Plot result: one color per cluster, lines from each exemplar to its members.
import matplotlib.pyplot as plt
from itertools import cycle

plt.close('all')
plt.figure(1)
plt.clf()

colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    class_members = labels == k
    cluster_center = X[cluster_centers_indices[k]]
    plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
    for x in X[class_members]:
        plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)

plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
adw0rd/lettuce | tests/integration/lib/Django-1.3/django/core/management/commands/dumpdata.py | 249 | 8960 | from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core import serializers
from django.db import connections, router, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from optparse import make_option
class Command(BaseCommand):
    """``manage.py dumpdata``: serialize database contents to a fixture string."""
    # Command-line options; note this is the old optparse-based interface.
    option_list = BaseCommand.option_list + (
        make_option('--format', default='json', dest='format',
            help='Specifies the output serialization format for fixtures.'),
        make_option('--indent', default=None, dest='indent', type='int',
            help='Specifies the indent level to use when pretty-printing output'),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load '
                'fixtures into. Defaults to the "default" database.'),
        make_option('-e', '--exclude', dest='exclude',action='append', default=[],
            help='An appname or appname.ModelName to exclude (use multiple --exclude to exclude multiple apps/models).'),
        make_option('-n', '--natural', action='store_true', dest='use_natural_keys', default=False,
            help='Use natural keys if they are available.'),
        make_option('-a', '--all', action='store_true', dest='use_base_manager', default=False,
            help="Use Django's base manager to dump all models stored in the database, including those that would otherwise be filtered or modified by a custom manager."),
    )
    help = ("Output the contents of the database as a fixture of the given "
            "format (using each model's default manager unless --all is "
            "specified).")
    args = '[appname appname.ModelName ...]'

    def handle(self, *app_labels, **options):
        """Collect the requested models' instances and return the serialized text.

        ``app_labels`` are 'app' or 'app.Model' strings; raises CommandError
        for unknown apps/models/formats or serialization failures.
        """
        from django.db.models import get_app, get_apps, get_models, get_model

        format = options.get('format','json')
        indent = options.get('indent',None)
        using = options.get('database', DEFAULT_DB_ALIAS)
        connection = connections[using]
        excludes = options.get('exclude',[])
        show_traceback = options.get('traceback', False)
        use_natural_keys = options.get('use_natural_keys', False)
        use_base_manager = options.get('use_base_manager', False)

        # Split --exclude values into whole apps ('app') and single
        # models ('app.Model').
        excluded_apps = set()
        excluded_models = set()
        for exclude in excludes:
            if '.' in exclude:
                app_label, model_name = exclude.split('.', 1)
                model_obj = get_model(app_label, model_name)
                if not model_obj:
                    raise CommandError('Unknown model in excludes: %s' % exclude)
                excluded_models.add(model_obj)
            else:
                try:
                    app_obj = get_app(exclude)
                    excluded_apps.add(app_obj)
                except ImproperlyConfigured:
                    raise CommandError('Unknown app in excludes: %s' % exclude)

        # Build app_list: app module -> list of models to dump, where None
        # means "every model in the app".  Order of apps is preserved.
        if len(app_labels) == 0:
            app_list = SortedDict((app, None) for app in get_apps() if app not in excluded_apps)
        else:
            app_list = SortedDict()
            for label in app_labels:
                try:
                    # 'app.Model' form; ValueError falls through to the
                    # plain-app handler below.
                    app_label, model_label = label.split('.')
                    try:
                        app = get_app(app_label)
                    except ImproperlyConfigured:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app in excluded_apps:
                        continue
                    model = get_model(app_label, model_label)
                    if model is None:
                        raise CommandError("Unknown model: %s.%s" % (app_label, model_label))

                    if app in app_list.keys():
                        if app_list[app] and model not in app_list[app]:
                            app_list[app].append(model)
                    else:
                        app_list[app] = [model]
                except ValueError:
                    # This is just an app - no model qualifier
                    app_label = label
                    try:
                        app = get_app(app_label)
                    except ImproperlyConfigured:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app in excluded_apps:
                        continue
                    app_list[app] = None

        # Check that the serialization format exists; this is a shortcut to
        # avoid collating all the objects and _then_ failing.
        if format not in serializers.get_public_serializer_formats():
            raise CommandError("Unknown serialization format: %s" % format)

        try:
            serializers.get_serializer(format)
        except KeyError:
            raise CommandError("Unknown serialization format: %s" % format)

        # Now collate the objects to be serialized.
        objects = []
        for model in sort_dependencies(app_list.items()):
            if model in excluded_models:
                continue
            # Skip proxy models and models not routed to this database.
            if not model._meta.proxy and router.allow_syncdb(using, model):
                if use_base_manager:
                    objects.extend(model._base_manager.using(using).all())
                else:
                    objects.extend(model._default_manager.using(using).all())

        try:
            return serializers.serialize(format, objects, indent=indent,
                        use_natural_keys=use_natural_keys)
        except Exception, e:
            # Python 2 syntax; --traceback re-raises the original error.
            if show_traceback:
                raise
            raise CommandError("Unable to serialize database: %s" % e)
def sort_dependencies(app_list):
    """Sort a list of app,modellist pairs into a single list of models.

    The single list of models is sorted so that any model with a natural key
    is serialized before a normal model, and any model with a natural key
    dependency has it's dependencies serialized first.

    :param app_list: iterable of (app module, model list or None) pairs,
        None meaning "all models of the app".
    :raises CommandError: when the dependency graph contains a cycle.
    """
    from django.db.models import get_model, get_models
    # Process the list of models, and get the list of dependencies
    model_dependencies = []
    models = set()
    for app, model_list in app_list:
        if model_list is None:
            model_list = get_models(app)

        for model in model_list:
            models.add(model)
            # Add any explicitly defined dependencies
            if hasattr(model, 'natural_key'):
                deps = getattr(model.natural_key, 'dependencies', [])
                if deps:
                    deps = [get_model(*d.split('.')) for d in deps]
            else:
                deps = []

            # Now add a dependency for any FK or M2M relation with
            # a model that defines a natural key
            for field in model._meta.fields:
                if hasattr(field.rel, 'to'):
                    rel_model = field.rel.to
                    if hasattr(rel_model, 'natural_key'):
                        deps.append(rel_model)
            for field in model._meta.many_to_many:
                rel_model = field.rel.to
                if hasattr(rel_model, 'natural_key'):
                    deps.append(rel_model)
            model_dependencies.append((model, deps))

    # Reversed so the pop() below consumes models in original order.
    model_dependencies.reverse()
    # Now sort the models to ensure that dependencies are met. This
    # is done by repeatedly iterating over the input list of models.
    # If all the dependencies of a given model are in the final list,
    # that model is promoted to the end of the final list. This process
    # continues until the input list is empty, or we do a full iteration
    # over the input models without promoting a model to the final list.
    # If we do a full iteration without a promotion, that means there are
    # circular dependencies in the list.
    model_list = []
    while model_dependencies:
        skipped = []
        changed = False
        while model_dependencies:
            model, deps = model_dependencies.pop()

            # If all of the models in the dependency list are either already
            # on the final model list, or not on the original serialization list,
            # then we've found another model with all it's dependencies satisfied.
            # NOTE(review): this consumes the whole generator even after the
            # first failure; all(...) would short-circuit equivalently.
            found = True
            for candidate in ((d not in models or d in model_list) for d in deps):
                if not candidate:
                    found = False
            if found:
                model_list.append(model)
                changed = True
            else:
                skipped.append((model, deps))
        if not changed:
            raise CommandError("Can't resolve dependencies for %s in serialized app list." %
                ', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
                for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
            )
        model_dependencies = skipped
    return model_list
| gpl-3.0 |
AllisonWang/incubator-airflow | airflow/executors/sequential_executor.py | 46 | 1862 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
import subprocess
from airflow.executors.base_executor import BaseExecutor
from airflow.utils.state import State
class SequentialExecutor(BaseExecutor):
    """
    Executor that runs exactly one task instance at a time; useful for
    debugging, and the only executor usable with sqlite (which does not
    support multiple connections).  Airflow defaults to this executor
    alongside sqlite so it works out of the box after installation.
    """
    def __init__(self):
        super(SequentialExecutor, self).__init__()
        # Commands queued since the last sync, as (key, command) pairs.
        self.commands_to_run = []

    def execute_async(self, key, command, queue=None):
        # Nothing actually runs asynchronously; just remember the command.
        self.commands_to_run.append((key, command))

    def sync(self):
        """Run every queued command serially, recording success or failure."""
        for key, command in self.commands_to_run:
            self.logger.info("Executing command: {}".format(command))
            try:
                subprocess.check_call(command, shell=True)
            except subprocess.CalledProcessError as e:
                self.change_state(key, State.FAILED)
                self.logger.error("Failed to execute task {}:".format(str(e)))
            else:
                self.change_state(key, State.SUCCESS)
        self.commands_to_run = []

    def end(self):
        # Flush any remaining queued work before shutting down.
        self.heartbeat()
| apache-2.0 |
stevemayhew/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/packages/ordered_dict.py | 1093 | 8936 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Re-initialization keeps the existing linked list intact.
            self.__root
        except AttributeError:
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list forward, yielding the KEY slot of each link.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk the linked list backward via the PREV pointers.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break the link cycles explicitly to help reference counting.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (most recent key).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node just after the sentinel (oldest key).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.
        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v
        '''
        # self is taken from *args so that a key named 'self' in kwds works.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion on self-referencing
        # dicts; keyed by (id, thread id) so it is thread-safe.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Drop the implementation attributes; they are rebuilt on unpickle.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
| lgpl-2.1 |
newerthcom/savagerebirth | libs/python-2.72/Lib/encodings/iso8859_11.py | 593 | 12591 | """ Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless charmap codec delegating to the module-level tables
    (encoding_table is defined elsewhere in this module)."""
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental charmap encoder; the mapping is stateless, so each
    chunk is encoded independently of previous calls."""
    def encode(self, input, final=False):
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental charmap decoder; the mapping is stateless, so each
    chunk is decoded independently of previous calls."""
    def decode(self, input, final=False):
        data, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return data
class StreamWriter(Codec,codecs.StreamWriter):
    # All behaviour comes from Codec.encode plus the StreamWriter machinery.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # All behaviour comes from Codec.decode plus the StreamReader machinery.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo entry registering this module as 'iso8859-11'."""
    return codecs.CodecInfo(
        name='iso8859-11',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
)
### Encoding table
# Inverse mapping (unicode code point -> byte) derived from decoding_table.
encoding_table=codecs.charmap_build(decoding_table)
| gpl-2.0 |
lmazuel/ansible | lib/ansible/modules/system/cronvar.py | 49 | 14310 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Cronvar Plugin: The goal of this plugin is to provide an indempotent
# method for set cron variable values. It should play well with the
# existing cron module as well as allow for manually added variables.
# Each variable entered will be preceded with a comment describing the
# variable so that it can be found later. This is required to be
# present in order for this plugin to find/modify the variable
#
# This module is based on the crontab module.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cronvar
short_description: Manage variables in crontabs
description:
- Use this module to manage crontab variables. This module allows
you to create, update, or delete cron variable definitions.
version_added: "2.0"
options:
name:
description:
- Name of the crontab variable.
default: null
required: true
value:
description:
- The value to set this variable to. Required if state=present.
required: false
default: null
insertafter:
required: false
default: null
description:
- Used with C(state=present). If specified, the variable will be inserted
after the variable specified.
insertbefore:
required: false
default: null
description:
- Used with C(state=present). If specified, the variable will be inserted
just before the variable specified.
state:
description:
- Whether to ensure that the variable is present or absent.
required: false
default: present
choices: [ "present", "absent" ]
user:
description:
- The specific user whose crontab should be modified.
required: false
default: root
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
Without a leading /, this is assumed to be in /etc/cron.d. With a leading
/, this is taken as absolute.
required: false
default: null
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup) variable by this module.
required: false
default: false
requirements:
- cron
author: "Doug Luce (@dougluce)"
"""
EXAMPLES = '''
# Ensure a variable exists.
# Creates an entry like "EMAIL=doug@ansibmod.con.com"
- cronvar:
name: EMAIL
value: doug@ansibmod.con.com
# Make sure a variable is gone. This will remove any variable named
# "LEGACY"
- cronvar:
name: LEGACY
state: absent
# Adds a variable to a file under /etc/cron.d
- cronvar:
name: LOGFILE
value: /var/log/yum-autoupdate.log
user: root
cron_file: ansible_yum-autoupdate
'''
import os
import re
import tempfile
import platform
import pipes
import shlex
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
CRONCMD = "/usr/bin/crontab"
class CronVarError(Exception):
    """Module-specific error: crontab unreadable, or a line is not a variable."""
    pass
class CronVar(object):
    """
    CronVar object to write variables to crontabs.

    user      - the user of the crontab (defaults to root)
    cron_file - a cron file under /etc/cron.d
    """

    def __init__(self, module, user=None, cron_file=None):
        self.module = module
        self.user = user
        self.lines = None
        # shlex word characters: all ASCII except '=' and quotes, so that a
        # "NAME=value" line tokenizes as NAME, '=', value.
        self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"', ))

        if cron_file:
            self.cron_file = ""
            if os.path.isabs(cron_file):
                self.cron_file = cron_file
            else:
                # Relative names are taken as files under /etc/cron.d.
                self.cron_file = os.path.join('/etc/cron.d', cron_file)
        else:
            self.cron_file = None

        self.read()

    def read(self):
        # Read in the crontab from the system
        self.lines = []
        if self.cron_file:
            # read the cronfile
            try:
                f = open(self.cron_file, 'r')
                self.lines = f.read().splitlines()
                f.close()
            except IOError:
                e = get_exception()
                # cron file does not exist
                return
            except:
                raise CronVarError("Unexpected error:", sys.exc_info()[0])
        else:
            # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
            (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)

            if rc != 0 and rc != 1:  # 1 can mean that there are no jobs.
                raise CronVarError("Unable to read crontab")

            lines = out.splitlines()
            count = 0
            for l in lines:
                # Drop the up-to-three header comment lines that some crontab
                # implementations prepend to `crontab -l` output.
                if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
                                 not re.match( r'# \(/tmp/.*installed on.*\)', l) and
                                 not re.match( r'# \(.*version.*\)', l)):
                    self.lines.append(l)
                count += 1

    def log_message(self, message):
        # Debug-level logging through the Ansible module object.
        self.module.debug('ansible: "%s"' % message)

    def write(self, backup_file=None):
        """
        Write the crontab to the system. Saves all information.
        """
        if backup_file:
            fileh = open(backup_file, 'w')
        elif self.cron_file:
            fileh = open(self.cron_file, 'w')
        else:
            # No target file: render to a temp file, then install it below.
            filed, path = tempfile.mkstemp(prefix='crontab')
            fileh = os.fdopen(filed, 'w')

        fileh.write(self.render())
        fileh.close()

        # return if making a backup
        if backup_file:
            return

        # Add the entire crontab back to the user crontab
        if not self.cron_file:
            # quoting shell args for now but really this should be two non-shell calls. FIXME
            (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
            os.unlink(path)

            if rc != 0:
                self.module.fail_json(msg=err)

    def remove_variable_file(self):
        # Delete the whole cron.d file; True on success, False if missing.
        try:
            os.unlink(self.cron_file)
            return True
        except OSError:
            e = get_exception()
            # cron file does not exist
            return False
        except:
            raise CronVarError("Unexpected error:", sys.exc_info()[0])

    def parse_for_var(self, line):
        # Tokenize the line; a variable definition looks like NAME '=' value.
        lexer = shlex.shlex(line)
        lexer.wordchars = self.wordchars
        varname = lexer.get_token()
        is_env_var = lexer.get_token() == '='
        value = ''.join(lexer)
        if is_env_var:
            return (varname, value)

        raise CronVarError("Not a variable.")

    def find_variable(self, name):
        comment = None  # NOTE(review): unused; candidate for removal.
        for l in self.lines:
            try:
                (varname, value) = self.parse_for_var(l)
                if varname == name:
                    return value
            except CronVarError:
                # Not a variable line -- skip it.
                pass
        return None

    def get_var_names(self):
        # Names of all variables currently defined, in file order.
        var_names = []
        for l in self.lines:
            try:
                (var_name, _) = self.parse_for_var(l)
                var_names.append(var_name)
            except CronVarError:
                pass
        return var_names

    def add_variable(self, name, value, insertbefore, insertafter):
        if insertbefore is None and insertafter is None:
            # Add the variable to the top of the file.
            self.lines.insert(0, "%s=%s" % (name, value))
        else:
            newlines = []
            for l in self.lines:
                try:
                    (varname, _) = self.parse_for_var(l)  # Throws if not a var line
                    if varname == insertbefore:
                        newlines.append("%s=%s" % (name, value))
                        newlines.append(l)
                    elif varname == insertafter:
                        newlines.append(l)
                        newlines.append("%s=%s" % (name, value))
                    else:
                        raise CronVarError  # Append.
                except CronVarError:
                    newlines.append(l)
            self.lines = newlines

    def remove_variable(self, name):
        self.update_variable(name, None, remove=True)

    def update_variable(self, name, value, remove=False):
        # Rewrite self.lines, replacing (or dropping, when remove=True) the
        # definition of *name*; all other lines pass through unchanged.
        newlines = []
        for l in self.lines:
            try:
                (varname, _) = self.parse_for_var(l)  # Throws if not a var line
                if varname != name:
                    raise CronVarError  # Append.
                if not remove:
                    newlines.append("%s=%s" % (name, value))
            except CronVarError:
                newlines.append(l)
        self.lines = newlines

    def render(self):
        """
        Render a proper crontab
        """
        result = '\n'.join(self.lines)
        # cron requires the file to end with a newline.
        if result and result[-1] not in ['\n', '\r']:
            result += '\n'
        return result

    def _read_user_execute(self):
        """
        Returns the command line for reading a crontab
        """
        user = ''
        if self.user:
            # Platform-specific invocations for reading another user's crontab.
            if platform.system() == 'SunOS':
                return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD))
            elif platform.system() == 'AIX':
                return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user))
            elif platform.system() == 'HP-UX':
                return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user))
            else:
                user = '-u %s' % pipes.quote(self.user)
        return "%s %s %s" % (CRONCMD , user, '-l')

    def _write_execute(self, path):
        """
        Return the command line for writing a crontab
        """
        user = ''
        if self.user:
            # Platform-specific invocations for installing another user's crontab.
            if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
                return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path))
            else:
                user = '-u %s' % pipes.quote(self.user)
        return "%s %s %s" % (CRONCMD , user, pipes.quote(path))
#==================================================
def main():
    """Entry point: ensure a crontab variable is present or absent.

    Reads the module parameters, instantiates CronVar for the requested
    user/cron_file, then adds, updates, or removes the named variable and
    exits via module.exit_json with the resulting variable names and a
    'changed' flag (plus 'backup_file' / 'cron_file' when applicable).
    """
    # The following example playbooks:
    #
    # - cronvar: name="SHELL" value="/bin/bash"
    #
    # - name: Set the email
    #   cronvar: name="EMAILTO" value="doug@ansibmod.con.com"
    #
    # - name: Get rid of the old new host variable
    #   cronvar: name="NEW_HOST" state=absent
    #
    # Would produce:
    # SHELL = /bin/bash
    # EMAILTO = doug@ansibmod.con.com
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            value=dict(required=False),
            user=dict(required=False),
            cron_file=dict(required=False),
            insertafter=dict(default=None),
            insertbefore=dict(default=None),
            state=dict(default='present', choices=['present', 'absent']),
            backup=dict(default=False, type='bool'),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        supports_check_mode=False,
    )

    name = module.params['name']
    value = module.params['value']
    user = module.params['user']
    cron_file = module.params['cron_file']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    state = module.params['state']
    backup = module.params['backup']
    ensure_present = state == 'present'

    changed = False
    res_args = dict()

    # Ensure all files generated are only writable by the owning user.
    # Primarily relevant for the cron_file option.
    os.umask(int('022', 8))
    cronvar = CronVar(module, user, cron_file)

    module.debug('cronvar instantiated - name: "%s"' % name)

    # --- user input validation ---
    # Fixed typo in the error message ("variabale" -> "variable").
    if name is None and ensure_present:
        module.fail_json(msg="You must specify 'name' to insert a new cron variable")
    if value is None and ensure_present:
        module.fail_json(msg="You must specify 'value' to insert a new cron variable")
    if name is None and not ensure_present:
        module.fail_json(msg="You must specify 'name' to remove a cron variable")

    # If requested, make a backup before making a change.
    if backup:
        (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
        cronvar.write(backup_file)

    # BUG FIX: this previously called cronvar.remove_job_file(), a method that
    # does not exist on CronVar (it belongs to the cron module) and would
    # raise AttributeError. The correct method is remove_variable_file().
    if cronvar.cron_file and not name and not ensure_present:
        changed = cronvar.remove_variable_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state)

    old_value = cronvar.find_variable(name)

    if ensure_present:
        if old_value is None:
            cronvar.add_variable(name, value, insertbefore, insertafter)
            changed = True
        elif old_value != value:
            cronvar.update_variable(name, value)
            changed = True
    else:
        if old_value is not None:
            cronvar.remove_variable(name)
            changed = True

    res_args = {
        "vars": cronvar.get_var_names(),
        "changed": changed
    }

    if changed:
        cronvar.write()

    # Retain the backup only if the crontab or cron file actually changed.
    if backup:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)

    if cron_file:
        res_args['cron_file'] = cron_file

    module.exit_json(**res_args)

    # --- should never get here
    module.exit_json(msg="Unable to execute cronvar task.")
| gpl-3.0 |
durandj/mymcadmin | mymcadmin/cli/commands/restart.py | 1 | 1046 | """
Restart commands for Minecraft servers
"""
import click
from ..base import mymcadmin, cli_command, rpc_command, error, success
from ... import rpc
@mymcadmin.command()
@click.argument('server_id')
@cli_command
@rpc_command
def restart(rpc_conn, server_id):
    """
    Restart a Minecraft server
    """
    # nl=False keeps the cursor on this line so success() completes it.
    click.echo('Attempting to restart {}'.format(server_id), nl = False)

    with rpc.RpcClient(*rpc_conn) as rpc_client:
        rpc_client.server_restart(server_id)

    success('Success')
@mymcadmin.command()
@cli_command
@rpc_command
def restart_all(rpc_conn):
    """
    Restart all Minecraft servers
    """
    click.echo('Restarting all servers...')

    with rpc.RpcClient(*rpc_conn) as rpc_client:
        # The RPC result reports per-server outcomes under 'success'/'failure'.
        result = rpc_client.server_restart_all()

        successful = result['success']
        failure    = result['failure']

    for server_id in successful:
        success('{} successfully restarted'.format(server_id))

    for server_id in failure:
        error('{} could not restart properly'.format(server_id))
| mit |
hujiajie/chromium-crosswalk | tools/valgrind/valgrind_test.py | 7 | 33370 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs an exe through Valgrind and puts the intermediate files in a
directory.
"""
import datetime
import glob
import logging
import optparse
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import common
import drmemory_analyze
import memcheck_analyze
class BaseTool(object):
  """Abstract class for running dynamic error detection tools.

  Always subclass this and implement ToolCommand with framework- and
  tool-specific stuff.
  """

  def __init__(self):
    temp_parent_dir = None
    self.log_parent_dir = ""
    if common.IsWindows():
      # gpu process on Windows Vista+ runs at Low Integrity and can only
      # write to certain directories (http://crbug.com/119131)
      #
      # TODO(bruening): if scripts die in middle and don't clean up temp
      # dir, we'll accumulate files in profile dir. should remove
      # really old files automatically.
      profile = os.getenv("USERPROFILE")
      if profile:
        self.log_parent_dir = profile + "\\AppData\\LocalLow\\"
        if os.path.exists(self.log_parent_dir):
          self.log_parent_dir = common.NormalizeWindowsPath(self.log_parent_dir)
          temp_parent_dir = self.log_parent_dir
    # Generated every time (even when overridden)
    self.temp_dir = tempfile.mkdtemp(prefix="vg_logs_", dir=temp_parent_dir)
    self.log_dir = self.temp_dir # overridable by --keep_logs
    self.option_parser_hooks = []
    # TODO(glider): we may not need some of the env vars on some of the
    # platforms.
    self._env = {
      "G_SLICE" : "always-malloc",
      "NSS_DISABLE_UNLOAD" : "1",
      "NSS_DISABLE_ARENA_FREE_LIST" : "1",
      "GTEST_DEATH_TEST_USE_FORK": "1",
    }

  def ToolName(self):
    # Abstract: subclasses return the tool's name (e.g. "memcheck").
    raise NotImplementedError, "This method should be implemented " \
                               "in the tool-specific subclass"

  def Analyze(self, check_sanity=False):
    # Abstract: subclasses parse tool logs and return a nonzero code on errors.
    raise NotImplementedError, "This method should be implemented " \
                               "in the tool-specific subclass"

  def RegisterOptionParserHook(self, hook):
    # Frameworks and tools can add their own flags to the parser.
    self.option_parser_hooks.append(hook)

  def CreateOptionParser(self):
    # Defines Chromium-specific flags.
    self._parser = optparse.OptionParser("usage: %prog [options] <program to "
                                         "test>")
    self._parser.disable_interspersed_args()
    self._parser.add_option("-t", "--timeout",
                      dest="timeout", metavar="TIMEOUT", default=10000,
                      help="timeout in seconds for the run (default 10000)")
    self._parser.add_option("", "--build-dir",
                            help="the location of the compiler output")
    self._parser.add_option("", "--source-dir",
                            help="path to top of source tree for this build"
                                 "(used to normalize source paths in baseline)")
    self._parser.add_option("", "--gtest_filter", default="",
                            help="which test case to run")
    self._parser.add_option("", "--gtest_repeat",
                            help="how many times to run each test")
    self._parser.add_option("", "--gtest_print_time", action="store_true",
                            default=False,
                            help="show how long each test takes")
    self._parser.add_option("", "--ignore_exit_code", action="store_true",
                            default=False,
                            help="ignore exit code of the test "
                                 "(e.g. test failures)")
    self._parser.add_option("", "--keep_logs", action="store_true",
                            default=False,
                            help="store memory tool logs in the <tool>.logs "
                                 "directory instead of /tmp.\nThis can be "
                                 "useful for tool developers/maintainers.\n"
                                 "Please note that the <tool>.logs directory "
                                 "will be clobbered on tool startup.")

    # To add framework- or tool-specific flags, please add a hook using
    # RegisterOptionParserHook in the corresponding subclass.
    # See ValgrindTool for an example.
    for hook in self.option_parser_hooks:
      hook(self, self._parser)

  def ParseArgv(self, args):
    self.CreateOptionParser()

    # self._tool_flags will store those tool flags which we don't parse
    # manually in this script.
    self._tool_flags = []
    known_args = []

    """ We assume that the first argument not starting with "-" is a program
    name and all the following flags should be passed to the program.
    TODO(timurrrr): customize optparse instead
    """
    # Split leading "-" args into flags known to our parser vs. tool flags;
    # everything from the first non-dash arg (or "--") on is the program.
    while len(args) > 0 and args[0][:1] == "-":
      arg = args[0]
      if (arg == "--"):
        break
      if self._parser.has_option(arg.split("=")[0]):
        known_args += [arg]
      else:
        self._tool_flags += [arg]
      args = args[1:]
    if len(args) > 0:
      known_args += args
    self._options, self._args = self._parser.parse_args(known_args)

    self._timeout = int(self._options.timeout)
    self._source_dir = self._options.source_dir
    if self._options.keep_logs:
      # log_parent_dir has trailing slash if non-empty
      self.log_dir = self.log_parent_dir + "%s.logs" % self.ToolName()
      if os.path.exists(self.log_dir):
        shutil.rmtree(self.log_dir)
      os.mkdir(self.log_dir)
      logging.info("Logs are in " + self.log_dir)

    self._ignore_exit_code = self._options.ignore_exit_code
    if self._options.gtest_filter != "":
      self._args.append("--gtest_filter=%s" % self._options.gtest_filter)
    if self._options.gtest_repeat:
      self._args.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    if self._options.gtest_print_time:
      self._args.append("--gtest_print_time")

    return True

  def Setup(self, args):
    return self.ParseArgv(args)

  def ToolCommand(self):
    # Abstract: subclasses return the full command line to execute.
    raise NotImplementedError, "This method should be implemented " \
                               "in the tool-specific subclass"

  def Cleanup(self):
    # You may override it in the tool-specific subclass
    pass

  def Execute(self):
    """ Execute the app to be tested after successful instrumentation.
    Full execution command-line provided by subclassers via proc."""
    logging.info("starting execution...")
    proc = self.ToolCommand()
    for var in self._env:
      common.PutEnvAndLog(var, self._env[var])
    return common.RunSubprocess(proc, self._timeout)

  def RunTestsAndAnalyze(self, check_sanity):
    # Run the tool, then analyze its logs; analysis failures take precedence
    # over test-execution failures in the returned code.
    exec_retcode = self.Execute()
    analyze_retcode = self.Analyze(check_sanity)

    if analyze_retcode:
      logging.error("Analyze failed.")
      logging.info("Search the log for '[ERROR]' to see the error reports.")
      return analyze_retcode

    if exec_retcode:
      if self._ignore_exit_code:
        logging.info("Test execution failed, but the exit code is ignored.")
      else:
        logging.error("Test execution failed.")
        return exec_retcode
    else:
      logging.info("Test execution completed successfully.")

    if not analyze_retcode:
      logging.info("Analysis completed successfully.")

    return 0

  def Main(self, args, check_sanity, min_runtime_in_seconds):
    """Call this to run through the whole process: Setup, Execute, Analyze"""
    start_time = datetime.datetime.now()
    retcode = -1
    if self.Setup(args):
      retcode = self.RunTestsAndAnalyze(check_sanity)
      shutil.rmtree(self.temp_dir, ignore_errors=True)
      self.Cleanup()
    else:
      logging.error("Setup failed")
    end_time = datetime.datetime.now()
    runtime_in_seconds = (end_time - start_time).seconds
    hours = runtime_in_seconds / 3600
    seconds = runtime_in_seconds % 3600
    minutes = seconds / 60
    seconds = seconds % 60
    logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds))
    # Guard against silently-truncated layout test runs.
    if (min_runtime_in_seconds > 0 and
        runtime_in_seconds < min_runtime_in_seconds):
      logging.error("Layout tests finished too quickly. "
                    "It should have taken at least %d seconds. "
                    "Something went wrong?" % min_runtime_in_seconds)
      retcode = -1
    return retcode

  def Run(self, args, module, min_runtime_in_seconds=0):
    # Only the "base" module gets the tool sanity check.
    MODULES_TO_SANITY_CHECK = ["base"]

    check_sanity = module in MODULES_TO_SANITY_CHECK
    return self.Main(args, check_sanity, min_runtime_in_seconds)
class ValgrindTool(BaseTool):
"""Abstract class for running Valgrind tools.
Always subclass this and implement ToolSpecificFlags() and
ExtendOptionParser() for tool-specific stuff.
"""
  def __init__(self):
    super(ValgrindTool, self).__init__()
    # Let the base class invoke ExtendOptionParser when it builds the parser.
    self.RegisterOptionParserHook(ValgrindTool.ExtendOptionParser)
  def UseXML(self):
    # Override if tool prefers nonxml output
    # (controls whether ToolCommand passes --xml=yes or --log-file).
    return True
  def ExtendOptionParser(self, parser):
    """Add Valgrind-specific command-line options to the shared parser."""
    parser.add_option("", "--suppressions", default=[],
                      action="append",
                      help="path to a valgrind suppression file")
    parser.add_option("", "--indirect", action="store_true",
                      default=False,
                      help="set BROWSER_WRAPPER rather than "
                           "running valgrind directly")
    parser.add_option("", "--indirect_webkit_layout", action="store_true",
                      default=False,
                      help="set --wrapper rather than running Dr. Memory "
                           "directly.")
    parser.add_option("", "--trace_children", action="store_true",
                      default=False,
                      help="also trace child processes")
    parser.add_option("", "--num-callers",
                      dest="num_callers", default=30,
                      help="number of callers to show in stack traces")
    parser.add_option("", "--generate_dsym", action="store_true",
                      default=False,
                      help="Generate .dSYM file on Mac if needed. Slow!")
  def Setup(self, args):
    if not BaseTool.Setup(self, args):
      return False
    # Mac needs dSYM preparation so valgrind can resolve symbols.
    if common.IsMac():
      self.PrepareForTestMac()
    return True
  def PrepareForTestMac(self):
    """Runs dsymutil if needed.

    Valgrind for Mac OS X requires that debugging information be in a .dSYM
    bundle generated by dsymutil.  It is not currently able to chase DWARF
    data into .o files like gdb does, so executables without .dSYM bundles or
    with the Chromium-specific "fake_dsym" bundles generated by
    build/mac/strip_save_dsym won't give source file and line number
    information in valgrind.

    This function will run dsymutil if the .dSYM bundle is missing or if
    it looks like a fake_dsym.  A non-fake dsym that already exists is assumed
    to be up-to-date.
    """
    test_command = self._args[0]
    dsym_bundle = self._args[0] + '.dSYM'
    dsym_file = os.path.join(dsym_bundle, 'Contents', 'Resources', 'DWARF',
                             os.path.basename(test_command))
    dsym_info_plist = os.path.join(dsym_bundle, 'Contents', 'Info.plist')

    needs_dsymutil = True
    saved_test_command = None

    if os.path.exists(dsym_file) and os.path.exists(dsym_info_plist):
      # Look for the special fake_dsym tag in dsym_info_plist.
      dsym_info_plist_contents = open(dsym_info_plist).read()

      if not re.search('^\s*<key>fake_dsym</key>$', dsym_info_plist_contents,
                       re.MULTILINE):
        # fake_dsym is not set, this is a real .dSYM bundle produced by
        # dsymutil.  dsymutil does not need to be run again.
        needs_dsymutil = False
      else:
        # fake_dsym is set.  dsym_file is a copy of the original test_command
        # before it was stripped.  Copy it back to test_command so that
        # dsymutil has unstripped input to work with.  Move the stripped
        # test_command out of the way, it will be restored when this is
        # done.
        saved_test_command = test_command + '.stripped'
        os.rename(test_command, saved_test_command)
        shutil.copyfile(dsym_file, test_command)
        shutil.copymode(saved_test_command, test_command)

    if needs_dsymutil:
      if self._options.generate_dsym:
        # Remove the .dSYM bundle if it exists.
        shutil.rmtree(dsym_bundle, True)

        dsymutil_command = ['dsymutil', test_command]

        # dsymutil is crazy slow.  Ideally we'd have a timeout here,
        # but common.RunSubprocess' timeout is only checked
        # after each line of output; dsymutil is silent
        # until the end, and is then killed, which is silly.
        common.RunSubprocess(dsymutil_command)

        if saved_test_command:
          os.rename(saved_test_command, test_command)
      else:
        logging.info("No real .dSYM for test_command.  Line numbers will "
                     "not be shown.  Either tell xcode to generate .dSYM "
                     "file, or use --generate_dsym option to this tool.")
def ToolCommand(self):
"""Get the valgrind command to run."""
# Note that self._args begins with the exe to be run.
tool_name = self.ToolName()
# Construct the valgrind command.
if 'CHROME_VALGRIND' in os.environ:
path = os.path.join(os.environ['CHROME_VALGRIND'], "bin", "valgrind")
else:
path = "valgrind"
proc = [path, "--tool=%s" % tool_name]
proc += ["--num-callers=%i" % int(self._options.num_callers)]
if self._options.trace_children:
proc += ["--trace-children=yes"]
proc += ["--trace-children-skip='*dbus-daemon*'"]
proc += ["--trace-children-skip='*dbus-launch*'"]
proc += ["--trace-children-skip='*perl*'"]
proc += ["--trace-children-skip='*python*'"]
# This is really Python, but for some reason Valgrind follows it.
proc += ["--trace-children-skip='*lsb_release*'"]
proc += self.ToolSpecificFlags()
proc += self._tool_flags
suppression_count = 0
for suppression_file in self._options.suppressions:
if os.path.exists(suppression_file):
suppression_count += 1
proc += ["--suppressions=%s" % suppression_file]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
logfilename = self.log_dir + ("/%s." % tool_name) + "%p"
if self.UseXML():
proc += ["--xml=yes", "--xml-file=" + logfilename]
else:
proc += ["--log-file=" + logfilename]
# The Valgrind command is constructed.
# Handle --indirect_webkit_layout separately.
if self._options.indirect_webkit_layout:
# Need to create the wrapper before modifying |proc|.
wrapper = self.CreateBrowserWrapper(proc, webkit=True)
proc = self._args
proc.append("--wrapper")
proc.append(wrapper)
return proc
if self._options.indirect:
wrapper = self.CreateBrowserWrapper(proc)
os.environ["BROWSER_WRAPPER"] = wrapper
logging.info('export BROWSER_WRAPPER=' + wrapper)
proc = []
proc += self._args
return proc
def ToolSpecificFlags(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def CreateBrowserWrapper(self, proc, webkit=False):
"""The program being run invokes Python or something else that can't stand
to be valgrinded, and also invokes the Chrome browser. In this case, use a
magic wrapper to only valgrind the Chrome browser. Build the wrapper here.
Returns the path to the wrapper. It's up to the caller to use the wrapper
appropriately.
"""
command = " ".join(proc)
# Add the PID of the browser wrapper to the logfile names so we can
# separate log files for different UI tests at the analyze stage.
command = command.replace("%p", "$$.%p")
(fd, indirect_fname) = tempfile.mkstemp(dir=self.log_dir,
prefix="browser_wrapper.",
text=True)
f = os.fdopen(fd, "w")
f.write('#!/bin/bash\n'
'echo "Started Valgrind wrapper for this test, PID=$$" >&2\n')
f.write('DIR=`dirname $0`\n'
'TESTNAME_FILE=$DIR/testcase.$$.name\n\n')
if webkit:
# Webkit layout_tests pass the URL as the first line of stdin.
f.write('tee $TESTNAME_FILE | %s "$@"\n' % command)
else:
# Try to get the test case name by looking at the program arguments.
# i.e. Chromium ui_tests used --test-name arg.
# TODO(timurrrr): This doesn't handle "--test-name Test.Name"
# TODO(timurrrr): ui_tests are dead. Where do we use the non-webkit
# wrapper now? browser_tests? What do they do?
f.write('for arg in $@\ndo\n'
' if [[ "$arg" =~ --test-name=(.*) ]]\n then\n'
' echo ${BASH_REMATCH[1]} >$TESTNAME_FILE\n'
' fi\n'
'done\n\n'
'%s "$@"\n' % command)
f.close()
os.chmod(indirect_fname, stat.S_IRUSR|stat.S_IXUSR)
return indirect_fname
def CreateAnalyzer(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
  def GetAnalyzeResults(self, check_sanity=False):
    """Analyze the tool's log files found in self.log_dir and print reports.

    Args:
      check_sanity: passed through to the analyzer on the fast path only;
          it is asserted False when a browser wrapper was used.

    Returns:
      The analyzer's combined result — 0 when no reports were produced,
      non-zero otherwise (presumably; exact contract is defined by the
      analyzer's Report() — confirm in the analyzer module).
    """
    # Glob all the files in the log directory
    filenames = glob.glob(self.log_dir + "/" + self.ToolName() + ".*")
    # If we have browser wrapper, the logfiles are named as
    # "toolname.wrapper_PID.valgrind_PID".
    # Let's extract the list of wrapper_PIDs and name it ppids
    ppids = set([int(f.split(".")[-2]) \
                 for f in filenames if re.search("\.[0-9]+\.[0-9]+$", f)])
    analyzer = self.CreateAnalyzer()
    if len(ppids) == 0:
      # Fast path - no browser wrapper was set.
      return analyzer.Report(filenames, None, check_sanity)
    ret = 0
    for ppid in ppids:
      testcase_name = None
      try:
        # The wrapper script records the test name in testcase.<ppid>.name.
        f = open(self.log_dir + ("/testcase.%d.name" % ppid))
        testcase_name = f.read().strip()
        f.close()
        # Strip the LayoutTests path prefix so the report shows the short
        # test name.
        wk_layout_prefix="third_party/WebKit/LayoutTests/"
        wk_prefix_at = testcase_name.rfind(wk_layout_prefix)
        if wk_prefix_at != -1:
          testcase_name = testcase_name[wk_prefix_at + len(wk_layout_prefix):]
      except IOError:
        # Name file may be missing; fall back to identifying the test by PID.
        pass
      print "====================================================="
      print " Below is the report for valgrind wrapper PID=%d." % ppid
      if testcase_name:
        print " It was used while running the `%s` test." % testcase_name
      else:
        print " You can find the corresponding test"
        print " by searching the above log for 'PID=%d'" % ppid
      sys.stdout.flush()
      # Only pick the log files produced under this particular wrapper PID.
      ppid_filenames = [f for f in filenames \
                        if re.search("\.%d\.[0-9]+$" % ppid, f)]
      # check_sanity won't work with browser wrappers
      assert check_sanity == False
      ret |= analyzer.Report(ppid_filenames, testcase_name)
      print "====================================================="
      sys.stdout.flush()
    if ret != 0:
      print ""
      print "The Valgrind reports are grouped by test names."
      print "Each test has its PID printed in the log when the test was run"
      print "and at the beginning of its Valgrind report."
      print "Hint: you can search for the reports by Ctrl+F -> `=#`"
      sys.stdout.flush()
    return ret
# TODO(timurrrr): Split into a separate file.
class Memcheck(ValgrindTool):
  """Valgrind's Memcheck tool.

  Dynamic memory error detector for Linux & Mac.
  http://valgrind.org/info/tools.html#memcheck
  """

  def __init__(self):
    super(Memcheck, self).__init__()
    # Let the base class call back into us to register Memcheck-only options.
    self.RegisterOptionParserHook(Memcheck.ExtendOptionParser)

  def ToolName(self):
    """Name used for log file naming and tool selection."""
    return "memcheck"

  def ExtendOptionParser(self, parser):
    """Register Memcheck-specific command-line options on |parser|."""
    parser.add_option("--leak-check", "--leak_check", type="string",
                      default="yes", # --leak-check=yes is equivalent of =full
                      help="perform leak checking at the end of the run")
    parser.add_option("", "--show_all_leaks", action="store_true",
                      default=False,
                      help="also show less blatant leaks")
    parser.add_option("", "--track_origins", action="store_true",
                      default=False,
                      help="Show whence uninitialized bytes came. 30% slower.")

  def ToolSpecificFlags(self):
    """Return the Memcheck-specific valgrind flags."""
    flags = ["--gen-suppressions=all", "--demangle=no",
             "--leak-check=%s" % self._options.leak_check]
    if self._options.show_all_leaks:
      flags.append("--show-reachable=yes")
    else:
      flags.append("--show-possibly-lost=no")
    if self._options.track_origins:
      flags.append("--track-origins=yes")
    # TODO(glider): this is a temporary workaround for http://crbug.com/51716
    # Let's see whether it helps.
    if common.IsMac():
      flags.append("--smc-check=all")
    return flags

  def CreateAnalyzer(self):
    """Build the analyzer that parses Memcheck log files."""
    # gdb-assisted symbolization is only wanted on Mac.
    return memcheck_analyze.MemcheckAnalyzer(self._source_dir,
                                             self._options.show_all_leaks,
                                             use_gdb=common.IsMac())

  def Analyze(self, check_sanity=False):
    """Analyze the logs; point the user at the docs when errors were found."""
    result = self.GetAnalyzeResults(check_sanity)
    if result != 0:
      logging.info("Please see http://dev.chromium.org/developers/how-tos/"
                   "using-valgrind for the info on Memcheck/Valgrind")
    return result
class DrMemory(BaseTool):
"""Dr.Memory
Dynamic memory error detector for Windows.
http://dev.chromium.org/developers/how-tos/using-drmemory
It is not very mature at the moment, some things might not work properly.
"""
def __init__(self, full_mode, pattern_mode):
super(DrMemory, self).__init__()
self.full_mode = full_mode
self.pattern_mode = pattern_mode
self.RegisterOptionParserHook(DrMemory.ExtendOptionParser)
def ToolName(self):
return "drmemory"
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to a drmemory suppression file")
parser.add_option("", "--follow_python", action="store_true",
default=False, dest="follow_python",
help="Monitor python child processes. If off, neither "
"python children nor any children of python children "
"will be monitored.")
parser.add_option("", "--indirect", action="store_true",
default=False,
help="set BROWSER_WRAPPER rather than "
"running Dr. Memory directly on the harness")
parser.add_option("", "--indirect_webkit_layout", action="store_true",
default=False,
help="set --wrapper rather than running valgrind "
"directly.")
parser.add_option("", "--use_debug", action="store_true",
default=False, dest="use_debug",
help="Run Dr. Memory debug build")
parser.add_option("", "--trace_children", action="store_true",
default=True,
help="TODO: default value differs from Valgrind")
def ToolCommand(self):
"""Get the tool command to run."""
# WINHEAP is what Dr. Memory supports as there are issues w/ both
# jemalloc (https://github.com/DynamoRIO/drmemory/issues/320) and
# tcmalloc (https://github.com/DynamoRIO/drmemory/issues/314)
add_env = {
"CHROME_ALLOCATOR" : "WINHEAP",
"JSIMD_FORCEMMX" : "1", # https://github.com/DynamoRIO/drmemory/issues/540
}
for k,v in add_env.iteritems():
logging.info("export %s=%s", k, v)
os.putenv(k, v)
drmem_cmd = os.getenv("DRMEMORY_COMMAND")
if not drmem_cmd:
raise RuntimeError, "Please set DRMEMORY_COMMAND environment variable " \
"with the path to drmemory.exe"
proc = drmem_cmd.split(" ")
# By default, don't run python (this will exclude python's children as well)
# to reduce runtime. We're not really interested in spending time finding
# bugs in the python implementation.
# With file-based config we must update the file every time, and
# it will affect simultaneous drmem uses by this user. While file-based
# config has many advantages, here we may want this-instance-only
# (https://github.com/DynamoRIO/drmemory/issues/334).
drconfig_cmd = [ proc[0].replace("drmemory.exe", "drconfig.exe") ]
drconfig_cmd += ["-quiet"] # suppress errors about no 64-bit libs
run_drconfig = True
if self._options.follow_python:
logging.info("Following python children")
# -unreg fails if not already registered so query for that first
query_cmd = drconfig_cmd + ["-isreg", "python.exe"]
query_proc = subprocess.Popen(query_cmd, stdout=subprocess.PIPE,
shell=True)
(query_out, query_err) = query_proc.communicate()
if re.search("exe not registered", query_out):
run_drconfig = False # all set
else:
drconfig_cmd += ["-unreg", "python.exe"]
else:
logging.info("Excluding python children")
drconfig_cmd += ["-reg", "python.exe", "-norun"]
if run_drconfig:
drconfig_retcode = common.RunSubprocess(drconfig_cmd, self._timeout)
if drconfig_retcode:
logging.error("Configuring whether to follow python children failed " \
"with %d.", drconfig_retcode)
raise RuntimeError, "Configuring python children failed "
suppression_count = 0
supp_files = self._options.suppressions
if self.full_mode:
supp_files += [s.replace(".txt", "_full.txt") for s in supp_files]
for suppression_file in supp_files:
if os.path.exists(suppression_file):
suppression_count += 1
proc += ["-suppress", common.NormalizeWindowsPath(suppression_file)]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
# Un-comment to dump Dr.Memory events on error
#proc += ["-dr_ops", "-dumpcore_mask", "-dr_ops", "0x8bff"]
# Un-comment and comment next line to debug Dr.Memory
#proc += ["-dr_ops", "-no_hide"]
#proc += ["-dr_ops", "-msgbox_mask", "-dr_ops", "15"]
#Proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "15"]
# Ensure we see messages about Dr. Memory crashing!
proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "12"]
if self._options.use_debug:
proc += ["-debug"]
proc += ["-logdir", common.NormalizeWindowsPath(self.log_dir)]
if self.log_parent_dir:
# gpu process on Windows Vista+ runs at Low Integrity and can only
# write to certain directories (http://crbug.com/119131)
symcache_dir = os.path.join(self.log_parent_dir, "drmemory.symcache")
elif self._options.build_dir:
# The other case is only possible with -t cmdline.
# Anyways, if we omit -symcache_dir the -logdir's value is used which
# should be fine.
symcache_dir = os.path.join(self._options.build_dir, "drmemory.symcache")
if symcache_dir:
if not os.path.exists(symcache_dir):
try:
os.mkdir(symcache_dir)
except OSError:
logging.warning("Can't create symcache dir?")
if os.path.exists(symcache_dir):
proc += ["-symcache_dir", common.NormalizeWindowsPath(symcache_dir)]
# Use -no_summary to suppress DrMemory's summary and init-time
# notifications. We generate our own with drmemory_analyze.py.
proc += ["-batch", "-no_summary"]
# Un-comment to disable interleaved output. Will also suppress error
# messages normally printed to stderr.
#proc += ["-quiet", "-no_results_to_stderr"]
proc += ["-callstack_max_frames", "40"]
# disable leak scan for now
proc += ["-no_count_leaks", "-no_leak_scan"]
# disable warnings about unaddressable prefetches
proc += ["-no_check_prefetch"]
# crbug.com/413215, no heap mismatch check for Windows release build binary
if common.IsWindows() and "Release" in self._options.build_dir:
proc += ["-no_check_delete_mismatch"]
# We are seeing false positive invalid heap args on 64-bit, so we are
# disabling the feature for now (xref
# https://github.com/DynamoRIO/drmemory/issues/1839).
if common.IsWindows() and "Release_x64" in self._options.build_dir:
proc += ["-no_check_heap_mismatch"]
# make callstacks easier to read
proc += ["-callstack_srcfile_prefix",
"build\\src,chromium\\src,crt_build\\self_x86"]
proc += ["-callstack_modname_hide",
"*drmemory*,chrome.dll"]
boring_callers = common.BoringCallers(mangled=False, use_re_wildcards=False)
# TODO(timurrrr): In fact, we want "starting from .." instead of "below .."
proc += ["-callstack_truncate_below", ",".join(boring_callers)]
if self.pattern_mode:
proc += ["-pattern", "0xf1fd", "-no_count_leaks", "-redzone_size", "0x20"]
elif not self.full_mode:
proc += ["-light"]
proc += self._tool_flags
# Dr.Memory requires -- to separate tool flags from the executable name.
proc += ["--"]
if self._options.indirect or self._options.indirect_webkit_layout:
wrapper_path = os.path.join(self._source_dir,
"tools", "valgrind", "browser_wrapper_win.py")
wrapper = " ".join(["python", wrapper_path] + proc)
self.CreateBrowserWrapper(wrapper)
logging.info("browser wrapper = " + " ".join(proc))
if self._options.indirect_webkit_layout:
proc = self._args
# Layout tests want forward slashes.
wrapper = wrapper.replace('\\', '/')
proc += ["--wrapper", wrapper]
return proc
else:
proc = []
# Note that self._args begins with the name of the exe to be run.
self._args[0] = common.NormalizeWindowsPath(self._args[0])
proc += self._args
return proc
def CreateBrowserWrapper(self, command):
os.putenv("BROWSER_WRAPPER", command)
def Analyze(self, check_sanity=False):
# Use one analyzer for all the log files to avoid printing duplicate reports
#
# TODO(timurrrr): unify this with Valgrind and other tools when we have
# https://github.com/DynamoRIO/drmemory/issues/684
analyzer = drmemory_analyze.DrMemoryAnalyzer()
ret = 0
if not self._options.indirect and not self._options.indirect_webkit_layout:
filenames = glob.glob(self.log_dir + "/*/results.txt")
ret = analyzer.Report(filenames, None, check_sanity)
else:
testcases = glob.glob(self.log_dir + "/testcase.*.logs")
# If we have browser wrapper, the per-test logdirs are named as
# "testcase.wrapper_PID.name".
# Let's extract the list of wrapper_PIDs and name it ppids.
# NOTE: ppids may contain '_', i.e. they are not ints!
ppids = set([f.split(".")[-2] for f in testcases])
for ppid in ppids:
testcase_name = None
try:
f = open("%s/testcase.%s.name" % (self.log_dir, ppid))
testcase_name = f.read().strip()
f.close()
except IOError:
pass
print "====================================================="
print " Below is the report for drmemory wrapper PID=%s." % ppid
if testcase_name:
print " It was used while running the `%s` test." % testcase_name
else:
# TODO(timurrrr): hm, the PID line is suppressed on Windows...
print " You can find the corresponding test"
print " by searching the above log for 'PID=%s'" % ppid
sys.stdout.flush()
ppid_filenames = glob.glob("%s/testcase.%s.logs/*/results.txt" %
(self.log_dir, ppid))
ret |= analyzer.Report(ppid_filenames, testcase_name, False)
print "====================================================="
sys.stdout.flush()
logging.info("Please see http://dev.chromium.org/developers/how-tos/"
"using-drmemory for the info on Dr. Memory")
return ret
class ToolFactory:
  """Maps a tool name ("memcheck", "drmemory", ...) to a tool instance."""

  def Create(self, tool_name):
    """Return a new tool object for |tool_name|.

    Raises:
      RuntimeError: if |tool_name| does not name a known tool on this
          platform.
    """
    if tool_name == "memcheck":
      return Memcheck()
    if tool_name == "drmemory" or tool_name == "drmemory_light":
      # TODO(timurrrr): remove support for "drmemory" when buildbots are
      # switched to drmemory_light OR make drmemory==drmemory_full the default
      # mode when the tool is mature enough.
      return DrMemory(False, False)
    if tool_name == "drmemory_full":
      return DrMemory(True, False)
    if tool_name == "drmemory_pattern":
      return DrMemory(False, True)
    try:
      platform_name = common.PlatformNames()[0]
    # NOTE(review): this catches common.NotImplementedError, which only works
    # if the common module explicitly exposes that attribute -- confirm it is
    # not meant to be the builtin NotImplementedError.
    except common.NotImplementedError:
      platform_name = sys.platform + "(Unknown)"
    # Parenthesized raise form: valid in both python 2 and python 3
    # (the old `raise E, "msg"` statement form is python-2-only).
    raise RuntimeError("Unknown tool (tool=%s, platform=%s)" % (tool_name,
                                                                platform_name))
def CreateTool(tool):
  """Convenience wrapper: instantiate the analysis tool named |tool|."""
  factory = ToolFactory()
  return factory.Create(tool)
| bsd-3-clause |
dancingdan/tensorflow | tensorflow/contrib/gan/python/features/python/virtual_batchnorm_test.py | 21 | 11083 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfgan.python.features.virtual_batchnorm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as contrib_variables_lib
from tensorflow.contrib.gan.python.features.python import virtual_batchnorm_impl as virtual_batchnorm
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class VirtualBatchnormTest(test.TestCase):

  def test_syntax(self):
    """VBN construction and application accept well-formed inputs."""
    reference_batch = array_ops.zeros([5, 3, 16, 9, 15])
    vbn = virtual_batchnorm.VBN(reference_batch, batch_axis=1)
    vbn(array_ops.ones([5, 7, 16, 9, 15]))

  def test_no_broadcast_needed(self):
    """When `axis` and `batch_axis` are at the end, no broadcast is needed."""
    reference_batch = array_ops.zeros([5, 3, 16, 9, 15])
    minibatch = array_ops.zeros([5, 3, 16, 3, 15])
    vbn = virtual_batchnorm.VBN(reference_batch, axis=-1, batch_axis=-2)
    vbn(minibatch)

  def test_statistics(self):
    """Check that `_statistics` gives the same result as `nn.moments`."""
    random_seed.set_random_seed(1234)

    tensors = random_ops.random_normal([4, 5, 7, 3])
    # NOTE(review): `(3)` is just the scalar 3, not a tuple -- presumably
    # `_statistics` accepts both forms; confirm before changing.
    for axes in [(3), (0, 2), (1, 2, 3)]:
      vb_mean, mean_sq = virtual_batchnorm._statistics(tensors, axes)
      mom_mean, mom_var = nn.moments(tensors, axes)
      vb_var = mean_sq - math_ops.square(vb_mean)

      with self.test_session(use_gpu=True) as sess:
        vb_mean_np, vb_var_np, mom_mean_np, mom_var_np = sess.run([
            vb_mean, vb_var, mom_mean, mom_var])

      self.assertAllClose(mom_mean_np, vb_mean_np)
      self.assertAllClose(mom_var_np, vb_var_np)

  def test_virtual_statistics(self):
    """Check that `_virtual_statistics` gives same result as `nn.moments`."""
    random_seed.set_random_seed(1234)

    batch_axis = 0
    partial_batch = random_ops.random_normal([4, 5, 7, 3])
    single_example = random_ops.random_normal([1, 5, 7, 3])
    full_batch = array_ops.concat([partial_batch, single_example], axis=0)

    for reduction_axis in range(1, 4):
      # Get `nn.moments` on the full batch.
      reduction_axes = list(range(4))
      del reduction_axes[reduction_axis]
      mom_mean, mom_variance = nn.moments(full_batch, reduction_axes)

      # Get virtual batch statistics.
      vb_reduction_axes = list(range(4))
      del vb_reduction_axes[reduction_axis]
      del vb_reduction_axes[batch_axis]
      vbn = virtual_batchnorm.VBN(partial_batch, reduction_axis)
      vb_mean, mean_sq = vbn._virtual_statistics(
          single_example, vb_reduction_axes)
      vb_variance = mean_sq - math_ops.square(vb_mean)
      # Remove singleton batch dim for easy comparisons.
      vb_mean = array_ops.squeeze(vb_mean, batch_axis)
      vb_variance = array_ops.squeeze(vb_variance, batch_axis)

      with self.test_session(use_gpu=True) as sess:
        vb_mean_np, vb_var_np, mom_mean_np, mom_var_np = sess.run([
            vb_mean, vb_variance, mom_mean, mom_variance])

      self.assertAllClose(mom_mean_np, vb_mean_np)
      self.assertAllClose(mom_var_np, vb_var_np)

  def test_reference_batch_normalization(self):
    """Check that batch norm from VBN agrees with opensource implementation."""
    random_seed.set_random_seed(1234)

    batch = random_ops.random_normal([6, 5, 7, 3, 3])

    for axis in range(5):
      # Get `layers` batchnorm result.
      bn_normalized = normalization.batch_normalization(
          batch, axis, training=True)

      # Get VBN's batch normalization on reference batch.
      # `axis` and `batch_axis` can't be the same, so pick whichever of 0/1
      # is free. (BUGFIX: was `axis is not 0`, an identity comparison against
      # an int literal that only works due to CPython small-int caching and
      # raises a SyntaxWarning on modern Python.)
      batch_axis = 0 if axis != 0 else 1
      vbn = virtual_batchnorm.VBN(batch, axis, batch_axis=batch_axis)
      vbn_normalized = vbn.reference_batch_normalization()

      with self.test_session(use_gpu=True) as sess:
        variables_lib.global_variables_initializer().run()

        bn_normalized_np, vbn_normalized_np = sess.run(
            [bn_normalized, vbn_normalized])
      self.assertAllClose(bn_normalized_np, vbn_normalized_np)

  def test_same_as_batchnorm(self):
    """Check that batch norm on set X is the same as ref of X / y on `y`."""
    random_seed.set_random_seed(1234)

    num_examples = 4
    examples = [random_ops.random_normal([5, 7, 3]) for _ in
                range(num_examples)]

    # Get the result of the opensource batch normalization.
    batch_normalized = normalization.batch_normalization(
        array_ops.stack(examples), training=True)

    for i in range(num_examples):
      examples_except_i = array_ops.stack(examples[:i] + examples[i+1:])
      # Get the result of VBN's batch normalization.
      vbn = virtual_batchnorm.VBN(examples_except_i)
      vb_normed = array_ops.squeeze(
          vbn(array_ops.expand_dims(examples[i], [0])), [0])

      with self.test_session(use_gpu=True) as sess:
        variables_lib.global_variables_initializer().run()
        bn_np, vb_np = sess.run([batch_normalized, vb_normed])
      self.assertAllClose(bn_np[i, ...], vb_np)

  def test_minibatch_independent(self):
    """Test that virtual batch normalized examples are independent.

    Unlike batch normalization, virtual batch normalization has the property
    that the virtual batch normalized value of an example is independent of
    the other examples in the minibatch. In this test, we verify this property.
    """
    random_seed.set_random_seed(1234)

    # These can be random, but must be the same for all session calls.
    reference_batch = constant_op.constant(
        np.random.normal(size=[4, 7, 3]), dtype=dtypes.float32)
    fixed_example = constant_op.constant(np.random.normal(size=[7, 3]),
                                         dtype=dtypes.float32)

    # Get the VBN object and the virtual batch normalized value for
    # `fixed_example`.
    vbn = virtual_batchnorm.VBN(reference_batch)
    vbn_fixed_example = array_ops.squeeze(
        vbn(array_ops.expand_dims(fixed_example, 0)), 0)
    with self.test_session(use_gpu=True):
      variables_lib.global_variables_initializer().run()
      vbn_fixed_example_np = vbn_fixed_example.eval()

    # Check that the value is the same for different minibatches, and
    # different sized minibatches.
    for minibatch_size in range(1, 6):
      examples = [random_ops.random_normal([7, 3]) for _ in
                  range(minibatch_size)]

      minibatch = array_ops.stack([fixed_example] + examples)
      vbn_minibatch = vbn(minibatch)
      cur_vbn_fixed_example = vbn_minibatch[0, ...]
      with self.test_session(use_gpu=True):
        variables_lib.global_variables_initializer().run()
        cur_vbn_fixed_example_np = cur_vbn_fixed_example.eval()
      self.assertAllClose(vbn_fixed_example_np, cur_vbn_fixed_example_np)

  def test_variable_reuse(self):
    """Test that variable scopes work and inference on a real-ish case."""
    tensor1_ref = array_ops.zeros([6, 5, 7, 3, 3])
    tensor1_examples = array_ops.zeros([4, 5, 7, 3, 3])
    tensor2_ref = array_ops.zeros([4, 2, 3])
    tensor2_examples = array_ops.zeros([2, 2, 3])

    with variable_scope.variable_scope('dummy_scope', reuse=True):
      with self.assertRaisesRegexp(
          ValueError, 'does not exist, or was not created with '
          'tf.get_variable()'):
        virtual_batchnorm.VBN(tensor1_ref)

    vbn1 = virtual_batchnorm.VBN(tensor1_ref, name='vbn1')
    vbn2 = virtual_batchnorm.VBN(tensor2_ref, name='vbn2')

    # Fetch reference and examples after virtual batch normalization. Also
    # fetch in variable reuse case.
    to_fetch = []

    to_fetch.append(vbn1.reference_batch_normalization())
    to_fetch.append(vbn2.reference_batch_normalization())
    to_fetch.append(vbn1(tensor1_examples))
    to_fetch.append(vbn2(tensor2_examples))

    variable_scope.get_variable_scope().reuse_variables()

    to_fetch.append(vbn1.reference_batch_normalization())
    to_fetch.append(vbn2.reference_batch_normalization())
    to_fetch.append(vbn1(tensor1_examples))
    to_fetch.append(vbn2(tensor2_examples))

    self.assertEqual(4, len(contrib_variables_lib.get_variables()))

    with self.test_session(use_gpu=True) as sess:
      variables_lib.global_variables_initializer().run()
      sess.run(to_fetch)

  def test_invalid_input(self):
    """Constructor and call validate shapes, axes, and batch axes."""
    # Reference batch has unknown dimensions.
    with self.assertRaisesRegexp(
        ValueError, '`reference_batch` has unknown dimensions.'):
      virtual_batchnorm.VBN(array_ops.placeholder(dtypes.float32), name='vbn1')

    # Axis too negative.
    with self.assertRaisesRegexp(
        ValueError, 'Value of `axis` argument .* is out of range'):
      virtual_batchnorm.VBN(array_ops.zeros([1, 2]), axis=-3, name='vbn2')

    # Axis too large.
    with self.assertRaisesRegexp(
        ValueError, 'Value of `axis` argument .* is out of range'):
      virtual_batchnorm.VBN(array_ops.zeros([1, 2]), axis=2, name='vbn3')

    # Batch axis too negative.
    with self.assertRaisesRegexp(
        ValueError, 'Value of `axis` argument .* is out of range'):
      virtual_batchnorm.VBN(array_ops.zeros([1, 2]), name='vbn4', batch_axis=-3)

    # Batch axis too large.
    with self.assertRaisesRegexp(
        ValueError, 'Value of `axis` argument .* is out of range'):
      virtual_batchnorm.VBN(array_ops.zeros([1, 2]), name='vbn5', batch_axis=2)

    # Axis and batch axis are the same.
    with self.assertRaisesRegexp(
        ValueError, '`axis` and `batch_axis` cannot be the same.'):
      virtual_batchnorm.VBN(array_ops.zeros(
          [1, 2]), axis=1, name='vbn6', batch_axis=1)

    # Reference Tensor and example Tensor have incompatible shapes.
    tensor_ref = array_ops.zeros([5, 2, 3])
    tensor_examples = array_ops.zeros([3, 2, 3])
    vbn = virtual_batchnorm.VBN(tensor_ref, name='vbn7', batch_axis=1)
    with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
      vbn(tensor_examples)
if __name__ == '__main__':
  # Run every test case in this module via the TensorFlow test runner.
  test.main()
| apache-2.0 |
atsolakid/edx-platform | common/djangoapps/edxmako/makoloader.py | 100 | 3107 | import logging
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import make_origin, get_template_from_string
from django.template.loaders.filesystem import Loader as FilesystemLoader
from django.template.loaders.app_directories import Loader as AppDirectoriesLoader
from edxmako.template import Template
from openedx.core.lib.tempdir import mkdtemp_clean
log = logging.getLogger(__name__)
class MakoLoader(object):
    """
    Django template loader that dispatches between Mako and Django templates.

    Wraps another Django loader and inspects each template it returns:
    templates whose first line is "## mako" are compiled as Mako templates,
    anything else goes through Django's normal template machinery. It is
    based off BaseLoader in django.template.loader.
    """
    is_usable = False

    def __init__(self, base_loader):
        # base_loader is an instance of a BaseLoader subclass.
        self.base_loader = base_loader
        cache_dir = getattr(settings, 'MAKO_MODULE_DIR', None)
        if cache_dir is None:
            log.warning("For more caching of mako templates, set the MAKO_MODULE_DIR in settings!")
            cache_dir = mkdtemp_clean()
        # Directory where Mako caches its compiled template modules.
        self.module_directory = cache_dir

    def __call__(self, template_name, template_dirs=None):
        return self.load_template(template_name, template_dirs)

    def load_template(self, template_name, template_dirs=None):
        source, file_path = self.load_template_source(template_name, template_dirs)

        if not source.startswith("## mako\n"):
            # A regular Django template.
            origin = make_origin(file_path, self.load_template_source, template_name, template_dirs)
            try:
                return get_template_from_string(source, origin, template_name), None
            except TemplateDoesNotExist:
                # If compiling the template we found raises TemplateDoesNotExist,
                # back off to returning the source and display name for the
                # template we were asked to load. This allows for correct
                # identification (later) of the actual template that does not
                # exist.
                return source, file_path

        # First line marked it as a Mako template.
        mako_template = Template(filename=file_path,
                                 module_directory=self.module_directory,
                                 input_encoding='utf-8',
                                 output_encoding='utf-8',
                                 uri=template_name)
        return mako_template, None

    def load_template_source(self, template_name, template_dirs=None):
        # Just having this makes the template load as an instance, instead of
        # a class; delegate the actual lookup to the wrapped loader.
        return self.base_loader.load_template_source(template_name, template_dirs)

    def reset(self):
        # Propagate cache resets to the wrapped loader.
        self.base_loader.reset()
class MakoFilesystemLoader(MakoLoader):
    """MakoLoader backed by Django's filesystem template loader."""
    is_usable = True

    def __init__(self):
        super(MakoFilesystemLoader, self).__init__(FilesystemLoader())
class MakoAppDirectoriesLoader(MakoLoader):
    """MakoLoader backed by Django's app-directories template loader."""
    is_usable = True

    def __init__(self):
        super(MakoAppDirectoriesLoader, self).__init__(AppDirectoriesLoader())
| agpl-3.0 |
tdsimao/tt | django/core/management/commands/syncdb.py | 161 | 8141 | from optparse import make_option
import sys
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
)
help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."
def handle_noargs(self, **options):
"""Create missing database tables for every installed app (legacy Python 2 Django ``syncdb``), then install custom SQL, indexes and initial fixtures."""
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive')
show_traceback = options.get('traceback', False)
# Stealth option -- 'load_initial_data' is used by the testing setup
# process to disable initial fixture loading.
load_initial_data = options.get('load_initial_data', True)
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except ImportError, exc:
# This is slightly hackish. We want to ignore ImportErrors
# if the "management" module itself is missing -- but we don't
# want to ignore the exception if the management module exists
# but raises an ImportError for some reason. The only way we
# can do this is to check the text of the exception. Note that
# we're a bit broad in how we check the text, because different
# Python implementations may not use the same text.
# CPython uses the text "No module named management"
# PyPy uses "No module named myproject.myapp.management"
msg = exc.args[0]
if not msg.startswith('No module named') or 'management' not in msg:
raise
# Resolve which configured database connection to operate on.
db = options.get('database', DEFAULT_DB_ALIAS)
connection = connections[db]
cursor = connection.cursor()
# Get a list of already installed *models* so that references work right.
tables = connection.introspection.table_names()
seen_models = connection.introspection.installed_models(tables)
created_models = set()
pending_references = {}
# Build the manifest of apps and models that are to be synchronized
all_models = [
(app.__name__.split('.')[-2],
[m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(db, m)])
for app in models.get_apps()
]
def model_installed(model):
# True when neither the model's table nor its auto-created (m2m)
# table already exists in the database.
opts = model._meta
converter = connection.introspection.table_name_converter
return not ((converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
manifest = SortedDict(
(app_name, filter(model_installed, model_list))
for app_name, model_list in all_models
)
# Create the tables for each model
if verbosity >= 1:
print "Creating tables ..."
for app_name, model_list in manifest.items():
for model in model_list:
# Create the model's database table, if it doesn't already exist.
if verbosity >= 3:
print "Processing %s.%s model" % (app_name, model._meta.object_name)
sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
seen_models.add(model)
created_models.add(model)
# Defer FK statements until the referenced table has been seen.
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in seen_models:
sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
if verbosity >= 1 and sql:
print "Creating table %s" % model._meta.db_table
for statement in sql:
cursor.execute(statement)
tables.append(connection.introspection.table_name_converter(model._meta.db_table))
transaction.commit_unless_managed(using=db)
# Send the post_syncdb signal, so individual apps can do whatever they need
# to do at this point.
emit_post_sync_signal(created_models, verbosity, interactive, db)
# The connection may have been closed by a syncdb handler.
cursor = connection.cursor()
# Install custom SQL for the app (but only if this
# is a model we've just created)
if verbosity >= 1:
print "Installing custom SQL ..."
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
custom_sql = custom_sql_for_model(model, self.style, connection)
if custom_sql:
if verbosity >= 2:
print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
try:
for sql in custom_sql:
cursor.execute(sql)
except Exception, e:
# Custom SQL failures are reported but do not abort syncdb.
sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
if show_traceback:
import traceback
traceback.print_exc()
transaction.rollback_unless_managed(using=db)
else:
transaction.commit_unless_managed(using=db)
else:
if verbosity >= 3:
print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)
if verbosity >= 1:
print "Installing indexes ..."
# Install SQL indicies for all newly created models
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
index_sql = connection.creation.sql_indexes_for_model(model, self.style)
if index_sql:
if verbosity >= 2:
print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
try:
for sql in index_sql:
cursor.execute(sql)
except Exception, e:
sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
transaction.rollback_unless_managed(using=db)
else:
transaction.commit_unless_managed(using=db)
# Load initial_data fixtures (unless that has been disabled)
if load_initial_data:
from django.core.management import call_command
call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
| gpl-2.0 |
Crypt0s/Ramen | fs_libs/ftputil/ftputil/file_transfer.py | 2 | 6260 | # Copyright (C) 2013, Stefan Schwarzer <sschwarzer@sschwarzer.net>
# See the file LICENSE for licensing terms.
"""
file_transfer.py - upload, download and generic file copy
"""
from __future__ import unicode_literals
import io
import os
#TODO Think a bit more about the API before making it public.
# # Only `chunks` should be used by clients of the ftputil library. Any
# # other functionality is supposed to be used via `FTPHost` objects.
# __all__ = ["chunks"]
__all__ = []
# Maximum size of chunk in `FTPHost.copyfileobj` in bytes.
MAX_COPY_CHUNK_SIZE = 64 * 1024
class LocalFile(object):
    """A file on the local machine that takes part in a transfer.

    Instances may describe a file that is about to be transferred or
    one that has already been transferred.
    """

    def __init__(self, name, mode):
        # Normalize to an absolute path so later lookups are stable
        # regardless of the current working directory.
        self.name = os.path.abspath(name)
        self.mode = mode

    def exists(self):
        """Tell whether the stored path is present on the local disk."""
        path = self.name
        return os.path.exists(path)

    def mtime(self):
        """Last-modification time of the file, in seconds since the epoch."""
        path = self.name
        return os.path.getmtime(path)

    def mtime_precision(self):
        """Granularity of `mtime` in seconds.

        Local filesystems are assumed to report timestamps that are
        precise to at least one second.
        """
        return 1.0

    def fobj(self):
        """Open and return a file object for the stored path and mode."""
        return io.open(self.name, self.mode)
class RemoteFile(object):
    """A file on the remote (FTP) side that takes part in a transfer.

    Instances may describe a file that is about to be transferred or
    one that has already been transferred.
    """

    def __init__(self, ftp_host, name, mode):
        self._host = ftp_host
        # Path manipulation is delegated to the host's path module.
        path_module = ftp_host.path
        self._path = path_module
        self.name = path_module.abspath(name)
        self.mode = mode

    def exists(self):
        """Tell whether the stored path is present on the remote host."""
        return self._path.exists(self.name)

    def mtime(self):
        """Last-modification time of the file, in seconds since the epoch.

        The value is shifted into the client's time zone (see the
        docstring of `FTPHost.set_time_shift` for the definition of
        the time shift).
        """
        server_mtime = self._path.getmtime(self.name)
        return server_mtime - self._host.time_shift()

    def mtime_precision(self):
        """Granularity of `mtime` in seconds.

        Using `stat` (not `lstat`) is deliberate here.
        """
        return self._host.stat(self.name)._st_mtime_precision

    def fobj(self):
        """Open and return a file object for the stored path and mode."""
        return self._host.open(self.name, self.mode)
def source_is_newer_than_target(source_file, target_file):
    """Return `True` if the source may be newer than the target, else `False`.

    Both arguments are `LocalFile` or `RemoteFile` objects.

    The actual modification time is assumed to satisfy

      reported_mtime <= actual_mtime <= reported_mtime + mtime_precision

    i. e. the reported mtime is the actual mtime, possibly truncated.
    The source counts as newer whenever *any* possible actual source
    modification time is at or after the reported target modification
    time - in other words, if in doubt, transfer the file.  The only
    case that returns `False` is when the latest possible actual
    source mtime lies strictly before the reported target mtime:

      |/////////////////////|              possible source mtime
                               |////////|  possible target mtime
    """
    latest_possible_source_mtime = (source_file.mtime() +
                                    source_file.mtime_precision())
    return latest_possible_source_mtime >= target_file.mtime()
def chunks(fobj, max_chunk_size=MAX_COPY_CHUNK_SIZE):
    """Yield the contents of the file object piece by piece.

    Each iteration reads at most `max_chunk_size` bytes from `fobj`
    and yields them as a byte string.  The iteration stops as soon as
    the file object is exhausted, so the caller never receives an
    empty byte string.

    Exceptions raised while reading the file object propagate to the
    caller.
    """
    piece = fobj.read(max_chunk_size)
    while piece:
        yield piece
        piece = fobj.read(max_chunk_size)
def copyfileobj(source_fobj, target_fobj, max_chunk_size=MAX_COPY_CHUNK_SIZE,
                callback=None):
    """Copy data from file-like object source to file-like object target.

    If `callback` is not `None`, it is invoked with every chunk after
    the chunk has been written.  (Modeled on `shutil.copyfileobj`, but
    kept independent of it so `shutil` internals cannot affect us.)
    """
    notify = callback
    for block in chunks(source_fobj, max_chunk_size):
        target_fobj.write(block)
        if notify is not None:
            notify(block)
def copy_file(source_file, target_file, conditional, callback):
    """Copy a file from `source_file` to `target_file`.

    Both are `LocalFile` or `RemoteFile` objects; which one is local
    and which remote is determined entirely by the arguments.  With a
    true `conditional`, the copy only happens when the target is
    missing or (possibly) older than the source; with a false
    `conditional`, the copy is unconditional.  Return `True` if the
    file was copied, else `False`.
    """
    if conditional and target_file.exists() and \
            not source_is_newer_than_target(source_file, target_file):
        # Target is present and not older than the source (even
        # allowing for imprecise timestamps) -- nothing to transfer.
        return False
    source_fobj = source_file.fobj()
    try:
        target_fobj = target_file.fobj()
        try:
            copyfileobj(source_fobj, target_fobj, callback=callback)
        finally:
            target_fobj.close()
    finally:
        source_fobj.close()
    # Transfer accomplished
    return True
| gpl-3.0 |
peterwilletts24/Python-Scripts | plot_scripts/EMBRACE/plot_from_pp_interp_p_levs_temp_geop_sp_hum.py | 1 | 13209 | """
Load pp, plot and save
"""
import os, sys
#%matplotlib inline
#%pylab inline
import matplotlib
matplotlib.use('Agg')
# Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
import pdb
# Output directory for the generated figures.
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
# Local helper modules loaded by path (legacy `imp` API).
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
#pp_file = ''
# Diagnostics to plot and the pressure levels (hPa) to plot them on.
plot_diags=['temp', 'sp_hum']
#plot_diags=['sp_hum']
plot_levels = [925, 850, 700, 500]
#experiment_ids = ['dkmbq', 'dklyu']
# NOTE: the second assignment below overrides the first; only 'dkbhu' is run.
experiment_ids = ['dkbhu', 'djznw', 'djzny', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklwu', 'dklzq', 'dkbhu',] # All 12
#experiment_ids = ['dkbhu', 'dkjxq']
experiment_ids = ['dkbhu']
# Root directory holding the post-processed (pp) model output.
pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/'
# Degrees trimmed from the top/bottom of the plotted map extent.
degs_crop_top = 1.7
degs_crop_bottom = 2.5
from iris.coord_categorisation import add_categorised_coord
def add_hour_of_day(cube, coord, name='hour'):
    """Attach an auxiliary coordinate holding the hour-of-day of *coord*."""
    def _hour_of(time_coord, value):
        # Convert the numeric time value to a datetime and keep its hour.
        return time_coord.units.num2date(value).hour
    add_categorised_coord(cube, name, coord, _hour_of)
# Figure creation keyword arguments shared by every plot.
figprops = dict(figsize=(8,8), dpi=100)
#cmap=cm.s3pcpn_l
# Time unit used to decode the cube's numeric time points.
u = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
# Gridline spacing (degrees) and rounding divisor for tick placement.
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
# Geographic extent of the plotted domain (degrees).
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
# Tick bounds rounded outward to multiples of `divisor`.
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
def main():
"""For each pressure level, diagnostic and experiment, plot hourly-mean fields (colours) with geopotential height contours, and save the figures."""
for p_level in plot_levels:
# Set pressure height contour min/max
if p_level == 925:
clev_min = 660.
clev_max = 810.
elif p_level == 850:
clev_min = 1435.
clev_max = 1530.
elif p_level == 700:
clev_min = 3090.
clev_max = 3155.
elif p_level == 500:
clev_min = 5800.
clev_max = 5890.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p_level == 925:
clevpt_min = 300.
clevpt_max = 312.
elif p_level == 850:
clevpt_min = 302.
clevpt_max = 310.
elif p_level == 700:
clevpt_min = 312.
clevpt_max = 320.
elif p_level == 500:
clevpt_min = 325.
clevpt_max = 332.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p_level == 925:
clevsh_min = 0.012
clevsh_max = 0.020
elif p_level == 850:
clevsh_min = 0.007
clevsh_max = 0.017
elif p_level == 700:
clevsh_min = 0.002
clevsh_max = 0.010
elif p_level == 500:
clevsh_min = 0.001
clevsh_max = 0.005
else:
print 'Specific humidity min/max not set for this pressure level'
#clevs_col = np.arange(clev_min, clev_max)
# Line-contour levels for the geopotential height field (5 m spacing).
clevs_lin = np.arange(clev_min, clev_max, 5)
p_level_constraint = iris.Constraint(pressure=p_level)
for plot_diag in plot_diags:
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
# Load the diagnostic cube restricted to the current pressure level.
pp_file = '%s_%s_on_p_levs_mean_by_hour.pp' % (experiment_id, plot_diag)
pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, pp_file)
pcube = iris.load_cube(pfile, p_level_constraint)
# For each hour in cube
# Stash code 408 holds the geopotential height companion field.
height_pp_file = '%s_408_on_p_levs_mean_by_hour.pp' % (experiment_id)
height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file)
height_cube = iris.load_cube(height_pfile, p_level_constraint)
print pcube
print height_cube
#time_coords = cube_f.coord('time')
add_hour_of_day(pcube, pcube.coord('time'))
add_hour_of_day(height_cube, height_cube.coord('time'))
#pcube.remove_coord('time')
#cube_diff.remove_coord('time')
#height_cube.remove_coord('time')
#height_cube_diff.remove_coord('time')
#p_cube_difference = iris.analysis.maths.subtract(pcube, cube_diff, dim='hour')
#height_cube_difference = iris.analysis.maths.subtract(height_cube, height_cube_diff, dim='hour')
#pdb.set_trace()
#del height_cube, pcube, height_cube_diff, cube_diff
# One figure per hourly-mean 2-D slice.
for t, time_cube in enumerate(pcube.slices(['grid_latitude', 'grid_longitude'])):
#pdb.set_trace()
print time_cube
height_cube_slice = height_cube.extract(iris.Constraint(hour=time_cube.coord('hour').points))
# Get time of averagesfor plot title
h = u.num2date(np.array(time_cube.coord('hour').points, dtype=float)[0]).strftime('%H%M')
#Convert to India time
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('Asia/Kolkata')
h_utc = u.num2date(np.array(time_cube.coord('hour').points, dtype=float)[0]).replace(tzinfo=from_zone)
h_local = h_utc.astimezone(to_zone).strftime('%H%M')
fig = plt.figure(**figprops)
cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
# Basemap instance is used only for the lon/lat -> map coordinate transform.
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
#pdb.set_trace()
lat = time_cube.coord('grid_latitude').points
lon = time_cube.coord('grid_longitude').points
cs = time_cube.coord_system('CoordSystem')
lons, lats = np.meshgrid(lon, lat)
# Un-rotate the model's rotated pole grid back to true lon/lat.
lons, lats = iris.analysis.cartography.unrotate_pole\
(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
x,y = m(lons,lats)
if plot_diag=='temp':
min_contour = clevpt_min
max_contour = clevpt_max
cb_label='K'
# NOTE(review): the title text describes a model *difference*
# ("dklyu minus ...") although a single experiment's field is
# plotted here -- looks like a copy-paste leftover; confirm.
main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\
and wind (vectors) %s UTC %s IST' % (h, h_local)
tick_interval=2
clev_number=max_contour-min_contour+1
elif plot_diag=='sp_hum':
min_contour = clevsh_min
max_contour = clevsh_max
cb_label='kg/kg'
main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours),\
and wind (vectors) %s UTC %s IST' % (h, h_local)
tick_interval=0.002
clev_number=(max_contour-min_contour+0.001)*(10**3)
clevs = np.linspace(min_contour, max_contour, clev_number)
#clevs = np.linspace(-3, 3, 32)
cont = plt.contourf(x,y,time_cube.data, clevs, cmap=cmap, extend='both')
#cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
cs_lin = iplt.contour(height_cube_slice, clevs_lin,colors='#262626',linewidths=1.)
plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black')
#del time_cube
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
cbar.set_label('%s' % cb_label, fontsize=10, color='#262626')
#cbar.set_label(time_cube.units, fontsize=10, color='#262626')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
cbar.ax.tick_params(labelsize=10, color='#262626')
#main_title='Mean Rainfall for EMBRACE Period -%s UTC (%s IST)' % (h, h_local)
#main_title=time_cube.standard_name.title().replace('_',' ')
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
# Save three variants: no title, short title, full descriptive title.
file_save_name = '%s_%s_%s_hPa_and_geop_height_%s' % (experiment_id, plot_diag, p_level, h)
save_dir = '%s%s/%s' % (save_path, experiment_id, plot_diag)
if not os.path.exists('%s' % save_dir): os.makedirs('%s' % (save_dir))
#plt.show()
fig.savefig('%s/%s_notitle.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
plt.title('%s UTC %s IST' % (h, h_local))
fig.savefig('%s/%s_short_title.png' % (save_dir, file_save_name) , format='png', bbox_inches='tight')
model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
fig.clf()
plt.close()
#del time_cube
# Free memory between figures; each slice allocates several arrays.
gc.collect()
# Script entry point.
if __name__ == '__main__':
main()
#proc=mp.Process(target=worker)
#proc.daemon=True
#proc.start()
#proc.join()
| mit |
terranodo/geonode | geonode/__init__.py | 2 | 1311 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
# Version tuple: (major, minor, micro, release level, serial).
__version__ = (2, 5, 4, 'alpha', 0)
class GeoNodeException(Exception):
    """Root of the GeoNode exception hierarchy.

    Exceptions raised by this module derive from this class.
    """
def get_version():
    """Return the human-readable version string built from ``__version__``."""
    # Imported lazily so that importing the package itself stays cheap.
    import geonode.version as version_module
    return version_module.get_version(__version__)
def main(global_settings, **settings):
    """Build and return the Django WSGI application.

    PasteDeploy-style factory: ``global_settings`` is accepted for the
    factory signature; ``settings['django_settings']`` names the Django
    settings module to use when it is not already configured.
    """
    # Imported here so Django setup only happens when the app is created.
    from django.core.wsgi import get_wsgi_application
    settings_module = settings.get('django_settings')
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_module)
    return get_wsgi_application()
| gpl-3.0 |
Azure/azure-sdk-for-python | sdk/core/azure-core/tests/async_tests/test_tracing_decorator_async.py | 1 | 6637 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""The tests for decorators_async.py"""
try:
from unittest import mock
except ImportError:
import mock
import sys
import time
import pytest
from azure.core.pipeline import Pipeline, PipelineResponse
from azure.core.pipeline.policies import HTTPPolicy
from azure.core.pipeline.transport import HttpTransport, HttpRequest
from azure.core.settings import settings
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from tracing_common import FakeSpan
@pytest.fixture(scope="module")
def fake_span():
settings.tracing_implementation.set_value(FakeSpan)
# Minimal client whose methods are wrapped by the tracing decorators under test.
class MockClient:
@distributed_trace
def __init__(self, policies=None, assert_current_span=False):
time.sleep(0.001)
self.request = HttpRequest("GET", "https://bing.com")
if policies is None:
policies = []
# The last policy short-circuits the pipeline via verify_request.
policies.append(mock.Mock(spec=HTTPPolicy, send=self.verify_request))
self.policies = policies
self.transport = mock.Mock(spec=HttpTransport)
self.pipeline = Pipeline(self.transport, policies=policies)
self.expected_response = mock.Mock(spec=PipelineResponse)
self.assert_current_span = assert_current_span
def verify_request(self, request):
if self.assert_current_span:
# NOTE(review): `execution_context` is not imported in this module;
# this line would raise NameError if assert_current_span were True.
# Confirm the intended tracing-context API before relying on it.
assert execution_context.get_current_span() is not None
return self.expected_response
@distributed_trace_async
async def make_request(self, numb_times, **kwargs):
time.sleep(0.001)
if numb_times < 1:
return None
response = self.pipeline.run(self.request, **kwargs)
await self.get_foo(merge_span=True)
# Recursive calls merge into the span created by the first call.
kwargs['merge_span'] = True
await self.make_request(numb_times - 1, **kwargs)
return response
@distributed_trace_async
async def merge_span_method(self):
return await self.get_foo(merge_span=True)
@distributed_trace_async
async def no_merge_span_method(self):
return await self.get_foo()
@distributed_trace_async
async def get_foo(self):
time.sleep(0.001)
return 5
@distributed_trace_async(name_of_span="different name")
async def check_name_is_different(self):
time.sleep(0.001)
@distributed_trace_async(tracing_attributes={'foo': 'bar'})
async def tracing_attr(self):
time.sleep(0.001)
@distributed_trace_async
async def raising_exception(self):
raise ValueError("Something went horribly wrong here")
@pytest.mark.usefixtures("fake_span")
class TestAsyncDecorator(object):
@pytest.mark.asyncio
async def test_decorator_tracing_attr(self):
with FakeSpan(name="parent") as parent:
client = MockClient()
await client.tracing_attr()
assert len(parent.children) == 2
assert parent.children[0].name == "MockClient.__init__"
assert parent.children[1].name == "MockClient.tracing_attr"
assert parent.children[1].attributes == {'foo': 'bar'}
@pytest.mark.asyncio
async def test_decorator_has_different_name(self):
with FakeSpan(name="parent") as parent:
client = MockClient()
await client.check_name_is_different()
assert len(parent.children) == 2
assert parent.children[0].name == "MockClient.__init__"
assert parent.children[1].name == "different name"
@pytest.mark.asyncio
async def test_used(self):
with FakeSpan(name="parent") as parent:
client = MockClient(policies=[])
await client.get_foo(parent_span=parent)
await client.get_foo()
assert len(parent.children) == 3
assert parent.children[0].name == "MockClient.__init__"
assert not parent.children[0].children
assert parent.children[1].name == "MockClient.get_foo"
assert not parent.children[1].children
assert parent.children[2].name == "MockClient.get_foo"
assert not parent.children[2].children
@pytest.mark.asyncio
async def test_span_merge_span(self):
with FakeSpan(name="parent") as parent:
client = MockClient()
await client.merge_span_method()
await client.no_merge_span_method()
assert len(parent.children) == 3
assert parent.children[0].name == "MockClient.__init__"
assert not parent.children[0].children
assert parent.children[1].name == "MockClient.merge_span_method"
assert not parent.children[1].children
assert parent.children[2].name == "MockClient.no_merge_span_method"
assert parent.children[2].children[0].name == "MockClient.get_foo"
@pytest.mark.asyncio
async def test_span_complicated(self):
with FakeSpan(name="parent") as parent:
client = MockClient()
await client.make_request(2)
with parent.span("child") as child:
time.sleep(0.001)
await client.make_request(2, parent_span=parent)
assert FakeSpan.get_current_span() == child
await client.make_request(2)
assert len(parent.children) == 4
assert parent.children[0].name == "MockClient.__init__"
assert not parent.children[0].children
assert parent.children[1].name == "MockClient.make_request"
assert not parent.children[1].children
assert parent.children[2].name == "child"
assert parent.children[2].children[0].name == "MockClient.make_request"
assert parent.children[3].name == "MockClient.make_request"
assert not parent.children[3].children
@pytest.mark.asyncio
async def test_span_with_exception(self):
"""Assert that if an exception is raised, the next sibling method is actually a sibling span.
"""
with FakeSpan(name="parent") as parent:
client = MockClient()
try:
await client.raising_exception()
except:
pass
await client.get_foo()
assert len(parent.children) == 3
assert parent.children[0].name == "MockClient.__init__"
assert parent.children[1].name == "MockClient.raising_exception"
# Exception should propagate status for Opencensus
assert parent.children[1].status == 'Something went horribly wrong here'
assert parent.children[2].name == "MockClient.get_foo"
| mit |
j-carpentier/nova | nova/tests/unit/virt/vmwareapi/test_configdrive.py | 17 | 7371 | # Copyright 2013 IBM Corp.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from mox3 import mox
from nova import context
from nova.image import glance
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
import nova.tests.unit.image.fake
from nova.tests.unit import utils
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt import fake
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
# Tests that the VMware driver creates/attaches a config drive ISO only when
# the instance requests one (mox-era Nova test case).
class ConfigDriveTestCase(test.NoDBTestCase):
REQUIRES_LOCKING = True
@mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
def setUp(self, mock_register):
super(ConfigDriveTestCase, self).setUp()
vm_util.vm_refs_cache_reset()
self.context = context.RequestContext('fake', 'fake', is_admin=False)
self.flags(cluster_name='test_cluster',
host_ip='test_url',
host_username='test_username',
host_password='test_pass',
use_linked_clone=False, group='vmware')
self.flags(enabled=False, group='vnc')
vmwareapi_fake.reset()
stubs.set_stubs(self.stubs)
nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
self.network_info = utils.get_test_network_info()
self.node_name = self.conn._nodename
image_ref = nova.tests.unit.image.fake.get_valid_image_id()
# Minimal instance record used by every test in this class.
instance_values = {
'vm_state': 'building',
'project_id': 'fake',
'user_id': 'fake',
'name': '1',
'kernel_id': '1',
'ramdisk_id': '1',
'mac_addresses': [{'address': 'de:ad:be:ef:be:ef'}],
'memory_mb': 8192,
'flavor': objects.Flavor(vcpus=4, extra_specs={}),
'instance_type_id': 0,
'vcpus': 4,
'root_gb': 80,
'image_ref': image_ref,
'host': 'fake_host',
'task_state': 'scheduling',
'reservation_id': 'r-3t8muvr0',
'id': 1,
'uuid': 'fake-uuid',
'node': self.node_name,
'metadata': [],
'expected_attrs': ['system_metadata'],
}
self.test_instance = fake_instance.fake_instance_obj(self.context,
**instance_values)
self.test_instance.flavor = objects.Flavor(vcpus=4, memory_mb=8192,
ephemeral_gb=0, swap=0,
extra_specs={})
(image_service, image_id) = glance.get_remote_image_service(context,
image_ref)
metadata = image_service.show(context, image_id)
self.image = {
'id': image_ref,
'disk_format': 'vmdk',
'size': int(metadata['size']),
}
# Stub out instance metadata so no real metadata service is consulted.
class FakeInstanceMetadata(object):
def __init__(self, instance, content=None, extra_md=None,
network_info=None):
pass
def metadata_for_config_drive(self):
return []
self.useFixture(fixtures.MonkeyPatch(
'nova.api.metadata.base.InstanceMetadata',
FakeInstanceMetadata))
def fake_make_drive(_self, _path):
pass
# We can't actually make a config drive v2 because ensure_tree has
# been faked out
self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
'make_drive', fake_make_drive)
def fake_upload_iso_to_datastore(iso_path, instance, **kwargs):
pass
self.stubs.Set(images,
'upload_iso_to_datastore',
fake_upload_iso_to_datastore)
def tearDown(self):
super(ConfigDriveTestCase, self).tearDown()
vmwareapi_fake.cleanup()
nova.tests.unit.image.fake.FakeImageService_reset()
@mock.patch.object(vmops.VMwareVMOps, '_get_instance_metadata',
return_value='fake_metadata')
def _spawn_vm(self, fake_get_instance_meta,
injected_files=None, admin_password=None,
block_device_info=None):
# Shared helper: spawn the test instance through the driver.
injected_files = injected_files or []
self.conn.spawn(self.context, self.test_instance, self.image,
injected_files=injected_files,
admin_password=admin_password,
network_info=self.network_info,
block_device_info=block_device_info)
def test_create_vm_with_config_drive_verify_method_invocation(self):
self.test_instance.config_drive = 'True'
self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
self.conn._vmops._create_config_drive(self.test_instance,
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()
).AndReturn('[ds1] fake.iso')
self.conn._vmops._attach_cdrom_to_vm(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
# if spawn does not call the _create_config_drive or
# _attach_cdrom_to_vm call with the correct set of parameters
# then mox's VerifyAll will throw a Expected methods never called
# Exception
self._spawn_vm()
def test_create_vm_without_config_drive(self):
self.test_instance.config_drive = None
self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
self.mox.ReplayAll()
# if spawn ends up calling _create_config_drive or
# _attach_cdrom_to_vm then mox will log a Unexpected method call
# exception
self._spawn_vm()
def test_create_vm_with_config_drive(self):
# Smoke test: spawning with config_drive set must not raise.
self.test_instance.config_drive = 'True'
self._spawn_vm()
| apache-2.0 |
MarcAndreJean/PCONC | Modules/__init__.py | 1 | 1056 | # Importation des modules locaux.
import importlib
# 01
m01 = importlib.import_module("Modules.01-Vue")
# --01.__
m01x01 = importlib.import_module("Modules.01-01-Ordinateur")
m01x02 = importlib.import_module("Modules.01-02-Editeur")
m01x03 = importlib.import_module("Modules.01-03-StatusBar")
m01x04 = importlib.import_module("Modules.01-04-CodeScrolledText")
m01x05 = importlib.import_module("Modules.01-05-TextCin")
# 02
m02 = importlib.import_module("Modules.02-FonctionEditeur")
# 03
m03 = importlib.import_module("Modules.03-Compileur")
# 04
m04 = importlib.import_module("Modules.04-Micro-Ordinateur")
# --04.__
m04x01 = importlib.import_module("Modules.04-01-Bus")
m04x02 = importlib.import_module("Modules.04-02-CPU")
m04x03 = importlib.import_module("Modules.04-03-ALU")
m04x04 = importlib.import_module("Modules.04-04-ROM")
m04x05 = importlib.import_module("Modules.04-05-IO")
m04x06 = importlib.import_module("Modules.04-06-RAM")
# 05
m05 = importlib.import_module("Modules.05-Enum")
# 06
m06 = importlib.import_module("Modules.06-ListenerGUI")
| mit |
ticosax/django | tests/aggregation_regress/tests.py | 11 | 52791 | from __future__ import unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
F, Q, Avg, Count, Max, StdDev, Sum, Value, Variance,
)
from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from .models import (
Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag,
Publisher, Store, WithManualPK,
)
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
    """Create the shared fixture: authors (with a friendship m2m graph),
    publishers, books (including two hardbacks) and stores."""
    cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
    cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
    cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
    cls.a4 = Author.objects.create(name='James Bennett', age=29)
    cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
    cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
    cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
    cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
    cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
    # Friendship graph, exercised by the friends__age aggregations.
    cls.a1.friends.add(cls.a2, cls.a4)
    cls.a2.friends.add(cls.a1, cls.a7)
    cls.a4.friends.add(cls.a1)
    cls.a5.friends.add(cls.a6, cls.a7)
    cls.a6.friends.add(cls.a5, cls.a7)
    cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
    cls.a8.friends.add(cls.a9)
    cls.a9.friends.add(cls.a8)
    cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
    cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
    cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
    cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
    cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
    cls.b1 = Book.objects.create(
        isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
        pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
        pubdate=datetime.date(2007, 12, 6)
    )
    cls.b2 = Book.objects.create(
        isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
        pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
        pubdate=datetime.date(2008, 3, 3)
    )
    cls.b3 = Book.objects.create(
        isbn='159059996', name='Practical Django Projects',
        pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
        pubdate=datetime.date(2008, 6, 23)
    )
    cls.b4 = Book.objects.create(
        isbn='013235613', name='Python Web Development with Django',
        pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
        pubdate=datetime.date(2008, 11, 3)
    )
    # b5/b6 are HardbackBook rows (multi-table inheritance of Book).
    cls.b5 = HardbackBook.objects.create(
        isbn='013790395', name='Artificial Intelligence: A Modern Approach',
        pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
        pubdate=datetime.date(1995, 1, 15), weight=4.5)
    cls.b6 = HardbackBook.objects.create(
        isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
        pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
        pubdate=datetime.date(1991, 10, 15), weight=3.7)
    cls.b1.authors.add(cls.a1, cls.a2)
    cls.b2.authors.add(cls.a3)
    cls.b3.authors.add(cls.a4)
    cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
    cls.b5.authors.add(cls.a8, cls.a9)
    cls.b6.authors.add(cls.a8)
    s1 = Store.objects.create(
        name='Amazon.com',
        original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
        friday_night_closing=datetime.time(23, 59, 59)
    )
    s2 = Store.objects.create(
        name='Books.com',
        original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
        friday_night_closing=datetime.time(23, 59, 59)
    )
    s3 = Store.objects.create(
        name="Mamma and Pappa's Books",
        original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
        friday_night_closing=datetime.time(21, 30)
    )
    s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
    s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
    s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
    """Assert that ``obj`` exposes each keyword's attribute with the
    given expected value."""
    for name in kwargs:
        self.assertEqual(getattr(obj, name), kwargs[name])
def test_aggregates_in_where_clause(self):
    """
    Regression test for #12822: DatabaseError: aggregates not allowed in
    WHERE clause
    Tests that the subselect works and returns results equivalent to a
    query with the IDs listed.
    Before the corresponding fix for this bug, this test passed in 1.1 and
    failed in 1.2-beta (trunk).
    """
    # Flat list of Max('id') per contact, kept unevaluated so it is used
    # as a subquery in the filter below.
    qs = Book.objects.values('contact').annotate(Max('id'))
    qs = qs.order_by('contact').values_list('id__max', flat=True)
    # don't do anything with the queryset (qs) before including it as a
    # subquery
    books = Book.objects.order_by('id')
    qs1 = books.filter(id__in=qs)
    qs2 = books.filter(id__in=list(qs))
    self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
    """
    Regression test for #12822: DatabaseError: aggregates not allowed in
    WHERE clause
    Same as the above test, but evaluates the queryset for the subquery
    before it's used as a subquery.
    Before the corresponding fix for this bug, this test failed in both
    1.1 and 1.2-beta (trunk).
    """
    qs = Book.objects.values('contact').annotate(Max('id'))
    qs = qs.order_by('contact').values_list('id__max', flat=True)
    # force the queryset (qs) for the subquery to be evaluated in its
    # current state
    list(qs)
    books = Book.objects.order_by('id')
    qs1 = books.filter(id__in=qs)
    qs2 = books.filter(id__in=list(qs))
    self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
    """
    Regression test for #11916: Extra params + aggregation creates
    incorrect SQL.
    """
    # Oracle doesn't support subqueries in group by clause
    shortest_book_sql = """
    SELECT name
    FROM aggregation_regress_book b
    WHERE b.publisher_id = aggregation_regress_publisher.id
    ORDER BY b.pages
    LIMIT 1
    """
    # tests that this query does not raise a DatabaseError due to the full
    # subselect being (erroneously) added to the GROUP BY parameters
    qs = Publisher.objects.extra(select={
        'name_of_shortest_book': shortest_book_sql,
    }).annotate(total_books=Count('book'))
    # force execution of the query
    list(qs)
def test_aggregate(self):
    """Basic aggregate() behavior: ordering is ignored, multiple
    aggregates combine, and extra(select=...) columns don't interfere."""
    # Ordering requests are ignored
    self.assertEqual(
        Author.objects.order_by("name").aggregate(Avg("age")),
        {"age__avg": Approximate(37.444, places=1)}
    )
    # Implicit ordering is also ignored
    self.assertEqual(
        Book.objects.aggregate(Sum("pages")),
        {"pages__sum": 3703},
    )
    # Baseline results
    self.assertEqual(
        Book.objects.aggregate(Sum('pages'), Avg('pages')),
        {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
    )
    # Empty values query doesn't affect grouping or results
    self.assertEqual(
        Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
        {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
    )
    # Aggregate overrides extra selected column
    self.assertEqual(
        Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
        {'pages__sum': 3703}
    )
def test_annotation(self):
    """annotate() composes with extra(select=...) and values() regardless
    of the order the clauses are applied in."""
    # Annotations get combined with extra select clauses
    obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(
        select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk)
    self.assertObjectAttrs(
        obj,
        contact_id=self.a3.id,
        isbn='067232959',
        mean_auth_age=45.0,
        name='Sams Teach Yourself Django in 24 Hours',
        pages=528,
        price=Decimal("23.09"),
        pubdate=datetime.date(2008, 3, 3),
        publisher_id=self.p2.id,
        rating=3.0
    )
    # Different DB backends return different types for the extra select computation
    self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
    # Order of the annotate/extra in the query doesn't matter
    obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(
        mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk)
    self.assertObjectAttrs(
        obj,
        contact_id=self.a3.id,
        isbn='067232959',
        mean_auth_age=45.0,
        name='Sams Teach Yourself Django in 24 Hours',
        pages=528,
        price=Decimal("23.09"),
        pubdate=datetime.date(2008, 3, 3),
        publisher_id=self.p2.id,
        rating=3.0
    )
    # Different DB backends return different types for the extra select computation
    self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
    # Values queries can be combined with annotate and extra
    obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
        select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk)
    manufacture_cost = obj['manufacture_cost']
    self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
    del obj['manufacture_cost']
    self.assertEqual(obj, {
        'id': self.b2.id,
        'contact_id': self.a3.id,
        'isbn': '067232959',
        'mean_auth_age': 45.0,
        'name': 'Sams Teach Yourself Django in 24 Hours',
        'pages': 528,
        'price': Decimal('23.09'),
        'pubdate': datetime.date(2008, 3, 3),
        'publisher_id': self.p2.id,
        'rating': 3.0,
    })
    # The order of the (empty) values, annotate and extra clauses doesn't
    # matter
    obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(
        select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk)
    manufacture_cost = obj['manufacture_cost']
    self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
    del obj['manufacture_cost']
    self.assertEqual(obj, {
        'id': self.b2.id,
        'contact_id': self.a3.id,
        'isbn': '067232959',
        'mean_auth_age': 45.0,
        'name': 'Sams Teach Yourself Django in 24 Hours',
        'pages': 528,
        'price': Decimal('23.09'),
        'pubdate': datetime.date(2008, 3, 3),
        'publisher_id': self.p2.id,
        'rating': 3.0
    })
    # If the annotation precedes the values clause, it won't be included
    # unless it is explicitly named
    obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
        select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk)
    self.assertEqual(obj, {
        "name": 'The Definitive Guide to Django: Web Development Done Right',
    })
    obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
        select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk)
    self.assertEqual(obj, {
        'mean_auth_age': 34.5,
        'name': 'The Definitive Guide to Django: Web Development Done Right',
    })
    # If an annotation isn't included in the values, it can still be used
    # in a filter
    qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
    self.assertQuerysetEqual(
        qs, [
            {"name": 'Python Web Development with Django'}
        ],
        lambda b: b,
    )
    # The annotations are added to values output if values() precedes
    # annotate()
    obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(
        select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk)
    self.assertEqual(obj, {
        'mean_auth_age': 34.5,
        'name': 'The Definitive Guide to Django: Web Development Done Right',
    })
    # Check that all of the objects are getting counted (allow_nulls) and
    # that values respects the amount of objects
    self.assertEqual(
        len(Author.objects.annotate(Avg('friends__age')).values()),
        9
    )
    # Check that consecutive calls to annotate accumulate in the query
    qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
    self.assertQuerysetEqual(
        qs, [
            {'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
            {'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
            {'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
            {'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
            {'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
        ],
        lambda b: b,
    )
def test_aggrate_annotation(self):
    """Aggregates can be composed over annotations.

    NOTE(review): the method name contains a typo ("aggrate"); it is kept
    as-is so the public test name seen by test discovery is unchanged.
    """
    # The return type is derived from the composed aggregate
    vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
    self.assertEqual(vals, {
        'num_authors__sum': 10,
        'num_authors__avg': Approximate(1.666, places=2),
        'pages__max': 1132,
        'price__max': Decimal("82.80")
    })
    # Regression for #15624 - Missing SELECT columns when using values, annotate
    # and aggregate in a single query
    self.assertEqual(
        Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
        {'c__max': 3}
    )
def test_field_error(self):
    """Aggregating or annotating over a nonexistent field raises FieldError."""
    with self.assertRaises(FieldError):
        Book.objects.all().aggregate(num_authors=Count('foo'))
    with self.assertRaises(FieldError):
        Book.objects.all().annotate(num_authors=Count('foo'))
    with self.assertRaises(FieldError):
        Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
def test_more(self):
    """Mixed aggregation scenarios: count() over annotations, reserved-word
    aliases, select_related interplay and exclude() negation."""
    # Old-style count aggregations can be mixed with new-style
    self.assertEqual(
        Book.objects.annotate(num_authors=Count('authors')).count(),
        6
    )
    # Non-ordinal, non-computed Aggregates over annotations correctly
    # inherit the annotation's internal type if the annotation is ordinal
    # or computed
    vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
    self.assertEqual(
        vals,
        {'num_authors__max': 3}
    )
    vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
    self.assertEqual(
        vals,
        {'avg_price__max': 75.0}
    )
    # Aliases are quoted to protected aliases that might be reserved names
    vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
    self.assertEqual(
        vals,
        {'number': 1132, 'select': 1132}
    )
    # Regression for #10064: select_related() plays nice with aggregates
    obj = Book.objects.select_related('publisher').annotate(
        num_authors=Count('authors')).values().get(isbn='013790395')
    self.assertEqual(obj, {
        'contact_id': self.a8.id,
        'id': self.b5.id,
        'isbn': '013790395',
        'name': 'Artificial Intelligence: A Modern Approach',
        'num_authors': 2,
        'pages': 1132,
        'price': Decimal("82.8"),
        'pubdate': datetime.date(1995, 1, 15),
        'publisher_id': self.p3.id,
        'rating': 4.0,
    })
    # Regression for #10010: exclude on an aggregate field is correctly
    # negated
    self.assertEqual(
        len(Book.objects.annotate(num_authors=Count('authors'))),
        6
    )
    self.assertEqual(
        len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
        1
    )
    self.assertEqual(
        len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
        5
    )
    self.assertEqual(
        len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
        2
    )
    self.assertEqual(
        len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
        2
    )
def test_aggregate_fexpr(self):
    """F() expressions combine with aggregates in filter()/exclude().

    The first filter/exclude pair has the annotated aggregate on the LHS
    (pushed into the HAVING clause); the second pair references the
    aggregate from inside the F() expression on the RHS.
    """
    # ... where the F() is pushed into the HAVING clause
    qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
    self.assertQuerysetEqual(
        qs, [
            {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
            {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
        ],
        lambda p: p,
    )
    qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
    self.assertQuerysetEqual(
        qs, [
            {'num_books': 2, 'name': 'Apress', 'num_awards': 3},
            {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
            {'num_books': 1, 'name': 'Sams', 'num_awards': 1}
        ],
        lambda p: p,
    )
    # ... and where the F() references an aggregate
    qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2 * F('num_books')).order_by('name').values('name', 'num_books', 'num_awards')
    self.assertQuerysetEqual(
        qs, [
            {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
            {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
        ],
        lambda p: p,
    )
    # Fix: this exclude() previously duplicated the aggregate-on-LHS form
    # from the first pair; use the aggregate-referencing F() form so the
    # exclude() path of this case is actually exercised.  The matched
    # publisher set is identical either way (num_awards > 2 * num_books
    # <=> num_books < num_awards / 2).
    qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_awards__gt=2 * F('num_books')).order_by('name').values('name', 'num_books', 'num_awards')
    self.assertQuerysetEqual(
        qs, [
            {'num_books': 2, 'name': 'Apress', 'num_awards': 3},
            {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
            {'num_books': 1, 'name': 'Sams', 'num_awards': 1}
        ],
        lambda p: p,
    )
def test_db_col_table(self):
    """Aggregation works on fields with non-default table and column names."""
    # Tests on fields with non-default table and column names.
    qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
    self.assertQuerysetEqual(qs, [])
    qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
    self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
    """Aggregates mixed up ordering of columns for backend's convert_values
    method. Refs #21126."""
    e = Entries.objects.create(Entry='foo')
    c = Clues.objects.create(EntryID=e, Clue='bar')
    qs = Clues.objects.select_related('EntryID').annotate(Count('ID'))
    self.assertQuerysetEqual(
        qs, [c], lambda x: x)
    self.assertEqual(qs[0].EntryID, e)
    # Must be an actual bool (identity check), not a 0/1 the backend
    # failed to convert.
    self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
    """Regression for #10089: Check handling of empty result sets with
    aggregates."""
    self.assertEqual(
        Book.objects.filter(id__in=[]).count(),
        0
    )
    # All Max/Avg come back None on an empty set; Count comes back 0.
    vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
    self.assertEqual(
        vals,
        {'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
    )
    qs = Publisher.objects.filter(name="Jonno's House of Books").annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
    self.assertQuerysetEqual(
        qs, [
            {'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': self.p5.id, 'avg_authors': None}
        ],
        lambda p: p
    )
def test_more_more(self):
    """GROUP BY correctness with order_by joins, select_related, and
    values() clauses that only mention extra(select=...) columns."""
    # Regression for #10113 - Fields mentioned in order_by() must be
    # included in the GROUP BY. This only becomes a problem when the
    # order_by introduces a new join.
    self.assertQuerysetEqual(
        Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
            "Practical Django Projects",
            "The Definitive Guide to Django: Web Development Done Right",
            "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
            "Artificial Intelligence: A Modern Approach",
            "Python Web Development with Django",
            "Sams Teach Yourself Django in 24 Hours",
        ],
        lambda b: b.name
    )
    # Regression for #10127 - Empty select_related() works with annotate
    qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
    self.assertQuerysetEqual(
        qs, [
            ('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
            ('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
            ('Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier'),
            ('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
        ],
        lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
    )
    # Regression for #10132 - If the values() clause only mentioned extra
    # (select=) columns, those columns are used for grouping
    qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
    # Fix: 'pub' holds publisher ids, so compare against publisher pks.
    # (Previously book pks b1..b4 were used here, passing only because
    # the fixture happens to create books and publishers with coinciding
    # ids; the parallel assertion below already used p1..p4.)
    self.assertQuerysetEqual(
        qs, [
            {'pub': self.p1.id, 'id__count': 2},
            {'pub': self.p2.id, 'id__count': 1},
            {'pub': self.p3.id, 'id__count': 2},
            {'pub': self.p4.id, 'id__count': 1}
        ],
        lambda b: b
    )
    qs = Book.objects.extra(select={'pub': 'publisher_id', 'foo': 'pages'}).values('pub').annotate(Count('id')).order_by('pub')
    self.assertQuerysetEqual(
        qs, [
            {'pub': self.p1.id, 'id__count': 2},
            {'pub': self.p2.id, 'id__count': 1},
            {'pub': self.p3.id, 'id__count': 2},
            {'pub': self.p4.id, 'id__count': 1}
        ],
        lambda b: b
    )
    # Regression for #10182 - Queries with aggregate calls are correctly
    # realiased when used in a subquery
    ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
    self.assertQuerysetEqual(
        Book.objects.filter(id__in=ids), [
            "Python Web Development with Django",
        ],
        lambda b: b.name
    )
    # Regression for #15709 - Ensure each group_by field only exists once
    # per query
    qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query)
    # Check that there is just one GROUP BY clause (zero commas means at
    # most one clause)
    self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0)
def test_duplicate_alias(self):
    """Regression for #11256 - duplicating a default alias raises ValueError."""
    with self.assertRaises(ValueError):
        Book.objects.all().annotate(Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
    """Regression for #11256 - an aggregate alias that clashes with a model
    field name raises ValueError."""
    with self.assertRaises(ValueError):
        Author.objects.annotate(age=Avg('friends__age'))
def test_m2m_name_conflict(self):
    """Regression for #11256 - an aggregate alias that clashes with an m2m
    field name raises ValueError."""
    with self.assertRaises(ValueError):
        Author.objects.annotate(friends=Count('friends'))
def test_values_queryset_non_conflict(self):
    """Regression for #14707 -- If you're using a values query set, some
    potential conflicts are avoided."""
    # age is a field on Author, so it shouldn't be allowed as an aggregate.
    # But age isn't included in values(), so it is.
    results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
    self.assertEqual(len(results), 9)
    self.assertEqual(results[0]['name'], 'Adrian Holovaty')
    self.assertEqual(results[0]['age'], 1)
    # Same problem, but aggregating over m2m fields
    results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
    self.assertEqual(len(results), 9)
    self.assertEqual(results[0]['name'], 'Adrian Holovaty')
    self.assertEqual(results[0]['age'], 32.0)
    # Same problem, but colliding with an m2m field
    results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
    self.assertEqual(len(results), 9)
    self.assertEqual(results[0]['name'], 'Adrian Holovaty')
    self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
    """Regression for #11256 - an aggregate alias that clashes with a
    reverse-related name raises ValueError."""
    with self.assertRaises(ValueError):
        Author.objects.annotate(book_contact_set=Avg('friends__age'))
def test_pickle(self):
    """Regression for #10197 -- Queries with aggregates can be pickled."""
    # First check that pickling is possible at all. No crash = success
    qs = Book.objects.annotate(num_authors=Count('authors'))
    pickle.dumps(qs)
    # Then check that the round trip works.
    query = qs.query.get_compiler(qs.db).as_sql()[0]
    qs2 = pickle.loads(pickle.dumps(qs))
    self.assertEqual(
        qs2.query.get_compiler(qs2.db).as_sql()[0],
        query,
    )
def test_more_more_more(self):
    """aggregate() doesn't mutate the original queryset; annotations work
    with dates(), parameterized extra selects, count(), repeated
    evaluation and inherited (multi-table) fields."""
    # Regression for #10199 - Aggregate calls clone the original query so
    # the original query can still be used
    books = Book.objects.all()
    books.aggregate(Avg("authors__age"))
    self.assertQuerysetEqual(
        books.all(), [
            'Artificial Intelligence: A Modern Approach',
            'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
            'Practical Django Projects',
            'Python Web Development with Django',
            'Sams Teach Yourself Django in 24 Hours',
            'The Definitive Guide to Django: Web Development Done Right'
        ],
        lambda b: b.name
    )
    # Regression for #10248 - Annotations work with DateQuerySets
    qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
    self.assertQuerysetEqual(
        qs, [
            datetime.date(1995, 1, 15),
            datetime.date(2007, 12, 6),
        ],
        lambda b: b
    )
    # Regression for #10290 - extra selects with parameters can be used for
    # grouping.
    qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
    self.assertQuerysetEqual(
        qs, [
            150,
            175,
            224,
            264,
            473,
            566
        ],
        lambda b: int(b["sheets"])
    )
    # Regression for 10425 - annotations don't get in the way of a count()
    # clause
    self.assertEqual(
        Book.objects.values('publisher').annotate(Count('publisher')).count(),
        4
    )
    self.assertEqual(
        Book.objects.annotate(Count('publisher')).values('publisher').count(),
        6
    )
    # Note: intentionally no order_by(), that case needs tests, too.
    publishers = Publisher.objects.filter(id__in=[1, 2])
    self.assertEqual(
        sorted(p.name for p in publishers),
        [
            "Apress",
            "Sams"
        ]
    )
    publishers = publishers.annotate(n_books=Count("book"))
    sorted_publishers = sorted(publishers, key=lambda x: x.name)
    self.assertEqual(
        sorted_publishers[0].n_books,
        2
    )
    self.assertEqual(
        sorted_publishers[1].n_books,
        1
    )
    # Re-evaluating the annotated queryset still yields the same rows.
    self.assertEqual(
        sorted(p.name for p in publishers),
        [
            "Apress",
            "Sams"
        ]
    )
    books = Book.objects.filter(publisher__in=publishers)
    self.assertQuerysetEqual(
        books, [
            "Practical Django Projects",
            "Sams Teach Yourself Django in 24 Hours",
            "The Definitive Guide to Django: Web Development Done Right",
        ],
        lambda b: b.name
    )
    self.assertEqual(
        sorted(p.name for p in publishers),
        [
            "Apress",
            "Sams"
        ]
    )
    # Regression for 10666 - inherited fields work with annotations and
    # aggregations
    self.assertEqual(
        HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
        {'n_pages': 2078}
    )
    self.assertEqual(
        HardbackBook.objects.aggregate(n_pages=Sum('pages')),
        {'n_pages': 2078},
    )
    qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
    self.assertQuerysetEqual(
        qs, [
            {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
            {'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
        ],
        lambda h: h
    )
    qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
    self.assertQuerysetEqual(
        qs, [
            {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
            {'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
        ],
        lambda h: h,
    )
    # Regression for #10766 - Shouldn't be able to reference an aggregate
    # fields in an aggregate() call.
    self.assertRaises(
        FieldError,
        lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
    )
def test_empty_filter_count(self):
    """count() on an annotated queryset filtered to no ids returns 0."""
    qs = Author.objects.filter(id__in=[]).annotate(Count("friends"))
    self.assertEqual(qs.count(), 0)
def test_empty_filter_aggregate(self):
    """aggregate() over an annotated queryset filtered to no ids yields None."""
    qs = Author.objects.filter(id__in=[]).annotate(Count("friends"))
    self.assertEqual(qs.aggregate(Count("pk")), {"pk__count": None})
def test_none_call_before_aggregate(self):
    """Regression for #11789: aggregating after .none() yields None values."""
    result = Author.objects.none().aggregate(Avg('age'))
    self.assertEqual(result, {'age__avg': None})
def test_annotate_and_join(self):
    """Excluding on the annotated m2m relation keeps every author row."""
    annotated = Author.objects.annotate(c=Count("friends__name"))
    excluded = annotated.exclude(friends__name="Joe")
    self.assertEqual(excluded.count(), Author.objects.count())
def test_f_expression_annotation(self):
    """An annotation can be compared against an F() arithmetic expression."""
    # Books with less than 200 pages per author.
    qs = Book.objects.values("name").annotate(
        n_authors=Count("authors")
    ).filter(
        pages__lt=F("n_authors") * 200
    ).values_list("pk")
    self.assertQuerysetEqual(
        Book.objects.filter(pk__in=qs), [
            "Python Web Development with Django"
        ],
        attrgetter("name")
    )
def test_values_annotate_values(self):
    """A values()/annotate()/values_list() chain reduces to the plain pk list."""
    annotated = Book.objects.values("name").annotate(n_authors=Count("authors"))
    pks = annotated.values_list("pk", flat=True)
    self.assertEqual(list(pks), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
    """A field on the LHS of a HAVING comparison lands in GROUP BY."""
    # Test that when a field occurs on the LHS of a HAVING clause that it
    # appears correctly in the GROUP BY clause
    qs = Book.objects.values_list("name").annotate(
        n_authors=Count("authors")
    ).filter(
        pages__gt=F("n_authors")
    ).values_list("name", flat=True)
    # Results should be the same, all Books have more pages than authors
    self.assertEqual(
        list(qs), list(Book.objects.values_list("name", flat=True))
    )
def test_values_list_annotation_args_ordering(self):
    """
    Annotate *args ordering should be preserved in values_list results.
    **kwargs comes after *args.
    Regression test for #23659.
    """
    books = Book.objects.values_list("publisher__name").annotate(
        Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
    ).order_by("-publisher__name")
    # Sams: 1 book, price 23.09, author age 45, 528 pages.
    self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
def test_annotation_disjunction(self):
    """Q() disjunctions may mix annotation and plain-field predicates."""
    qs = Book.objects.annotate(n_authors=Count("authors")).filter(
        Q(n_authors=2) | Q(name="Python Web Development with Django")
    )
    self.assertQuerysetEqual(
        qs, [
            "Artificial Intelligence: A Modern Approach",
            "Python Web Development with Django",
            "The Definitive Guide to Django: Web Development Done Right",
        ],
        attrgetter("name")
    )
    qs = Book.objects.annotate(n_authors=Count("authors")).filter(
        Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
    )
    self.assertQuerysetEqual(
        qs, [
            "The Definitive Guide to Django: Web Development Done Right",
        ],
        attrgetter("name")
    )
    # Disjunction mixing an aggregate comparison with an isnull test.
    qs = Publisher.objects.annotate(
        rating_sum=Sum("book__rating"),
        book_count=Count("book")
    ).filter(
        Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
    ).order_by('pk')
    self.assertQuerysetEqual(
        qs, [
            "Apress",
            "Prentice Hall",
            "Jonno's House of Books",
        ],
        attrgetter("name")
    )
    # Disjunction comparing one annotation against another via F().
    qs = Publisher.objects.annotate(
        rating_sum=Sum("book__rating"),
        book_count=Count("book")
    ).filter(
        Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None)
    ).order_by("num_awards")
    self.assertQuerysetEqual(
        qs, [
            "Jonno's House of Books",
            "Sams",
            "Apress",
            "Prentice Hall",
            "Morgan Kaufmann"
        ],
        attrgetter("name")
    )
def test_quoting_aggregate_order_by(self):
    """A mixed-case annotation alias is quoted correctly in ORDER BY."""
    qs = Book.objects.filter(
        name="Python Web Development with Django"
    ).annotate(
        authorCount=Count("authors")
    ).order_by("authorCount")
    self.assertQuerysetEqual(
        qs, [
            ("Python Web Development with Django", 3),
        ],
        lambda b: (b.name, b.authorCount)
    )
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
    """StdDev and Variance aggregates, both population (default) and
    sample (sample=True), over the pages/rating/price fields."""
    self.assertEqual(
        Book.objects.aggregate(StdDev('pages')),
        {'pages__stddev': Approximate(311.46, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(StdDev('rating')),
        {'rating__stddev': Approximate(0.60, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(StdDev('price')),
        {'price__stddev': Approximate(24.16, 2)}
    )
    self.assertEqual(
        Book.objects.aggregate(StdDev('pages', sample=True)),
        {'pages__stddev': Approximate(341.19, 2)}
    )
    self.assertEqual(
        Book.objects.aggregate(StdDev('rating', sample=True)),
        {'rating__stddev': Approximate(0.66, 2)}
    )
    self.assertEqual(
        Book.objects.aggregate(StdDev('price', sample=True)),
        {'price__stddev': Approximate(26.46, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(Variance('pages')),
        {'pages__variance': Approximate(97010.80, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(Variance('rating')),
        {'rating__variance': Approximate(0.36, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(Variance('price')),
        {'price__variance': Approximate(583.77, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(Variance('pages', sample=True)),
        {'pages__variance': Approximate(116412.96, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(Variance('rating', sample=True)),
        {'rating__variance': Approximate(0.44, 2)}
    )
    self.assertEqual(
        Book.objects.aggregate(Variance('price', sample=True)),
        {'price__variance': Approximate(700.53, 2)}
    )
def test_filtering_by_annotation_name(self):
    """Regression test for #14476: filtering on annotation aliases,
    including the automatically generated `book__count` name."""
    # The name of the explicitly provided annotation name in this case
    # poses no problem
    qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
    self.assertQuerysetEqual(
        qs,
        ['Peter Norvig'],
        lambda b: b.name
    )
    # Neither in this case
    qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
    self.assertQuerysetEqual(
        qs,
        ['Peter Norvig'],
        lambda b: b.name
    )
    # This case used to fail because the ORM couldn't resolve the
    # automatically generated annotation name `book__count`
    qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
    self.assertQuerysetEqual(
        qs,
        ['Peter Norvig'],
        lambda b: b.name
    )
    def test_annotate_joins(self):
        """
        Test that the base table's join isn't promoted to LOUTER. This could
        cause the query generation to fail if there is an exclude() for fk-field
        in the query, too. Refs #19087.
        """
        qs = Book.objects.annotate(n=Count('pk'))
        # The base table's alias must keep join_type None (i.e. not LOUTER).
        self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
        # Check that the query executes without problems.
        self.assertEqual(len(qs.exclude(publisher=-1)), 6)
    @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
    def test_aggregate_duplicate_columns(self):
        """GROUP BY collapses to the pk when the backend supports it (#17144)."""
        # Regression test for #17144
        results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
        # There should only be one GROUP BY clause, for the `id` column.
        # `name` and `age` should not be grouped on.
        _, _, group_by = results.query.get_compiler(using='default').pre_sql_setup()
        self.assertEqual(len(group_by), 1)
        self.assertIn('id', group_by[0][0])
        self.assertNotIn('name', group_by[0][0])
        self.assertNotIn('age', group_by[0][0])
        # Ensure that we get correct results.
        self.assertEqual(
            [(a.name, a.num_contacts) for a in results.order_by('name')],
            [
                ('Adrian Holovaty', 1),
                ('Brad Dayley', 1),
                ('Jacob Kaplan-Moss', 0),
                ('James Bennett', 1),
                ('Jeffrey Forcier', 1),
                ('Paul Bissex', 0),
                ('Peter Norvig', 2),
                ('Stuart Russell', 0),
                ('Wesley J. Chun', 0),
            ]
        )
    @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
    def test_aggregate_duplicate_columns_only(self):
        """Same pk-only grouping as above when combined with .only() (#17144)."""
        # Works with only() too.
        results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
        _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
        self.assertEqual(len(grouping), 1)
        self.assertIn('id', grouping[0][0])
        self.assertNotIn('name', grouping[0][0])
        self.assertNotIn('age', grouping[0][0])
        # Ensure that we get correct results.
        self.assertEqual(
            [(a.name, a.num_contacts) for a in results.order_by('name')],
            [
                ('Adrian Holovaty', 1),
                ('Brad Dayley', 1),
                ('Jacob Kaplan-Moss', 0),
                ('James Bennett', 1),
                ('Jeffrey Forcier', 1),
                ('Paul Bissex', 0),
                ('Peter Norvig', 2),
                ('Stuart Russell', 0),
                ('Wesley J. Chun', 0),
            ]
        )
    @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
    def test_aggregate_duplicate_columns_select_related(self):
        """Pk-only grouping also holds with select_related() (#17144)."""
        # And select_related()
        results = Book.objects.select_related('contact').annotate(
            num_authors=Count('authors'))
        _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
        # In the case of `group_by_selected_pks` we also group by contact.id because of the select_related.
        self.assertEqual(len(grouping), 1 if connection.features.allows_group_by_pk else 2)
        self.assertIn('id', grouping[0][0])
        self.assertNotIn('name', grouping[0][0])
        self.assertNotIn('contact', grouping[0][0])
        # Ensure that we get correct results.
        self.assertEqual(
            [(b.name, b.num_authors) for b in results.order_by('name')],
            [
                ('Artificial Intelligence: A Modern Approach', 2),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
                ('Practical Django Projects', 1),
                ('Python Web Development with Django', 3),
                ('Sams Teach Yourself Django in 24 Hours', 1),
                ('The Definitive Guide to Django: Web Development Done Right', 2)
            ]
        )
    def test_reverse_join_trimming(self):
        """Annotating across a reverse FK keeps an explicit JOIN in the SQL."""
        qs = Author.objects.annotate(Count('book_contact_set__contact'))
        self.assertIn(' JOIN ', str(qs.query))
    def test_aggregation_with_generic_reverse_relation(self):
        """
        Regression test for #10870: Aggregates with joins ignore extra
        filters provided by setup_joins
        tests aggregations with generic reverse relations
        """
        django_book = Book.objects.get(name='Practical Django Projects')
        ItemTag.objects.create(object_id=django_book.id, tag='intermediate',
                               content_type=ContentType.objects.get_for_model(django_book))
        ItemTag.objects.create(object_id=django_book.id, tag='django',
                               content_type=ContentType.objects.get_for_model(django_book))
        # Assign a tag to model with same PK as the book above. If the JOIN
        # used in aggregation doesn't have content type as part of the
        # condition the annotation will also count the 'hi mom' tag for b.
        wmpk = WithManualPK.objects.create(id=django_book.pk)
        ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
                               content_type=ContentType.objects.get_for_model(wmpk))
        ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
        ItemTag.objects.create(object_id=ai_book.id, tag='intermediate',
                               content_type=ContentType.objects.get_for_model(ai_book))
        # The 'hi mom' tag must not be counted: 3 tags total, not 4.
        self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
        results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
        self.assertEqual(
            [(b.name, b.tags__count) for b in results],
            [
                ('Practical Django Projects', 2),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
                ('Artificial Intelligence: A Modern Approach', 0),
                ('Python Web Development with Django', 0),
                ('Sams Teach Yourself Django in 24 Hours', 0),
                ('The Definitive Guide to Django: Web Development Done Right', 0)
            ]
        )
    def test_negated_aggregation(self):
        """exclude() over annotation-based Q objects (AND-ed and OR-ed) matches a subquery exclude."""
        expected_results = Author.objects.exclude(
            pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
        ).order_by('name')
        expected_results = [a.name for a in expected_results]
        # Excluding two identical Q objects AND-ed together must equal the
        # subquery-based exclusion above.
        qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
            Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
        self.assertQuerysetEqual(
            qs,
            expected_results,
            lambda b: b.name
        )
        expected_results = Author.objects.exclude(
            pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
        ).order_by('name')
        expected_results = [a.name for a in expected_results]
        # Same check with the Q objects OR-ed together.
        qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
        self.assertQuerysetEqual(
            qs,
            expected_results,
            lambda b: b.name
        )
    def test_name_filters(self):
        """An auto-generated annotation name can be OR-ed with a plain field filter."""
        qs = Author.objects.annotate(Count('book')).filter(
            Q(book__count__exact=2) | Q(name='Adrian Holovaty')
        ).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Adrian Holovaty', 'Peter Norvig'],
            lambda b: b.name
        )
    def test_name_expressions(self):
        """Aggregates referenced through F() expressions are resolved correctly."""
        # Test that aggregates are spotted correctly from F objects.
        # Note that Adrian's age is 34 in the fixtures, and he has one book
        # so both conditions match one author.
        qs = Author.objects.annotate(Count('book')).filter(
            Q(name='Peter Norvig') | Q(age=F('book__count') + 33)
        ).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Adrian Holovaty', 'Peter Norvig'],
            lambda b: b.name
        )
    def test_ticket_11293(self):
        """A Q on a plain field OR-ed with a Q on an aggregate filters correctly (#11293)."""
        q1 = Q(price__gt=50)
        q2 = Q(authors__count__gt=1)
        query = Book.objects.annotate(Count('authors')).filter(
            q1 | q2).order_by('pk')
        self.assertQuerysetEqual(
            query, [1, 4, 5, 6],
            lambda b: b.pk)
    def test_ticket_11293_q_immutable(self):
        """
        Check that splitting a q object to parts for where/having doesn't alter
        the original q-object.
        """
        q1 = Q(isbn='')
        q2 = Q(authors__count__gt=1)
        query = Book.objects.annotate(Count('authors'))
        query.filter(q1 | q2)
        # q2 must still hold exactly its one original child condition.
        self.assertEqual(len(q2.children), 1)
    def test_fobj_group_by(self):
        """
        Check that an F() object referring to related column works correctly
        in group by.
        """
        qs = Book.objects.annotate(
            acount=Count('authors')
        ).filter(
            # Compare the annotation against a related (grouped) column.
            acount=F('publisher__num_awards')
        )
        self.assertQuerysetEqual(
            qs, ['Sams Teach Yourself Django in 24 Hours'],
            lambda b: b.name)
    def test_annotate_reserved_word(self):
        """
        Regression #18333 - Ensure annotated column name is properly quoted.
        """
        # 'select' is an SQL reserved word, so it must be quoted in the query.
        vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select'))
        self.assertEqual(vals, {
            'select__sum': 10,
            'select__avg': Approximate(1.666, places=2),
        })
    def test_annotate_on_relation(self):
        """An aggregate annotation and an F() over a relation can be combined on get()."""
        book = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name')).get(pk=self.b1.pk)
        self.assertEqual(book.avg_price, 30.00)
        self.assertEqual(book.publisher_name, "Apress")
    def test_aggregate_on_relation(self):
        """aggregate() over a relation succeeds on a queryset that already has an annotation."""
        # A query with an existing annotation aggregation on a relation should
        # succeed.
        qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
            publisher_awards=Sum('publisher__num_awards')
        )
        self.assertEqual(qs['publisher_awards'], 30)
    def test_annotate_distinct_aggregate(self):
        """distinct() on values() is applied before the aggregate is computed."""
        # There are three books with rating of 4.0 and two of the books have
        # the same price. Hence, the distinct removes one rating of 4.0
        # from the results.
        vals1 = Book.objects.values('rating', 'price').distinct().aggregate(result=Sum('rating'))
        vals2 = Book.objects.aggregate(result=Sum('rating') - Value(4.0))
        self.assertEqual(vals1, vals2)
class JoinPromotionTests(TestCase):
    """Tests for when joins are (and are not) promoted to LEFT OUTER JOIN."""
    def test_ticket_21150(self):
        """A nullable select_related join survives annotation without breaking (#21150)."""
        b = Bravo.objects.create()
        c = Charlie.objects.create(bravo=b)
        qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
        self.assertQuerysetEqual(
            qs, [c], lambda x: x)
        self.assertIs(qs[0].alfa, None)
        a = Alfa.objects.create()
        c.alfa = a
        c.save()
        # Force re-evaluation
        qs = qs.all()
        self.assertQuerysetEqual(
            qs, [c], lambda x: x)
        self.assertEqual(qs[0].alfa, a)
    def test_existing_join_not_promoted(self):
        """A join already constrained by a filter stays INNER regardless of ordering."""
        # No promotion for existing joins
        qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
        self.assertIn(' INNER JOIN ', str(qs.query))
        # Also, the existing join is unpromoted when doing filtering for already
        # promoted join.
        qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
        self.assertIn(' INNER JOIN ', str(qs.query))
        # But, as the join is nullable first use by annotate will be LOUTER
        qs = Charlie.objects.annotate(Count('alfa__name'))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
    def test_non_nullable_fk_not_promoted(self):
        """Annotating over a non-nullable FK keeps an INNER JOIN."""
        qs = Book.objects.annotate(Count('contact__name'))
        self.assertIn(' INNER JOIN ', str(qs.query))
| bsd-3-clause |
nvoron23/avos | openstack_dashboard/dashboards/project/data_processing/nodegroup_templates/tabs.py | 12 | 3162 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import network
from openstack_dashboard.api import nova
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class GeneralTab(tabs.Tab):
    """Detail tab showing a node group template's general properties."""
    name = _("General Info")
    slug = "nodegroup_template_details_tab"
    template_name = (
        "project/data_processing.nodegroup_templates/_details.html")
    def get_context_data(self, request):
        """Fetch the template, its flavor and its floating-IP pool name for display."""
        template_id = self.tab_group.kwargs['template_id']
        try:
            template = saharaclient.nodegroup_template_get(
                request, template_id)
        except Exception:
            # NOTE(review): on failure `template` becomes a dict, so the
            # attribute accesses below (template.flavor_id,
            # template.floating_ip_pool) would raise -- confirm the view
            # redirect triggered by exceptions.handle() makes them unreachable.
            template = {}
            exceptions.handle(request,
                              _("Unable to fetch node group template."))
        try:
            flavor = nova.flavor_get(request, template.flavor_id)
        except Exception:
            flavor = {}
            exceptions.handle(request,
                              _("Unable to fetch flavor for template."))
        floating_ip_pool_name = None
        if template.floating_ip_pool:
            try:
                floating_ip_pool_name = self._get_floating_ip_pool_name(
                    request, template.floating_ip_pool)
            except Exception:
                exceptions.handle(request,
                                  _("Unable to fetch floating ip pools."))
        return {"template": template, "flavor": flavor,
                "floating_ip_pool_name": floating_ip_pool_name}
    def _get_floating_ip_pool_name(self, request, pool_id):
        """Resolve a pool id to its name; fall back to the id when not found."""
        pools = [pool for pool in network.floating_ip_pools_list(
            request) if pool.id == pool_id]
        return pools[0].name if pools else pool_id
class ConfigsTab(tabs.Tab):
    """Detail tab showing a node group template's service configurations."""
    name = _("Service Configurations")
    slug = "nodegroup_template_service_configs_tab"
    template_name = (
        "project/data_processing.nodegroup_templates/_service_confs.html")
    def get_context_data(self, request):
        """Fetch the node group template for rendering; fall back to {} on error."""
        template_id = self.tab_group.kwargs['template_id']
        try:
            template = saharaclient.nodegroup_template_get(
                request, template_id)
        except Exception:
            template = {}
            exceptions.handle(request,
                              _("Unable to fetch node group template."))
        return {"template": template}
class NodegroupTemplateDetailsTabs(tabs.TabGroup):
    """Tab group combining the general-info and service-configuration tabs."""
    slug = "nodegroup_template_details"
    tabs = (GeneralTab, ConfigsTab, )
    # Keep the selected tab across page reloads.
    sticky = True
| apache-2.0 |
JackKelly/babysitter | babysitter/babysitter_tests.py | 1 | 3426 | from __future__ import print_function
import babysitter
import unittest
import StringIO
import datetime
class TestLoadConfig(unittest.TestCase):
    """Unit tests for babysitter's checkers (File, Process, DiskSpaceRemaining)
    and the Manager heartbeat logic."""
    def setUp(self):
        # TODO: setup logger if necessary
        self.manager = babysitter.Manager()
    def test_file(self):
        """A File checker stores its name/timeout and starts in the OK state."""
        self.manager.append(babysitter.File(name="/tmp", timeout=1000000))
        self.assertIsInstance(self.manager.checkers[0], babysitter.File)
        self.assertEqual(self.manager.checkers[0].name, '/tmp')
        self.assertEqual(self.manager.checkers[0].timeout, 1000000)
        self.assertTrue(self.manager.checkers[0].state() == babysitter.OK)
    def test_process(self):
        """A Process checker stores its name/restart command and starts OK."""
        self.manager.append(babysitter.Process(name="init", restart_command="sudo service init restart"))
        self.assertIsInstance(self.manager.checkers[0], babysitter.Process)
        self.assertEqual(self.manager.checkers[0].name, 'init')
        self.assertEqual(self.manager.checkers[0].restart_command,
                         'sudo service init restart')
        self.assertTrue(self.manager.checkers[0].state() == babysitter.OK)
    def test_disk_space(self):
        """A DiskSpaceRemaining checker stores its threshold/path and starts OK."""
        self.manager.append(babysitter.DiskSpaceRemaining(threshold=20, path="/"))
        self.assertIsInstance(self.manager.checkers[0], babysitter.DiskSpaceRemaining)
        self.assertEqual(self.manager.checkers[0].threshold, 20)
        self.assertEqual(self.manager.checkers[0].path, "/")
        self.assertTrue(self.manager.checkers[0].state() == babysitter.OK)
    def test_time_until_full(self):
        """Faked usage history yields the expected disk-space decay rate."""
        self.manager.append(babysitter.DiskSpaceRemaining(threshold=20, path="/"))
        # Fake parameters so it looks like we're using 0.1MB per second
        self.manager.checkers[0].initial_space_remaining = \
            self.manager.checkers[0].available_space() + 0.1
        self.manager.checkers[0].initial_time = \
            datetime.datetime.now() - datetime.timedelta(seconds=1)
        self.assertAlmostEqual(self.manager.checkers[0].space_decay_rate(), -0.1, 1)
        print(self.manager.checkers[0])
    def test_heartbeat(self):
        """Heartbeat settings are stored and the send logic works."""
        self.manager.heartbeat.hour = 8
        self.manager.heartbeat.cmd = "ls"
        self.manager.heartbeat.html_file = "index.html"
        self.assertEqual(self.manager.heartbeat.hour, 8)
        self.assertEqual(self.manager.heartbeat.cmd, "ls")
        self.assertEqual(self.manager.heartbeat.html_file, "index.html")
        self.assertEqual(self.manager.heartbeat.last_checked, datetime.datetime.now().hour)
        self._run_heartbeat_tests()
    def test_heartbeat_just_hour(self):
        """Heartbeat works when only the hour is configured."""
        self.manager.heartbeat.hour = 8
        self.assertEqual(self.manager.heartbeat.hour, 8)
        self._run_heartbeat_tests()
    def _run_heartbeat_tests(self):
        """Shared assertions: a heartbeat is due exactly once per hour boundary."""
        # test need_to_send by mocking up times
        self.manager.heartbeat.hour = datetime.datetime.now().hour
        self.manager.heartbeat.last_checked = datetime.datetime.now().hour-1
        self.assertTrue( self.manager._need_to_send_heartbeat() )
        self.assertFalse( self.manager._need_to_send_heartbeat() )
        # test _send_heartbeat
        self.manager._send_heartbeat()
    def test_none(self):
        """With no heartbeat configured, nothing is due to be sent."""
        self.assertFalse( self.manager._need_to_send_heartbeat() )
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| mit |
asi1024/competitive-library | cpp/docs/title.py | 2 | 3276 | #!/usr/bin/env python
import collections
import json
import os
max_src_len = 3
def category(path, name, verifier):
    """Print a Markdown table section for one algorithm category.

    For every ``.hpp``/``.cpp`` file directly under *path*, emit a table row
    linking to the file, whether it is verified (i.e. appears as a key in
    *verifier*, a mapping from header path to the list of sources including
    it) and links to at most ``max_src_len`` verifying sources.  Prints
    nothing when *path* does not exist or contains no files.
    """
    def ext(fname):
        # File extension (text after the last dot).
        return fname.split('.')[-1]
    def extract_ext(fname):
        # File name with its extension stripped.
        return '.'.join(fname.split('.')[:-1])
    def get_relpath(target, start):
        return os.path.normpath(os.path.relpath(target, start))
    def sort_rank(src_name):
        # Order verifying sources: tests first, then .cpp, then the rest.
        if os.path.dirname(src_name).find('tests') != -1:
            return 0
        elif ext(src_name) == 'cpp':
            return 1
        else:
            return 2
    def sort_src(src_list):
        ranked = sorted((sort_rank(src), src) for src in src_list)
        return [src for _, src in ranked]
    try:
        files = [f.strip() for f in os.listdir(path)]
    except FileNotFoundError:
        # BUG FIX: the original raised/caught ``os.FileNotFoundError``, an
        # attribute that does not exist on the os module, so a missing
        # directory crashed with AttributeError instead of being skipped.
        return
    if not files:
        # Empty category: print nothing (original intended this via the same
        # broken raise/except pair).
        return
    # Headers before sources, alphabetical within each group.
    files_ext = [(0 if ext(f) == 'hpp' else 1, f) for f in files
                 if ext(f) in ('hpp', 'cpp')]
    files_ext.sort()
    print('## ' + name)
    print('')
    print('| Algorithm | Verified | AOJ Problems |')
    print('|:---------:|:--------:|:-------------|')
    for _, fname in files_ext:
        algorithm = '[{}](./{}/{})'.format(
            fname, get_relpath(path, 'cpp'), extract_ext(fname))
        fpath = path + '/' + fname
        if fpath in verifier:
            validated = '<font color="ForestGreen">Yes</font>'
            src_list = ['[{}](./{})'.format(
                os.path.basename(src_path),
                extract_ext(get_relpath(src_path, 'cpp')))
                for src_path in sort_src(verifier[fpath])]
            if len(src_list) > max_src_len:
                src_str = '<br>'.join(src_list[:max_src_len]) + ' etc...'
            else:
                src_str = '<br>'.join(src_list)
        else:
            validated = '<font color="Red">No</font>'
            src_str = ''
        print('| {} | {} | {} |'.format(algorithm, validated, src_str))
    print('')
def get_verifier_dict():
    """Walk cpp/tests and map each included header path to the list of
    source files that (transitively) include it."""
    visited = set()
    includes = {}
    def scan_file(path):
        # Parse one source file once, following its quoted #include lines.
        if path in visited:
            return
        visited.add(path)
        with open(path) as handle:
            for line in handle:
                line = line.strip()
                if not line.startswith('#include') or '"' not in line:
                    continue
                relpath = line.split('"')[1]
                key = os.path.normpath(os.path.dirname(path) + '/' + relpath)
                includes.setdefault(key, []).append(path)
                scan_file(key)
    def scan_dir(path):
        # Recurse into subdirectories; scan every .cpp/.hpp file found.
        for fname in os.listdir(path):
            full = path + '/' + fname
            if os.path.isdir(full):
                scan_dir(full)
            elif fname.endswith(('.cpp', '.hpp')):
                scan_file(full)
    scan_dir('cpp/tests')
    return includes
if __name__ == '__main__':
    # Emit the front matter expected by the docs generator.
    print('---')
    print('title: C++')
    print('---')
    # Preserve the key order of TITLE.json (category -> display title).
    decoder = json.JSONDecoder(object_pairs_hook=collections.OrderedDict)
    # BUG FIX: the decoded mapping was previously bound to the name `json`,
    # shadowing the imported json module; use a distinct name and a context
    # manager instead of a manual close.
    with open('cpp/include/TITLE.json', 'r') as f:
        titles = decoder.decode(f.read())
    verifier_dict = get_verifier_dict()
    for key, value in titles.items():
        category('cpp/include/{}'.format(key), value, verifier_dict)
| mit |
zsoltdudas/lis-tempest | tempest/lib/services/compute/versions_client.py | 5 | 2531 | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_serialization import jsonutils as json
from six.moves import urllib
from tempest.lib.api_schema.response.compute.v2_1 import versions as schema
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class VersionsClient(base_compute_client.BaseComputeClient):
    """Client for the compute API's version-discovery endpoints."""
    def _get_base_version_url(self):
        """Return the service endpoint with the API version component removed."""
        # NOTE: The URL which is got from keystone's catalog contains
        # API version and project-id like "/app-name/v2/{project-id}" or
        # "/v2/{project-id}", but we need to access the URL which doesn't
        # contain API version for getting API versions. For that, here
        # should use raw_request() instead of get().
        endpoint = self.base_url
        url = urllib.parse.urlsplit(endpoint)
        # Keep only the path prefix before the first "/vN" or "/vN.M" segment.
        new_path = re.split(r'(^|/)+v\d+(\.\d+)?', url.path)[0]
        url = list(url)
        url[2] = new_path + '/'
        return urllib.parse.urlunsplit(url)
    def list_versions(self):
        """List all API versions advertised at the service root."""
        version_url = self._get_base_version_url()
        resp, body = self.raw_request(version_url, 'GET')
        body = json.loads(body)
        self.validate_response(schema.list_versions, resp, body)
        return rest_client.ResponseBody(resp, body)
    def get_version_by_url(self, version_url):
        """Get the version document by url.
        This gets the version document for a url, useful in testing
        the contents of things like /v2/ or /v2.1/ in Nova. That
        controller needs authenticated access, so we have to get
        ourselves a token before making the request.
        """
        # we need a token for this request
        resp, body = self.raw_request(version_url, 'GET',
                                      {'X-Auth-Token': self.token})
        body = json.loads(body)
        self.validate_response(schema.get_one_version, resp, body)
        return rest_client.ResponseBody(resp, body)
| apache-2.0 |
tealover/nova | nova/api/openstack/compute/plugins/v3/fping.py | 26 | 4894 | # Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
from oslo_config import cfg
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.i18n import _
from nova import utils
# Extension alias used for the API resource path and policy checks.
ALIAS = "os-fping"
authorize = extensions.os_compute_authorizer(ALIAS)
CONF = cfg.CONF
# Reuse the fping_path option declared by the v2 contrib fping module.
CONF.import_opt('fping_path', 'nova.api.openstack.compute.contrib.fping')
class FpingController(wsgi.Controller):
    """API controller reporting instance reachability via the fping utility."""
    def __init__(self, network_api=None):
        self.compute_api = compute.API(skip_policy_check=True)
        self.last_call = {}
    def check_fping(self):
        """Raise 503 unless the configured fping binary exists and is executable."""
        if not os.access(CONF.fping_path, os.X_OK):
            raise exc.HTTPServiceUnavailable(
                explanation=_("fping utility is not found."))
    @staticmethod
    def fping(ips):
        """Run fping against *ips* and return the set of addresses reported alive."""
        fping_ret = utils.execute(CONF.fping_path, *ips,
                                  check_exit_code=False)
        if not fping_ret:
            return set()
        alive_ips = set()
        # fping output is one "<ip> is alive"/"<ip> is unreachable" per line.
        for line in fping_ret[0].split("\n"):
            ip = line.split(" ", 1)[0]
            if "alive" in line:
                alive_ips.add(ip)
        return alive_ips
    @staticmethod
    def _get_instance_ips(context, instance):
        """Collect all fixed and floating IP addresses of one instance."""
        ret = []
        for network in common.get_networks_for_instance(
                context, instance).values():
            all_ips = itertools.chain(network["ips"], network["floating_ips"])
            ret += [ip["address"] for ip in all_ips]
        return ret
    @extensions.expected_errors(503)
    def index(self, req):
        """Report aliveness for all visible instances.

        Supports ``all_tenants``, ``include`` and ``exclude`` query
        parameters; include/exclude are comma-separated instance UUIDs.
        """
        context = req.environ["nova.context"]
        search_opts = dict(deleted=False)
        if "all_tenants" in req.GET:
            authorize(context, action='all_tenants')
        else:
            authorize(context)
            if context.project_id:
                search_opts["project_id"] = context.project_id
            else:
                search_opts["user_id"] = context.user_id
        self.check_fping()
        include = req.GET.get("include", None)
        if include:
            include = set(include.split(","))
            exclude = set()
        else:
            include = None
            exclude = req.GET.get("exclude", None)
            if exclude:
                exclude = set(exclude.split(","))
            else:
                exclude = set()
        instance_list = self.compute_api.get_all(
            context, search_opts=search_opts, want_objects=True)
        ip_list = []
        instance_ips = {}
        instance_projects = {}
        for instance in instance_list:
            uuid = instance.uuid
            if uuid in exclude or (include is not None and
                                   uuid not in include):
                continue
            ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
            instance_ips[uuid] = ips
            instance_projects[uuid] = instance.project_id
            ip_list += ips
        # One fping run over all collected addresses, then per instance an
        # instance counts as alive if any of its addresses responded.
        alive_ips = self.fping(ip_list)
        res = []
        for instance_uuid, ips in six.iteritems(instance_ips):
            res.append({
                "id": instance_uuid,
                "project_id": instance_projects[instance_uuid],
                "alive": bool(set(ips) & alive_ips),
            })
        return {"servers": res}
    @extensions.expected_errors((404, 503))
    def show(self, req, id):
        """Report aliveness for a single instance identified by *id*."""
        context = req.environ["nova.context"]
        authorize(context)
        self.check_fping()
        instance = common.get_instance(self.compute_api, context, id)
        ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
        alive_ips = self.fping(ips)
        return {
            "server": {
                "id": instance.uuid,
                "project_id": instance.project_id,
                "alive": bool(set(ips) & alive_ips),
            }
        }
class Fping(extensions.V3APIExtensionBase):
    """Fping Management Extension."""
    name = "Fping"
    alias = ALIAS
    version = 1
    def get_resources(self):
        # Expose the controller under the os-fping resource path.
        res = extensions.ResourceExtension(ALIAS, FpingController())
        return [res]
    def get_controller_extensions(self):
        # This extension does not extend any existing controllers.
        return []
| apache-2.0 |
Laimiux/mydeatree | django/contrib/gis/geos/prototypes/predicates.py | 623 | 1777 | """
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
## Binary & unary predicate functions ##
def binary_predicate(func, *args):
    """Configure *func* as a GEOS binary predicate on two geometry pointers.

    Any extra ctypes argument types (e.g. c_double, c_char_p) are appended
    after the two geometry pointers.
    """
    func.argtypes = [GEOM_PTR, GEOM_PTR] + list(args)
    func.restype = c_char
    func.errcheck = check_predicate
    return func
def unary_predicate(func):
    "For GEOS unary predicate functions."
    # A single geometry pointer in, a char (checked by check_predicate) out.
    func.argtypes = [GEOM_PTR]
    func.restype = c_char
    func.errcheck = check_predicate
    return func
## Unary Predicates ##
# Each binding takes one geometry pointer.
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))
## Binary Predicates ##
# Each binding takes two geometry pointers; EqualsExact additionally takes a
# double tolerance and RelatePattern a DE-9IM pattern string.
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
| bsd-3-clause |
mattcongy/itshop | docker-images/taigav2/taiga-back/taiga/export_import/services/__init__.py | 2 | 1144 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This makes all code that import services works and
# is not the baddest practice ;)
from .render import render_project
from . import render
from .store import store_project_from_dict
from . import store
| mit |
neumerance/cloudloon2 | openstack_dashboard/usage/views.py | 6 | 2072 | import logging
from horizon import tables
from openstack_dashboard.usage import base
LOG = logging.getLogger(__name__)
class UsageView(tables.DataTableView):
    """Base view rendering usage summaries as an HTML table or CSV download.

    Subclasses must set ``usage_class`` to a BaseUsage subclass; the output
    format is selected by the ``format`` GET parameter ('html' or 'csv').
    """
    usage_class = None
    show_terminated = True
    def __init__(self, *args, **kwargs):
        super(UsageView, self).__init__(*args, **kwargs)
        if not issubclass(self.usage_class, base.BaseUsage):
            raise AttributeError("You must specify a usage_class attribute "
                                 "which is a subclass of BaseUsage.")
    def get_template_names(self):
        """Return the CSV template when ?format=csv, else the HTML template."""
        if self.request.GET.get('format', 'html') == 'csv':
            return ".".join((self.template_name.rsplit('.', 1)[0], 'csv'))
        return self.template_name
    def get_content_type(self):
        """Return the MIME type matching the requested output format."""
        if self.request.GET.get('format', 'html') == 'csv':
            return "text/csv"
        return "text/html"
    def get_data(self):
        """Build the usage summary for the requested (or current) project."""
        project_id = self.kwargs.get('project_id', self.request.user.tenant_id)
        self.usage = self.usage_class(self.request, project_id)
        self.usage.summarize(*self.usage.get_date_range())
        self.usage.get_limits()
        self.kwargs['usage'] = self.usage
        return self.usage.usage_list
    def get_context_data(self, **kwargs):
        context = super(UsageView, self).get_context_data(**kwargs)
        context['table'].kwargs['usage'] = self.usage
        context['form'] = self.usage.form
        context['usage'] = self.usage
        return context
    def render_to_response(self, context, **response_kwargs):
        """Render as CSV (with a download filename) or as a regular HTML page."""
        if self.request.GET.get('format', 'html') == 'csv':
            render_class = self.csv_response_class
            response_kwargs.setdefault("filename", "usage.csv")
        else:
            render_class = self.response_class
        resp = render_class(request=self.request,
                            template=self.get_template_names(),
                            context=context,
                            content_type=self.get_content_type(),
                            **response_kwargs)
        return resp
| apache-2.0 |
ekoeppen/panstamp-python | pyswap/src/swap/__init__.py | 4 | 1099 | #########################################################################
#
# __init__
#
# Copyright (c) 2011 Daniel Berenguer <dberenguer@usapiens.com>
#
# This file is part of the panStamp project.
#
# panStamp is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# panStamp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with panLoader; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
#
#########################################################################
__author__="Daniel Berenguer"
__date__ ="$Feb 21, 2012"
#########################################################################
| gpl-2.0 |
eBay/restcommander | play-1.2.4/python/Lib/hotshot/__init__.py | 215 | 2670 | """High-perfomance logging profiler, mostly written in C."""
import _hotshot
from _hotshot import ProfilerError
from warnings import warnpy3k as _warnpy3k
_warnpy3k("The 'hotshot' module is not supported in 3.x, "
"use the 'profile' module instead.", stacklevel=2)
class Profile:
    """Thin wrapper around the C ``_hotshot.profiler`` object, mirroring the
    interface of ``profile.Profile``."""
    def __init__(self, logfn, lineevents=0, linetimings=1):
        # Normalize the flags to 0/1; line timings require line events.
        self.lineevents = lineevents and 1 or 0
        self.linetimings = (linetimings and lineevents) and 1 or 0
        self._prof = p = _hotshot.profiler(
            logfn, self.lineevents, self.linetimings)
        # Attempt to avoid confusing results caused by the presence of
        # Python wrappers around these functions, but only if we can
        # be sure the methods have not been overridden or extended.
        if self.__class__ is Profile:
            self.close = p.close
            self.start = p.start
            self.stop = p.stop
            self.addinfo = p.addinfo
    def close(self):
        """Close the logfile and terminate the profiler."""
        self._prof.close()
    def fileno(self):
        """Return the file descriptor of the profiler's log file."""
        return self._prof.fileno()
    def start(self):
        """Start the profiler."""
        self._prof.start()
    def stop(self):
        """Stop the profiler."""
        self._prof.stop()
    def addinfo(self, key, value):
        """Add an arbitrary labelled value to the profile log."""
        self._prof.addinfo(key, value)
    # These methods offer the same interface as the profile.Profile class,
    # but delegate most of the work to the C implementation underneath.
    def run(self, cmd):
        """Profile an exec-compatible string in the script
        environment.
        The globals from the __main__ module are used as both the
        globals and locals for the script.
        """
        import __main__
        dict = __main__.__dict__
        return self.runctx(cmd, dict, dict)
    def runctx(self, cmd, globals, locals):
        """Evaluate an exec-compatible string in a specific
        environment.
        The string is compiled before profiling begins.
        """
        code = compile(cmd, "<string>", "exec")
        self._prof.runcode(code, globals, locals)
        return self
    def runcall(self, func, *args, **kw):
        """Profile a single call of a callable.
        Additional positional and keyword arguments may be passed
        along; the result of the call is returned, and exceptions are
        allowed to propagate cleanly, while ensuring that profiling is
        disabled on the way out.
        """
        return self._prof.runcall(func, args, kw)
| apache-2.0 |
vijeth-aradhya/coala-bears | bears/c_languages/ClangComplexityBear.py | 23 | 4936 | from clang.cindex import Index, CursorKind
from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange
from coalib.bearlib import deprecate_settings
from bears.c_languages.ClangBear import clang_available, ClangBear
class ClangComplexityBear(LocalBear):
    """
    Calculates cyclomatic complexity of each function and displays it to the
    user.
    """
    LANGUAGES = ClangBear.LANGUAGES
    REQUIREMENTS = ClangBear.REQUIREMENTS
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
    LICENSE = 'AGPL-3.0'
    CAN_DETECT = {'Complexity'}
    check_prerequisites = classmethod(clang_available)
    # AST cursor kinds that open a new decision branch and therefore
    # increase cyclomatic complexity by one.
    _decisive_cursor_kinds = {
        CursorKind.IF_STMT, CursorKind.WHILE_STMT, CursorKind.FOR_STMT,
        CursorKind.DEFAULT_STMT, CursorKind.CASE_STMT}
    def function_key_points(self, cursor, top_function_level=False):
        """
        Calculates number of function's decision points and exit points.

        Walks the AST below ``cursor`` recursively, counting decision
        nodes (see ``_decisive_cursor_kinds``) and ``return`` statements.

        :param cursor:             Clang cursor to inspect.
        :param top_function_level: Whether cursor is in the top level of
                                   the function.
        :return:                   Tuple of ``(decisions, exits)``.
        """
        decisions, exits = 0, 0
        for child in cursor.get_children():
            if child.kind in self._decisive_cursor_kinds:
                decisions += 1
            elif child.kind == CursorKind.RETURN_STMT:
                exits += 1
                if top_function_level:
                    # There is no point to move forward, so just return.
                    # (A top-level return ends the function body.)
                    return decisions, exits
            child_decisions, child_exits = self.function_key_points(child)
            decisions += child_decisions
            exits += child_exits
        if top_function_level:
            # Implicit return statement.
            exits += 1
        return decisions, exits
    def complexities(self, cursor, filename):
        """
        Calculates cyclomatic complexities of functions.

        Generator yielding ``(cursor, complexity)`` for every function
        declaration found in ``filename``.
        """
        file = cursor.location.file
        if file is not None and file.name != filename:
            # There is nothing to do in another file.
            return
        if cursor.kind == CursorKind.FUNCTION_DECL:
            # First non-parameter child is the function body (if any);
            # declarations without a body yield nothing.
            child = next((child for child in cursor.get_children()
                          if child.kind != CursorKind.PARM_DECL),
                         None)
            if child:
                decisions, exits = self.function_key_points(child, True)
                # Standard formula: E - N + 2 reduces to decisions-exits+2;
                # clamp at 1 for degenerate cases.
                complexity = max(1, decisions - exits + 2)
                yield cursor, complexity
        else:
            for child in cursor.get_children():
                yield from self.complexities(child, filename)
    @deprecate_settings(cyclomatic_complexity='max_complexity')
    def run(self, filename, file, cyclomatic_complexity: int=8):
        """
        Check for all functions if they are too complicated using the
        cyclomatic complexity metric.

        You can read more about this metric at
        <https://www.wikiwand.com/en/Cyclomatic_complexity>.

        :param cyclomatic_complexity: Maximum cyclomatic complexity that is
                                      considered to be normal. The value of 10 had
                                      received substantial corroborating evidence.
                                      But the general recommendation: "For each
                                      module, either limit cyclomatic complexity to
                                      [the agreed-upon limit] or provide a written
                                      explanation of why the limit was exceeded."
        """
        root = Index.create().parse(filename).cursor
        for cursor, complexity in self.complexities(root, filename):
            if complexity > cyclomatic_complexity:
                affected_code = (SourceRange.from_clang_range(cursor.extent),)
                yield Result(
                    self,
                    "The function '{function}' should be simplified. Its "
                    'cyclomatic complexity is {complexity} which exceeds '
                    'maximal recommended value '
                    'of {rec_value}.'.format(
                        function=cursor.displayname,
                        complexity=complexity,
                        rec_value=cyclomatic_complexity),
                    affected_code=affected_code,
                    additional_info=(
                        'The cyclomatic complexity is a metric that measures '
                        'how complicated a function is by counting branches '
                        'and exits of each function.\n\n'
                        'Your function seems to be complicated and should be '
                        'refactored so that it can be understood by other '
                        'people easily.\n\nSee '
                        '<http://www.wikiwand.com/en/Cyclomatic_complexity>'
                        ' for more information.'))
| agpl-3.0 |
VishvajitP/django-extensions | django_extensions/management/commands/sqlcreate.py | 27 | 3176 | import socket
import sys
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.utils import signalcommand
class Command(BaseCommand):
    # Django management command that prints the SQL needed to create the
    # configured database (and, for some backends, the database user).
    option_list = BaseCommand.option_list + (
        make_option('-R', '--router', action='store',
                    dest='router', default='default',
                    help='Use this router-database other then defined in settings.py'),
        make_option('-D', '--drop', action='store_true',
                    dest='drop', default=False,
                    help='If given, includes commands to drop any existing user and database.'),
    )
    help = """Generates the SQL to create your database for you, as specified in settings.py
    The envisioned use case is something like this:
        ./manage.py sqlcreate [--router=<routername>] | mysql -u <db_administrator> -p
        ./manage.py sqlcreate [--router=<routername>] | psql -U <db_administrator> -W"""
    requires_system_checks = False
    can_import_settings = True
    @signalcommand
    def handle(self, *args, **options):
        # Resolve the requested DATABASES alias ("router" in this command's
        # terminology) and emit backend-specific SQL on stdout; warnings and
        # non-SQL notes go to stderr so the output can be piped to a client.
        router = options.get('router')
        dbinfo = settings.DATABASES.get(router)
        if dbinfo is None:
            raise CommandError("Unknown database router %s" % router)
        engine = dbinfo.get('ENGINE').split('.')[-1]
        dbuser = dbinfo.get('USER')
        dbpass = dbinfo.get('PASSWORD')
        dbname = dbinfo.get('NAME')
        dbhost = dbinfo.get('HOST')
        # NOTE(review): the MySQL GRANT below uses the local hostname
        # (dbclient), not dbhost -- presumably assuming the command runs on
        # the client machine; confirm for remote setups.
        dbclient = socket.gethostname()
        # django settings file tells you that localhost should be specified by leaving
        # the DATABASE_HOST blank
        if not dbhost:
            dbhost = 'localhost'
        if engine == 'mysql':
            sys.stderr.write("""-- WARNING!: https://docs.djangoproject.com/en/dev/ref/databases/#collation-settings
-- Please read this carefully! Collation will be set to utf8_bin to have case-sensitive data.
""")
            print("CREATE DATABASE %s CHARACTER SET utf8 COLLATE utf8_bin;" % dbname)
            print("GRANT ALL PRIVILEGES ON %s.* to '%s'@'%s' identified by '%s';" % (
                dbname, dbuser, dbclient, dbpass
            ))
        elif engine == 'postgresql_psycopg2':
            if options.get('drop'):
                print("DROP DATABASE IF EXISTS %s;" % (dbname,))
                print("DROP USER IF EXISTS %s;" % (dbuser,))
            print("CREATE USER %s WITH ENCRYPTED PASSWORD '%s' CREATEDB;" % (dbuser, dbpass))
            print("CREATE DATABASE %s WITH ENCODING 'UTF-8' OWNER \"%s\";" % (dbname, dbuser))
            print("GRANT ALL PRIVILEGES ON DATABASE %s TO %s;" % (dbname, dbuser))
        elif engine == 'sqlite3':
            sys.stderr.write("-- manage.py syncdb will automatically create a sqlite3 database file.\n")
        else:
            # CREATE DATABASE is not SQL standard, but seems to be supported by most.
            sys.stderr.write("-- Don't know how to handle '%s' falling back to SQL.\n" % engine)
            print("CREATE DATABASE %s;" % dbname)
            print("GRANT ALL PRIVILEGES ON DATABASE %s to %s" % (dbname, dbuser))
| mit |
zulumarketing/html2pdf | xhtml2pdf/paragraph.py | 20 | 23868 | #!/bin/env/python
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A paragraph class to be used with ReportLab Platypus.
TODO
====
- Bullets
- Weblinks and internal links
- Borders and margins (Box)
- Underline, Background, Strike
- Images
- Hyphenation
+ Alignment
+ Breakline, empty lines
+ TextIndent
- Sub and super
"""
from reportlab.lib.enums import TA_CENTER, TA_JUSTIFY, TA_LEFT, TA_RIGHT
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.platypus.flowables import Flowable
from reportlab.lib.colors import Color
class Style(dict):
    """
    Style.

    Single place for style definitions used by Paragraphs and Fragments.
    Key names follow the camelCase convention of CSS properties.
    """

    DEFAULT = {
        "textAlign": TA_LEFT,
        "textIndent": 0.0,
        "width": None,
        "height": None,
        "fontName": "Times-Roman",
        "fontSize": 10.0,
        "color": Color(0, 0, 0),
        "lineHeight": 1.5,
        "lineHeightAbsolute": None,
        "pdfLineSpacing": 0,
        "link": None,
    }

    def __init__(self, **kw):
        # Defaults first, caller-supplied overrides second.
        dict.__init__(self)
        self.update(dict(self.DEFAULT, **kw))
        # Platypus spacing attributes live on the instance, not in the dict.
        self.spaceBefore = 0
        self.spaceAfter = 0
        self.keepWithNext = False
class Box(dict):
    """
    Box.

    Handles the following styles:
        backgroundColor, backgroundImage
        paddingLeft, paddingRight, paddingTop, paddingBottom
        marginLeft, marginRight, marginTop, marginBottom
        borderLeftColor, borderLeftWidth, borderLeftStyle
        borderRightColor, borderRightWidth, borderRightStyle
        borderTopColor, borderTopWidth, borderTopStyle
        borderBottomColor, borderBottomWidth, borderBottomStyle

    Not used in inline Elements:
        paddingTop, paddingBottom
        marginTop, marginBottom
    """
    name = "box"
    def drawBox(self, canvas, x, y, w, h):
        # Paint the background and the four borders of the (x, y, w, h)
        # rectangle; canvas state is saved/restored so stroke and fill
        # settings do not leak into later drawing.
        canvas.saveState()
        # Background
        bg = self.get("backgroundColor", None)
        if bg is not None:
            # draw a filled rectangle (with no stroke) using bg color
            canvas.setFillColor(bg)
            canvas.rect(x, y, w, h, fill=1, stroke=0)
        # Borders
        def _drawBorderLine(bstyle, width, color, x1, y1, x2, y2):
            # We need width and border style to be able to draw a border
            if width and bstyle:
                # If no color for border is given, the text color is used (like defined by W3C)
                if color is None:
                    color = self.get("textColor", Color(0, 0, 0))
                # print "Border", bstyle, width, color
                if color is not None:
                    canvas.setStrokeColor(color)
                    canvas.setLineWidth(width)
                    canvas.line(x1, y1, x2, y2)
        # Left, right, top, bottom edges in turn.
        _drawBorderLine(self.get("borderLeftStyle", None),
                        self.get("borderLeftWidth", None),
                        self.get("borderLeftColor", None),
                        x, y, x, y + h)
        _drawBorderLine(self.get("borderRightStyle", None),
                        self.get("borderRightWidth", None),
                        self.get("borderRightColor", None),
                        x + w, y, x + w, y + h)
        _drawBorderLine(self.get("borderTopStyle", None),
                        self.get("borderTopWidth", None),
                        self.get("borderTopColor", None),
                        x, y + h, x + w, y + h)
        _drawBorderLine(self.get("borderBottomStyle", None),
                        self.get("borderBottomWidth", None),
                        self.get("borderBottomColor", None),
                        x, y, x + w, y)
        canvas.restoreState()
class Fragment(Box):
    """
    Fragment.

    Base class for a piece of inline content.  Keys used:
    text (string content), fontName, fontSize,
    width (width of string), height (height of string).
    """
    name = "fragment"
    # Classification flags consulted by the line-breaking algorithm.
    isSoft = False
    isText = False
    isLF = False

    def calc(self):
        # A generic fragment occupies no horizontal space by default.
        self["width"] = 0
class Word(Fragment):
    """
    A single word.
    """
    name = "word"
    isText = True

    def calc(self):
        """
        Measure the rendered width of the word.

        XXX Cache stringWidth if not accelerated?!
        """
        self["width"] = stringWidth(self["text"], self["fontName"], self["fontSize"])
class Space(Fragment):
    """
    A space between fragments; the usual place for line breaking.
    """
    name = "space"
    isSoft = True

    def calc(self):
        # A space is as wide as " " rendered in the current font.
        self["width"] = stringWidth(" ", self["fontName"], self["fontSize"])
class LineBreak(Fragment):
    """
    A forced line break.
    """
    name = "br"
    # Soft (breakable) and an explicit linefeed.
    isSoft = True
    isLF = True
class BoxBegin(Fragment):
    """Marks the opening edge of an inline box (margin/padding/border)."""
    name = "begin"

    def calc(self):
        # Left margin plus left padding; border width would be added here too.
        self["width"] = self.get("marginLeft", 0) + self.get("paddingLeft", 0)

    def draw(self, canvas, y):
        """Paint the box decoration for this line's segment of the box."""
        left = self.get("marginLeft", 0) + self["x"]
        length = self["length"] + self.get("paddingRight", 0)
        height = self["fontSize"]
        self.drawBox(canvas, left, y, length, height)
class BoxEnd(Fragment):
    """Marks the closing edge of an inline box."""
    name = "end"

    def calc(self):
        # Right margin plus right padding; border width would be added too.
        self["width"] = self.get("marginRight", 0) + self.get("paddingRight", 0)
class Image(Fragment):
    """Placeholder fragment for inline images (not implemented yet)."""
    name = "image"
class Line(list):
    """
    Container for line fragments.

    A Line is a list of fragment dicts plus layout metadata (width,
    height, style, open inline boxes).
    """
    LINEHEIGHT = 1.0
    def __init__(self, style):
        self.width = 0
        self.height = 0
        self.isLast = False
        self.style = style
        self.boxStack = []
        list.__init__(self)
    def doAlignment(self, width, alignment):
        """
        Shift fragment X positions according to the paragraph alignment
        and compute the drawn length of inline boxes on this line.
        """
        # Apply alignment (guard: nothing to align on an empty line)
        if self and alignment != TA_LEFT:
            lineWidth = self[-1]["x"] + self[-1]["width"]
            emptySpace = width - lineWidth
            if alignment == TA_RIGHT:
                for frag in self:
                    frag["x"] += emptySpace
            elif alignment == TA_CENTER:
                for frag in self:
                    frag["x"] += emptySpace / 2.0
            elif alignment == TA_JUSTIFY and not self.isLast and len(self) > 1:
                # XXX last line before split
                # BUGFIX: the len(self) > 1 guard avoids a ZeroDivisionError
                # that the original raised for a single-fragment line.
                delta = emptySpace / (len(self) - 1)
                for i, frag in enumerate(self):
                    frag["x"] += i * delta
        # Boxes: match BoxEnd markers with their BoxBegin and record the
        # horizontal length each box spans on this line.
        x = 0
        for frag in self:
            x = frag["x"] + frag["width"]
            if isinstance(frag, BoxBegin):
                self.boxStack.append(frag)
            elif isinstance(frag, BoxEnd):
                if self.boxStack:
                    frag = self.boxStack.pop()
                    frag["length"] = x - frag["x"]
        # Boxes still open extend to the end of the line.
        for frag in self.boxStack:
            frag["length"] = x - frag["x"]
    def doLayout(self, width):
        """
        Compute the line's dimensions and assign each fragment its Y
        offset within the line.  Returns the line height.
        """
        # Calculate dimensions
        self.width = width
        # BUGFIX: max() over an empty line raised ValueError; an empty line
        # simply has zero height.
        if self:
            self.height = self.lineHeight = max(
                frag.get("fontSize", 0) * self.LINEHEIGHT for frag in self)
            self.fontSize = max(frag.get("fontSize", 0) for frag in self)
        else:
            self.height = self.lineHeight = self.fontSize = 0
        # Apply line height
        y = (self.lineHeight - self.fontSize)  # / 2
        for frag in self:
            frag["y"] = y
        return self.height
    def dumpFragments(self):
        """Debugging helper: print the line's fragments on one line."""
        # BUGFIX: the original 'print(...) % value' pattern raised TypeError
        # under Python 3 because print() returns None.
        print("Line %s" % (40 * "-"))
        print(" ".join("%s" % frag.get("text", frag.name.upper())
                       for frag in self))
class Text(list):
    """
    Container for text fragments.

    Helper functions for splitting text into lines and calculating sizes
    and positions.
    """
    def __init__(self, data=None, style=None):
        # Mutable default arguments are shared between calls; use None.
        if data is None:
            data = []
        self.lines = []
        self.width = 0
        self.height = 0
        self.maxWidth = 0
        self.maxHeight = 0
        self.style = style
        list.__init__(self, data)
    def calc(self):
        """
        Calculate sizes of fragments.
        """
        for word in self:
            word.calc()
    def splitIntoLines(self, maxWidth, maxHeight, splitted=False):
        """
        Split text into lines and calculate X positions. If we need more
        space in height than available we return the rest of the text
        (as the index of the first fragment that did not fit), else None.
        """
        self.lines = []
        self.height = 0
        self.maxWidth = self.width = maxWidth
        self.maxHeight = maxHeight
        boxStack = []
        style = self.style
        x = 0
        # Start with indent in first line of text
        if not splitted:
            x = style["textIndent"]
        lenText = len(self)
        pos = 0
        while pos < lenText:
            # Reset values for new line
            posBegin = pos
            line = Line(style)
            # Re-open boxes carried over from the previous line.
            # BUGFIX: was 'copy.copy(boxStack)', but 'copy' is imported only
            # inside the __main__ block, so this raised NameError when the
            # module was imported normally; list() is the same shallow copy.
            for box in list(boxStack):
                box["x"] = 0
                line.append(BoxBegin(box))
            while pos < lenText:
                # Get fragment, its width and set X
                frag = self[pos]
                fragWidth = frag["width"]
                frag["x"] = x
                pos += 1
                # Keep in mind boxes for next lines
                if isinstance(frag, BoxBegin):
                    boxStack.append(frag)
                elif isinstance(frag, BoxEnd):
                    boxStack.pop()
                # If space or linebreak handle special way
                if frag.isSoft:
                    if frag.isLF:
                        line.append(frag)
                        break
                    # First element of line should not be a space
                    if x == 0:
                        continue
                    # Keep in mind last possible line break
                # The elements exceed the current line
                elif fragWidth + x > maxWidth:
                    break
                # Add fragment to line and update x
                x += fragWidth
                line.append(frag)
            # Remove trailing white spaces
            while line and line[-1].name in ("space", "br"):
                line.pop()
            # BUGFIX: a debug dump ran here on every layout pass (and raised
            # TypeError on Python 3); keep it available but disabled.
            # line.dumpFragments()
            self.height += line.doLayout(self.width)
            self.lines.append(line)
            # If not enough space for current line force to split
            if self.height > maxHeight:
                return posBegin
            # Reset variables
            x = 0
        # Apply alignment (guard: empty text produces no lines)
        if self.lines:
            self.lines[-1].isLast = True
        for line in self.lines:
            line.doAlignment(maxWidth, style["textAlign"])
        return None
    def dumpLines(self):
        """
        For debugging dump all lines and their content
        """
        for i, line in enumerate(self.lines):
            # BUGFIX: 'print(...) % i' raised TypeError on Python 3.
            print("Line %d:" % i)
            line.dumpFragments()
class Paragraph(Flowable):
    """
    A simple Paragraph class respecting alignment.

    Does text without tags.

    Respects only the following global style attributes:
    fontName, fontSize, leading, firstLineIndent, leftIndent,
    rightIndent, textColor, alignment.
    (spaceBefore, spaceAfter are handled by the Platypus framework.)

    :param text: Text instance holding the fragments to lay out.
    :param style: Style mapping (plus spacing attributes).
    :param debug: when true, dump layout info and draw a debug frame.
    :param splitted: true when this paragraph is the tail of a split.
    """
    def __init__(self, text, style, debug=False, splitted=False, **kwDict):
        Flowable.__init__(self)
        self.text = text
        self.text.calc()
        self.style = style
        self.text.style = style
        self.debug = debug
        self.splitted = splitted
        # More attributes
        # BUGFIX: dict.iteritems() does not exist on Python 3; items()
        # behaves identically for this purpose on both versions.
        for k, v in kwDict.items():
            setattr(self, k, v)
        # set later...
        self.splitIndex = None
    # overwritten methods from Flowable class
    def wrap(self, availWidth, availHeight):
        """
        Determine the rectangle this paragraph really needs.
        """
        # memorize available space
        self.avWidth = availWidth
        self.avHeight = availHeight
        if self.debug:
            # BUGFIX: 'print(...) % args' raised TypeError on Python 3;
            # format inside the call instead (here and below).
            print("*** wrap (%f, %f)" % (availWidth, availHeight))
        if not self.text:
            if self.debug:
                print("*** wrap (%f, %f) needed" % (0, 0))
            return 0, 0
        # Split lines
        width = availWidth
        self.splitIndex = self.text.splitIntoLines(width, availHeight)
        self.width, self.height = availWidth, self.text.height
        if self.debug:
            print("*** wrap (%f, %f) needed, splitIndex %r"
                  % (self.width, self.height, self.splitIndex))
        return self.width, self.height
    def split(self, availWidth, availHeight):
        """
        Split ourself in two paragraphs.
        """
        if self.debug:
            print("*** split (%f, %f)" % (availWidth, availHeight))
        splitted = []
        if self.splitIndex:
            text1 = self.text[:self.splitIndex]
            text2 = self.text[self.splitIndex:]
            p1 = Paragraph(Text(text1), self.style, debug=self.debug)
            p2 = Paragraph(Text(text2), self.style, debug=self.debug, splitted=True)
            splitted = [p1, p2]
            if self.debug:
                print("*** text1 %s / text %s" % (len(text1), len(text2)))
        if self.debug:
            print('*** return %s' % self.splitted)
        return splitted
    def draw(self):
        """
        Render the content of the paragraph.
        """
        # BUGFIX: 're' was only imported inside the __main__ block, so the
        # link branch below raised NameError on normal module import.
        import re
        if self.debug:
            print("*** draw")
        if not self.text:
            return
        canvas = self.canv
        style = self.style
        canvas.saveState()
        # Draw box arround paragraph for debugging
        if self.debug:
            bw = 0.5
            bc = Color(1, 1, 0)
            bg = Color(0.9, 0.9, 0.9)
            canvas.setStrokeColor(bc)
            canvas.setLineWidth(bw)
            canvas.setFillColor(bg)
            canvas.rect(
                style.leftIndent,
                0,
                self.width,
                self.height,
                fill=1,
                stroke=1)
        y = 0
        dy = self.height
        for line in self.text.lines:
            y += line.height
            for frag in line:
                # Box
                if hasattr(frag, "draw"):
                    frag.draw(canvas, dy - y)
                # Text
                if frag.get("text", ""):
                    canvas.setFont(frag["fontName"], frag["fontSize"])
                    canvas.setFillColor(frag.get("color", style["color"]))
                    canvas.drawString(frag["x"], dy - y + frag["y"], frag["text"])
                # XXX LINK
                link = frag.get("link", None)
                if link:
                    _scheme_re = re.compile('^[a-zA-Z][-+a-zA-Z0-9]+$')
                    # BUGFIX: the original unpacked into 'y', clobbering the
                    # running vertical offset and mis-placing everything
                    # drawn after a link; use dedicated local names.
                    lx, ly, lw, lh = frag["x"], dy - y, frag["width"], frag["fontSize"]
                    rect = (lx, ly, lw, lh)
                    # Python 2 only: encode unicode links to utf8 bytes.
                    # On Python 3 'str is bytes' is False, so the 'unicode'
                    # name is never evaluated (avoiding a NameError).
                    if str is bytes and isinstance(link, unicode):
                        link = link.encode('utf8')
                    parts = link.split(':', 1)
                    scheme = len(parts) == 2 and parts[0].lower() or ''
                    if _scheme_re.match(scheme) and scheme != 'document':
                        kind = scheme.lower() == 'pdf' and 'GoToR' or 'URI'
                        if kind == 'GoToR':
                            link = parts[1]
                        canvas.linkURL(link, rect, relative=1, kind=kind)
                    else:
                        if link[0] == '#':
                            link = link[1:]
                            scheme = ''
                        canvas.linkRect("", scheme != 'document' and link or parts[1], rect, relative=1)
        canvas.restoreState()
if __name__ == "__main__":
    # TODO: This should be a test, not a main!
    from reportlab.platypus import SimpleDocTemplate
    from reportlab.lib.styles import *
    from reportlab.rl_config import *
    from reportlab.lib.units import *
    import os
    # NOTE(review): 'copy' and 're' are also referenced by
    # Text.splitIntoLines and Paragraph.draw above; importing them only
    # under __main__ means those methods raise NameError when this module
    # is imported normally -- consider moving these imports to module top.
    import copy
    import re
    styles = getSampleStyleSheet()
    # One alignment per demo paragraph, cycled over in test() below.
    ALIGNMENTS = (TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY)
    TEXT = """
    Lörem ipsum dolor sit amet, consectetur adipisicing elit,
    sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
    Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi
    ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit
    in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
    Excepteur sint occaecat cupidatat non proident, sunt in culpa qui
    officia deserunt mollit anim id est laborum. Lorem ipsum dolor sit amet,
    consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore
    et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
    ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure
    dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat
    nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt
    in culpa qui officia deserunt mollit anim id est laborum. Lorem ipsum
    dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor
    incididunt ut labore et dolore magna aliqua.
    """.strip()
def textGenerator(data, fn, fs):
i = 1
for word in re.split('\s+', data):
if word:
yield Word(
text="[%d|%s]" % (i, word),
fontName=fn,
fontSize=fs
)
yield Space(
fontName=fn,
fontSize=fs
)
def createText(data, fn, fs):
text = Text(list(textGenerator(data, fn, fs)))
return text
def makeBorder(width, style="solid", color=Color(1, 0, 0)):
return dict(
borderLeftColor=color,
borderLeftWidth=width,
borderLeftStyle=style,
borderRightColor=color,
borderRightWidth=width,
borderRightStyle=style,
borderTopColor=color,
borderTopWidth=width,
borderTopStyle=style,
borderBottomColor=color,
borderBottomWidth=width,
borderBottomStyle=style
)
    def test():
        # Build a demo PDF ("test.pdf") exercising styled words, inline
        # boxes, line breaks and every alignment mode.
        doc = SimpleDocTemplate("test.pdf")
        story = []
        style = Style(fontName="Helvetica", textIndent=24.0)
        fn = style["fontName"]
        fs = style["fontSize"]
        sampleText1 = createText(TEXT[:100], fn, fs)
        sampleText2 = createText(TEXT[100:], fn, fs)
        text = Text(sampleText1 + [
            Space(
                fontName=fn,
                fontSize=fs),
            Word(
                text="TrennbarTrennbar",
                pairs=[("Trenn-", "barTrennbar")],
                fontName=fn,
                fontSize=fs),
            Space(
                fontName=fn,
                fontSize=fs),
            Word(
                text="Normal",
                color=Color(1, 0, 0),
                fontName=fn,
                fontSize=fs),
            Space(
                fontName=fn,
                fontSize=fs),
            Word(
                text="gGrößer",
                fontName=fn,
                fontSize=fs * 1.5),
            Space(
                fontName=fn,
                fontSize=fs),
            Word(
                text="Bold",
                fontName="Times-Bold",
                fontSize=fs),
            Space(
                fontName=fn,
                fontSize=fs),
            Word(
                text="jItalic",
                fontName="Times-Italic",
                fontSize=fs),
            Space(
                fontName=fn,
                fontSize=fs),
            # <span style="border: 1px solid red;">ipsum <span style="border: 1px solid green; padding: 4px; padding-left: 20px; background: yellow; margin-bottom: 8px; margin-left: 10px;">
            # Lo<font size="12pt">re</font>m</span> <span style="background:blue; height: 30px;">ipsum</span> Lorem</span>
            BoxBegin(
                fontName=fn,
                fontSize=fs,
                **makeBorder(0.5, "solid", Color(0, 1, 0))),
            Word(
                text="Lorem",
                fontName="Times-Bold",
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName="Times-Bold",
                fontSize=fs),
            Space(
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Space(
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Space(
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Space(
                fontName=fn,
                fontSize=fs),
            # Nested box with background and red border.
            BoxBegin(
                fontName=fn,
                fontSize=fs,
                backgroundColor=Color(1, 1, 0),
                **makeBorder(1, "solid", Color(1, 0, 0))),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            BoxEnd(),
            Space(
                fontName=fn,
                fontSize=fs),
            Word(
                text="Lorem",
                fontName=fn,
                fontSize=fs),
            Space(
                fontName=fn,
                fontSize=fs),
            BoxEnd(),
            LineBreak(
                fontName=fn,
                fontSize=fs),
            LineBreak(
                fontName=fn,
                fontSize=fs),
        ] + sampleText2)
        story.append(Paragraph(
            copy.copy(text),
            style,
            debug=0))
        # One paragraph per alignment mode, cycling LEFT/RIGHT/CENTER/JUSTIFY.
        for i in range(10):
            style = copy.deepcopy(style)
            style["textAlign"] = ALIGNMENTS[i % 4]
            text = createText(("(%d) " % i) + TEXT, fn, fs)
            story.append(Paragraph(
                copy.copy(text),
                style,
                debug=0))
        doc.build(story)
    test()
    # NOTE(review): "start" is a Windows shell command; this line is a no-op
    # or an error on other platforms.
    os.system("start test.pdf")
    # FIXME: Useless line?
    # createText(TEXT, styles["Normal"].fontName, styles["Normal"].fontSize)
| apache-2.0 |
jessrosenfield/pants | tests/python/pants_test/android/tasks/test_aapt_gen.py | 15 | 5264 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.android.tasks.aapt_gen import AaptGen
from pants_test.android.test_android_base import TestAndroidBase, distribution
class TestAaptGen(TestAndroidBase):
  # Unit tests for the AaptGen task (R.java generation via the aapt tool).
  @classmethod
  def task_type(cls):
    return AaptGen
  # Smoke test: the task executes without targets/options.
  def test_aapt_gen_smoke(self):
    task = self.create_task(self.context())
    task.execute()
  # The generated R.java path mirrors the binary's package name.
  def test_relative_genfile(self):
    with self.android_binary(package_name='org.pantsbuild.examples.hello') as binary:
      self.assertEqual(AaptGen._relative_genfile(binary),
                       os.path.join('org', 'pantsbuild', 'examples', 'hello', 'R.java'))
  # Different target SDKs must map to distinct synthetic jar libraries.
  # NOTE(review): assertEquals/assertNotEquals are deprecated aliases of
  # assertEqual/assertNotEqual; usage is mixed throughout this class.
  def test_create_sdk_jar_deps(self):
    with distribution() as dist:
      with self.android_binary(target_name='binary1', target_sdk='18') as binary1:
        with self.android_binary(target_name='binary2', target_sdk='19') as binary2:
          self.set_options(sdk_path=dist)
          task = self.create_task(self.context())
          targets = [binary1, binary2]
          task.create_sdk_jar_deps(targets)
          self.assertNotEquals(task._jar_library_by_sdk['19'], task._jar_library_by_sdk['18'])
  # aapt output dirs are per-SDK: different SDKs differ...
  def test_aapt_out_different_sdk(self):
    with self.android_binary(target_name='binary1', target_sdk='18') as binary1:
      with self.android_binary(target_name='binary2', target_sdk='19') as binary2:
        task = self.create_task(self.context())
        self.assertNotEqual(task.aapt_out(binary1), task.aapt_out(binary2))
  # ...and equal SDKs share the same output dir.
  def test_aapt_out_same_sdk(self):
    with self.android_binary(target_name='binary1', target_sdk='19') as binary1:
      with self.android_binary(target_name='binary2', target_sdk='19') as binary2:
        task = self.create_task(self.context())
        self.assertEquals(task.aapt_out(binary1), task.aapt_out(binary2))
  # The aapt binary is resolved under the requested build-tools version.
  def test_aapt_tool(self):
    with distribution() as dist:
      with self.android_binary() as android_binary:
        self.set_options(sdk_path=dist, build_tools_version='20.0.0')
        task = self.create_task(self.context())
        aapt_tool = task.aapt_tool(android_binary)
        self.assertEquals(os.path.basename(os.path.dirname(aapt_tool)), '20.0.0')
        self.assertEquals(os.path.basename(aapt_tool), 'aapt')
  # android.jar is resolved under the requested platform ("android-<sdk>").
  def test_android_tool(self):
    with distribution() as dist:
      with self.android_binary() as android_binary:
        self.set_options(sdk_path=dist, target_sdk='18')
        task = self.create_task(self.context())
        android_jar = task.android_jar(android_binary)
        self.assertEquals(os.path.basename(os.path.dirname(android_jar)), 'android-18')
        self.assertEquals(os.path.basename(android_jar), 'android.jar')
  def test_render_args(self):
    with distribution() as dist:
      with self.android_resources() as resources:
        with self.android_binary(dependencies=[resources]) as binary:
          self.set_options(sdk_path=dist)
          task = self.create_task(self.context())
          rendered_args = task._render_args(binary, binary.manifest, [resources.resource_dir])
          self.assertEquals(os.path.basename(rendered_args[0]), 'aapt')
  # Resource dirs closest to the binary must come first (-S order matters).
  def test_priority_order_in_render_args(self):
    with distribution() as dist:
      with self.android_resources(target_name='binary_resources') as res1:
        with self.android_resources(target_name='library_resources') as res2:
          with self.android_library(dependencies=[res2]) as library:
            with self.android_binary(dependencies=[res1, library]) as binary:
              self.set_options(sdk_path=dist)
              task = self.create_task(self.context())
              res_dirs = [res1.resource_dir, res2.resource_dir]
              rendered_args = task._render_args(binary, binary.manifest, res_dirs)
              args_string = ' '.join(rendered_args)
              self.assertIn('--auto-add-overlay -S {} -S '
                            '{}'.format(res1.resource_dir, res2.resource_dir), args_string)
  # The ignored-assets pattern is passed through verbatim.
  def test_render_args_force_ignored(self):
    with distribution() as dist:
      with self.android_resources() as resources:
        with self.android_binary(dependencies=[resources]) as binary:
          ignored = '!picasa.ini:!*~:BUILD*'
          self.set_options(sdk_path=dist, ignored_assets=ignored)
          task = self.create_task(self.context())
          rendered_args = task._render_args(binary, binary.manifest, [resources.resource_dir])
          self.assertIn(ignored, rendered_args)
  # Synthetic targets derive from the original library.
  def test_create_target(self):
    with distribution() as dist:
      with self.android_library() as library:
        with self.android_binary(dependencies=[library]) as android_binary:
          self.set_options(sdk_path=dist)
          task = self.create_task(self.context())
          targets = [android_binary]
          task.create_sdk_jar_deps(targets)
          created_target = task.create_target(android_binary, library)
          self.assertEqual(created_target.derived_from, library)
          self.assertTrue(created_target.is_synthetic)
| apache-2.0 |
abali96/Shapely | tests/threading_test.py | 10 | 1032 | import threading
from binascii import b2a_hex
def main():
    """Exercise shapely WKT/WKB round-tripping, serially or in threads."""
    num_threads = 10
    use_threads = True
    if not use_threads:
        # Run core code serially.
        # BUGFIX: runShapelyBuilding requires a thread-id argument; the
        # original called it with none, raising TypeError on this path.
        runShapelyBuilding(0)
    else:
        threads = [threading.Thread(target=runShapelyBuilding, name=str(i),
                                    args=(i,)) for i in range(num_threads)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
def runShapelyBuilding(num):
    # Load/dump a point through WKT and WKB repeatedly to exercise the GEOS
    # bindings; ``num`` identifies the calling thread in the output.
    print("%s: Running shapely tests on wkb" % num)
    import shapely.geos
    print("%s GEOS Handle: %s" % (num, shapely.geos.lgeos.geos_handle))
    import shapely.wkt
    import shapely.wkb
    p = shapely.wkt.loads("POINT (0 0)")
    print("%s WKT: %s" % (num, shapely.wkt.dumps(p)))
    wkb = shapely.wkb.dumps(p)
    print("%s WKB: %s" % (num, b2a_hex(wkb)))
    # Repeated loads stress the (shared) GEOS handle for thread-safety.
    for i in range(10):
        shapely.wkb.loads(wkb)
    print("%s GEOS Handle: %s" % (num, shapely.geos.lgeos.geos_handle))
    print("Done %s" % num)
# Script entry point.
if __name__ == '__main__':
    main()
| bsd-3-clause |
open-e/JovianDSS-Flocker | joviandss_flocker_driver/jovian_common/rest_proxy.py | 1 | 4403 | # __ _ ___ __ __
# \ \ _____ _(_) __ _ _ __ / \/ _\/ _\
# \ \/ _ \ \ / / |/ _` | '_ \ / /\ /\ \ \ \
# /\_/ / (_) \ V /| | (_| | | | |/ /_// _\ \_\ \
# \___/ \___/ \_/ |_|\__,_|_| |_/____/ \__/\__/
#
#
# Copyright (c) 2016 Open-E, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import requests
from requests.packages.urllib3 import disable_warnings
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from base64 import b64encode
from . import exception as jexc
class JovianRESTProxy(object):
    """Jovian REST API proxy.

    Sends authenticated JSON requests to a JovianDSS appliance and retries
    transient failures.
    """
    def __init__(self, LOG, config):
        """
        :param LOG: logger exposing ``debug`` and ``error``.
        :param config: config is like dict; optional keys: ``jovian_user``,
            ``jovian_password``, ``jovian_rest_send_repeats``.
        """
        disable_warnings(InsecureRequestWarning)
        self.LOG = LOG
        self.user = config.get('jovian_user', 'admin')
        self.password = config.get('jovian_password', 'admin')
        self.retry_n = config.get('jovian_rest_send_repeats', 3)
        # Self-signed certificates are the norm on these appliances.
        self.verify = False
        # BUGFIX: b64encode() requires bytes on Python 3; encode the
        # credentials first (a no-op for ASCII str on Python 2).
        credentials = '{}:{}'.format(self.user, self.password).encode('utf-8')
        self.header = {'connection': 'keep-alive',
                       'Content-Type': 'application/json',
                       'authorization': 'Basic ' +
                       b64encode(credentials).decode('utf-8')}
    def request(self, request_method, url, json_data=None):
        """Send a request, retrying up to ``retry_n`` times.

        Retries on HTTP errors and on the appliance's internal OSError
        (HTTP 500 / errno 2) responses.

        :raises jexc.JDSSRESTProxyException: when every attempt fails.
        """
        for i in range(self.retry_n):
            self.LOG.debug(
                "JovianDSS: Sending request of type {} to {}. \
                Attempt: {}.".format(request_method, url, i))
            try:
                ret = self.request_routine(url, request_method, json_data)
                # Work around a backend-internal failure (OS Fail) by
                # retrying the request.
                if ret["code"] == 500:
                    if ret["error"] is not None:
                        if ("errno" in ret["error"]) and \
                                ("class" in ret["error"]):
                            # BUGFIX: was 'is 2', an identity comparison that
                            # only works due to CPython small-int caching;
                            # use value equality.
                            if (ret["error"]["errno"] == 2) and \
                                    (ret["error"]["class"] ==
                                     "exceptions.OSError"):
                                self.LOG.error(
                                    "JovianDSS: Facing exceptions.OSError!")
                                continue
                return ret
            except requests.HTTPError as err:
                self.LOG.error("Unable to execute: {}".format(err))
                continue
            except requests.ConnectionError as err:
                # Falls through to the next attempt.
                self.LOG.error("Unable to execute: {}".format(err))
        raise jexc.JDSSRESTProxyException("Fail to execute {}, {} times in row."
                                          .format(url, self.retry_n))
    def request_routine(self, url, request_method, json_data=None):
        """Make an HTTPS request and return the results

        Returns a dict with keys ``code`` (HTTP status) and, for JSON-ish
        bodies, ``error`` and ``data`` parsed from the response.
        """
        response_obj = requests.request(request_method,
                                        url=url,
                                        headers=self.header,
                                        data=json.dumps(json_data),
                                        verify=self.verify)
        self.LOG.debug('JovianDSS: Response code: %s' %
                       response_obj.status_code)
        self.LOG.debug('JovianDSS: Response data: %s' % response_obj.text)
        ret = dict()
        ret['code'] = response_obj.status_code
        # Only attempt to interpret bodies that look like JSON objects.
        if '{' in response_obj.text and '}' in response_obj.text:
            if "error" in response_obj.text:
                ret["error"] = json.loads(response_obj.text)["error"]
            else:
                ret["error"] = None
            if "data" in response_obj.text:
                ret["data"] = json.loads(response_obj.text)["data"]
            else:
                ret["data"] = None
        return ret
| apache-2.0 |
helium/helium-commander | helium_commander/commands/label.py | 1 | 6749 | import click
from helium_commander import (
Client,
Label,
Sensor,
Element,
device_sort_option,
device_mac_option,
metadata_filter_option,
ResourceParamType
)
from helium_commander.commands import metadata, timeseries
from collections import namedtuple
pass_client = click.make_pass_decorator(Client)

# Related resources pulled in alongside each label in API responses.
label_includes = [Sensor, Element]

# Result bundle for the three label mutation actions. Each field is None
# (action not requested), [] (clear all), or a list of resolved resources.
LabelActionResources = namedtuple('LabelResourceActions',
                                  ['add', 'remove', 'replace'])


def lookup_label_action_resources(client, cls, mac=False, **kwargs):
    """Look up resources for a label.

    Reads the 'add', 'remove' and 'replace' entries from *kwargs* and
    resolves each id/representation into a *cls* instance.

    :param client: API client used for the lookups
    :param cls: resource class to resolve against (e.g. Sensor or Element)
    :param mac: whether given ids may be MAC addresses
    :return: a LabelActionResources namedtuple; see the sentinel meanings
        documented on the namedtuple above.
    """
    def _lookup(action, resources):
        id_reps = kwargs.pop(action, None)
        if not id_reps:
            return None  # No change
        if 'none' in id_reps:
            return []  # Empty out the resources
        return [cls.lookup(client, id, resources=resources, mac=mac)
                for id in id_reps]

    # Fetch the full resource list once and reuse it for all three lookups.
    all_resources = cls.all(client)
    return LabelActionResources(_lookup('add', all_resources),
                                _lookup('remove', all_resources),
                                _lookup('replace', all_resources))
# Root command group; label subcommands are registered below.
@click.group()
def cli():
    """Operations on labels of sensors.
    """
    pass
@cli.command()
@click.argument('label', required=False)
@metadata_filter_option
@pass_client
def list(client, label, **kwargs):
    """List labels.

    Lists information for a given LABEL or all labels in the
    organization.
    """
    if label:
        # A specific label was requested: resolve it with related resources.
        labels = [Label.lookup(client, label, include=label_includes)]
    else:
        # No label given: list all labels, optionally filtered by metadata.
        metadata = kwargs.get('metadata') or None
        labels = Label.where(client, include=label_includes, metadata=metadata)
    Label.display(client, labels, include=label_includes)


# Register the generic timeseries subcommand for labels (aggregate series
# only: no history and no device-level options).
cli.add_command(timeseries.cli(Label, history=False,
                               writable=False, device=False))
@cli.command()
@click.option('--sensors',
              type=ResourceParamType(metavar='SENSOR'),
              help="Add sensors to a label")
@click.option('--elements',
              type=ResourceParamType(metavar='ELEMENT'),
              help="Add elements to a label")
@click.argument('name')
@click.pass_context
def create(ctx, name, sensors, elements):
    """Create a label.

    Creates a label with a given NAME and an (optional) list of
    sensors and elements associated with that label.
    """
    client = ctx.find_object(Client)

    def _resolve(cls, id_reps):
        # Resolve each given id/representation against the full resource
        # list for the class; an empty/None input resolves to [].
        if not id_reps:
            return []
        known = cls.all(client)
        return [cls.lookup(client, rep, resources=known) for rep in id_reps]

    resolved_sensors = _resolve(Sensor, sensors)
    resolved_elements = _resolve(Element, elements)

    label = Label.create(client, attributes={
        'name': name
    })
    if resolved_sensors:
        label.update_sensors(resolved_sensors)
    if resolved_elements:
        label.update_elements(resolved_elements)

    # Re-fetch so the display includes the freshly attached resources.
    label = Label.find(client, label.id, include=label_includes)
    Label.display(client, [label], include=label_includes)
@cli.command()
@click.argument('label', nargs=-1)
@pass_client
def delete(client, label):
    """Delete one or more labels.

    Deletes the LABELs with the given ids
    """
    # Resolve every id against the full label list before deleting anything,
    # so an unknown id fails fast without partial deletions.
    known_labels = Label.all(client)
    targets = [Label.lookup(client, label_id, resources=known_labels)
               for label_id in label]
    for target in targets:
        target.delete()
        click.echo("Deleted {} ".format(target.id))
@cli.command()
@click.argument('label')
@click.option('--name',
              help="the new name for the label")
@pass_client
def update(client, label, name):
    """Update a label.

    Changes basic attributes on a label.

    To add or remove sensors or elements from a label see the `label
    element` and `label sensor` commands.
    """
    label = Label.lookup(client, label)
    if name:
        label.update(attributes={
            'name': name
        })
    # Re-fetch so the display reflects the updated attributes.
    label = Label.find(client, label.id, include=label_includes)
    Label.display(client, [label], include=label_includes)


# Register the generic metadata subcommand for labels.
cli.add_command(metadata.cli(Label))
@cli.command()
@click.argument('label')
@click.option('--add',
              type=ResourceParamType(metavar='SENSOR'),
              help="Add sensors to a label")
@click.option('--remove',
              type=ResourceParamType(metavar='SENSOR'),
              help="Remove sensors from a label")
@click.option('--replace',
              type=ResourceParamType(metavar='SENSOR'),
              help="Replace all sensors in a label")
@device_sort_option
@device_mac_option
@pass_client
def sensor(client, label, mac, **kwargs):
    """List sensors for a label.

    List sensors for a given LABEL.

    Add, remove or replace sensors from the LABEL by using the --add,
    --remove or --replace arguments respectively. Note that you can
    specify "none" with these to indicate an empty list.
    """
    label = Label.lookup(client, label)
    # Resolve the --add/--remove/--replace ids; each action field is None
    # when the corresponding option was not given.
    actions = lookup_label_action_resources(client, Sensor,
                                            mac=mac, **kwargs)
    if actions.add is not None:
        label.add_sensors(actions.add)
    if actions.remove is not None:
        label.remove_sensors(actions.remove)
    if actions.replace is not None:
        label.update_sensors(actions.replace)
    # Finally, show the label's (possibly updated) sensor list.
    sensors = label.sensors()
    Sensor.display(client, sensors, **kwargs)
# Fix: the option metavars, help texts and part of the docstring were
# copy-pasted from the sensor command and said "sensors"/"SENSOR" even
# though this command operates on elements.
@cli.command()
@click.argument('label')
@click.option('--add',
              type=ResourceParamType(metavar='ELEMENT'),
              help="Add elements to a label")
@click.option('--remove',
              type=ResourceParamType(metavar='ELEMENT'),
              help="Remove elements from a label")
@click.option('--replace',
              type=ResourceParamType(metavar='ELEMENT'),
              help="Replace all elements in a label")
@device_sort_option
@device_mac_option
@pass_client
def element(client, label, mac, **kwargs):
    """List elements for a label.

    List elements for a given LABEL.

    Add, remove or replace elements from the LABEL by using the --add,
    --remove or --replace arguments respectively. Note that you can
    specify "none" with these to indicate an empty list.
    """
    label = Label.lookup(client, label)
    # Resolve the --add/--remove/--replace ids; each action field is None
    # when the corresponding option was not given.
    actions = lookup_label_action_resources(client, Element,
                                            mac=mac, **kwargs)
    if actions.add is not None:
        label.add_elements(actions.add)
    if actions.remove is not None:
        label.remove_elements(actions.remove)
    if actions.replace is not None:
        label.update_elements(actions.replace)
    # Finally, show the label's (possibly updated) element list.
    elements = label.elements()
    Element.display(client, elements, **kwargs)
| bsd-3-clause |
ruguevara/neon | neon/layers/tests/test_fully_connected.py | 9 | 3084 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
from nose.plugins.attrib import attr
from neon.backends.cpu import CPU
from neon.layers import FCLayer
from neon.params import IdentityValGen
from neon.util.testing import assert_tensor_equal
# Layer dimensions and minibatch size shared by all checks in this module.
nin = 3
nout = 2
batch_size = 10
def check_fprop(layer, backend):
    """Forward-propagate an all-ones batch and expect an all-ones output.

    Holds because the layer under test is built with identity weights.
    """
    ones_in = backend.ones((nin, batch_size))
    ones_out = backend.ones((nout, batch_size))
    layer.fprop(ones_in)
    assert_tensor_equal(layer.output, ones_out)
def check_bprop(layer, backend):
    """Back-propagate all-ones errors and verify the produced deltas."""
    errors = backend.ones((nout, batch_size))
    # Expected deltas: ones in the first nout (=2) rows, zeros elsewhere,
    # since the layer uses identity weights.
    deltas = backend.zeros((nin, batch_size))
    deltas[:2] = backend.ones((nout, batch_size))
    # initialize deltas since they are not set
    # by the layer initialize method.
    layer.deltas = backend.ones((nin, batch_size))

    # layers should be refactored to remove references
    # to external layers. inputs can be cached during
    # fprop.
    class PreviousLayer(object):
        # Minimal stand-in for the layer feeding this one during bprop.
        def __init__(self):
            self.is_data = True
            self.output = backend.ones((nin, batch_size))

    layer.prev_layer = PreviousLayer()
    layer.bprop(errors)
    assert_tensor_equal(layer.deltas, deltas)
class TestFullyConnectedLayer(object):
    """fprop/bprop checks for FCLayer on CPU and (optionally) GPU backends."""

    def create_layer(self, backend):
        # Identity weight init makes the expected outputs trivially ones.
        weight_init = IdentityValGen()
        layer = FCLayer(nin=nin,
                        nout=nout,
                        batch_size=batch_size,
                        weight_init=weight_init,
                        backend=backend)
        layer.set_weight_shape()
        layer.initialize([])
        return layer

    def test_cpu_fprop(self):
        backend = CPU(rng_seed=0)
        layer = self.create_layer(backend=backend)
        check_fprop(layer, backend)

    def test_cpu_bprop(self):
        backend = CPU(rng_seed=0)
        layer = self.create_layer(backend=backend)
        check_bprop(layer, backend)

    @attr('cuda')
    def test_gpu_fprop(self):
        # Imported lazily so CPU-only environments can still load the module.
        from neon.backends.cc2 import GPU
        backend = GPU(rng_seed=0)
        layer = self.create_layer(backend=backend)
        check_fprop(layer, backend)

    @attr('cuda')
    def test_gpu_bprop(self):
        from neon.backends.cc2 import GPU
        backend = GPU(rng_seed=0)
        layer = self.create_layer(backend=backend)
        check_bprop(layer, backend)
| apache-2.0 |
skirsdeda/django | django/db/models/fields/files.py | 22 | 18693 | import datetime
import os
from django import forms
from django.db.models.fields import Field
from django.core import checks
from django.core.files.base import File
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile
from django.db.models import signals
from django.utils.encoding import force_str, force_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
    """File-like value of a model FileField, bound to a storage backend."""

    def __init__(self, instance, field, name):
        super(FieldFile, self).__init__(None, name)
        self.instance = instance
        self.field = field
        self.storage = field.storage
        # True once the file content has been written to storage.
        self._committed = True

    def __eq__(self, other):
        # Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can retain backwards compatibility.
        if hasattr(other, 'name'):
            return self.name == other.name
        return self.name == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.name)

    # The standard File contains most of the necessary properties, but
    # FieldFiles can be instantiated without a name, so that needs to
    # be checked for here.

    def _require_file(self):
        # Guard used by accessors that need an actual file to exist.
        if not self:
            raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)

    def _get_file(self):
        # Lazily open the underlying file from storage on first access.
        self._require_file()
        if not hasattr(self, '_file') or self._file is None:
            self._file = self.storage.open(self.name, 'rb')
        return self._file

    def _set_file(self, file):
        self._file = file

    def _del_file(self):
        del self._file

    file = property(_get_file, _set_file, _del_file)

    def _get_path(self):
        self._require_file()
        return self.storage.path(self.name)
    path = property(_get_path)

    def _get_url(self):
        self._require_file()
        return self.storage.url(self.name)
    url = property(_get_url)

    def _get_size(self):
        self._require_file()
        if not self._committed:
            # Uncommitted content: report the size of the pending file object.
            return self.file.size
        return self.storage.size(self.name)
    size = property(_get_size)

    def open(self, mode='rb'):
        self._require_file()
        self.file.open(mode)
    # open() doesn't alter the file's contents, but it does reset the pointer
    open.alters_data = True

    # In addition to the standard File API, FieldFiles have extra methods
    # to further manipulate the underlying file, as well as update the
    # associated model instance.

    def save(self, name, content, save=True):
        """Save *content* under *name* via storage and update the instance."""
        name = self.field.generate_filename(self.instance, name)
        self.name = self.storage.save(name, content)
        setattr(self.instance, self.field.name, self.name)

        # Update the filesize cache
        self._size = content.size
        self._committed = True

        # Save the object because it has changed, unless save is False
        if save:
            self.instance.save()
    save.alters_data = True

    def delete(self, save=True):
        """Delete the file from storage and clear the field on the instance."""
        if not self:
            return
        # Only close the file if it's already open, which we know by the
        # presence of self._file
        if hasattr(self, '_file'):
            self.close()
            del self.file

        self.storage.delete(self.name)

        self.name = None
        setattr(self.instance, self.field.name, self.name)

        # Delete the filesize cache
        if hasattr(self, '_size'):
            del self._size
        self._committed = False

        if save:
            self.instance.save()
    delete.alters_data = True

    def _get_closed(self):
        file = getattr(self, '_file', None)
        return file is None or file.closed
    closed = property(_get_closed)

    def close(self):
        file = getattr(self, '_file', None)
        if file is not None:
            file.close()

    def __getstate__(self):
        # FieldFile needs access to its associated model field and an instance
        # it's attached to in order to work properly, but the only necessary
        # data to be pickled is the file's name itself. Everything else will
        # be restored later, by FileDescriptor below.
        return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
    """
    The descriptor for the file attribute on the model instance. Returns a
    FieldFile when accessed so you can do stuff like::

        >>> from myapp.models import MyModel
        >>> instance = MyModel.objects.get(pk=1)
        >>> instance.file.size

    Assigns a file object on assignment so you can do::

        >>> with open('/tmp/hello.world', 'r') as f:
        ...     instance.file = File(f)
    """
    def __init__(self, field):
        self.field = field

    def __get__(self, instance=None, owner=None):
        if instance is None:
            raise AttributeError(
                "The '%s' attribute can only be accessed from %s instances."
                % (self.field.name, owner.__name__))

        # This is slightly complicated, so worth an explanation.
        # `instance.file` needs to ultimately return some instance of `File`,
        # probably a subclass. Additionally, this returned object needs to have
        # the FieldFile API so that users can easily do things like
        # instance.file.path and have that delegated to the file storage engine.
        # Easy enough if we're strict about assignment in __set__, but if you
        # peek below you can see that we're not. So depending on the current
        # value of the field we have to dynamically construct some sort of
        # "thing" to return.

        # The instance dict contains whatever was originally assigned
        # in __set__.
        file = instance.__dict__[self.field.name]

        # If this value is a string (instance.file = "path/to/file") or None
        # then we simply wrap it with the appropriate attribute class according
        # to the file field. [This is FieldFile for FileFields and
        # ImageFieldFile for ImageFields; it's also conceivable that user
        # subclasses might also want to subclass the attribute class]. This
        # object understands how to convert a path to a file, and also how to
        # handle None.
        if isinstance(file, six.string_types) or file is None:
            attr = self.field.attr_class(instance, self.field, file)
            instance.__dict__[self.field.name] = attr

        # Other types of files may be assigned as well, but they need to have
        # the FieldFile interface added to them. Thus, we wrap any other type of
        # File inside a FieldFile (well, the field's attr_class, which is
        # usually FieldFile).
        elif isinstance(file, File) and not isinstance(file, FieldFile):
            file_copy = self.field.attr_class(instance, self.field, file.name)
            file_copy.file = file
            file_copy._committed = False
            instance.__dict__[self.field.name] = file_copy

        # Finally, because of the (some would say boneheaded) way pickle works,
        # the underlying FieldFile might not actually itself have an associated
        # file. So we need to reset the details of the FieldFile in those cases.
        elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
            file.instance = instance
            file.field = self.field
            file.storage = self.field.storage

        # That was fun, wasn't it?
        return instance.__dict__[self.field.name]

    def __set__(self, instance, value):
        # Store the raw value; __get__ normalizes it to attr_class on access.
        instance.__dict__[self.field.name] = value
class FileField(Field):
    """Model field that stores a file path and delegates I/O to a storage."""

    # The class to wrap instance attributes in. Accessing the file object off
    # the instance will always return an instance of attr_class.
    attr_class = FieldFile

    # The descriptor to use for accessing the attribute off of the class.
    descriptor_class = FileDescriptor

    description = _("File")

    def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
        # Remember whether the caller passed these so check() can reject them.
        self._primary_key_set_explicitly = 'primary_key' in kwargs
        self._unique_set_explicitly = 'unique' in kwargs

        self.storage = storage or default_storage
        self.upload_to = upload_to
        # A callable upload_to replaces the default filename generation.
        if callable(upload_to):
            self.generate_filename = upload_to

        kwargs['max_length'] = kwargs.get('max_length', 100)
        super(FileField, self).__init__(verbose_name, name, **kwargs)

    def check(self, **kwargs):
        errors = super(FileField, self).check(**kwargs)
        errors.extend(self._check_unique())
        errors.extend(self._check_primary_key())
        return errors

    def _check_unique(self):
        # 'unique' makes no sense for file fields; flag it as a system check
        # error if it was given explicitly.
        if self._unique_set_explicitly:
            return [
                checks.Error(
                    "'unique' is not a valid argument for a %s." % self.__class__.__name__,
                    hint=None,
                    obj=self,
                    id='fields.E200',
                )
            ]
        else:
            return []

    def _check_primary_key(self):
        if self._primary_key_set_explicitly:
            return [
                checks.Error(
                    "'primary_key' is not a valid argument for a %s." % self.__class__.__name__,
                    hint=None,
                    obj=self,
                    id='fields.E201',
                )
            ]
        else:
            return []

    def deconstruct(self):
        # Omit max_length when it is the default (100) to keep migrations tidy.
        name, path, args, kwargs = super(FileField, self).deconstruct()
        if kwargs.get("max_length", None) == 100:
            del kwargs["max_length"]
        kwargs['upload_to'] = self.upload_to
        if self.storage is not default_storage:
            kwargs['storage'] = self.storage
        return name, path, args, kwargs

    def get_internal_type(self):
        return "FileField"

    def get_prep_lookup(self, lookup_type, value):
        # Allow lookups against File objects by comparing their names.
        if hasattr(value, 'name'):
            value = value.name
        return super(FileField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        "Returns field's value prepared for saving into a database."
        value = super(FileField, self).get_prep_value(value)
        # Need to convert File objects provided via a form to unicode for database insertion
        if value is None:
            return None
        return six.text_type(value)

    def pre_save(self, model_instance, add):
        "Returns field's value just before saving."
        file = super(FileField, self).pre_save(model_instance, add)
        if file and not file._committed:
            # Commit the file to storage prior to saving the model
            file.save(file.name, file, save=False)
        return file

    def contribute_to_class(self, cls, name, **kwargs):
        super(FileField, self).contribute_to_class(cls, name, **kwargs)
        # Install the descriptor so attribute access returns a FieldFile.
        setattr(cls, self.name, self.descriptor_class(self))

    def get_directory_name(self):
        # Expand strftime placeholders in upload_to into a directory path.
        return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))

    def get_filename(self, filename):
        return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))

    def generate_filename(self, instance, filename):
        return os.path.join(self.get_directory_name(), self.get_filename(filename))

    def save_form_data(self, instance, data):
        # Important: None means "no change", other false value means "clear"
        # This subtle distinction (rather than a more explicit marker) is
        # needed because we need to consume values that are also sane for a
        # regular (non Model-) Form to find in its cleaned_data dictionary.
        if data is not None:
            # This value will be converted to unicode and stored in the
            # database, so leaving False as-is is not acceptable.
            if not data:
                data = ''
            setattr(instance, self.name, data)

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
        # If a file has been provided previously, then the form doesn't require
        # that a new file is provided this time.
        # The code to mark the form field as not required is used by
        # form_for_instance, but can probably be removed once form_for_instance
        # is gone. ModelForm uses a different method to check for an existing file.
        if 'initial' in kwargs:
            defaults['required'] = False
        defaults.update(kwargs)
        return super(FileField, self).formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
    """
    Just like the FileDescriptor, but for ImageFields. The only difference is
    assigning the width/height to the width_field/height_field, if appropriate.
    """
    def __set__(self, instance, value):
        previous_file = instance.__dict__.get(self.field.name)
        super(ImageFileDescriptor, self).__set__(instance, value)

        # To prevent recalculating image dimensions when we are instantiating
        # an object from the database (bug #11084), only update dimensions if
        # the field had a value before this assignment. Since the default
        # value for FileField subclasses is an instance of field.attr_class,
        # previous_file will only be None when we are called from
        # Model.__init__(). The ImageField.update_dimension_fields method
        # hooked up to the post_init signal handles the Model.__init__() cases.
        # Assignment happening outside of Model.__init__() will trigger the
        # update right here.
        if previous_file is not None:
            self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
    """FieldFile for images; drops cached dimensions when the file goes away."""

    def delete(self, save=True):
        # Clear the cached (width, height) before removing the file so a
        # later access does not report dimensions of the deleted image.
        cache_attr = '_dimensions_cache'
        if hasattr(self, cache_attr):
            delattr(self, cache_attr)
        super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
    """FileField that validates images and can mirror their dimensions
    into companion width/height model fields."""

    attr_class = ImageFieldFile
    descriptor_class = ImageFileDescriptor
    description = _("Image")

    def __init__(self, verbose_name=None, name=None, width_field=None,
                 height_field=None, **kwargs):
        # Names of sibling model fields that receive the image dimensions.
        self.width_field, self.height_field = width_field, height_field
        super(ImageField, self).__init__(verbose_name, name, **kwargs)

    def check(self, **kwargs):
        errors = super(ImageField, self).check(**kwargs)
        errors.extend(self._check_image_library_installed())
        return errors

    def _check_image_library_installed(self):
        # Pillow is required to open images and read their dimensions.
        try:
            from PIL import Image  # NOQA
        except ImportError:
            return [
                checks.Error(
                    'Cannot use ImageField because Pillow is not installed.',
                    hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
                          'or run command "pip install Pillow".'),
                    obj=self,
                    id='fields.E210',
                )
            ]
        else:
            return []

    def deconstruct(self):
        name, path, args, kwargs = super(ImageField, self).deconstruct()
        if self.width_field:
            kwargs['width_field'] = self.width_field
        if self.height_field:
            kwargs['height_field'] = self.height_field
        return name, path, args, kwargs

    def contribute_to_class(self, cls, name, **kwargs):
        super(ImageField, self).contribute_to_class(cls, name, **kwargs)
        # Attach update_dimension_fields so that dimension fields declared
        # after their corresponding image field don't stay cleared by
        # Model.__init__, see bug #11196.
        # Only run post-initialization dimension update on non-abstract models
        if not cls._meta.abstract:
            signals.post_init.connect(self.update_dimension_fields, sender=cls)

    def update_dimension_fields(self, instance, force=False, *args, **kwargs):
        """
        Updates field's width and height fields, if defined.

        This method is hooked up to model's post_init signal to update
        dimensions after instantiating a model instance. However, dimensions
        won't be updated if the dimensions fields are already populated. This
        avoids unnecessary recalculation when loading an object from the
        database.

        Dimensions can be forced to update with force=True, which is how
        ImageFileDescriptor.__set__ calls this method.
        """
        # Nothing to update if the field doesn't have dimension fields.
        has_dimension_fields = self.width_field or self.height_field
        if not has_dimension_fields:
            return

        # getattr will call the ImageFileDescriptor's __get__ method, which
        # coerces the assigned value into an instance of self.attr_class
        # (ImageFieldFile in this case).
        file = getattr(instance, self.attname)

        # Nothing to update if we have no file and not being forced to update.
        if not file and not force:
            return

        dimension_fields_filled = not(
            (self.width_field and not getattr(instance, self.width_field))
            or (self.height_field and not getattr(instance, self.height_field))
        )
        # When both dimension fields have values, we are most likely loading
        # data from the database or updating an image field that already had
        # an image stored. In the first case, we don't want to update the
        # dimension fields because we are already getting their values from the
        # database. In the second case, we do want to update the dimensions
        # fields and will skip this return because force will be True since we
        # were called from ImageFileDescriptor.__set__.
        if dimension_fields_filled and not force:
            return

        # file should be an instance of ImageFieldFile or should be None.
        if file:
            width = file.width
            height = file.height
        else:
            # No file, so clear dimensions fields.
            width = None
            height = None

        # Update the width and height fields.
        if self.width_field:
            setattr(instance, self.width_field, width)
        if self.height_field:
            setattr(instance, self.height_field, height)

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.ImageField}
        defaults.update(kwargs)
        return super(ImageField, self).formfield(**defaults)
| bsd-3-clause |
abalkin/numpy | numpy/core/tests/test_extint128.py | 17 | 5643 | import itertools
import contextlib
import operator
import pytest
import numpy as np
import numpy.core._multiarray_tests as mt
from numpy.testing import assert_raises, assert_equal
INT64_MAX = np.iinfo(np.int64).max
INT64_MIN = np.iinfo(np.int64).min
INT64_MID = 2**32

# int128 is not two's complement, the sign bit is separate
INT128_MAX = 2**128 - 1
INT128_MIN = -INT128_MAX
INT128_MID = 2**64

# Test inputs clustered around the extremes, midpoints and zero of each
# range, chosen to exercise carry and overflow edge cases.
INT64_VALUES = (
    [INT64_MIN + j for j in range(20)] +
    [INT64_MAX - j for j in range(20)] +
    [INT64_MID + j for j in range(-20, 20)] +
    [2*INT64_MID + j for j in range(-20, 20)] +
    [INT64_MID//2 + j for j in range(-20, 20)] +
    list(range(-70, 70))
)
INT128_VALUES = (
    [INT128_MIN + j for j in range(20)] +
    [INT128_MAX - j for j in range(20)] +
    [INT128_MID + j for j in range(-20, 20)] +
    [2*INT128_MID + j for j in range(-20, 20)] +
    [INT128_MID//2 + j for j in range(-20, 20)] +
    list(range(-70, 70)) +
    [False]  # negative zero
)
INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
@contextlib.contextmanager
def exc_iter(*args):
    """
    Iterate over Cartesian product of *args, and if an exception is raised,
    add information of the current iterate.
    """
    # Mutable cell so the generator below can report the last combination
    # it produced when an exception escapes the with-block.
    current = [None]

    def iterate():
        for combo in itertools.product(*args):
            current[0] = combo
            yield combo

    try:
        yield iterate()
    except Exception:
        import traceback
        msg = "At: %r\n%s" % (repr(current[0]),
                              traceback.format_exc())
        raise AssertionError(msg)
def test_safe_binop():
    # Test checked arithmetic routines
    # Each op is (python callable, C-side op code expected by the extension).
    ops = [
        (operator.add, 1),
        (operator.sub, 2),
        (operator.mul, 3)
    ]

    with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
        for xop, a, b in it:
            pyop, op = xop
            c = pyop(a, b)

            if not (INT64_MIN <= c <= INT64_MAX):
                # Out-of-range results must raise rather than wrap.
                assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
            else:
                d = mt.extint_safe_binop(a, b, op)
                if c != d:
                    # assert_equal is slow
                    assert_equal(d, c)
def test_to_128():
    # Widening int64 -> int128 must be value-preserving.
    with exc_iter(INT64_VALUES) as it:
        for a, in it:
            b = mt.extint_to_128(a)
            if a != b:
                assert_equal(b, a)
def test_to_64():
    # Narrowing int128 -> int64 preserves in-range values and raises
    # OverflowError for anything outside the int64 range.
    with exc_iter(INT128_VALUES) as it:
        for a, in it:
            if not (INT64_MIN <= a <= INT64_MAX):
                assert_raises(OverflowError, mt.extint_to_64, a)
            else:
                b = mt.extint_to_64(a)
                if a != b:
                    assert_equal(b, a)
def test_mul_64_64():
    # 64x64 -> 128-bit multiply checked against Python's exact arithmetic.
    with exc_iter(INT64_VALUES, INT64_VALUES) as it:
        for a, b in it:
            c = a * b
            d = mt.extint_mul_64_64(a, b)
            if c != d:
                assert_equal(d, c)
def test_add_128():
    # 128-bit addition: exact in range, OverflowError outside it.
    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
        for a, b in it:
            c = a + b
            if not (INT128_MIN <= c <= INT128_MAX):
                assert_raises(OverflowError, mt.extint_add_128, a, b)
            else:
                d = mt.extint_add_128(a, b)
                if c != d:
                    assert_equal(d, c)
def test_sub_128():
    # 128-bit subtraction: exact in range, OverflowError outside it.
    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
        for a, b in it:
            c = a - b
            if not (INT128_MIN <= c <= INT128_MAX):
                assert_raises(OverflowError, mt.extint_sub_128, a, b)
            else:
                d = mt.extint_sub_128(a, b)
                if c != d:
                    assert_equal(d, c)
def test_neg_128():
    # 128-bit negation (sign-magnitude, so the full range negates cleanly).
    with exc_iter(INT128_VALUES) as it:
        for a, in it:
            b = -a
            c = mt.extint_neg_128(a)
            if b != c:
                assert_equal(c, b)
def test_shl_128():
    # Left shift by one; the magnitude wraps modulo 2**128 while the
    # separate sign bit is preserved.
    with exc_iter(INT128_VALUES) as it:
        for a, in it:
            if a < 0:
                b = -(((-a) << 1) & (2**128-1))
            else:
                b = (a << 1) & (2**128-1)
            c = mt.extint_shl_128(a)
            if b != c:
                assert_equal(c, b)
def test_shr_128():
    # Right shift by one on the magnitude; sign handled separately so
    # negative values round toward zero rather than toward -inf.
    with exc_iter(INT128_VALUES) as it:
        for a, in it:
            if a < 0:
                b = -((-a) >> 1)
            else:
                b = a >> 1
            c = mt.extint_shr_128(a)
            if b != c:
                assert_equal(c, b)
def test_gt_128():
    # 128-bit greater-than comparison against Python's exact comparison.
    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
        for a, b in it:
            c = a > b
            d = mt.extint_gt_128(a, b)
            if c != d:
                assert_equal(d, c)
@pytest.mark.slow
def test_divmod_128_64():
    """Check extint_divmod_128_64 against C-style (truncating) divmod."""
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
        for a, b in it:
            # Compute the expected truncating divmod by dividing magnitudes
            # and re-applying the sign (Python's divmod floors instead).
            if a >= 0:
                c, cr = divmod(a, b)
            else:
                c, cr = divmod(-a, b)
                c = -c
                cr = -cr

            d, dr = mt.extint_divmod_128_64(a, b)

            # Cheap comparisons first; only fall back to the slow
            # assert_equal calls on an actual mismatch. Fix: the original
            # compared `d != dr` (quotient vs. remainder) instead of
            # `cr != dr`, so the slow path ran on nearly every iteration
            # and the remainder was never part of the cheap check.
            if c != d or cr != dr or b*d + dr != a:
                assert_equal(d, c)
                assert_equal(dr, cr)
                assert_equal(b*d + dr, a)
def test_floordiv_128_64():
    # Floor division of int128 by positive int64, checked exactly.
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
        for a, b in it:
            c = a // b
            d = mt.extint_floordiv_128_64(a, b)

            if c != d:
                assert_equal(d, c)
def test_ceildiv_128_64():
    # Ceiling division of int128 by positive int64, expected value computed
    # with the (a + b - 1) // b identity (valid since b > 0).
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
        for a, b in it:
            c = (a + b - 1) // b
            d = mt.extint_ceildiv_128_64(a, b)

            if c != d:
                assert_equal(d, c)
| bsd-3-clause |
ViKomprenas/nsndswap | nsndswap/viko_nsnd.py | 1 | 13689 | #!/usr/bin/env python3
# nsndswap/viko_nsnd.py
# copyright 2017 ViKomprenas, 2-clause BSD license (LICENSE.md)
from nsndswap.util import Track
nsnd = {
# A Shade of Two
"criticalErr0r": ["Taureg"],
"Exploreation": ["Explore", "Upward Movement (Dave Owns)"],
"Taureg": ["Sburban Jungle", "Beatdown"],
"HammertimeVsThatBlackDoggo": ["Beatdown", "Doctor", "Sburban Jungle", "Liquid Negrocity", "BeatVale", "Penumbra Phantasm"],
"Saviour of the Dancing Demon": ["Doctor", "Penumbra Phantasm", "Beatdown", "Sburban Jungle"],
"Player 2": ["Sburban Jungle", "Beatdown", "Liquid Negrocity", "Doctor", "Dance of Thorns"],
"Cascadium Dioxide": ["Cascade (Beta)", "Flare", "Doctor", "Penumbra Phantasm", "Black Rose / Green Sun", "Black Hole / Green Sun", "Sburban Jungle"],
"Unnamed Jungle Club Remix (Extra)": ["Sburban Jungle"],
"Tales of an Unknown Universe": [],
# M3l0m4ni4c soundcloud
"At Shadow's Edge": ["Penumbra Phantasm", "Amen, Brother"],
"Whirlwind (L8 for D8 Version)": ["Whirlwind", "Patient", "Penumbra Phantasm", "Doctor", "Showtime", "Crystalanthemums", "Crystamanthequins", "Spider's Claw", "Vriska's Theme"],
"Wishful Thinking": ["Skies of Skaia", "Skaian Summoning", "Theme", "Rex Duodecim Angelus", "Penumbra Phantasm", "Upward Movement (Dave Owns)", "Lotus", "Homestuck Anthem", "Ruins", "Explore", "Skaian Skuffle", "Sburban Jungle", "Cascade (Beta)", "Overture (Canon Edit)", "Even in Death"],
"\N{Dingbat Circled Sans-Serif Digit Eight}": ["Spider's Claw", "Vriska's Theme", "Rex Duodecim Angelus", "Amen, Brother"],
"Blacker Than Licorice": ["Three in the Morning", "Liquid Negrocity", "Descend", "Umbral Ultimatum", "Walk-Stab-Walk", "Cascade (Beta)", "The Ballad of Jack Noir", "Lotus", "Non Compos Mentis", "Three's a Crowd", "Calamity", "Explore", "Flight of the White Wolf", "Amen, Brother", "Harlequin"],
"Whirlwind": ["Showtime", "Doctor", "Patient", "Savior of the Waking World", "Penumbra Phantasm"],
"Ignition": ["Flare (Cascade Cut)", "MeGaLoVania"],
# ViKomprenas soundcloud
"Another Elevator": ["Another Jungle", "Sburban Elevator"],
"Elevator #3": ["Jungle #3", "Sburban Elevator"],
"unfinished medley for power464646": ["Carbon Nadsat/Cuestick Genius", "Rhapsody in Green", "Havoc", "Eternity, Served Cold", "Showdown", "Riches to Ruins Movements I & II", "Negastrife", "PPiSHWA", "Carne Vale"],
"Saiddit Lullaby": ["Colt Blooded", "Look Where We Are"],
# UNDERTALE Soundtrack
"Once Upon a Time": [],
"Start Menu": ["Once Upon a Time"],
"Your Best Friend": [],
"Fallen Down": [],
"Ruins (Undertale)": [],
"Uwa!! So Temperate♫": [],
"Anticipation (Undertale)": ["Enemy Approaching"],
"Unnecessary Tension": [],
"Enemy Approaching": [],
"Ghost Fight": [],
"Determination": [],
"Home (Undertale)": ["Once Upon a Time"],
"Home (Music Box)": ["Home (Undertale)"],
"Heartache": ["Enemy Approaching"],
"sans.": [],
"Nyeh Heh Heh!": [],
"Snowy": [],
"Uwa!! So Holiday♫": ["Uwa!! So Temperate♫"],
"Dogbass": ["Ghost Fight"],
"Mysterious Place": [],
"Dogsong": ["Enemy Approaching"],
"Snowdin Town": ["Snowy"],
"Shop": ["Snowy", "Snowdin Town"],
"Bonetrousle": ["Nyeh Heh Heh!"],
"Dating Start!": ["Snowdin Town"],
"Dating Tense!": ["Undyne"],
"Dating Fight!": ["Dating Start!"],
"Premonition (Undertale)": ["You Idiot"],
"Danger Mystery": [],
"Undyne": [],
"Waterfall": ["Ruins (Undertale)"],
"Run!": ["Undyne"],
"Quiet Water": ["Ruins (Undertale)"],
"Memory": ["His Theme"],
"Bird That Carries You Over a Disproportionately Small Gap": ["Alphys"],
"Dummy!": ["Ghost Fight"],
"Pathetic House": ["Ghost Fight"],
"Spooktune": [],
"Spookwave": ["Spooktune"],
"Ghouliday": ["Jingle Bells"],
"Chill": [],
"Thundersnail": [],
"Temmie Village": ["Dogsong"],
"Tem Shop": ["Temmie Village"],
"NGAHHH!!": ["Undyne", "Ruins (Undertale)"],
"Spear of Justice": ["NGAHHH!!"],
"Ooo": [],
"Alphys": [],
"It's Showtime!": [],
"Metal Crusher": ["Noisemaster's Theme"],
"Another Medium": ["Waterfall", "Patient"],
"Uwa!! So HEATS!!♫": ["Uwa!! So Temperate♫"],
"Stronger Monsters": ["Enemy Approaching"],
"Hotel": ["Once Upon a Time"],
"Can You Really Call This A Hotel, I Didn't Receive A Mint On My Pillow Or Anything": ["Hotel"],
"Confession": ["Snowdin Town"],
"Live Report": ["It's Showtime!"],
"Death Report": ["Live Report"],
"Spider Dance": ["Ghost Fight"],
"Wrong Enemy ?!": [],
"Oh! One True Love": [],
"Oh! Dungeon": ["Oh! One True Love"],
"It's Raining Somewhere Else": ["sans."],
"CORE Approach": ["Hotel"],
"CORE": ["Another Medium"],
"Last Episode!": ["Metal Crusher"],
"Oh My...": [],
"Death by Glamour": ["Another Medium", "Metal Crusher", "It's Showtime!"],
"For the Fans": ["Oh! One True Love"],
"Long Elevator": [],
"Undertale": ["His Theme", "Once Upon a Time"],
"Song That Might Play When You Fight Sans": ["sans.", "Nyeh Heh Heh!"],
"The Choice": ["Undertale"],
"Small Shock": [],
"Barrier": [],
"Bergentrückung": ["Once Upon a Time"],
"ASGORE": ["Bergentrückung", "Heartache", "Determination", "Undyne"],
"You Idiot": [],
"Your Best Nightmare": ["You Idiot", "Your Best Friend"],
"Finale": ["Your Best Friend", "His Theme"],
"An Ending": ["Ruins (Undertale)"],
"She's Playing Piano": ["Alphys"],
"Here We Are": ["Alphys"],
"Amalgam": ["Enemy Approaching"],
"Fallen Down (Reprise)": ["Fallen Down", "Once Upon a Time"],
"Don't Give Up": ["An Ending"],
"Hopes and Dreams": ["Once Upon a Time", "Your Best Friend", "Snowdin Town"],
"Burn in Despair!": ["You Idiot"],
"SAVE the World": ["Once Upon a Time", "Your Best Friend"],
"His Theme": [],
"Final Power": ["Hopes and Dreams"],
"Reunited": ["Once Upon a Time", "Snowdin Town"],
"Menu (Full)": ["Start Menu"],
"Respite": ["Ruins (Undertale)"],
"Bring It In, Guys!": ["Enemy Approaching", "Nyeh Heh Heh!", "sans.", "Snowy", "Snowdin Town", "Undyne", "Ruins (Undertale)", "Death by Glamour", "Another Medium", "Bergentrückung", "Fallen Down", "Once Upon a Time"],
"Last Goodbye": ["Once Upon a Time"],
"But the Earth Refused to Die": ["Ruins (Undertale)"],
"Battle Against a True Hero": ["Ruins (Undertale)", "Alphys"],
"Power of \"NEO\"": ["Battle Against a True Hero"],
"MEGALOVANIA": ["Megalovania"],
"Good Night": ["Once Upon a Time"],
# UNDERTALE miscellany
"Empty House": ["Fallen Down"],
"Meat Factory": [],
"Happy Town": [],
"Trouble Dingle": [],
"Gaster's Theme": [],
"Grandpa Semi": ["Metal Crusher"],
"King Description": ["Determination"],
"Dance of Dog": [],
"Sigh of Dog": [],
"Alphys Lab (Unused)": ["Alphys"],
"Undyne Battle (Unused)": ["Undyne", "Ruins (Undertale)"],
"Dog Hole": [],
"Dogtroid": ["Dogsong"],
"Undertale (Beta)": ["Once Upon a Time"],
# Misc
"SPIDER DANCE FROM UNDERTALE BUT NOW ITS SKA I GUESS???": ["Spider Dance"],
# Redditstuck Vol. 1
"Cogs": [],
"Hobbyist": [],
"Spool": [],
"Crushing Weight": [],
"Assertion": [],
"Visions of Grim": [],
"Colt Blooded": ["Glass Houses"],
"Incubation": [],
"What A Hoot": [],
"Saoshyant": ["Hobbyist"],
"Alloy": [],
"Conflict!": [],
"Sticky Steps": [],
"Happy Thoughts And Fairy Dust": [],
"Effervescence": [],
"Malediction": [],
"Solid Water Whispers": [],
"Divine Intervention": [],
"Thank You Based Prince": [],
"Look Where We Are": [],
"Hyperion": [],
# Redditstuck Vol. 2
"Prelude For A Leading Lady": [],
"Land of Glaciers and Magma": [],
"Inflection": [],
"Halcyon": ["My Favorite Things"],
"Symmet": ["Symmet [Midi]"],
"Symmet [Midi]": [],
"In The Sorcerer's Tower": [],
"Siphon": [],
"Calmshit": [],
"The Dremaer And The Dream": [],
"Lewd Lunacy": [],
"Slippery Slope": ["Sticky Steps"],
"Gnade": [],
"Calm Before Storm": [],
"Glass Houses": [],
"To Cross The Void": [],
"Frieden": ["Ruhe"],
"Where Are We Now": ["Look Where We Are"],
"(I Can't Get No) Smooth Pencils": [],
"Pizza Bagels": [],
"The Fake-Out": [],
"Maelstrom": [],
# Hypertonic Dreams
"Palfrey Motive": [],
"Vitriolium": [],
"Redox Redux": [],
"All Saints to Arms": [],
"Blueshift": [],
"Den of Sin": [],
"Duck Muffler": [],
"Paperthin": ["Jingle Bell Rock"],
"Reprieve": [],
"Notes From The Left Field": [],
"Hard Pill to Swallow": [],
"Sad Mountain Dew Music": ["Sticky Steps"],
"Tryptophantasm": [],
"High Upon Hinterland": [],
"Untruly Yours": [],
"Burnout Flounce": [],
"Ex Nihilo": [],
# WarxTron soundcloud
"Mannequin (WarxTron)": ["Mannequin"],
"Solid Griscosity": ["Liquid Negrocity"],
"Sunset (WarxTron)": ["Sunset (Toby Fox)"],
"Last Breath of the Heir": ["Showtime (Piano Refrain)", "Doctor", "Penumbra Phantasm", "Harlequin", "Liquid Negrocity"],
"Doctor Percocets": ["Doctor", "Mask Off", "Penumbra Phantasm"],
"Patient (Full Mix)": ["Patient"],
"Omnipotential Bark": ["Walk-Stab-Walk", "Sburban Jungle", "Atomic Bonsai", "Umbral Ultimatum", "Penumbra Phantasm", "Cascade", "Liquid Negrocity", "Dissension"],
"Savior of the Hard Rock World": ["Savior of the Waking World"],
"Waking Up": ["Penumbra Phantasm", "Doctor", "Get Up"],
"8 BIT Liquid Negrocity": ["Liquid Negrocity"],
"Breath Of Air": ["Doctor", "Penumbra Phantasm"],
"Climbing for Eternity": ["Endless Climb"],
"The Legend of the Sovereign Slayer": ["Liquid Negrocity", "The Ballad of Jack Noir"],
"Strider Squadron": ["Upward Movement", "Beatdown", "Atomyk Ebonpyre", "Candles and Clockwork", "Unite Synchronization", "Sburban Jungle"],
"It's Showtime (Homestuck Version)": ["Showtime", "It's Showtime"],
"Doctor Lotus": ["Doctor", "Lotus"],
"Legion of the Signless": ["Crustacean", "Chorale for Jaspers", "English"],
"8-bit Keepers": ["Keepers"],
"Calliope: Escape": [],
"The Sun Is The Largest Star Setted In The Universe": ["Sunsetter", "Starsetter", "Sburban Jungle"],
"Soft As The Clouds": ["Softly", "Softbit", "TBoSRE", "Sunsetter", "Penumbra Phantasm", "Dissension"],
"Battle Against a Furry Dictator": ["Prospit Dreamers", "Furry Monarch"],
# Phase
"Epoch": ["Beatdown"], # combined
"Advanced Medical Action": ["Doctor"],
"StarLight": [],
"Edge Of Oblivion": ["At The Price of Oblivion"],
"Strife In Sea Hitlers Palace": ["PPiSHWA"], # sic
"Awaken": ["MeGaLoVania", "Sburban Jungle", "Showtime"],
"DeadTone": ["Dead Beat"],
# Skipping Creatrix (duplicated w/ 9)
"Final Fight": ["Doctor", "Even in Death", "Showtime", "MeGaLoVania", "PPiSHWA", "Beatdown"],
# TirantBacon soundcloud
"Non Compos Mentis Remix": ["Non Compos Mentis"],
"Tomahawk Head - Shameful MIDI / soundfont swap edition": ["Tomahawk Head"],
"Meganumbra Phantasm": ["Penumbra Phantasm", "Doctor"],
"Crystalanthemix (WIP)": ["Crystalanthemums"],
"earthlovania music dongers": ["Sunsetter", "MeGaLoVania", "Skies of Skaia"],
"Jhons Rom (Old)": [],
"I'm extremely sorry for this.": ["Walls Covered in Blood", "Soulja Boy"],
"Admiral Bone-To-Pick (Wub Machine Remix)": ["Admiral Bone-To-Pick"],
"Gaia King": ["Gaia Queen"],
# Cerulean soundcloud
"Life Support": ["Doctor"],
"The Last Voyage": ["Eridan's Theme"],
"Moonlight Slaughter": ["Liquid Negrocity"],
# Redditstuck Vol. 3: Small Medium at Large
# List compiled by Bin
"Burnt Bridges": [],
"Down Vice Street": [],
"Down Lick Street": [],
"Sparkle Fairy": [],
"(That's) Inadvisable": [],
"COblivion": [],
"Clear Voyance": ["Gnade"],
"Swing and a Miss": ["Pinkie's Brew"],
"Attendre": [],
"Bushito": [],
"Smut Mania": [],
"Pony Blooded": [],
"Kotomundi": [],
"Ypsilanti": [],
"Hubris": [],
"Robot": [],
"sev": ["Kotomundi", "Hyperion", "Calmshit"],
"Bygone": [],
"Ruhe": [],
"Coltish": [],
"Unreal City": [],
# Commander Cello soundcloud
"Dead Dignitary": ["Liquid Negrocity"],
# FriendlyCoy soundcloud
"showtimejit.mp3": ["Joey: Play Haunting Refrain"],
# Jit tumblr
"Joey: Play Haunting Refrain": [],
# GoldenSkylord soundcloud
"FLburnward": ["Doctor"],
# Dissectum/Rom/Doctor Chaos soundcloud
"Seethe": ["Flare"],
# Xoro Laventer soundcloud
"Starcrusher": ["I absolutely LOATHE this Drummer."],
# Act Omega - Early Access
"Year One": ["Sburban Jungle", "Cascade (Beta)", "Umbral Ultimatum", "Penumbra Phantasm", "IaMotMC"],
"Cog": ["Atomyk Ebonpyre", "Beatdown", "Cascade (Beta)"],
"Blastoff": ["Sburban Jungle", "Cascade (Beta)", "Liquid Negrocity", "Aggrieve", "Penumbra Phantasm"],
"Extermination": ["Extermination Alpha", "Sburban Jungle", "Cascade (Beta)"],
"Queen Folly": ["Extermination Alpha"],
"Wicked Witch": ["Extermination Alpha"],
"Sburban Resolution": ["Sburban Jungle"],
"Torrent (Bonus)": ["Sburban Jungle", "Cascade (Beta)", "Dance of Thorns", "Even in Death", "Upward Movement", "Love You", "Doctor"],
"Savoir Allegro (Bonus)": ["Doctor"],
"Extermination Alpha": [],
"Meowlevatorstuck (Cat Tax)": ["Elevatorstuck"],
}
def parse():
    """Build a Track for every nsnd entry whose reference list is
    present; entries stubbed out with NotImplementedError are skipped."""
    tracks = []
    for title, references in nsnd.items():
        if references is not NotImplementedError:
            tracks.append(Track(title, references))
    return tracks
| bsd-2-clause |
bkirui/odoo | addons/base_gengo/res_company.py | 321 | 1890 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.Model):
    """Extend ``res.company`` with the credentials and behaviour flags
    used when talking to the Gengo translation service."""
    _name = "res.company"
    _inherit = "res.company"

    _columns = {
        # API credentials.  copy=False keeps keys from being duplicated
        # when a company record is copied; the private key is visible
        # to system administrators only.
        "gengo_private_key": fields.text("Gengo Private Key", copy=False, groups="base.group_system"),
        "gengo_public_key": fields.text("Gengo Public Key", copy=False, groups="base.group_user"),
        # Free-form comment appended to every Gengo request.
        "gengo_comment": fields.text("Comments", help="This comment will be automatically be enclosed in each an every request sent to Gengo", groups="base.group_user"),
        # Behaviour flags: auto-approval of finished jobs and use of
        # Gengo's sandbox endpoint for testing.
        "gengo_auto_approve": fields.boolean("Auto Approve Translation ?", help="Jobs are Automatically Approved by Gengo.", groups="base.group_user"),
        "gengo_sandbox": fields.boolean("Sandbox Mode", help="Check this box if you're using the sandbox mode of Gengo, mainly used for testing purpose."),
    }

    _defaults = {
        "gengo_auto_approve": True,
    }
| agpl-3.0 |
Medium/phantomjs-1 | src/breakpad/src/tools/gyp/test/sibling/gyptest-all.py | 151 | 1061 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('build/all.gyp', chdir='src')
test.build('build/all.gyp', test.ALL, chdir='src')
chdir = 'src/build'
# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format == 'make':
chdir = 'src'
if test.format == 'xcode':
chdir = 'src/prog1'
test.run_built_executable('prog1',
chdir=chdir,
stdout="Hello from prog1.c\n")
if test.format == 'xcode':
chdir = 'src/prog2'
test.run_built_executable('prog2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
| bsd-3-clause |
Jionglun/w17test_2 | static/Brython3.1.1-20150328-091302/Lib/tempfile.py | 728 | 22357 | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import warnings as _warnings
import sys as _sys
import io as _io
import os as _os
import errno as _errno
from random import Random as _Random
try:
    import fcntl as _fcntl
except ImportError:
    def _set_cloexec(fd):
        # No fcntl available (e.g. Windows): silently do nothing.
        pass
else:
    def _set_cloexec(fd):
        """Best effort: mark *fd* close-on-exec so child processes
        started with exec do not inherit it."""
        try:
            flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
        except OSError:
            pass
        else:
            # flags read successfully, modify
            flags |= _fcntl.FD_CLOEXEC
            _fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
import _thread
except ImportError:
import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
# Base flags for creating temporary files: read/write, and O_EXCL so the
# open fails rather than reusing a file that already exists.
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
    # Windows: keep the handle out of child processes.
    _text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
    # POSIX: refuse to follow a symlink at the target path.
    _text_openflags |= _os.O_NOFOLLOW

_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
    # Windows: binary mode, no newline translation at the OS level.
    _bin_openflags |= _os.O_BINARY

# Maximum number of candidate names tried before giving up.
if hasattr(_os, 'TMP_MAX'):
    TMP_MAX = _os.TMP_MAX
else:
    TMP_MAX = 10000

# Although it does not have an underscore for historical reasons, this
# variable is an internal implementation detail (see issue 10354).
template = "tmp"
# Internal routines.

# Serializes the one-time lazy initialization of module globals
# (tempdir, _name_sequence).
_once_lock = _allocate_lock()

# Pick a stat-like callable; lstat preferred so symlinks themselves are
# examined rather than their targets.
if hasattr(_os, "lstat"):
    _stat = _os.lstat
elif hasattr(_os, "stat"):
    _stat = _os.stat
else:
    # Fallback.  All we need is something that raises OSError if the
    # file doesn't exist.
    def _stat(fn):
        f = open(fn)
        f.close()
def _exists(fn):
    """Return True if path *fn* exists (per _stat), else False."""
    try:
        _stat(fn)
        return True
    except OSError:
        return False
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is six characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def __next__(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for dummy in "123456"]
return ''.join(letters)
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.

    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory.  If this
    is successful, the test file is deleted.  To prevent denial of
    service, the name of the test file must be randomized."""

    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()

    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.normcase(_os.path.abspath(dir))
        # Try only a few names per directory.
        for seq in range(100):
            name = next(namer)
            filename = _os.path.join(dir, name)
            try:
                # O_EXCL (in _bin_openflags) makes this fail if the name
                # exists; 0o600 keeps the probe file private.
                fd = _os.open(filename, _bin_openflags, 0o600)
                try:
                    try:
                        with _io.open(fd, 'wb', closefd=False) as fp:
                            fp.write(b'blat')
                    finally:
                        _os.close(fd)
                finally:
                    _os.unlink(filename)
                return dir
            except FileExistsError:
                pass
            except OSError:
                break   # no point trying more names in this directory
    raise FileNotFoundError(_errno.ENOENT,
                            "No usable temporary directory found in %s" %
                            dirlist)
_name_sequence = None    # lazily-created shared _RandomNameSequence

def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces."""

    global _name_sequence
    if _name_sequence is None:
        # Double-checked locking: cheap test first, re-test under the
        # lock so only one thread creates the shared name sequence.
        _once_lock.acquire()
        try:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
        finally:
            _once_lock.release()
    return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""

    names = _get_candidate_names()

    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, pre + name + suf)
        try:
            # 0o600 + O_EXCL (carried in flags): private to the creator
            # and guaranteed not to clobber an existing file.
            fd = _os.open(file, flags, 0o600)
            _set_cloexec(fd)
            return (fd, _os.path.abspath(file))
        except FileExistsError:
            continue    # try again
        except PermissionError:
            # This exception is thrown when a directory with the chosen name
            # already exists on windows.
            if _os.name == 'nt':
                continue
            else:
                raise

    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary file name found")
# User visible interfaces.

def gettempprefix():
    """Accessor for tempdir.template."""
    return template
tempdir = None    # user-settable override; resolved lazily by gettempdir()

def gettempdir():
    """Accessor for tempfile.tempdir."""
    global tempdir
    if tempdir is None:
        # Double-checked locking: compute the default exactly once even
        # with concurrent callers.
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
"""User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is specified, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is specified, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is specified, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it.
"""
if dir is None:
dir = gettempdir()
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags)
def mkdtemp(suffix="", prefix=template, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0o700)
return file
except FileExistsError:
continue # try again
raise FileExistsError(_errno.EEXIST,
"No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
"""User-callable function to return a unique temporary file name. The
file is not created.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
This function is unsafe and should not be used. The file name
refers to a file that did not exist at some point, but by the time
you get around to creating it, someone else may have beaten you to
the punch.
"""
## from warnings import warn as _warn
## _warn("mktemp is a potential security risk to your program",
## RuntimeWarning, stacklevel=2)
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
if not _exists(file):
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary filename found")
class _TemporaryFileWrapper:
    """Temporary file wrapper

    This class provides a wrapper around files opened for
    temporary use.  In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """

    def __init__(self, file, name, delete=True):
        self.file = file            # underlying file object
        self.name = name            # path of the temporary file
        self.close_called = False   # guards against double close/unlink
        self.delete = delete        # unlink the file on close?

    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__['file']
        a = getattr(file, name)
        if not isinstance(a, int):
            setattr(self, name, a)
        return a

    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self

    # iter() doesn't use __getattr__ to find the __iter__ method
    def __iter__(self):
        return iter(self.file)

    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special.  We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if _os.name != 'nt':
        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out.  Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.
        unlink = _os.unlink

        def close(self):
            if not self.close_called:
                self.close_called = True
                self.file.close()
                if self.delete:
                    self.unlink(self.name)

        def __del__(self):
            self.close()

        # Need to trap __exit__ as well to ensure the file gets
        # deleted when used in a with statement
        def __exit__(self, exc, value, tb):
            result = self.file.__exit__(exc, value, tb)
            self.close()
            return result
    else:
        def __exit__(self, exc, value, tb):
            self.file.__exit__(exc, value, tb)
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
                       newline=None, suffix="", prefix=template,
                       dir=None, delete=True):
    """Create and return a temporary file.
    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to io.open (default "w+b").
    'buffering' -- the buffer size argument to io.open (default -1).
    'encoding' -- the encoding argument to io.open (default None)
    'newline' -- the newline argument to io.open (default None)
    'delete' -- whether the file is deleted on close (default True).
    The file is created as mkstemp() would do it.

    Returns an object with a file-like interface; the name of the file
    is accessible as file.name.  The file will be automatically deleted
    when it is closed unless the 'delete' argument is set to False.
    """

    if dir is None:
        dir = gettempdir()

    flags = _bin_openflags

    # Setting O_TEMPORARY in the flags causes the OS to delete
    # the file when it is closed.  This is only supported by Windows.
    if _os.name == 'nt' and delete:
        flags |= _os.O_TEMPORARY

    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    # Wrap the raw descriptor in a buffered file object with the
    # caller's mode/encoding options.
    file = _io.open(fd, mode, buffering=buffering,
                    newline=newline, encoding=encoding)

    return _TemporaryFileWrapper(file, name, delete)
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
    # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
    # while it is open.
    TemporaryFile = NamedTemporaryFile

else:
    def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
                      newline=None, suffix="", prefix=template,
                      dir=None):
        """Create and return a temporary file.
        Arguments:
        'prefix', 'suffix', 'dir' -- as for mkstemp.
        'mode' -- the mode argument to io.open (default "w+b").
        'buffering' -- the buffer size argument to io.open (default -1).
        'encoding' -- the encoding argument to io.open (default None)
        'newline' -- the newline argument to io.open (default None)
        The file is created as mkstemp() would do it.

        Returns an object with a file-like interface.  The file has no
        name, and will cease to exist when it is closed.
        """

        if dir is None:
            dir = gettempdir()

        flags = _bin_openflags

        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
        try:
            # Unlink immediately: the file stays usable through the open
            # descriptor and vanishes for good when it is closed.
            _os.unlink(name)
            return _io.open(fd, mode, buffering=buffering,
                            newline=newline, encoding=encoding)
        except:
            _os.close(fd)
            raise
class SpooledTemporaryFile:
    """Temporary file wrapper, specialized to switch from BytesIO
    or StringIO to a real file when it exceeds a certain size or
    when a fileno is needed.
    """
    # Class-level default; shadowed per-instance in __init__.
    _rolled = False

    def __init__(self, max_size=0, mode='w+b', buffering=-1,
                 encoding=None, newline=None,
                 suffix="", prefix=template, dir=None):
        if 'b' in mode:
            self._file = _io.BytesIO()
        else:
            # Setting newline="\n" avoids newline translation;
            # this is important because otherwise on Windows we'd
            # get double newline translation upon rollover().
            self._file = _io.StringIO(newline="\n")
        self._max_size = max_size
        self._rolled = False
        # Saved so rollover() can recreate the file on disk with the
        # caller's original options.
        self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
                                   'suffix': suffix, 'prefix': prefix,
                                   'encoding': encoding, 'newline': newline,
                                   'dir': dir}

    def _check(self, file):
        # Roll over to a real file once the spool exceeds max_size
        # (a max_size of 0 disables size-triggered rollover).
        if self._rolled: return
        max_size = self._max_size
        if max_size and file.tell() > max_size:
            self.rollover()

    def rollover(self):
        # Move the in-memory contents into a real TemporaryFile,
        # preserving the current stream position.
        if self._rolled: return
        file = self._file
        newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
        del self._TemporaryFileArgs

        newfile.write(file.getvalue())
        newfile.seek(file.tell(), 0)

        self._rolled = True

    # The method caching trick from NamedTemporaryFile
    # won't work here, because _file may change from a
    # BytesIO/StringIO instance to a real file. So we list
    # all the methods directly.

    # Context management protocol
    def __enter__(self):
        if self._file.closed:
            raise ValueError("Cannot enter context with closed file")
        return self

    def __exit__(self, exc, value, tb):
        self._file.close()

    # file protocol
    def __iter__(self):
        return self._file.__iter__()

    def close(self):
        self._file.close()

    @property
    def closed(self):
        return self._file.closed

    @property
    def encoding(self):
        try:
            return self._file.encoding
        except AttributeError:
            # In-memory spool has no encoding; report the one the real
            # file would be opened with (binary mode has none at all).
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['encoding']

    def fileno(self):
        # An OS-level descriptor is required, so force rollover.
        self.rollover()
        return self._file.fileno()

    def flush(self):
        self._file.flush()

    def isatty(self):
        return self._file.isatty()

    @property
    def mode(self):
        try:
            return self._file.mode
        except AttributeError:
            return self._TemporaryFileArgs['mode']

    @property
    def name(self):
        try:
            return self._file.name
        except AttributeError:
            # No name until the spool has rolled over to a real file.
            return None

    @property
    def newlines(self):
        try:
            return self._file.newlines
        except AttributeError:
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['newline']

    def read(self, *args):
        return self._file.read(*args)

    def readline(self, *args):
        return self._file.readline(*args)

    def readlines(self, *args):
        return self._file.readlines(*args)

    def seek(self, *args):
        self._file.seek(*args)

    @property
    def softspace(self):
        return self._file.softspace

    def tell(self):
        return self._file.tell()

    def truncate(self, size=None):
        if size is None:
            self._file.truncate()
        else:
            # Truncating beyond max_size forces a real file first.
            if size > self._max_size:
                self.rollover()
            self._file.truncate(size)

    def write(self, s):
        file = self._file
        rv = file.write(s)
        self._check(file)
        return rv

    def writelines(self, iterable):
        file = self._file
        rv = file.writelines(iterable)
        self._check(file)
        return rv
class TemporaryDirectory(object):
    """Create and return a temporary directory.  This has the same
    behavior as mkdtemp but can be used as a context manager.  For
    example:

        with TemporaryDirectory() as tmpdir:
            ...

    Upon exiting the context, the directory and everything contained
    in it are removed.
    """

    def __init__(self, suffix="", prefix=template, dir=None):
        self._closed = False
        self.name = None # Handle mkdtemp raising an exception
        self.name = mkdtemp(suffix, prefix, dir)

    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.name)

    def __enter__(self):
        return self.name

    def cleanup(self, _warn=False):
        # Remove the tree exactly once, tolerating the partial teardown
        # of module globals during interpreter shutdown.
        if self.name and not self._closed:
            try:
                self._rmtree(self.name)
            except (TypeError, AttributeError) as ex:
                # Issue #10188: Emit a warning on stderr
                # if the directory could not be cleaned
                # up due to missing globals
                if "None" not in str(ex):
                    raise
                print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
                      file=_sys.stderr)
                return
            self._closed = True
            if _warn:
                self._warn("Implicitly cleaning up {!r}".format(self),
                           ResourceWarning)

    def __exit__(self, exc, value, tb):
        self.cleanup()

    def __del__(self):
        # Issue a ResourceWarning if implicit cleanup needed
        self.cleanup(_warn=True)

    # XXX (ncoghlan): The following code attempts to make
    # this class tolerant of the module nulling out process
    # that happens during CPython interpreter shutdown
    # Alas, it doesn't actually manage it. See issue #10188
    _listdir = staticmethod(_os.listdir)
    _path_join = staticmethod(_os.path.join)
    _isdir = staticmethod(_os.path.isdir)
    _islink = staticmethod(_os.path.islink)
    _remove = staticmethod(_os.remove)
    _rmdir = staticmethod(_os.rmdir)
    _os_error = OSError
    _warn = _warnings.warn

    def _rmtree(self, path):
        # Essentially a stripped down version of shutil.rmtree.  We can't
        # use globals because they may be None'ed out at shutdown.
        for name in self._listdir(path):
            fullname = self._path_join(path, name)
            try:
                isdir = self._isdir(fullname) and not self._islink(fullname)
            except self._os_error:
                isdir = False
            if isdir:
                self._rmtree(fullname)
            else:
                try:
                    self._remove(fullname)
                except self._os_error:
                    pass
        try:
            self._rmdir(path)
        except self._os_error:
            pass
| gpl-3.0 |
kalxas/QGIS | python/core/additions/qgsfunction.py | 30 | 6622 | # -*- coding: utf-8 -*-
"""
***************************************************************************
qgsfunction.py
---------------------
Date : May 2018
Copyright : (C) 2018 by Denis Rouzaud
Email : denis@opengis.ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
import inspect
import string
from builtins import str
from qgis.PyQt.QtCore import QCoreApplication
from qgis._core import QgsExpressionFunction, QgsExpression, QgsMessageLog, QgsFeatureRequest, Qgis
def register_function(function, arg_count, group, usesgeometry=False,
                      referenced_columns=None, handlesnull=False, **kwargs):
    """
    Register a Python function to be used as a expression function.

    Functions should take (values, feature, parent) as args:

    Example:
        def myfunc(values, feature, parent):
            pass

    They can also shortcut naming feature and parent args by using *args
    if they are not needed in the function.

    Example:
        def myfunc(values, *args):
            pass

    Functions should return a value compatible with QVariant
    Eval errors can be raised using parent.setEvalErrorString("Error message")

    :param function: the Python callable to register
    :param arg_count: number of arguments, or "auto" to derive it from the
        function signature
    :param group: the expression group to register the function under
    :param usesgeometry: True if the function needs access to the geometry
    :param referenced_columns: field names the function depends on; defaults
        to [QgsFeatureRequest.ALL_ATTRIBUTES].  A None sentinel is used so a
        mutable list is not shared across calls as a default argument.
    :param handlesnull: Needs to be set to True if this function does not always return NULL if any parameter is NULL. Default False.
    :return: the registered wrapper function, or None if an existing function
        of the same name could not be unregistered first
    """
    if referenced_columns is None:
        referenced_columns = [QgsFeatureRequest.ALL_ATTRIBUTES]

    def _wants_context(func):
        # True if the wrapped function declares a trailing 'context'
        # parameter.  Guarded so *args-only functions (empty args list)
        # do not raise IndexError.
        spec_args = inspect.getfullargspec(func).args
        return bool(spec_args) and spec_args[-1] == 'context'

    class QgsPyExpressionFunction(QgsExpressionFunction):

        def __init__(self, func, name, args, group, helptext='', usesGeometry=True,
                     referencedColumns=QgsFeatureRequest.ALL_ATTRIBUTES, expandargs=False, handlesNull=False):
            QgsExpressionFunction.__init__(self, name, args, group, helptext)
            self.function = func
            self.expandargs = expandargs
            self.uses_geometry = usesGeometry
            self.referenced_columns = referencedColumns
            self.handles_null = handlesNull

        def func(self, values, context, parent, node):
            # Called by the expression engine: dispatch to the wrapped
            # Python function and convert any exception into an eval error.
            feature = None
            if context:
                feature = context.feature()
            try:
                if self.expandargs:
                    values.append(feature)
                    values.append(parent)
                    if _wants_context(self.function):
                        values.append(context)
                    return self.function(*values)
                else:
                    if _wants_context(self.function):
                        # Bug fix: the result of this call used to be
                        # discarded and the function invoked a second time
                        # without the context argument.
                        return self.function(values, feature, parent, context)
                    return self.function(values, feature, parent)
            except Exception as ex:
                parent.setEvalErrorString(str(ex))
                return None

        def usesGeometry(self, node):
            return self.uses_geometry

        def referencedColumns(self, node):
            return self.referenced_columns

        def handlesNull(self):
            return self.handles_null

    helptemplate = string.Template("""<h3>$name function</h3><br>$doc""")
    name = kwargs.get('name', function.__name__)
    helptext = kwargs.get('helpText') or function.__doc__ or ''
    helptext = helptext.strip()
    expandargs = False

    if arg_count == "auto":
        # Work out the number of args we need.
        # Number of function args - 2. The last two args are always feature, parent.
        args = inspect.getfullargspec(function).args
        arg_count = len(args) - 2
        if _wants_context(function):
            arg_count -= 1
        expandargs = True

    register = kwargs.get('register', True)
    if register and QgsExpression.isFunctionName(name):
        if not QgsExpression.unregisterFunction(name):
            msgtitle = QCoreApplication.translate("UserExpressions", "User expressions")
            msg = QCoreApplication.translate("UserExpressions",
                                             "The user expression {0} already exists and could not be unregistered.").format(
                name)
            QgsMessageLog.logMessage(msg + "\n", msgtitle, Qgis.Warning)
            return None

    function.__name__ = name
    helptext = helptemplate.safe_substitute(name=name, doc=helptext)
    f = QgsPyExpressionFunction(function, name, arg_count, group, helptext, usesgeometry, referenced_columns,
                                expandargs, handlesnull)

    # This doesn't really make any sense here but does when used from a decorator context
    # so it can stay.
    if register:
        QgsExpression.registerFunction(f)
    return f
def qgsfunction(args='auto', group='custom', **kwargs):
    r"""
    Decorator that defines and registers a user expression function.

    :param args: Number of parameters, set to 'auto' to accept a variable length of parameters.
    :param group: The expression group to which this expression should be added.
    :param \**kwargs:
        See below

    :Keyword Arguments:

    * *referenced_columns* (``list``) --
      An array of field names on which this expression works. Can be set to ``[QgsFeatureRequest.ALL_ATTRIBUTES]``. By default empty.
    * *usesgeometry* (``bool``) --
      Defines if this expression requires the geometry. By default False.
    * *handlesnull* (``bool``) --
      Defines if this expression has custom handling for NULL values. If False, the result will always be NULL as soon as any parameter is NULL. False by default.

    Example:
        @qgsfunction(2, 'test'):
        def add(values, feature, parent):
            pass

    Will create and register a function in QgsExpression called 'add' in the
    'test' group that takes two arguments.

    or not using feature and parent:

    Example:
        @qgsfunction(2, 'test'):
        def add(values, *args):
            pass
    """
    def make_expression_function(python_function):
        # All the heavy lifting is delegated to register_function().
        return register_function(python_function, args, group, **kwargs)
    return make_expression_function
| gpl-2.0 |
UrusTeam/android_ndk_toolchain_cross | lib/python2.7/ctypes/test/test_parameters.py | 76 | 6552 | import unittest, sys
class SimpleTypesTestCase(unittest.TestCase):
    """Python 2 tests for ctypes parameter conversion (the from_param
    protocol used when a value is passed through argtypes)."""
    def setUp(self):
        # Force a known ascii/strict conversion mode; _ctypes may lack
        # set_conversion_mode on some builds, in which case do nothing.
        import ctypes
        try:
            from _ctypes import set_conversion_mode
        except ImportError:
            pass
        else:
            self.prev_conv_mode = set_conversion_mode("ascii", "strict")
    def tearDown(self):
        # Restore the conversion mode saved in setUp (if any).
        try:
            from _ctypes import set_conversion_mode
        except ImportError:
            pass
        else:
            set_conversion_mode(*self.prev_conv_mode)
    def test_subclasses(self):
        from ctypes import c_void_p, c_char_p
        # ctypes 0.9.5 and before did overwrite from_param in SimpleType_new
        class CVOIDP(c_void_p):
            def from_param(cls, value):
                return value * 2
            from_param = classmethod(from_param)
        class CCHARP(c_char_p):
            def from_param(cls, value):
                return value * 4
            from_param = classmethod(from_param)
        self.assertEqual(CVOIDP.from_param("abc"), "abcabc")
        self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc")
        # c_wchar_p is optional (builds without wchar support lack it).
        try:
            from ctypes import c_wchar_p
        except ImportError:
            return
        class CWCHARP(c_wchar_p):
            def from_param(cls, value):
                return value * 3
            from_param = classmethod(from_param)
        self.assertEqual(CWCHARP.from_param("abc"), "abcabcabc")
    # XXX Replace by c_char_p tests
    def test_cstrings(self):
        from ctypes import c_char_p, byref
        # c_char_p.from_param on a Python String packs the string
        # into a cparam object
        s = "123"
        self.assertTrue(c_char_p.from_param(s)._obj is s)
        # new in 0.9.1: convert (encode) unicode to ascii
        self.assertEqual(c_char_p.from_param(u"123")._obj, "123")
        self.assertRaises(UnicodeEncodeError, c_char_p.from_param, u"123\377")
        self.assertRaises(TypeError, c_char_p.from_param, 42)
        # calling c_char_p.from_param with a c_char_p instance
        # returns the argument itself:
        a = c_char_p("123")
        self.assertTrue(c_char_p.from_param(a) is a)
    def test_cw_strings(self):
        from ctypes import byref
        try:
            from ctypes import c_wchar_p
        except ImportError:
##            print "(No c_wchar_p)"
            return
        s = u"123"
        if sys.platform == "win32":
            # On Windows the unicode object is packed without a copy.
            self.assertTrue(c_wchar_p.from_param(s)._obj is s)
        self.assertRaises(TypeError, c_wchar_p.from_param, 42)
        # new in 0.9.1: convert (decode) ascii to unicode
        self.assertEqual(c_wchar_p.from_param("123")._obj, u"123")
        self.assertRaises(UnicodeDecodeError, c_wchar_p.from_param, "123\377")
        pa = c_wchar_p.from_param(c_wchar_p(u"123"))
        self.assertEqual(type(pa), c_wchar_p)
    def test_int_pointers(self):
        from ctypes import c_short, c_uint, c_int, c_long, POINTER, pointer
        LPINT = POINTER(c_int)
##        p = pointer(c_int(42))
##        x = LPINT.from_param(p)
        x = LPINT.from_param(pointer(c_int(42)))
        self.assertEqual(x.contents.value, 42)
        self.assertEqual(LPINT(c_int(42)).contents.value, 42)
        self.assertEqual(LPINT.from_param(None), None)
        # Pointer types are only interchangeable with the exact item type.
        if c_int != c_long:
            self.assertRaises(TypeError, LPINT.from_param, pointer(c_long(42)))
        self.assertRaises(TypeError, LPINT.from_param, pointer(c_uint(42)))
        self.assertRaises(TypeError, LPINT.from_param, pointer(c_short(42)))
    def test_byref_pointer(self):
        # The from_param class method of POINTER(typ) classes accepts what is
        # returned by byref(obj), it type(obj) == typ
        from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
        LPINT = POINTER(c_int)
        LPINT.from_param(byref(c_int(42)))
        self.assertRaises(TypeError, LPINT.from_param, byref(c_short(22)))
        if c_int != c_long:
            self.assertRaises(TypeError, LPINT.from_param, byref(c_long(22)))
        self.assertRaises(TypeError, LPINT.from_param, byref(c_uint(22)))
    def test_byref_pointerpointer(self):
        # See above
        from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
        LPLPINT = POINTER(POINTER(c_int))
        LPLPINT.from_param(byref(pointer(c_int(42))))
        self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_short(22))))
        if c_int != c_long:
            self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_long(22))))
        self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_uint(22))))
    def test_array_pointers(self):
        from ctypes import c_short, c_uint, c_int, c_long, POINTER
        INTARRAY = c_int * 3
        ia = INTARRAY()
        self.assertEqual(len(ia), 3)
        self.assertEqual([ia[i] for i in range(3)], [0, 0, 0])
        # Pointers are only compatible with arrays containing items of
        # the same type!
        LPINT = POINTER(c_int)
        LPINT.from_param((c_int*3)())
        self.assertRaises(TypeError, LPINT.from_param, c_short*3)
        self.assertRaises(TypeError, LPINT.from_param, c_long*3)
        self.assertRaises(TypeError, LPINT.from_param, c_uint*3)
##    def test_performance(self):
##        check_perf()
    def test_noctypes_argtype(self):
        # Arbitrary objects may appear in argtypes as long as they expose a
        # from_param method; exercise success, pass-through and failure.
        import _ctypes_test
        from ctypes import CDLL, c_void_p, ArgumentError
        func = CDLL(_ctypes_test.__file__)._testfunc_p_p
        func.restype = c_void_p
        # TypeError: has no from_param method
        self.assertRaises(TypeError, setattr, func, "argtypes", (object,))
        class Adapter(object):
            def from_param(cls, obj):
                return None
        func.argtypes = (Adapter(),)
        self.assertEqual(func(None), None)
        self.assertEqual(func(object()), None)
        class Adapter(object):
            def from_param(cls, obj):
                return obj
        func.argtypes = (Adapter(),)
        # don't know how to convert parameter 1
        self.assertRaises(ArgumentError, func, object())
        self.assertEqual(func(c_void_p(42)), 42)
        class Adapter(object):
            def from_param(cls, obj):
                raise ValueError(obj)
        func.argtypes = (Adapter(),)
        # ArgumentError: argument 1: ValueError: 99
        self.assertRaises(ArgumentError, func, 99)
################################################################
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
osiell/oerplib | oerplib/service/osv/osv.py | 3 | 10524 | # -*- coding: UTF-8 -*-
##############################################################################
#
# OERPLib
# Copyright (C) 2011-2013 Sébastien Alix.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""Provide the :class:`Model` class which allow to access dynamically to all
methods proposed by a data model."""
import sys # to check Python version at runtime
import collections
from oerplib.tools import v
from oerplib import error
from oerplib.service.osv import fields, browse
class Model(object):
    """.. versionadded:: 0.5
    Represent a data model.
    .. note::
        This class have to be used through the :func:`oerplib.OERP.get`
        method.
    >>> import oerplib
    >>> oerp = oerplib.OERP('localhost')
    >>> user = oerp.login('admin', 'passwd', 'database')
    >>> user_obj = oerp.get('res.users')
    >>> user_obj
    <oerplib.service.osv.osv.Model object at 0xb75ba4ac>
    >>> user_obj.name_get(user.id) # Use any methods from the model instance
    [[1, 'Administrator']]
    .. warning::
        The only method implemented in this class is ``browse``. Except this
        one, method calls are purely dynamic. As long as you know the signature
        of the model method targeted, you will be able to use it
        (see the :ref:`tutorial <tutorials-execute-queries>`).
    """
    # Attribute names that must never be generated as model fields on the
    # dynamic browse class (they would clash with its bookkeeping slots).
    fields_reserved = ['id', '__oerp__', '__osv__', '__data__']
    def __init__(self, oerp, model):
        super(Model, self).__init__()
        self._oerp = oerp
        self._name = model
        # Build the per-model browse_record subclass once, up front.
        self._browse_class = self._generate_browse_class()
    def browse(self, ids, context=None):
        """Browse one or several records (if `ids` is a list of IDs)
        from `model`. The fields and values for such objects are generated
        dynamically.
        >>> oerp.get('res.partner').browse(1)
        browse_record(res.partner, 1)
        >>> [partner.name for partner in oerp.get('res.partner').browse([1, 2])]
        [u'Your Company', u'ASUStek']
        A list of data types used by ``browse_record`` fields are
        available :ref:`here <fields>`.
        :return: a ``browse_record`` instance
        :return: a generator to iterate on ``browse_record`` instances
        :raise: :class:`oerplib.error.RPCError`
        """
        if isinstance(ids, list):
            return browse.BrowseRecordIterator(self, ids, context=context)
            #return browse.BrowseRecordIterator(
            # model=self,
            # ids=ids,
            # context=context)
        else:
            # Single ID: instantiate one record and load its field values.
            obj = self._browse_class(ids)
            self._refresh(obj, context)
            return obj
            #return self.browse(ids, context)
    def _generate_browse_class(self):
        """Generate a class with all its fields corresponding to
        the model name supplied and return them.
        """
        # Retrieve server fields info and generate corresponding local fields
        fields_get = self._oerp.execute(self._name, 'fields_get')
        cls_name = self._name.replace('.', '_')
        # Encode the class name for the Python2 'type()' function.
        # No need to do this for Python3.
        if type(cls_name) == unicode and sys.version_info < (3,):
            cls_name = cls_name.encode('utf-8')
        cls_fields = {}
        for field_name, field_data in fields_get.items():
            if field_name not in Model.fields_reserved:
                cls_fields[field_name] = fields.generate_field(
                    self, field_name, field_data)
        # Case where no field 'name' exists, we generate one (which will be
        # in readonly mode) in purpose to be filled with the 'name_get' method
        if 'name' not in cls_fields:
            field_data = {'type': 'text', 'string': 'Name', 'readonly': True}
            cls_fields['name'] = fields.generate_field(self, 'name', field_data)
        cls = type(cls_name, (browse.BrowseRecord,), {})
        cls.__oerp__ = self._oerp
        cls.__osv__ = {'name': self._name, 'columns': cls_fields}
        slots = ['__oerp__', '__osv__', '__dict__', '__data__']
        slots.extend(cls_fields.keys())
        cls.__slots__ = slots
        return cls
    def _write_record(self, obj, context=None):
        """Send values of fields updated to the server."""
        context = context or self._oerp.context
        obj_data = obj.__data__
        vals = {}
        # Only fields that exist in the raw server data are written back.
        for field_name in obj_data['updated_values']:
            if field_name in obj_data['raw_data']:
                field = self._browse_class.__osv__['columns'][field_name]
                field_value = obj.__data__['updated_values'][field_name]
                # Many2One fields
                # (only the related record ID is sent, not the (id, name) pair)
                if isinstance(field, fields.Many2OneField):
                    vals[field_name] = field_value and field_value[0]
                # All other fields
                else:
                    vals[field_name] = field_value
        try:
            # Servers < 6.1 accept context only as a positional argument.
            if v(self._oerp.version) < v('6.1'):
                res = self.write([obj.id], vals, context)
            else:
                res = self.write([obj.id], vals, context=context)
        except error.Error as exc:
            raise exc
        else:
            # Update raw_data dictionary
            # FIXME: make it optional to avoid a RPC request?
            self._refresh(obj, context)
        return res
    def _refresh(self, obj, context=None):
        """Retrieve field values from the server.
        May be used to restore the original values
        in the purpose to cancel all changes made.
        """
        context = context or self._oerp.context
        obj_data = obj.__data__
        obj_data['context'] = context
        # Get basic fields (no relational ones)
        basic_fields = []
        for field_name, field in obj.__osv__['columns'].iteritems():
            if not getattr(field, 'relation', False):
                basic_fields.append(field_name)
            else:
                # Relational fields are resolved lazily; mark them unloaded.
                obj_data['raw_data'][field_name] = None
        # Fill fields with values of the record
        if obj.id:
            if v(self._oerp.version) < v('6.1'):
                data = self.read([obj.id], basic_fields, context)
                if data:
                    obj_data['raw_data'].update(data[0])
                else:
                    obj_data['raw_data'] = False
            else:
                data = self.read([obj.id], basic_fields, context=context)
                if data:
                    obj_data['raw_data'].update(data[0])
                else:
                    obj_data['raw_data'] = False
            if obj_data['raw_data'] is False:
                raise error.RPCError(
                    "There is no '{model}' record with ID {obj_id}.".format(
                        model=obj.__class__.__osv__['name'], obj_id=obj.id))
        # No ID: fields filled with default values
        else:
            if v(self._oerp.version) < v('6.1'):
                default_get = self.default_get(
                    obj.__osv__['columns'].keys(), context)
            else:
                default_get = self.default_get(
                    obj.__osv__['columns'].keys(), context=context)
            obj_data['raw_data'] = {}
            for field_name in obj.__osv__['columns']:
                obj_data['raw_data'][field_name] = False
            obj_data['raw_data'].update(default_get)
        self._reset(obj)
    def _reset(self, obj):
        """Cancel all changes by restoring field values with original values
        obtained during the last refresh (object instanciation or
        last call to _refresh() method).
        """
        obj_data = obj.__data__
        obj_data['updated_values'] = {}
        # Load fields and their values
        for field in self._browse_class.__osv__['columns'].values():
            if field.name in obj_data['raw_data']:
                obj_data['values'][field.name] = \
                    obj_data['raw_data'][field.name]
                setattr(obj.__class__, field.name, field)
    def _unlink_record(self, obj, context=None):
        """Delete the object from the server."""
        context = context or self._oerp.context
        if v(self._oerp.version) < v('6.1'):
            return self.unlink([obj.id], context)
        else:
            return self.unlink([obj.id], context=context)
    def __getattr__(self, method):
        """Provide a dynamic access to a RPC method."""
        def rpc_method(*args, **kwargs):
            """Return the result of the RPC request."""
            if v(self._oerp.version) < v('6.1'):
                # Old servers only support positional arguments.
                if kwargs:
                    raise error.RPCError(
                        "Named parameters are not supported by the version "
                        "of this server.")
                result = self._oerp.execute(
                    self._browse_class.__osv__['name'], method, *args)
            else:
                # Inject the default context unless the caller supplied one.
                if self._oerp.config['auto_context'] \
                        and 'context' not in kwargs:
                    kwargs['context'] = self._oerp.context
                result = self._oerp.execute_kw(
                    self._browse_class.__osv__['name'], method, args, kwargs)
            return result
        return rpc_method
    def __repr__(self):
        return "Model(%r)" % (self._browse_class.__osv__['name'])
    # ---------------------------- #
    # -- MutableMapping methods -- #
    # ---------------------------- #
    def __getitem__(self, obj_id):
        return self.browse(obj_id)
    def __iter__(self):
        ids = self.search([])
        return browse.BrowseRecordIterator(self, ids)
    def __len__(self):
        return self._oerp.search(self._browse_class.__osv__['name'], count=True)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| lgpl-3.0 |
maciek263/django2 | myvenv/Lib/site-packages/django/contrib/gis/db/models/proxy.py | 164 | 2605 | """
The GeometryProxy object, allows for lazy-geometries. The proxy uses
Python descriptors for instantiating and setting Geometry objects
corresponding to geographic model fields.
Thanks to Robert Coup for providing this functionality (see #4322).
"""
from django.utils import six
class GeometryProxy(object):
    """
    Descriptor attached to geographic model fields.

    Lazily converts the raw column value (HEXEWKB/WKT) stored on a model
    instance into a geometry object of the configured class, and validates
    values on assignment.
    """
    def __init__(self, klass, field):
        """
        Keep a reference to the geometry class (not an instance) used to
        build values and to the GeometryField this proxy serves.
        """
        self._field = field
        self._klass = klass

    def __get__(self, obj, type=None):
        """
        Return the geometry for ``obj``, building (and caching) it from the
        stored raw value on first access.  Currently, only GEOS or OGR
        geometries are supported.
        """
        if obj is None:
            # Accessed on the class itself, not on an instance.
            return self

        raw = obj.__dict__[self._field.attname]
        if isinstance(raw, self._klass):
            return raw
        if raw is None or raw == '':
            return None
        # Build a geometry from the field's raw contents and store it on the
        # instance so subsequent accesses skip the conversion.
        geom = self._klass(raw)
        setattr(obj, self._field.attname, geom)
        return geom

    def __set__(self, obj, value):
        """
        Validate and store ``value``.  Accepts a geometry instance of the
        proxied class, None, or a raw WKT/HEX/WKB representation.
        """
        # The OGC Geometry type of the field.
        gtype = self._field.geom_type
        # The geometry type must match that of the field -- unless the
        # general GeometryField is used.
        matches_field = isinstance(value, self._klass) and (
            str(value.geom_type).upper() == gtype or gtype == 'GEOMETRY')
        if matches_field:
            # Assigning the SRID to the geometry.
            if value.srid is None:
                value.srid = self._field.srid
        elif value is None or isinstance(value, six.string_types + (six.memoryview,)):
            # Set with None, WKT, HEX, or WKB
            pass
        else:
            raise TypeError('Cannot set %s GeometryProxy (%s) with value of type: %s' % (
                obj.__class__.__name__, gtype, type(value)))

        # Store the (possibly raw) value in the instance dict and return it.
        obj.__dict__[self._field.attname] = value
        return value
| mit |
xs2maverick/adhocracy3.mercator | src/adhocracy_core/adhocracy_core/events/__init__.py | 2 | 7835 | """Hooks to modify runtime behavior (use 'subscriber.py' in you package).
In addition we have the normal substanced events:
https://substanced.readthedocs.org/en/latest/api.html#module-substanced.event
"""
from pyramid.request import Request
from pyramid.registry import Registry
from zope.interface import implementer
from zope.interface import Interface
from zope.interface.interfaces import IInterface
from adhocracy_core.interfaces import IItemVersionNewVersionAdded
from adhocracy_core.interfaces import ISheetReferenceNewVersion
from adhocracy_core.interfaces import IResourceCreatedAndAdded
from adhocracy_core.interfaces import IResourceSheetModified
from adhocracy_core.interfaces import ILocalRolesModfied
from adhocracy_core.interfaces import ISheet
from adhocracy_core.interfaces import ISheetBackReferenceAdded
from adhocracy_core.interfaces import ISheetBackReferenceRemoved
@implementer(IResourceCreatedAndAdded)
class ResourceCreatedAndAdded:
    """Event sent after a new IResource has been created and added.

    Attributes (mirroring the constructor arguments):

    `object`: the created :class:`adhocracy_core.interfaces.IResource`
    `parent`: the :class:`adhocracy_core.interfaces.IResource` it was added to
    `registry`: the :class:`pyramid.registry.Registry`
    `creator`: the :class:`adhocracy_core.resource.principal.IUser`
    """

    def __init__(self, object, parent, registry, creator):
        """Store the event payload."""
        self.creator = creator
        self.registry = registry
        self.parent = parent
        self.object = object
@implementer(IResourceSheetModified)
class ResourceSheetModified:
    """Event sent after a resource sheet has been modified.

    Attributes (mirroring the constructor arguments):

    `object`: the modified :class:`adhocracy_core.interfaces.IResource`
    `isheet`: the :class:`adhocracy_core.interfaces.ISheet` that changed
    `registry`: the :class:`pyramid.registry.Registry`
    `old_appstruct`: the old :term:`appstruct` data (dict)
    `new_appstruct`: the new :term:`appstruct` data (dict)
    `request`: the current request or None
    """

    def __init__(self, object, isheet, registry, old_appstruct, new_appstruct,
                 request: Request):
        """Store the event payload."""
        self.request = request
        self.new_appstruct = new_appstruct
        self.old_appstruct = old_appstruct
        self.registry = registry
        self.isheet = isheet
        self.object = object
@implementer(IItemVersionNewVersionAdded)
class ItemVersionNewVersionAdded:
    """Event sent while a new IItemVersion is being added.

    Attributes (mirroring the constructor arguments):

    `object`: the :class:`adhocracy_core.interfaces.IItem`
    `new_version`: the new :class:`adhocracy_core.interfaces.IItemVersion`
    `registry`: the :class:`pyramid.registry.Registry`
    `creator`: the :class:`adhocracy_core.resource.principal.IUser`
    """

    def __init__(self, object, new_version, registry, creator):
        """Store the event payload."""
        self.creator = creator
        self.registry = registry
        self.new_version = new_version
        self.object = object
@implementer(ISheetReferenceNewVersion)
class SheetReferenceNewVersion:
    """ An event type sent when a referenced ItemVersion has a new follower.
    :param object(adhocracy_core.interfaces.IResource):
    :param isheet(adhocracy_core.interfaces.IISheet):
    :param isheet_field(str): field name with updated reference
    :param old_version(adhocracy_core.interfaces.IItemVersion): old referenced
                                                                resource
    :param new_version(adhocracy_core.interfaces.IItemVersion): new referenced
                                                                resource
    :param registry(pyramid.registry.Registry):
    :param root_versions(list): IItemVersions not in the subtree of
                                these root resources should ignore
                                this event. Defaults to an empty list.
    :param creator(adhocracy_core.resource.principal.IUser':
    :param is_batchmode(bool): Flag to do sheet autoupdates in batch request
                               mode. Defaults to False.
    """

    def __init__(self,
                 object,
                 isheet,
                 isheet_field,
                 old_version,
                 new_version,
                 registry,
                 creator,
                 root_versions=None,
                 is_batchmode=False,
                 ):
        """Initialize self."""
        self.object = object
        self.isheet = isheet
        self.isheet_field = isheet_field
        self.old_version = old_version
        self.new_version = new_version
        self.registry = registry
        self.creator = creator
        # Bug fix: the default used to be a mutable `[]` default argument,
        # which would be shared between every event created with the default.
        # A None sentinel gives each event its own fresh list.
        self.root_versions = [] if root_versions is None else root_versions
        self.is_batchmode = is_batchmode
@implementer(ISheetBackReferenceRemoved)
class SheetBackReferenceRemoved:
    """Event sent after a back reference has been removed.

    Attributes (mirroring the constructor arguments):

    `object`: the :class:`adhocracy_core.interfaces.IResource`
    `isheet`: the :class:`adhocracy_core.interfaces.ISheet`
    `reference`: the :class:`adhocracy_core.graph.Reference` that was
        targeting `object`
    `registry`: the :class:`pyramid.content.Registry`
    """

    def __init__(self, object, isheet, reference, registry):
        """Store the event payload."""
        self.registry = registry
        self.reference = reference
        self.isheet = isheet
        self.object = object
@implementer(ISheetBackReferenceAdded)
class SheetBackReferenceAdded:
    """Event sent after a back reference has been added.

    Attributes (mirroring the constructor arguments):

    `object`: the :class:`adhocracy_core.interfaces.IResource`
    `isheet`: the :class:`adhocracy_core.interfaces.ISheet`
    `reference`: the :class:`adhocracy_core.graph.Reference` that is
        targeting `object`
    `registry`: the :class:`pyramid.content.Registry`
    """

    def __init__(self, object, isheet, reference, registry):
        """Store the event payload."""
        self.registry = registry
        self.reference = reference
        self.isheet = isheet
        self.object = object
@implementer(ILocalRolesModfied)
class LocalRolesModified:
    """Event sent after a resource's :term:`local role` has been modified.

    Attributes (mirroring the constructor arguments):

    `object`: the resource whose local roles changed
    `new_local_roles`: the new local roles (dict)
    `old_local_roles`: the previous local roles (dict)
    `registry`: the :class:`pyramid.registry.Registry`
    """

    def __init__(self, object, new_local_roles: dict, old_local_roles: dict,
                 registry: Registry):
        """Store the event payload."""
        self.registry = registry
        self.old_local_roles = old_local_roles
        self.new_local_roles = new_local_roles
        self.object = object
class _ISheetPredicate:
    """Subscriber predicate 'event_isheet' matching against event.isheet."""

    def __init__(self, isheet: IInterface, config):
        assert isheet.isOrExtends(ISheet)
        self.isheet = isheet

    def text(self) -> str:
        """Return the textual representation used for introspection."""
        return 'isheet = {}'.format(self.isheet.__identifier__)

    # Pyramid uses phash to discriminate registrations; reuse text().
    phash = text

    def __call__(self, event):
        # Events without an isheet attribute fall back to the bare
        # Interface, which never extends a concrete sheet.
        isheet_of_event = getattr(event, 'isheet', Interface)
        return isheet_of_event.isOrExtends(self.isheet)
class _InterfacePredicate:
    """Subscriber predicate 'object_iface' checking event.object's ifaces."""

    def __init__(self, interface: IInterface, config):
        assert interface.isOrExtends(Interface)
        self.interface = interface

    def text(self) -> str:
        """Return the textual representation used for introspection."""
        return 'interface = {}'.format(self.interface.__identifier__)

    # Pyramid uses phash to discriminate registrations; reuse text().
    phash = text

    def __call__(self, event):
        return self.interface.providedBy(event.object)
def includeme(config):
    """Register the 'event_isheet' and 'object_iface' subscriber predicates."""
    # Substanced's event machinery must be included first.
    config.include('substanced.event')
    config.add_subscriber_predicate('event_isheet', _ISheetPredicate)
    config.add_subscriber_predicate('object_iface', _InterfacePredicate)
CasparLi/calibre | src/calibre/gui2/convert/xpath_wizard.py | 14 | 3351 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from PyQt5.Qt import QDialog, QWidget, Qt, QDialogButtonBox, QVBoxLayout
from calibre.gui2.convert.xpath_wizard_ui import Ui_Form
from calibre.gui2.convert.xexp_edit_ui import Ui_Form as Ui_Edit
from calibre.utils.localization import localize_user_manual_link
class WizardWidget(QWidget, Ui_Form):

    """Form that builds a simple XPath match expression from a tag name,
    an optional attribute and an optional attribute/text value."""

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.setupUi(self)
        # The example label contains a %s placeholder for the manual link;
        # ignore labels that have no placeholder.
        try:
            self.example_label.setText(self.example_label.text() % localize_user_manual_link(
                'http://manual.calibre-ebook.com/xpath.html'))
        except TypeError:
            pass

    @property
    def xpath(self):
        'Build the XPath expression from the current widget contents.'
        tag = unicode(self.tag.currentText()).strip()
        if tag != '*':
            tag = 'h:' + tag
        attr = unicode(self.attribute.text()).strip()
        val = unicode(self.value.text()).strip()
        if attr and val:
            predicate = '[re:test(@%s, "%s", "i")]' % (attr, val)
        elif attr:
            predicate = '[@%s]' % attr
        elif val:
            predicate = '[re:test(., "%s", "i")]' % (val,)
        else:
            predicate = ''
        return '//' + tag + predicate
class Wizard(QDialog):
    # Modal dialog wrapping WizardWidget with OK/Cancel buttons.
    def __init__(self, parent=None):
        QDialog.__init__(self, parent)
        self.resize(440, 480)
        self.verticalLayout = QVBoxLayout(self)
        self.widget = WizardWidget(self)
        self.verticalLayout.addWidget(self.widget)
        self.buttonBox = QDialogButtonBox(self)
        self.buttonBox.setOrientation(Qt.Horizontal)
        self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
        self.verticalLayout.addWidget(self.buttonBox)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.setModal(Qt.WindowModal)
    @property
    def xpath(self):
        # Expression built by the embedded WizardWidget.
        return self.widget.xpath
class XPathEdit(QWidget, Ui_Edit):

    """Line-edit widget for an XPath expression, with a wizard button and
    validation via lxml."""

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.setupUi(self)
        self.button.clicked.connect(self.wizard)

    def wizard(self):
        'Open the XPath wizard and, if accepted, adopt its expression.'
        wiz = Wizard(self)
        if wiz.exec_() == wiz.Accepted:
            self.edit.setText(wiz.xpath)

    def setObjectName(self, *args):
        QWidget.setObjectName(self, *args)
        # Give the edit widget a history key derived from the object name.
        if hasattr(self, 'edit'):
            self.edit.initialize('xpath_edit_'+unicode(self.objectName()))

    def set_msg(self, msg):
        self.msg.setText(msg)

    @property
    def text(self):
        return unicode(self.edit.text())

    @property
    def xpath(self):
        return self.text

    def check(self):
        '''Return True if the current expression is empty or parses as valid
        XPath (using calibre's namespace map), False otherwise.'''
        from calibre.ebooks.oeb.base import XPNSMAP
        from lxml.etree import XPath
        # Read the property once instead of three times.
        expr = self.text
        try:
            if expr.strip():
                XPath(expr, namespaces=XPNSMAP)
        except Exception:
            # Bug fix: previously a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt. Print the reason for debugging.
            import traceback
            traceback.print_exc()
            return False
        return True
if __name__ == '__main__':
    # Manual smoke test: show the widget standalone and dump the entered
    # expression on exit (Python 2 print statement).
    from PyQt5.Qt import QApplication
    app = QApplication([])
    w = XPathEdit()
    w.setObjectName('test')
    w.show()
    app.exec_()
    print w.xpath
| gpl-3.0 |
microcom/odoo | addons/account_asset/wizard/wizard_asset_compute.py | 47 | 1228 | # -*- coding: utf-8 -*-
from openerp import api, fields, models, _
class AssetDepreciationConfirmationWizard(models.TransientModel):
    """Transient wizard that posts depreciation lines for all running assets
    of the requested type (purchase assets or sale/revenue assets)."""

    _name = "asset.depreciation.confirmation.wizard"
    _description = "asset.depreciation.confirmation.wizard"

    # Accounting date used when posting the generated depreciation entries.
    date = fields.Date('Account Date', required=True, help="Choose the period for which you want to automatically post the depreciation lines of running assets", default=fields.Date.context_today)

    @api.multi
    def asset_compute(self):
        """Generate and post depreciation moves, then return an act_window
        showing the created account.move records.

        The asset type ('purchase' or otherwise) is taken from the caller's
        context, not from a wizard field.
        """
        self.ensure_one()
        context = self._context
        assets = self.env['account.asset.asset'].search([('state', '=', 'open'), ('category_id.type', '=', context.get('asset_type'))])
        created_move_ids = assets._compute_entries(self.date)
        if context.get('asset_type') == 'purchase':
            title = _('Created Asset Moves')
        else:
            title = _('Created Revenue Moves')
        return {
            'name': title,
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'account.move',
            'view_id': False,
            # Domain built as a string of ids; created_move_ids is assumed to
            # be a list of integer move ids -- TODO confirm against
            # _compute_entries.
            'domain': "[('id','in',["+','.join(map(str, created_move_ids))+"])]",
            'type': 'ir.actions.act_window',
        }
| agpl-3.0 |
michaelhowden/eden | modules/tests/inv/warehouse_search.py | 24 | 4021 | # -*- coding: utf-8 -*-
""" Sahana Eden Warehouse Search Module Automated Tests
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
from tests.web2unittest import SeleniumUnitTest
import functools
def _kwsearch(instance, column, items, keyword):
for item in [instance.dt_data_item(i, column) for i in xrange(1, items + 1)]:
if not (keyword.strip().lower() in item.strip().lower()):
return False
return True
class SearchWarehouse(SeleniumUnitTest):
    """Selenium tests for the warehouse search forms.

    Each test computes the expected row count directly from the database,
    then drives the search UI and verifies both the count and -- via the
    ``_kwsearch`` helper -- that every displayed row contains the keyword.
    """

    def setUp(self):
        # NOTE(review): super() is given SeleniumUnitTest rather than
        # SearchWarehouse, which skips SeleniumUnitTest.setUp itself and runs
        # its parent's setUp instead -- confirm this is intentional.
        super(SeleniumUnitTest, self).setUp()
        print "\n"
        # Every test starts from a clean (no saved filter options) search page.
        self.login(account="admin", nexturl="inv/warehouse/search?clear_opts=1")

    def test_warehouse_01_search_name(self):
        """
        @case: warehouse_01
        @description: Search Warehouse - Simple Search
        """
        w = current.s3db["inv_warehouse"]
        key="na"
        # Expected number of matching, non-deleted warehouses from the DB.
        dbRowCount = current.db( (w.deleted != "T") & (w.name.like("%"+ key + "%")) ).count()
        self.search(self.search.advanced_form,
                    True,
                    ({
                        "id": "warehouse_search_simple",
                        "value": key
                    },), dbRowCount,
                    manual_check=functools.partial(_kwsearch, keyword=key, items=dbRowCount, column=2)
                    )

    def test_warehouse_02_search_by_Organization(self):
        """
        @case: warehouse_02
        @description: Search Warehouse - Advanced Search by Organization
        """
        w = current.s3db["inv_warehouse"]
        o = current.s3db["org_organisation"]
        key="Timor-Leste Red Cross Society (Cruz Vermelha de Timor-Leste)"
        # Join warehouses to their organisation and count matches.
        dbRowCount = current.db((w.deleted != "T") & (w.organisation_id == o.id) & (o.name == key)).count()
        self.search(self.search.advanced_form,
                    True,
                    ({
                        "name": "warehouse_search_org",
                        "label": key,
                        "value": True
                    },), dbRowCount,
                    manual_check=functools.partial(_kwsearch, keyword=key, items=dbRowCount, column=3)
                    )

    def test_warehouse_03_search_by_District(self):
        """
        @case: warehouse_03
        @description: Search Warehouse - Advanced Search by District
        """
        w = current.s3db["inv_warehouse"]
        l = current.s3db["gis_location"]
        key="Viqueque"
        # Join warehouses to their location and count those in the district.
        dbRowCount = current.db((w.deleted != "T") & (w.location_id == l.id) & (l.L2 == key)).count()
        self.search(self.search.advanced_form,
                    True,
                    ({
                        "name": "warehouse_search_location",
                        "label": key,
                        "value": True
                    },), dbRowCount,
                    manual_check=functools.partial(_kwsearch, keyword=key, items=dbRowCount, column=5)
                    )
| mit |
Workday/OpenFrame | third_party/WebKit/Tools/Scripts/webkitpy/tool/multicommandtool.py | 51 | 13189 | # Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# MultiCommandTool provides a framework for writing svn-like/git-like tools
# which are called with the following format:
# tool-name [global options] command-name [command options]
import logging
import sys
from optparse import OptionParser, IndentedHelpFormatter, SUPPRESS_USAGE, make_option
from webkitpy.tool.grammar import pluralize
_log = logging.getLogger(__name__)
class TryAgain(Exception):
    """Raised by a command to request that it be executed again."""
class Command(object):
    """Base class for one named sub-command of a MultiCommandTool.

    Subclasses set ``name`` (and optionally ``show_in_main_help``) and
    implement :meth:`execute`.  Positional arguments are declared via a
    space-separated ``argument_names`` string; names wrapped in [brackets]
    are optional, all others are required.
    """

    name = None
    show_in_main_help = False

    def __init__(self, help_text, argument_names=None, options=None, long_help=None, requires_local_commits=False):
        self.help_text = help_text
        self.long_help = long_help
        self.argument_names = argument_names
        self.required_arguments = self._parse_required_arguments(argument_names)
        self.options = options
        self.requires_local_commits = requires_local_commits
        self._tool = None
        # option_parser can be overriden by the tool using set_option_parser
        # This default parser will be used for standalone_help printing.
        self.option_parser = HelpPrintingOptionParser(usage=SUPPRESS_USAGE, add_help_option=False, option_list=self.options)

    def _exit(self, code):
        sys.exit(code)

    # This design is slightly awkward, but we need the
    # the tool to be able to create and modify the option_parser
    # before it knows what Command to run.
    def set_option_parser(self, option_parser):
        self.option_parser = option_parser
        self._add_options_to_parser()

    def _add_options_to_parser(self):
        # Copy this command's options onto the (possibly tool-supplied) parser.
        options = self.options or []
        for option in options:
            self.option_parser.add_option(option)

    # The tool calls bind_to_tool on each Command after adding it to its list.
    def bind_to_tool(self, tool):
        # Command instances can only be bound to one tool at a time.
        if self._tool and tool != self._tool:
            raise Exception("Command already bound to tool!")
        self._tool = tool

    @staticmethod
    def _parse_required_arguments(argument_names):
        """Return the subset of space-separated *argument_names* that are
        required, i.e. not wrapped in [square brackets]."""
        required_args = []
        if not argument_names:
            return required_args
        split_args = argument_names.split(" ")
        for argument in split_args:
            if argument[0] == '[':
                # For now our parser is rather dumb. Do some minimal validation that
                # we haven't confused it.
                if argument[-1] != ']':
                    raise Exception("Failure to parse argument string %s. Argument %s is missing ending ]" % (argument_names, argument))
            else:
                required_args.append(argument)
        return required_args

    def name_with_arguments(self):
        # Usage-style string, e.g. "commandname [options] ARG1 [ARG2]".
        usage_string = self.name
        if self.options:
            usage_string += " [options]"
        if self.argument_names:
            usage_string += " " + self.argument_names
        return usage_string

    def parse_args(self, args):
        return self.option_parser.parse_args(args)

    def check_arguments_and_execute(self, options, args, tool=None):
        """Run execute() if enough positional arguments were supplied,
        otherwise log a usage error and return exit code 1."""
        if len(args) < len(self.required_arguments):
            _log.error("%s required, %s provided. Provided: %s Required: %s\nSee '%s help %s' for usage." % (
                pluralize("argument", len(self.required_arguments)),
                pluralize("argument", len(args)),
                "'%s'" % " ".join(args),
                " ".join(self.required_arguments),
                tool.name(),
                self.name))
            return 1
        # execute() may return None; normalize that to exit code 0.
        return self.execute(options, args, tool) or 0

    def standalone_help(self):
        """Return the full help text (usage, long help, options) used when
        this command is shown on its own."""
        help_text = self.name_with_arguments().ljust(len(self.name_with_arguments()) + 3) + self.help_text + "\n\n"
        if self.long_help:
            help_text += "%s\n\n" % self.long_help
        help_text += self.option_parser.format_option_help(IndentedHelpFormatter())
        return help_text

    def execute(self, options, args, tool):
        raise NotImplementedError, "subclasses must implement"

    # main() exists so that Commands can be turned into stand-alone scripts.
    # Other parts of the code will likely require modification to work stand-alone.
    def main(self, args=sys.argv):
        (options, args) = self.parse_args(args)
        # Some commands might require a dummy tool
        return self.check_arguments_and_execute(options, args)
# FIXME: This should just be rolled into Command. help_text and argument_names do not need to be instance variables.
class AbstractDeclarativeCommand(Command):
    """Command variant whose help text, argument names and long help are
    declared as class attributes instead of constructor arguments."""

    help_text = None
    argument_names = None
    long_help = None

    def __init__(self, options=None, **kwargs):
        super(AbstractDeclarativeCommand, self).__init__(
            self.help_text,
            self.argument_names,
            options=options,
            long_help=self.long_help,
            **kwargs)
class HelpPrintingOptionParser(OptionParser):
    """OptionParser that points the user at --help on errors and supports a
    lazily-computed epilog (via *epilog_method*)."""

    def __init__(self, epilog_method=None, *args, **kwargs):
        self.epilog_method = epilog_method
        OptionParser.__init__(self, *args, **kwargs)

    def error(self, msg):
        self.print_usage(sys.stderr)
        error_message = "%s: error: %s\n" % (self.get_prog_name(), msg)
        # This method is overriden to add this one line to the output:
        error_message += "\nType \"%s --help\" to see usage.\n" % self.get_prog_name()
        self.exit(1, error_message)

    # We override format_epilog to avoid the default formatting which would paragraph-wrap the epilog
    # and also to allow us to compute the epilog lazily instead of in the constructor (allowing it to be context sensitive).
    def format_epilog(self, epilog):
        if self.epilog_method:
            return "\n%s\n" % self.epilog_method()
        return ""
class HelpCommand(AbstractDeclarativeCommand):
    """Built-in 'help' command: prints a command's standalone help, or the
    tool-wide command listing (optionally including hidden commands)."""

    name = "help"
    help_text = "Display information about this program or its subcommands"
    argument_names = "[COMMAND]"

    def __init__(self):
        options = [
            make_option("-a", "--all-commands", action="store_true", dest="show_all_commands", help="Print all available commands"),
        ]
        AbstractDeclarativeCommand.__init__(self, options)
        self.show_all_commands = False # A hack used to pass --all-commands to _help_epilog even though it's called by the OptionParser.

    def _help_epilog(self):
        # Only show commands which are relevant to this checkout's SCM system. Might this be confusing to some users?
        if self.show_all_commands:
            epilog = "All %prog commands:\n"
            relevant_commands = self._tool.commands[:]
        else:
            epilog = "Common %prog commands:\n"
            relevant_commands = filter(self._tool.should_show_in_main_help, self._tool.commands)
        # Align help texts on the longest command name (Python 2: filter/map
        # return lists, and list.sort accepts a cmp function).
        longest_name_length = max(map(lambda command: len(command.name), relevant_commands))
        relevant_commands.sort(lambda a, b: cmp(a.name, b.name))
        command_help_texts = map(lambda command: " %s %s\n" % (command.name.ljust(longest_name_length), command.help_text), relevant_commands)
        epilog += "%s\n" % "".join(command_help_texts)
        epilog += "See '%prog help --all-commands' to list all commands.\n"
        epilog += "See '%prog help COMMAND' for more information on a specific command.\n"
        return epilog.replace("%prog", self._tool.name()) # Use of %prog here mimics OptionParser.expand_prog_name().

    # FIXME: This is a hack so that we don't show --all-commands as a global option:
    def _remove_help_options(self):
        for option in self.options:
            self.option_parser.remove_option(option.get_opt_string())

    def execute(self, options, args, tool):
        if args:
            # "help COMMAND": print that command's standalone help.
            command = self._tool.command_by_name(args[0])
            if command:
                print command.standalone_help()
                return 0

        self.show_all_commands = options.show_all_commands
        self._remove_help_options()
        self.option_parser.print_help()
        return 0
class MultiCommandTool(object):
    """Driver for an svn/git-style tool: discovers Command subclasses,
    parses global options, dispatches to the named command and runs it
    (retrying while it raises TryAgain)."""

    # Optional list of optparse options shared by all commands.
    global_options = None

    def __init__(self, name=None, commands=None):
        self._name = name or OptionParser(prog=name).get_prog_name() # OptionParser has nice logic for fetching the name.
        # Allow the unit tests to disable command auto-discovery.
        self.commands = commands or [cls() for cls in self._find_all_commands() if cls.name]
        self.help_command = self.command_by_name(HelpCommand.name)
        # Require a help command, even if the manual test list doesn't include one.
        if not self.help_command:
            self.help_command = HelpCommand()
            self.commands.append(self.help_command)
        for command in self.commands:
            command.bind_to_tool(self)

    @classmethod
    def _add_all_subclasses(cls, class_to_crawl, seen_classes):
        # Recursively collect every transitive subclass of class_to_crawl.
        for subclass in class_to_crawl.__subclasses__():
            if subclass not in seen_classes:
                seen_classes.add(subclass)
                cls._add_all_subclasses(subclass, seen_classes)

    @classmethod
    def _find_all_commands(cls):
        commands = set()
        cls._add_all_subclasses(Command, commands)
        return sorted(commands)

    def name(self):
        return self._name

    def _create_option_parser(self):
        usage = "Usage: %prog [options] COMMAND [ARGS]"
        return HelpPrintingOptionParser(epilog_method=self.help_command._help_epilog, prog=self.name(), usage=usage)

    @staticmethod
    def _split_command_name_from_args(args):
        # Assume the first argument which doesn't start with "-" is the command name.
        command_index = 0
        for arg in args:
            if arg[0] != "-":
                break
            command_index += 1
        else:
            # No non-option argument found: no command name.
            return (None, args[:])

        command = args[command_index]
        return (command, args[:command_index] + args[command_index + 1:])

    def command_by_name(self, command_name):
        """Return the Command whose name matches, or None."""
        for command in self.commands:
            if command_name == command.name:
                return command
        return None

    def path(self):
        raise NotImplementedError, "subclasses must implement"

    def command_completed(self):
        # Hook for subclasses; called after a command finishes.
        pass

    def should_show_in_main_help(self, command):
        return command.show_in_main_help

    def should_execute_command(self, command):
        # NOTE(review): main() unpacks the return value as a
        # (should_execute, failure_reason) pair, but this base implementation
        # returns a bare True -- subclasses appear expected to override it
        # with a tuple-returning version; confirm.
        return True

    def _add_global_options(self, option_parser):
        global_options = self.global_options or []
        for option in global_options:
            option_parser.add_option(option)

    def handle_global_options(self, options):
        # Hook for subclasses to react to parsed global options.
        pass

    def main(self, argv=sys.argv):
        (command_name, args) = self._split_command_name_from_args(argv[1:])

        option_parser = self._create_option_parser()
        self._add_global_options(option_parser)

        # Fall back to the help command for unknown/missing command names.
        command = self.command_by_name(command_name) or self.help_command
        if not command:
            option_parser.error("%s is not a recognized command" % command_name)

        command.set_option_parser(option_parser)
        (options, args) = command.parse_args(args)
        self.handle_global_options(options)

        (should_execute, failure_reason) = self.should_execute_command(command)
        if not should_execute:
            _log.error(failure_reason)
            return 0 # FIXME: Should this really be 0?

        # Re-run the command for as long as it raises TryAgain.
        while True:
            try:
                result = command.check_arguments_and_execute(options, args, self)
                break
            except TryAgain, e:
                pass

        self.command_completed()
        return result
| bsd-3-clause |
robinro/ansible | lib/ansible/modules/network/nxos/nxos_vlan.py | 33 | 10853 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community',
}
DOCUMENTATION = '''
---
module: nxos_vlan
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages VLAN resources and attributes.
description:
- Manages VLAN configurations on NX-OS switches.
author: Jason Edelman (@jedelman8)
options:
vlan_id:
description:
- Single VLAN ID.
required: false
default: null
vlan_range:
description:
- Range of VLANs such as 2-10 or 2,5,10-15, etc.
required: false
default: null
name:
description:
- Name of VLAN.
required: false
default: null
vlan_state:
description:
- Manage the vlan operational state of the VLAN
(equivalent to state {active | suspend} command.
required: false
default: active
choices: ['active','suspend']
admin_state:
description:
- Manage the VLAN administrative state of the VLAN equivalent
to shut/no shut in VLAN config mode.
required: false
default: up
choices: ['up','down']
mapped_vni:
description:
- The Virtual Network Identifier (VNI) ID that is mapped to the
VLAN. Valid values are integer and keyword 'default'.
required: false
default: null
version_added: "2.2"
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure a range of VLANs are not present on the switch
nxos_vlan:
vlan_range: "2-10,20,50,55-60,100-150"
state: absent
transport: nxapi
- name: Ensure VLAN 50 exists with the name WEB and is in the shutdown state
nxos_vlan:
vlan_id: 50
admin_state: down
name: WEB
transport: nxapi
- name: Ensure VLAN is NOT on the device
nxos_vlan:
vlan_id: 50
state: absent
transport: nxapi
'''
RETURN = '''
commands:
description: Set of command strings to send to the remote device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec
from ansible.module_utils.nxos import check_args as nxos_check_args
from ansible.module_utils.basic import AnsibleModule
def vlan_range_to_list(vlans):
    """Expand a VLAN range string into a flat list of VLAN-ID strings.

    Accepts comma-separated items where each item is either a single id or
    an inclusive "start-end" range, e.g. "2-4,7" -> ['2', '3', '4', '7'].
    A 'none' item stops processing; a falsy input yields an empty list.
    """
    if not vlans:
        return []
    expanded = []
    for chunk in vlans.split(','):
        if chunk == 'none':
            break
        if '-' in chunk:
            lo, hi = (int(bound) for bound in chunk.split('-'))
            expanded.extend(str(vid) for vid in range(lo, hi + 1))
        else:
            expanded.append(chunk)
    return expanded
def numerical_sort(iterable):
    """Sort VLAN-ID strings in ascending numeric order.

    Each value is converted to int for ordering and back to str for the
    result, so e.g. ['10', '2', '1'] -> ['1', '2', '10'].
    """
    return [str(vid) for vid in sorted(int(vid) for vid in iterable)]
def build_commands(vlans, state):
    """Return the 'vlan N' / 'no vlan N' command list for the given VLANs.

    Any state other than 'present' or 'absent' produces no commands.
    """
    if state == 'present':
        template = 'vlan {0}'
    elif state == 'absent':
        template = 'no vlan {0}'
    else:
        return []
    return [template.format(vid) for vid in vlans]
def get_vlan_config_commands(vlan, vid):
    """Build the config-mode command list for one VLAN.

    *vlan* maps parameter names to desired values and *vid* is the VLAN id.
    The returned list enters the VLAN context, applies each setting and
    exits, e.g. ['vlan 50', 'name WEB', 'exit'].
    """
    reverse_value_map = {
        "admin_state": {
            "down": "shutdown",
            "up": "no shutdown"
        }
    }

    if vlan.get('admin_state'):
        # Translate the abstract up/down state back into CLI keywords.
        # (Would need a loop or a deeper check if the map grew past one key.)
        vlan = apply_value_map(reverse_value_map, vlan)

    templates = {
        'name': 'name {0}',
        'vlan_state': 'state {0}',
        'admin_state': '{0}',
        'mode': 'mode {0}',
        'mapped_vni': 'vn-segment {0}'
    }

    commands = ['vlan ' + vid]
    for param, desired in vlan.items():
        if param == 'mapped_vni' and desired == 'default':
            line = 'no vn-segment'
        else:
            line = templates.get(param).format(vlan.get(param))
        if line:
            commands.append(line)
    commands.append('exit')
    return commands
def get_list_of_vlans(module):
    """Return the VLAN IDs (as strings) configured on the device, parsed
    from the JSON output of 'show vlan'."""
    body = run_commands(module, ['show vlan | json'])[0]
    vlan_list = []
    vlan_table = body.get('TABLE_vlanbrief')['ROW_vlanbrief']

    if isinstance(vlan_table, list):
        for vlan in vlan_table:
            vlan_list.append(str(vlan['vlanshowbr-vlanid-utf']))
    else:
        # NOTE(review): when only one VLAN exists the device returns a dict
        # rather than a list; this branch assumes that VLAN is the default
        # VLAN 1 instead of reading its actual id from vlan_table -- confirm.
        vlan_list.append('1')
    return vlan_list
def get_vni(vlanid, module):
    """Return the vn-segment (VNI) mapped to *vlanid*, or '' if none.

    Reads the running config section for the VLAN and extracts the value
    after 'vn-segment ' with a regex.
    """
    flags = str('all | section vlan.{0}'.format(vlanid)).split(' ')
    body = get_config(module, flags=flags)

    value = ''
    if body:
        REGEX = re.compile(r'(?:vn-segment\s)(?P<value>.*)$', re.M)
        if 'vn-segment' in body:
            value = REGEX.search(body).group('value')
    return value
def get_vlan(vlanid, module):
    """Get instance of VLAN as a dictionary.

    Runs 'show vlan id N | json' on the device and normalizes the result:
    keys are renamed via apply_key_map, the admin state is translated to
    'up'/'down' via apply_value_map, and the mapped VNI is added.
    Returns {} when the VLAN does not exist or the output cannot be parsed.
    """
    command = 'show vlan id %s | json' % vlanid

    try:
        body = run_commands(module, [command])[0]
        vlan_table = body['TABLE_vlanbriefid']['ROW_vlanbriefid']
    except (TypeError, IndexError, KeyError):
        # Missing VLAN or unexpected output shape: treat as "not present".
        return {}

    key_map = {
        "vlanshowbr-vlanid-utf": "vlan_id",
        "vlanshowbr-vlanname": "name",
        "vlanshowbr-vlanstate": "vlan_state",
        "vlanshowbr-shutstate": "admin_state"
    }

    vlan = apply_key_map(key_map, vlan_table)

    value_map = {
        "admin_state": {
            "shutdown": "down",
            "noshutdown": "up"
        }
    }

    vlan = apply_value_map(value_map, vlan)
    vlan['mapped_vni'] = get_vni(vlanid, module)
    return vlan
def apply_key_map(key_map, table):
    """Rename *table*'s keys via *key_map*, stringifying every value.

    Keys absent from the map (or mapped to a falsy name) are dropped.
    """
    return {key_map[k]: str(v) for k, v in table.items() if key_map.get(k)}
def apply_value_map(value_map, resource):
    """Translate selected values of *resource* in place via *value_map*.

    For each field in the map, resource[field] is replaced by the mapped
    form of its current value.  The (mutated) resource is returned.
    """
    for field in value_map:
        resource[field] = value_map[field][resource.get(field)]
    return resource
def check_args(module, warnings):
    """Run the shared NX-OS argument checks, then warn about the parameters
    this module has deprecated (their values are ignored)."""
    nxos_check_args(module, warnings)

    for key in ('include_defaults', 'config', 'save'):
        if module.params[key] is not None:
            warnings.append('argument %s is no longer supported, ignoring value' % key)
def main():
    """Module entry point: diff the requested VLAN state against the device
    and push the resulting config commands (honouring check mode)."""
    argument_spec = dict(
        vlan_id=dict(required=False, type='str'),
        vlan_range=dict(required=False),
        name=dict(required=False),
        vlan_state=dict(choices=['active', 'suspend'], required=False),
        mapped_vni=dict(required=False, type='str'),
        state=dict(choices=['present', 'absent'], default='present', required=False),
        admin_state=dict(choices=['up', 'down'], required=False),

        # Deprecated in Ansible 2.4
        include_defaults=dict(default=False),
        config=dict(),
        save=dict(type='bool', default=False)
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['vlan_range', 'name'],
                                               ['vlan_id', 'vlan_range']],
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)
    results = dict(changed=False, warnings=warnings)

    vlan_range = module.params['vlan_range']
    vlan_id = module.params['vlan_id']
    name = module.params['name']
    vlan_state = module.params['vlan_state']
    admin_state = module.params['admin_state']
    mapped_vni = module.params['mapped_vni']
    state = module.params['state']

    if vlan_id:
        if not vlan_id.isdigit():
            module.fail_json(msg='vlan_id must be a valid VLAN ID')

    # Only explicitly-supplied attributes take part in the diff.
    args = dict(name=name, vlan_state=vlan_state,
                admin_state=admin_state, mapped_vni=mapped_vni)
    proposed = dict((k, v) for k, v in args.items() if v is not None)

    proposed_vlans_list = vlan_range_to_list(vlan_id or vlan_range)
    existing_vlans_list = get_list_of_vlans(module)
    commands = []
    existing = {}

    if vlan_range:
        if state == 'present':
            # These are all of the VLANs being proposed that don't
            # already exist on the switch
            vlans_delta = numerical_sort(
                set(proposed_vlans_list).difference(existing_vlans_list))
            commands = build_commands(vlans_delta, state)
        elif state == 'absent':
            # VLANs that are common between what is being proposed and
            # what is on the switch
            vlans_common = numerical_sort(
                set(proposed_vlans_list).intersection(existing_vlans_list))
            commands = build_commands(vlans_common, state)
    else:
        # Single-VLAN mode: compute the per-attribute delta.
        existing = get_vlan(vlan_id, module)
        if state == 'absent' and existing:
            commands = ['no vlan ' + vlan_id]
        elif state == 'present':
            if (existing.get('mapped_vni') == '0' and
                    proposed.get('mapped_vni') == 'default'):
                # VNI '0' already means "no mapping", so 'default' is a no-op.
                proposed.pop('mapped_vni')
            delta = dict(set(proposed.items()).difference(existing.items()))
            if delta or not existing:
                commands = get_vlan_config_commands(delta, vlan_id)

    if commands:
        if existing.get('mapped_vni'):
            if (existing.get('mapped_vni') != proposed.get('mapped_vni') and
                    existing.get('mapped_vni') != '0' and proposed.get('mapped_vni') != 'default'):
                # Changing an existing VNI mapping requires removing the old
                # vn-segment before configuring the new one.
                commands.insert(1, 'no vn-segment')
        if module.check_mode:
            module.exit_json(changed=True, commands=commands)
        else:
            load_config(module, commands)
            results['changed'] = True

    results['commands'] = commands

    module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
blaggacao/odoo | addons/account_test/report/account_test_report.py | 156 | 3740 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
#
# Use period and Journal for selection or resources
#
class report_assert_account(report_sxw.rml_parse):
    """RML parser for the accounting-test report.

    Exposes execute_code() to the report template so each stored accounting
    test's python snippet can be run and its result rendered.
    """

    def __init__(self, cr, uid, name, context):
        super(report_assert_account, self).__init__(cr, uid, name, context=context)
        self.localcontext.update( {
            'time': time,
            'datetime': datetime,
            'execute_code': self.execute_code,
        })

    def execute_code(self, code_exec):
        """Run *code_exec* (the python body of an accounting test) and return
        its result as a list of display strings.

        The snippet communicates through the 'result' (and optional
        'column_order') names in its namespace.  An empty/falsy result means
        the test passed.

        NOTE(review): the snippet is executed with exec and full cr/uid
        access, so test definitions must come from trusted users only.
        """
        def reconciled_inv():
            """
            returns the list of invoices that are set as reconciled = True
            """
            return self.pool.get('account.invoice').search(self.cr, self.uid, [('reconciled','=',True)])

        def order_columns(item, cols=None):
            """
            This function is used to display a dictionary as a string, with its columns in the order chosen.

            :param item: dict
            :param cols: list of field names
            :returns: a list of tuples (fieldname: value) in a similar way that would dict.items() do except that the
                returned values are following the order given by cols
            :rtype: [(key, value)]
            """
            if cols is None:
                cols = item.keys()
            return [(col, item.get(col)) for col in cols if col in item.keys()]

        localdict = {
            'cr': self.cr,
            'uid': self.uid,
            'reconciled_inv': reconciled_inv, #specific function used in different tests
            'result': None, #used to store the result of the test
            'column_order': None, #used to choose the display order of columns (in case you are returning a list of dict)
        }
        exec code_exec in localdict
        result = localdict['result']
        column_order = localdict.get('column_order', None)

        if not isinstance(result, (tuple, list, set)):
            result = [result]
        if not result:
            result = [_('The test was passed successfully')]
        else:
            def _format(item):
                # Dicts are flattened to "key: value" pairs in column_order.
                if isinstance(item, dict):
                    return ', '.join(["%s: %s" % (tup[0], tup[1]) for tup in order_columns(item, column_order)])
                else:
                    return item
            result = [_(_format(rec)) for rec in result]

        return result
class report_accounttest(osv.AbstractModel):
    """Report model binding the account_test QWeb template to the
    report_assert_account parser above."""
    _name = 'report.account_test.report_accounttest'
    _inherit = 'report.abstract_report'
    _template = 'account_test.report_accounttest'
    _wrapped_report_class = report_assert_account
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rcarrillocruz/ansible | lib/ansible/modules/remote_management/hpilo/hponcfg.py | 66 | 2841 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: hponcfg
author: Dag Wieers (@dagwieers)
version_added: "2.3"
short_description: Configure HP iLO interface using hponcfg
description:
- This modules configures the HP iLO interface using hponcfg.
options:
path:
description:
- The XML file as accepted by hponcfg
required: true
aliases: ['src']
minfw:
description:
- The minimum firmware level needed
requirements:
- hponcfg tool
notes:
- You need a working hponcfg on the target system.
'''
EXAMPLES = r'''
- name: Example hponcfg configuration XML
copy:
content: |
<ribcl VERSION="2.0">
<login USER_LOGIN="user" PASSWORD="password">
<rib_info MODE="WRITE">
<mod_global_settings>
<session_timeout value="0"/>
<ssh_status value="Y"/>
<ssh_port value="22"/>
<serial_cli_status value="3"/>
<serial_cli_speed value="5"/>
</mod_global_settings>
</rib_info>
</login>
</ribcl>
dest: /tmp/enable-ssh.xml
- name: Configure HP iLO using enable-ssh.xml
hponcfg:
src: /tmp/enable-ssh.xml
'''
from ansible.module_utils.basic import AnsibleModule
def main():
    """Apply an hponcfg XML configuration file to the local HP iLO
    interface, optionally enforcing a minimum firmware level."""
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(required=True, type='path', aliases=['path']),
            minfw=dict(type='str'),
        )
    )

    # Consider every action a change (not idempotent yet!)
    changed = True

    src = module.params['src']
    minfw = module.params['minfw']

    options = ' -f %s' % src

    # Add -v for debugging
    # options += ' -v'

    if minfw:
        # Bug fix: this previously assigned to the undefined name `option`
        # (singular), raising a NameError whenever `minfw` was supplied.
        options += ' -m %s' % minfw

    rc, stdout, stderr = module.run_command('hponcfg %s' % options)

    if rc != 0:
        module.fail_json(rc=rc, msg="Failed to run hponcfg", stdout=stdout, stderr=stderr)

    module.exit_json(changed=changed, stdout=stdout, stderr=stderr)
if __name__ == '__main__':
main()
| gpl-3.0 |
PaulChongPeng/YOLO2TensorFlow | src/deployment/model_deploy.py | 1 | 23821 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deploy Slim models across multiple clones and replicas.
# TODO(sguada) docstring paragraph by (a) motivating the need for the file and
# (b) defining clones.
# TODO(sguada) describe the high-level components of model deployment.
# E.g. "each model deployment is composed of several parts: a DeploymentConfig,
# which captures A, B and C, an input_fn which loads data.. etc
To easily train a model on multiple GPUs or across multiple machines this
module provides a set of helper functions: `create_clones`,
`optimize_clones` and `deploy`.
Usage:
g = tf.Graph()
# Set up DeploymentConfig
config = model_deploy.DeploymentConfig(num_clones=2, clone_on_cpu=True)
# Create the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = slim.create_global_step()
# Define the inputs
with tf.device(config.inputs_device()):
images, labels = LoadData(...)
inputs_queue = slim.data.prefetch_queue((images, labels))
# Define the optimizer.
with tf.device(config.optimizer_device()):
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Define the model including the loss.
def model_fn(inputs_queue):
images, labels = inputs_queue.dequeue()
predictions = CreateNetwork(images)
slim.losses.log_loss(predictions, labels)
model_dp = model_deploy.deploy(config, model_fn, [inputs_queue],
optimizer=optimizer)
# Run training.
slim.learning.train(model_dp.train_op, my_log_dir,
summary_op=model_dp.summary_op)
The Clone namedtuple holds together the values associated with each call to
model_fn:
* outputs: The return values of the calls to `model_fn()`.
* scope: The scope used to create the clone.
* device: The device used to create the clone.
DeployedModel namedtuple, holds together the values needed to train multiple
clones:
* train_op: An operation that run the optimizer training op and include
all the update ops created by `model_fn`. Present only if an optimizer
was specified.
* summary_op: An operation that run the summaries created by `model_fn`
and process_gradients.
* total_loss: A `Tensor` that contains the sum of all losses created by
`model_fn` plus the regularization losses.
* clones: List of `Clone` tuples returned by `create_clones()`.
DeploymentConfig parameters:
* num_clones: Number of model clones to deploy in each replica.
* clone_on_cpu: True if clones should be placed on CPU.
* replica_id: Integer. Index of the replica for which the model is
deployed. Usually 0 for the chief replica.
* num_replicas: Number of replicas to use.
* num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
* worker_job_name: A name for the worker job.
* ps_job_name: A name for the parameter server job.
TODO(sguada):
- describe side effect to the graph.
- what happens to summaries and update_ops.
- which graph collections are altered.
- write a tutorial on how to use this.
- analyze the possibility of calling deploy more than once.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
slim = tf.contrib.slim
__all__ = ['create_clones',
'deploy',
'optimize_clones',
'DeployedModel',
'DeploymentConfig',
'Clone',
]
# Namedtuple used to represent a clone during deployment.
Clone = collections.namedtuple('Clone',
['outputs', # Whatever model_fn() returned.
'scope', # The scope used to create it.
'device', # The device used to create.
])
# Namedtuple used to represent a DeployedModel, returned by deploy().
DeployedModel = collections.namedtuple('DeployedModel',
['train_op', # The `train_op`
'summary_op', # The `summary_op`
'total_loss', # The loss `Tensor`
'clones', # A list of `Clones` tuples.
])
# Default parameters for DeploymentConfig
_deployment_params = {'num_clones': 1,
'clone_on_cpu': False,
'replica_id': 0,
'num_replicas': 1,
'num_ps_tasks': 0,
'worker_job_name': 'worker',
'ps_job_name': 'ps'}
def create_clones(config, model_fn, args=None, kwargs=None):
  """Creates multiple clones according to config using a `model_fn`.

  The returned values of `model_fn(*args, **kwargs)` are collected along with
  the scope and device used to created it in a namedtuple
  `Clone(outputs, scope, device)`

  Note: it is assumed that any loss created by `model_fn` is collected at
  the tf.GraphKeys.LOSSES collection.

  To recover the losses, summaries or update_ops created by the clone use:
  ```python
  losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
  summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, clone.scope)
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
  ```

  The deployment options are specified by the config object and support
  deploying one or several clones on different GPUs and one or several replicas
  of such clones.

  The argument `model_fn` is called `config.num_clones` times to create the
  model clones as `model_fn(*args, **kwargs)`.

  If `config` specifies deployment on multiple replicas then the default
  tensorflow device is set appropriately for each call to `model_fn` and for
  the slim variable creation functions: model and global variables will be
  created on the `ps` device, the clone operations will be on the `worker`
  device.

  Args:
    config: A DeploymentConfig object.
    model_fn: A callable. Called as `model_fn(*args, **kwargs)`
    args: Optional list of arguments to pass to `model_fn`.
    kwargs: Optional list of keyword arguments to pass to `model_fn`.

  Returns:
    A list of namedtuples `Clone`.
  """
  clones = []
  args = args or []
  kwargs = kwargs or {}
  # Pin slim-created model/global variables to the device chosen by the
  # config (typically the ps job, or the default device when not replicated).
  with slim.arg_scope([slim.model_variable, slim.variable],
                      device=config.variables_device()):
    # Create clones.
    for i in range(0, config.num_clones):
      with tf.name_scope(config.clone_scope(i)) as clone_scope:
        clone_device = config.clone_device(i)
        with tf.device(clone_device):
          # Reuse the variable scope for every clone after the first, so all
          # clones share one set of model variables.
          with tf.variable_scope(tf.get_variable_scope(),
                                 reuse=True if i > 0 else None):
            outputs = model_fn(*args, **kwargs)
          clones.append(Clone(outputs, clone_scope, clone_device))
  return clones
def _gather_clone_loss(clone, num_clones, regularization_losses):
  """Gather the loss for a single clone.

  Args:
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization_losses
      to add to the clone losses.

  Returns:
    A tensor for the total loss for the clone. Can be None.
  """
  # The return value.
  sum_loss = None
  # Individual components of the loss that will need summaries.
  clone_loss = None
  regularization_loss = None
  # Compute and aggregate losses on the clone device.
  with tf.device(clone.device):
    all_losses = []
    clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
    if clone_losses:
      clone_loss = tf.add_n(clone_losses, name='clone_loss')
      if num_clones > 1:
        # Scale by 1/num_clones so that summing across clones yields an
        # average of the per-clone losses.
        clone_loss = tf.div(clone_loss, 1.0 * num_clones,
                            name='scaled_clone_loss')
      all_losses.append(clone_loss)
    if regularization_losses:
      regularization_loss = tf.add_n(regularization_losses,
                                     name='regularization_loss')
      all_losses.append(regularization_loss)
    if all_losses:
      sum_loss = tf.add_n(all_losses)
  # Add the summaries out of the clone device block.
  if clone_loss is not None:
    tf.summary.scalar(clone.scope + '/clone_loss', clone_loss)
  if regularization_loss is not None:
    tf.summary.scalar('regularization_loss', regularization_loss)
  return sum_loss
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
                    **kwargs):
  """Compute losses and gradients for a single clone.

  Args:
    optimizer: A tf.Optimizer object.
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization_losses
      to add to the clone losses.
    **kwargs: Dict of kwarg to pass to compute_gradients().

  Returns:
    A tuple (clone_loss, clone_grads_and_vars).
      - clone_loss: A tensor for the total loss for the clone. Can be None.
      - clone_grads_and_vars: List of (gradient, variable) for the clone.
        Can be empty.
  """
  sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
  clone_grad = None
  if sum_loss is not None:
    # Compute gradients on the same device the clone was built on.
    with tf.device(clone.device):
      clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)
  return sum_loss, clone_grad
def optimize_clones(clones, optimizer,
                    regularization_losses=None,
                    **kwargs):
  """Compute clone losses and gradients for the given list of `Clones`.

  Note: The regularization_losses are added to the first clone losses.

  Args:
    clones: List of `Clones` created by `create_clones()`.
    optimizer: An `Optimizer` object.
    regularization_losses: Optional list of regularization losses. If None it
      will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
      exclude them.
    **kwargs: Optional list of keyword arguments to pass to `compute_gradients`.

  Returns:
    A tuple (total_loss, grads_and_vars).
      - total_loss: A Tensor containing the average of the clone losses
        including the regularization loss.
      - grads_and_vars: A List of tuples (gradient, variable) containing the
        sum of the gradients for each variable.
  """
  grads_and_vars = []
  clones_losses = []
  num_clones = len(clones)
  if regularization_losses is None:
    regularization_losses = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
  for clone in clones:
    with tf.name_scope(clone.scope):
      clone_loss, clone_grad = _optimize_clone(
          optimizer, clone, num_clones, regularization_losses, **kwargs)
      if clone_loss is not None:
        clones_losses.append(clone_loss)
        grads_and_vars.append(clone_grad)
      # Only use regularization_losses for the first clone
      regularization_losses = None
  # Compute the total_loss summing all the clones_losses.
  total_loss = tf.add_n(clones_losses, name='total_loss')
  # Sum the gradients across clones.
  grads_and_vars = _sum_clones_gradients(grads_and_vars)
  return total_loss, grads_and_vars
def deploy(config,
           model_fn,
           args=None,
           kwargs=None,
           optimizer=None,
           summarize_gradients=False):
  """Deploys a Slim-constructed model across multiple clones.

  The deployment options are specified by the config object and support
  deploying one or several clones on different GPUs and one or several replicas
  of such clones.

  The argument `model_fn` is called `config.num_clones` times to create the
  model clones as `model_fn(*args, **kwargs)`.

  The optional argument `optimizer` is an `Optimizer` object.  If not `None`,
  the deployed model is configured for training with that optimizer.

  If `config` specifies deployment on multiple replicas then the default
  tensorflow device is set appropriately for each call to `model_fn` and for
  the slim variable creation functions: model and global variables will be
  created on the `ps` device, the clone operations will be on the `worker`
  device.

  Args:
    config: A `DeploymentConfig` object.
    model_fn: A callable. Called as `model_fn(*args, **kwargs)`
    args: Optional list of arguments to pass to `model_fn`.
    kwargs: Optional list of keyword arguments to pass to `model_fn`.
    optimizer: Optional `Optimizer` object.  If passed the model is deployed
      for training with that optimizer.
    summarize_gradients: Whether or not add summaries to the gradients.

  Returns:
    A `DeployedModel` namedtuple.
  """
  # Gather initial summaries.
  summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))

  # Create Clones.
  clones = create_clones(config, model_fn, args, kwargs)
  first_clone = clones[0]

  # Gather update_ops from the first clone. These contain, for example,
  # the updates for the batch_norm variables created by model_fn.
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone.scope)

  train_op = None
  total_loss = None
  with tf.device(config.optimizer_device()):
    if optimizer:
      # Training path: compute gradients and build a train_op.
      # Place the global step on the device storing the variables.
      with tf.device(config.variables_device()):
        global_step = slim.get_or_create_global_step()

      # Compute the gradients for the clones.
      total_loss, clones_gradients = optimize_clones(clones, optimizer)

      if clones_gradients:
        if summarize_gradients:
          # Add summaries to the gradients.
          summaries |= set(_add_gradients_summaries(clones_gradients))

        # Create gradient updates.
        grad_updates = optimizer.apply_gradients(clones_gradients,
                                                 global_step=global_step)
        update_ops.append(grad_updates)

        # train_op evaluates to total_loss but only after all updates ran.
        update_op = tf.group(*update_ops)
        with tf.control_dependencies([update_op]):
          train_op = tf.identity(total_loss, name='train_op')
    else:
      # Evaluation path: only aggregate the losses, no gradients.
      clones_losses = []
      regularization_losses = tf.get_collection(
          tf.GraphKeys.REGULARIZATION_LOSSES)
      for clone in clones:
        with tf.name_scope(clone.scope):
          clone_loss = _gather_clone_loss(clone, len(clones),
                                          regularization_losses)
          if clone_loss is not None:
            clones_losses.append(clone_loss)
          # Only use regularization_losses for the first clone
          regularization_losses = None
      if clones_losses:
        total_loss = tf.add_n(clones_losses, name='total_loss')

    # Add the summaries from the first clone. These contain the summaries
    # created by model_fn and either optimize_clones() or _gather_clone_loss().
    summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
                                       first_clone.scope))

    if total_loss is not None:
      # Add total_loss to summary.
      summaries.add(tf.summary.scalar('total_loss', total_loss))

    if summaries:
      # Merge all summaries together.
      summary_op = tf.summary.merge(list(summaries), name='summary_op')
    else:
      summary_op = None

  return DeployedModel(train_op, summary_op, total_loss, clones)
def _sum_clones_gradients(clone_grads):
  """Calculate the sum gradient for each shared variable across all clones.

  This function assumes that the clone_grads has been scaled appropriately by
  1 / num_clones.

  Args:
    clone_grads: A List of List of tuples (gradient, variable), one list per
      `Clone`.

  Returns:
    List of tuples of (gradient, variable) where the gradient has been summed
    across all clones.
  """
  summed = []
  # zip(*clone_grads) regroups the per-clone entries of one variable:
  # ((grad_var0_clone0, var0), ..., (grad_var0_cloneN, var0)).
  for per_variable in zip(*clone_grads):
    shared_var = per_variable[0][1]
    grads = []
    for grad, var in per_variable:
      # Every clone must report a gradient for the same shared variable.
      assert var == shared_var
      if grad is not None:
        grads.append(grad)
    if not grads:
      continue
    if len(grads) == 1:
      total = grads[0]
    else:
      total = tf.add_n(grads, name=shared_var.op.name + '/sum_grads')
    summed.append((total, shared_var))
  return summed
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  added = []
  for gradient, variable in grads_and_vars:
    if gradient is None:
      tf.logging.info('Var %s has no gradient', variable.op.name)
      continue
    # For sparse gradients, summarize the dense values tensor only.
    if isinstance(gradient, tf.IndexedSlices):
      values = gradient.values
    else:
      values = gradient
    added.append(tf.summary.histogram(variable.op.name + ':gradient',
                                      values))
    added.append(tf.summary.histogram(variable.op.name + ':gradient_norm',
                                      tf.global_norm([values])))
  return added
class DeploymentConfig(object):
  """Configuration for deploying a model with `deploy()`.

  You can pass an instance of this class to `deploy()` to specify exactly
  how to deploy the model to build. If you do not pass one, an instance built
  from the default deployment_hparams will be used.
  """

  def __init__(self,
               num_clones=1,
               clone_on_cpu=False,
               replica_id=0,
               num_replicas=1,
               num_ps_tasks=0,
               worker_job_name='worker',
               ps_job_name='ps'):
    """Create a DeploymentConfig.

    The config describes how to deploy a model across multiple clones and
    replicas.  The model will be replicated `num_clones` times in each
    replica.  If `clone_on_cpu` is True, each clone will be placed on CPU.

    If `num_replicas` is 1, the model is deployed via a single process.  In
    that case `worker_job_name`, `num_ps_tasks`, and `ps_job_name` are
    ignored.  If `num_replicas` is greater than 1, then `worker_job_name`
    and `ps_job_name` must specify TensorFlow jobs for the `worker` and
    `ps` roles and `num_ps_tasks` must be positive.

    Args:
      num_clones: Number of model clones to deploy in each replica.
      clone_on_cpu: If True clones would be placed on CPU.
      replica_id: Integer.  Index of the replica for which the model is
        deployed.  Usually 0 for the chief replica.
      num_replicas: Number of replicas to use.
      num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
      worker_job_name: A name for the worker job.
      ps_job_name: A name for the parameter server job.

    Raises:
      ValueError: If the arguments are invalid.
    """
    if num_replicas > 1 and num_ps_tasks < 1:
      raise ValueError('When using replicas num_ps_tasks must be positive')
    if num_replicas > 1 or num_ps_tasks > 0:
      if not worker_job_name:
        raise ValueError('Must specify worker_job_name when using replicas')
      if not ps_job_name:
        raise ValueError('Must specify ps_job_name when using parameter server')
    if replica_id >= num_replicas:
      raise ValueError('replica_id must be less than num_replicas')
    self._num_clones = num_clones
    self._clone_on_cpu = clone_on_cpu
    self._replica_id = replica_id
    self._num_replicas = num_replicas
    self._num_ps_tasks = num_ps_tasks
    # Job device prefixes are only meaningful when a parameter server is used.
    self._ps_device = '/job:' + ps_job_name if num_ps_tasks > 0 else ''
    self._worker_device = '/job:' + worker_job_name if num_ps_tasks > 0 else ''

  @property
  def num_clones(self):
    return self._num_clones

  @property
  def clone_on_cpu(self):
    return self._clone_on_cpu

  @property
  def replica_id(self):
    return self._replica_id

  @property
  def num_replicas(self):
    return self._num_replicas

  @property
  def num_ps_tasks(self):
    return self._num_ps_tasks

  @property
  def ps_device(self):
    return self._ps_device

  @property
  def worker_device(self):
    return self._worker_device

  def caching_device(self):
    """Returns the device to use for caching variables.

    Variables are cached on the worker CPU when using replicas.

    Returns:
      A device string or None if the variables do not need to be cached.
    """
    if self._num_ps_tasks <= 0:
      return None
    # Cache each variable wherever the consuming op lives.
    return lambda op: op.device

  def clone_device(self, clone_index):
    """Device used to create the clone and all the ops inside the clone.

    Args:
      clone_index: Int, representing the clone_index.

    Returns:
      A value suitable for `tf.device()`.

    Raises:
      ValueError: if `clone_index` is greater or equal to the number of clones".
    """
    if clone_index >= self._num_clones:
      raise ValueError('clone_index must be less than num_clones')
    parts = []
    if self._num_ps_tasks > 0:
      parts.append(self._worker_device)
    if self._clone_on_cpu:
      parts.append('/device:CPU:0')
    elif self._num_clones > 1:
      # One GPU per clone; a single clone keeps the default device.
      parts.append('/device:GPU:%d' % clone_index)
    return ''.join(parts)

  def clone_scope(self, clone_index):
    """Name scope to create the clone.

    Args:
      clone_index: Int, representing the clone_index.

    Returns:
      A name_scope suitable for `tf.name_scope()`.

    Raises:
      ValueError: if `clone_index` is greater or equal to the number of clones".
    """
    if clone_index >= self._num_clones:
      raise ValueError('clone_index must be less than num_clones')
    return 'clone_%d' % clone_index if self._num_clones > 1 else ''

  def optimizer_device(self):
    """Device to use with the optimizer.

    Returns:
      A value suitable for `tf.device()`.
    """
    if self._num_ps_tasks > 0 or self._num_clones > 0:
      return self._worker_device + '/device:CPU:0'
    return ''

  def inputs_device(self):
    """Device to use to build the inputs.

    Returns:
      A value suitable for `tf.device()`.
    """
    prefix = self._worker_device if self._num_ps_tasks > 0 else ''
    return prefix + '/device:CPU:0'

  def variables_device(self):
    """Returns the device to use for variables created inside the clone.

    Returns:
      A value suitable for `tf.device()` (a string, or a chooser function
      that round-robins 'Variable' ops across ps tasks when replicated).
    """
    prefix = self._ps_device if self._num_ps_tasks > 0 else ''
    device = prefix + '/device:CPU:0'
    if not self._num_ps_tasks:
      return device

    class _RoundRobinPSChooser(object):
      """Spreads 'Variable' ops across ps tasks in round-robin order."""

      def __init__(self, base_device, num_tasks):
        self._base_device = base_device
        self._num_tasks = num_tasks
        self._next_task = 0

      def choose(self, op):
        # Respect any explicit placement on the op itself.
        if op.device:
          return op.device
        node_def = op if isinstance(op, tf.NodeDef) else op.node_def
        if node_def.op != 'Variable':
          return op.device
        task = self._next_task
        self._next_task = (self._next_task + 1) % self._num_tasks
        return '%s/task:%d' % (self._base_device, task)

    return _RoundRobinPSChooser(device, self._num_ps_tasks).choose
| apache-2.0 |
idovear/odoo | addons/auth_oauth/controllers/main.py | 205 | 8042 | import functools
import logging
import simplejson
import urlparse
import werkzeug.utils
from werkzeug.exceptions import BadRequest
import openerp
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import db_monodb, ensure_db, set_cookie_and_redirect, login_and_redirect
from openerp.addons.auth_signup.controllers.main import AuthSignupHome as Home
from openerp.modules.registry import RegistryManager
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# helpers
#----------------------------------------------------------
def fragment_to_query_string(func):
    """Decorator that turns a URL fragment into a query string.

    OAuth2 providers return the access token in the URL fragment, which the
    server never sees.  When the wrapped handler receives no parameters
    (other than 'debug'), a small HTML/JS page is returned that reloads the
    URL with the fragment converted into a query string; the second request
    then reaches the handler with the parameters filled in.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        kwargs.pop('debug', False)
        if kwargs:
            return func(self, *args, **kwargs)
        return """<html><head><script>
var l = window.location;
var q = l.hash.substring(1);
var r = l.pathname + l.search;
if(q.length !== 0) {
var s = l.search ? (l.search === '?' ? '' : '&') : '?';
r = l.pathname + l.search + s + q;
}
if (r == l.pathname) {
r = '/';
}
window.location = r;
</script></head><body></body></html>"""
    return wrapper
#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class OAuthLogin(Home):
    """Extends the signup/login pages with OAuth provider buttons."""

    def list_providers(self):
        """Return enabled OAuth providers, each enriched with an 'auth_link'.

        Providers missing an auth or validation endpoint are excluded; any
        failure to read the model (e.g. during install) yields an empty list.
        """
        try:
            provider_obj = request.registry.get('auth.oauth.provider')
            providers = provider_obj.search_read(request.cr, SUPERUSER_ID, [('enabled', '=', True), ('auth_endpoint', '!=', False), ('validation_endpoint', '!=', False)])
            # TODO in forwardport: remove conditions on 'auth_endpoint' and 'validation_endpoint' when these fields will be 'required' in model
        except Exception:
            providers = []
        for provider in providers:
            # The provider must send the browser back to /auth_oauth/signin
            # with the signed 'state' blob so we know db/provider/redirect.
            return_url = request.httprequest.url_root + 'auth_oauth/signin'
            state = self.get_state(provider)
            params = dict(
                debug=request.debug,
                response_type='token',
                client_id=provider['client_id'],
                redirect_uri=return_url,
                scope=provider['scope'],
                state=simplejson.dumps(state),
            )
            provider['auth_link'] = provider['auth_endpoint'] + '?' + werkzeug.url_encode(params)
        return providers

    def get_state(self, provider):
        """Build the OAuth 'state' payload: db ('d'), provider id ('p'),
        quoted redirect URL ('r') and optional signup token ('t')."""
        redirect = request.params.get('redirect') or 'web'
        # Relative redirects are made absolute against the current root.
        if not redirect.startswith(('//', 'http://', 'https://')):
            redirect = '%s%s' % (request.httprequest.url_root, redirect[1:] if redirect[0] == '/' else redirect)
        state = dict(
            d=request.session.db,
            p=provider['id'],
            r=werkzeug.url_quote_plus(redirect),
        )
        token = request.params.get('token')
        if token:
            state['t'] = token
        return state

    @http.route()
    def web_login(self, *args, **kw):
        """Render the login page with provider buttons and any OAuth error
        code ('oauth_error' request param) translated to a message."""
        ensure_db()
        if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
            # Redirect if already logged in and redirect param is present
            return http.redirect_with_hash(request.params.get('redirect'))
        providers = self.list_providers()

        response = super(OAuthLogin, self).web_login(*args, **kw)
        if response.is_qweb:
            # Error codes set by OAuthController.signin on failure.
            error = request.params.get('oauth_error')
            if error == '1':
                error = _("Sign up is not allowed on this database.")
            elif error == '2':
                error = _("Access Denied")
            elif error == '3':
                error = _("You do not have access to this database or your invitation has expired. Please ask for an invitation and be sure to follow the link in your invitation email.")
            else:
                error = None

            response.qcontext['providers'] = providers
            if error:
                response.qcontext['error'] = error

        return response

    @http.route()
    def web_auth_signup(self, *args, **kw):
        """Signup page; with exactly one provider, jump straight to it."""
        providers = self.list_providers()
        if len(providers) == 1:
            werkzeug.exceptions.abort(werkzeug.utils.redirect(providers[0]['auth_link'], 303))
        response = super(OAuthLogin, self).web_auth_signup(*args, **kw)
        response.qcontext.update(providers=providers)
        return response

    @http.route()
    def web_auth_reset_password(self, *args, **kw):
        """Password-reset page; with exactly one provider, jump straight to it."""
        providers = self.list_providers()
        if len(providers) == 1:
            werkzeug.exceptions.abort(werkzeug.utils.redirect(providers[0]['auth_link'], 303))
        response = super(OAuthLogin, self).web_auth_reset_password(*args, **kw)
        response.qcontext.update(providers=providers)
        return response
class OAuthController(http.Controller):
    """HTTP endpoints that complete the OAuth2 sign-in flow.

    NOTE: this file is Python 2 code ('except Exception, e' syntax below).
    """

    @http.route('/auth_oauth/signin', type='http', auth='none')
    @fragment_to_query_string
    def signin(self, **kw):
        """Validate the provider's token and log the user in.

        kw['state'] is the JSON blob built by OAuthLogin.get_state(): it
        carries the database name ('d'), provider id ('p') and optional
        redirect ('r'), action ('a'), menu ('m') and context ('c').
        On failure, redirects to /web/login with an 'oauth_error' code
        (1: signup disabled, 2: generic error, 3: access denied).
        """
        state = simplejson.loads(kw['state'])
        dbname = state['d']
        provider = state['p']
        context = state.get('c', {})
        registry = RegistryManager.get(dbname)
        with registry.cursor() as cr:
            try:
                u = registry.get('res.users')
                credentials = u.auth_oauth(cr, SUPERUSER_ID, provider, kw, context=context)
                cr.commit()
                action = state.get('a')
                menu = state.get('m')
                redirect = werkzeug.url_unquote_plus(state['r']) if state.get('r') else False
                # Pick the post-login target: explicit redirect wins, then
                # action, then menu, then the default /web.
                url = '/web'
                if redirect:
                    url = redirect
                elif action:
                    url = '/web#action=%s' % action
                elif menu:
                    url = '/web#menu_id=%s' % menu
                return login_and_redirect(*credentials, redirect_url=url)
            except AttributeError:
                # auth_signup is not installed
                _logger.error("auth_signup not installed on database %s: oauth sign up cancelled." % (dbname,))
                url = "/web/login?oauth_error=1"
            except openerp.exceptions.AccessDenied:
                # oauth credentials not valid, user could be on a temporary session
                _logger.info('OAuth2: access denied, redirect to main page in case a valid session exists, without setting cookies')
                url = "/web/login?oauth_error=3"
                redirect = werkzeug.utils.redirect(url, 303)
                redirect.autocorrect_location_header = False
                return redirect
            except Exception, e:
                # signup error
                _logger.exception("OAuth2: %s" % str(e))
                url = "/web/login?oauth_error=2"
        return set_cookie_and_redirect(url)

    @http.route('/auth_oauth/oea', type='http', auth='none')
    def oea(self, **kw):
        """login user via Odoo Account provider"""
        # Resolve the target database, falling back to mono-db mode.
        dbname = kw.pop('db', None)
        if not dbname:
            dbname = db_monodb()
        if not dbname:
            return BadRequest()
        registry = RegistryManager.get(dbname)
        with registry.cursor() as cr:
            IMD = registry['ir.model.data']
            try:
                model, provider_id = IMD.get_object_reference(cr, SUPERUSER_ID, 'auth_oauth', 'provider_openerp')
            except ValueError:
                return set_cookie_and_redirect('/web?db=%s' % dbname)
            assert model == 'auth.oauth.provider'
        # Delegate to the generic signin handler with a synthesized state;
        # no_user_creation prevents implicit signup for this provider.
        state = {
            'd': dbname,
            'p': provider_id,
            'c': {'no_user_creation': True},
        }
        kw['state'] = simplejson.dumps(state)
        return self.signin(**kw)
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
GeorgeHahn/Printrun | printrun/gui/utils.py | 22 | 1466 | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import wx
def make_button(parent, label, callback, tooltip, container = None, size = wx.DefaultSize, style = 0):
    """Create a wx.Button, bind *callback* to its click event, attach
    *tooltip*, and optionally add it to *container* (a sizer)."""
    btn = wx.Button(parent, -1, label, style = style, size = size)
    btn.Bind(wx.EVT_BUTTON, callback)
    btn.SetToolTip(wx.ToolTip(tooltip))
    if container:
        container.Add(btn)
    return btn
def make_autosize_button(*args):
    # Same as make_button, but lets wx size the button snugly around its
    # label (BU_EXACTFIT with an unspecified size).
    return make_button(*args, size = (-1, -1), style = wx.BU_EXACTFIT)
def make_custom_button(root, parentpanel, i, style = 0):
    """Build a user-defined custom button from its description *i* and
    register it on *root* (in btndict by command and in printerControls)."""
    custom = make_button(parentpanel, i.label, root.process_button,
                         i.tooltip, style = style)
    custom.SetBackgroundColour(i.background)
    custom.SetForegroundColour("black")
    custom.properties = i
    root.btndict[i.command] = custom
    root.printerControls.append(custom)
    return custom
| gpl-3.0 |
citrix-openstack-build/keystone | keystone/tests/_ldap_livetest.py | 3 | 10549 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ldap
import ldap.modlist
import subprocess
import uuid
from keystone.common import ldap as ldap_common
from keystone import config
from keystone import exception
from keystone.identity.backends import ldap as identity_ldap
from keystone import tests
from keystone.tests import test_backend_ldap
CONF = config.CONF
def create_object(dn, attrs):
    """Add a single entry at *dn* with attributes *attrs* to the live LDAP
    server configured in CONF.ldap, using a short-lived bound connection."""
    connection = ldap.initialize(CONF.ldap.url)
    connection.simple_bind_s(CONF.ldap.user, CONF.ldap.password)
    connection.add_s(dn, ldap.modlist.addModlist(attrs))
    connection.unbind_s()
class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
    def clear_database(self):
        """Wipe the live LDAP tree and recreate the base OUs the tests need."""
        # 'ldapdelete -r' recursively removes everything under the suffix.
        # Errors (e.g. the suffix not existing on a fresh server) are
        # deliberately discarded via /dev/null.
        devnull = open('/dev/null', 'w')
        subprocess.call(['ldapdelete',
                         '-x',
                         '-D', CONF.ldap.user,
                         '-H', CONF.ldap.url,
                         '-w', CONF.ldap.password,
                         '-r', CONF.ldap.suffix],
                        stderr=devnull)
        # The root entry's objectclass depends on whether the suffix is an
        # organizationalUnit ('ou=...') or a domain component style DN.
        if CONF.ldap.suffix.startswith('ou='):
            tree_dn_attrs = {'objectclass': 'organizationalUnit',
                             'ou': 'openstack'}
        else:
            tree_dn_attrs = {'objectclass': ['dcObject', 'organizationalUnit'],
                             'dc': 'openstack',
                             'ou': 'openstack'}
        create_object(CONF.ldap.suffix, tree_dn_attrs)
        # Recreate the subtrees keystone's LDAP backend expects.
        create_object(CONF.ldap.user_tree_dn,
                      {'objectclass': 'organizationalUnit',
                       'ou': 'Users'})
        create_object(CONF.ldap.role_tree_dn,
                      {'objectclass': 'organizationalUnit',
                       'ou': 'Roles'})
        create_object(CONF.ldap.tenant_tree_dn,
                      {'objectclass': 'organizationalUnit',
                       'ou': 'Projects'})
        create_object(CONF.ldap.group_tree_dn,
                      {'objectclass': 'organizationalUnit',
                       'ou': 'UserGroups'})
    def _set_config(self):
        # Point the test at the live-LDAP override config instead of the
        # default fake backend used by the parent test class.
        self.config([tests.etcdir('keystone.conf.sample'),
                     tests.testsdir('test_overrides.conf'),
                     tests.testsdir('backend_liveldap.conf')])
    def test_build_tree(self):
        """Regression test for building the tree names
        """
        # Logic is different from the fake backend: verify the real UserApi
        # derives its tree_dn from the configured user_tree_dn.
        user_api = identity_ldap.UserApi(CONF)
        self.assertTrue(user_api)
        self.assertEquals(user_api.tree_dn, CONF.ldap.user_tree_dn)
    def tearDown(self):
        # Call the base TestCase tearDown directly, skipping the parent
        # LDAPIdentity tearDown -- presumably because the live server is
        # reset via clear_database() instead; confirm against the parent.
        tests.TestCase.tearDown(self)
    def test_ldap_dereferencing(self):
        """alias_dereferencing controls whether LDAP aliases are followed."""
        # Build an alternate subtree containing user 'alt_fake1', plus an
        # alias entry under the regular user tree that points at it.
        alt_users_ldif = {'objectclass': ['top', 'organizationalUnit'],
                          'ou': 'alt_users'}
        alt_fake_user_ldif = {'objectclass': ['person', 'inetOrgPerson'],
                              'cn': 'alt_fake1',
                              'sn': 'alt_fake1'}
        aliased_users_ldif = {'objectclass': ['alias', 'extensibleObject'],
                              'aliasedobjectname': "ou=alt_users,%s" %
                              CONF.ldap.suffix}
        create_object("ou=alt_users,%s" % CONF.ldap.suffix, alt_users_ldif)
        create_object("%s=alt_fake1,ou=alt_users,%s" %
                      (CONF.ldap.user_id_attribute, CONF.ldap.suffix),
                      alt_fake_user_ldif)
        create_object("ou=alt_users,%s" % CONF.ldap.user_tree_dn,
                      aliased_users_ldif)

        # With dereferencing disabled, the aliased user must not be found.
        CONF.ldap.query_scope = 'sub'
        CONF.ldap.alias_dereferencing = 'never'
        self.identity_api = identity_ldap.Identity()
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          'alt_fake1')

        # 'searching' and 'always' both follow the alias during lookup.
        CONF.ldap.alias_dereferencing = 'searching'
        self.identity_api = identity_ldap.Identity()
        user_ref = self.identity_api.get_user('alt_fake1')
        self.assertEqual(user_ref['id'], 'alt_fake1')
        CONF.ldap.alias_dereferencing = 'always'
        self.identity_api = identity_ldap.Identity()
        user_ref = self.identity_api.get_user('alt_fake1')
        self.assertEqual(user_ref['id'], 'alt_fake1')
def test_base_ldap_connection_deref_option(self):
deref = ldap_common.parse_deref('default')
ldap_wrapper = ldap_common.LdapWrapper(CONF.ldap.url,
CONF.ldap.page_size,
alias_dereferencing=deref)
self.assertEqual(ldap.get_option(ldap.OPT_DEREF),
ldap_wrapper.conn.get_option(ldap.OPT_DEREF))
deref = ldap_common.parse_deref('always')
ldap_wrapper = ldap_common.LdapWrapper(CONF.ldap.url,
CONF.ldap.page_size,
alias_dereferencing=deref)
self.assertEqual(ldap.DEREF_ALWAYS,
ldap_wrapper.conn.get_option(ldap.OPT_DEREF))
deref = ldap_common.parse_deref('finding')
ldap_wrapper = ldap_common.LdapWrapper(CONF.ldap.url,
CONF.ldap.page_size,
alias_dereferencing=deref)
self.assertEqual(ldap.DEREF_FINDING,
ldap_wrapper.conn.get_option(ldap.OPT_DEREF))
deref = ldap_common.parse_deref('never')
ldap_wrapper = ldap_common.LdapWrapper(CONF.ldap.url,
CONF.ldap.page_size,
alias_dereferencing=deref)
self.assertEqual(ldap.DEREF_NEVER,
ldap_wrapper.conn.get_option(ldap.OPT_DEREF))
deref = ldap_common.parse_deref('searching')
ldap_wrapper = ldap_common.LdapWrapper(CONF.ldap.url,
CONF.ldap.page_size,
alias_dereferencing=deref)
self.assertEqual(ldap.DEREF_SEARCHING,
ldap_wrapper.conn.get_option(ldap.OPT_DEREF))
#FakeLDAP does not correctly process filters, so this test can only be run
#against a live LDAP server
def test_list_groups_for_user_filtered(self):
    """group_filter config must constrain list_groups_for_user results.

    Uses assertEqual throughout: assertEquals is a deprecated alias
    (removed in Python 3.12).
    """
    domain = self._get_domain_fixture()
    test_groups = []
    test_users = []
    GROUP_COUNT = 3
    USER_COUNT = 2

    # Create the users; the first joins every group, the second none.
    for x in range(0, USER_COUNT):
        new_user = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                    'password': uuid.uuid4().hex, 'enabled': True,
                    'domain_id': domain['id']}
        test_users.append(new_user)
        self.identity_api.create_user(new_user['id'], new_user)
    positive_user = test_users[0]
    negative_user = test_users[1]

    # Nobody is a member of anything yet.
    for x in range(0, USER_COUNT):
        group_refs = self.identity_api.list_groups_for_user(
            test_users[x]['id'])
        self.assertEqual(len(group_refs), 0)

    for x in range(0, GROUP_COUNT):
        new_group = {'id': uuid.uuid4().hex,
                     'domain_id': domain['id'],
                     'name': uuid.uuid4().hex}
        self.identity_api.create_group(new_group['id'], new_group)
        test_groups.append(new_group)

        # Membership grows by one per iteration for positive_user...
        group_refs = self.identity_api.list_groups_for_user(
            positive_user['id'])
        self.assertEqual(len(group_refs), x)

        self.identity_api.add_user_to_group(
            positive_user['id'],
            new_group['id'])
        group_refs = self.identity_api.list_groups_for_user(
            positive_user['id'])
        self.assertEqual(len(group_refs), x + 1)

        # ...while negative_user stays at zero.
        group_refs = self.identity_api.list_groups_for_user(
            negative_user['id'])
        self.assertEqual(len(group_refs), 0)

    # A filter matching nothing hides all memberships.
    CONF.ldap.group_filter = "(dn=xx)"
    self.reload_backends(CONF.identity.default_domain_id)
    group_refs = self.identity_api.list_groups_for_user(
        positive_user['id'])
    self.assertEqual(len(group_refs), 0)
    group_refs = self.identity_api.list_groups_for_user(
        negative_user['id'])
    self.assertEqual(len(group_refs), 0)

    # A match-all filter restores visibility of the real memberships.
    CONF.ldap.group_filter = "(objectclass=*)"
    self.reload_backends(CONF.identity.default_domain_id)
    group_refs = self.identity_api.list_groups_for_user(
        positive_user['id'])
    self.assertEqual(len(group_refs), GROUP_COUNT)
    group_refs = self.identity_api.list_groups_for_user(
        negative_user['id'])
    self.assertEqual(len(group_refs), 0)
def test_user_enable_attribute_mask(self):
    """Run the base enable-attribute-mask test against a real LDAP schema."""
    # Disable emulation and point at a real attribute (employeeType) so the
    # mask logic is exercised against the live directory.
    CONF.ldap.user_enabled_emulation = False
    CONF.ldap.user_enabled_attribute = 'employeeType'
    super(LiveLDAPIdentity, self).test_user_enable_attribute_mask()
def test_create_unicode_user_name(self):
    """Skipped: unicode user names fail against live LDAP (tracked upstream)."""
    self.skipTest('Addressed by bug #1172106')
def test_create_project_case_sensitivity(self):
    """The live-LDAP naming attribute is case insensitive, so the
    base-class case-sensitivity test must hit a name conflict."""
    with self.assertRaises(exception.Conflict):
        super(LiveLDAPIdentity, self).test_create_project_case_sensitivity()
def test_create_user_case_sensitivity(self):
    """The live-LDAP naming attribute is case insensitive, so the
    base-class case-sensitivity test must hit a name conflict."""
    with self.assertRaises(exception.Conflict):
        super(LiveLDAPIdentity, self).test_create_user_case_sensitivity()
def test_project_update_missing_attrs_with_a_falsey_value(self):
    """The live server rejects an empty description, so the base-class
    update test must fail with INVALID_SYNTAX."""
    with self.assertRaises(ldap.INVALID_SYNTAX):
        super(LiveLDAPIdentity,
              self).test_project_update_missing_attrs_with_a_falsey_value()
| apache-2.0 |
escapewindow/scriptworker | tests/test_client.py | 2 | 10920 | #!/usr/bin/env python
# coding=utf-8
"""Test scriptworker.client
"""
import json
import logging
import os
import sys
import tempfile
from copy import deepcopy
from shutil import copyfile
from unittest.mock import MagicMock
import aiohttp
import arrow
import pytest
import scriptworker.client as client
from scriptworker.constants import DEFAULT_CONFIG
from scriptworker.context import Context
from scriptworker.exceptions import ScriptWorkerException, ScriptWorkerTaskException, TaskVerificationError
# Locations of the static JSON fixtures used throughout this module.
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
PARTIAL_CREDS = os.path.join(TEST_DATA_DIR, "partial_credentials.json")
CLIENT_CREDS = os.path.join(TEST_DATA_DIR, "client_credentials.json")
SCHEMA = os.path.join(TEST_DATA_DIR, "basic_schema.json")
BASIC_TASK = os.path.join(TEST_DATA_DIR, "basic_task.json")
# constants helpers and fixtures {{{1
# LEGAL_URLS format:
#  1. valid_artifact_rules: tuple-of-dicts with `schemes`, `netlocs`, and `path_regexes`
#  2. valid_artifact_task_ids: list
#  3. url to test
#  4. expected `filepath` return value from `validate_artifact_url()`
LEGAL_URLS = (
    (
        deepcopy(DEFAULT_CONFIG["valid_artifact_rules"]),
        ["VALID_TASK_ID1", "VALID_TASK_ID2"],
        # %2F in the artifact path must be decoded into a path separator
        "https://queue.taskcluster.net/v1/task/VALID_TASK_ID2/artifacts/FILE_DIR%2FFILE_PATH",
        "FILE_DIR/FILE_PATH",
    ),
    (
        # custom rules: any scheme/netloc listed is fine, no task id required
        ({"schemes": ("ftp", "http"), "netlocs": ("example.com", "localhost"), "path_regexes": ("(?P<filepath>.*.baz)",)},),
        [],
        "http://localhost/FILE/PATH.baz",
        "FILE/PATH.baz",
    ),
)
# ILLEGAL_URLS format:
#  1. valid_artifact_rules: dict with `schemes`, `netlocs`, and `path_regexes`
#  2. valid_artifact_task_ids: list
#  3. url to test
ILLEGAL_URLS = (
    (
        # task id not in the allowed list
        deepcopy(DEFAULT_CONFIG["valid_artifact_rules"]),
        ["VALID_TASK_ID1", "VALID_TASK_ID2"],
        "https://queue.taskcluster.net/v1/task/INVALID_TASK_ID/artifacts/FILE_PATH",
    ),
    (
        # path does not match the artifacts path regex
        deepcopy(DEFAULT_CONFIG["valid_artifact_rules"]),
        ["VALID_TASK_ID1", "VALID_TASK_ID2"],
        "https://queue.taskcluster.net/v1/task/VALID_TASK_ID1/BAD_FILE_PATH",
    ),
    (
        # disallowed URL scheme
        deepcopy(DEFAULT_CONFIG["valid_artifact_rules"]),
        ["VALID_TASK_ID1", "VALID_TASK_ID2"],
        "BAD_SCHEME://queue.taskcluster.net/v1/task/VALID_TASK_ID1/artifacts/FILE_PATH",
    ),
    # disallowed netloc
    (deepcopy(DEFAULT_CONFIG["valid_artifact_rules"]), ["VALID_TASK_ID1", "VALID_TASK_ID2"], "https://BAD_NETLOC/v1/task/VALID_TASK_ID1/artifacts/FILE_PATH"),
    (
        (
            {
                "schemes": ["https"],
                "netlocs": ["example.com"],
                # missing filepath
                "path_regexes": [".*BAD_REGEX.*"],
            },
        ),
        [],
        "https://example.com/BAD_REGEX",
    ),
)
@pytest.fixture(scope="function")
def config(tmpdir):
    """Return a minimal scriptworker config dict rooted under *tmpdir*.

    The work dir is created on disk; the other directories are paths only.
    """
    work_dir = os.path.join(tmpdir, "work")
    os.makedirs(work_dir)
    artifact_dir = os.path.join(tmpdir, "artifact")
    cfg = {
        "work_dir": work_dir,
        "log_dir": os.path.join(tmpdir, "log"),
        "artifact_dir": artifact_dir,
        "task_log_dir": os.path.join(artifact_dir, "public", "logs"),
        "provisioner_id": "provisioner_id",
        "worker_type": "worker_type",
    }
    return cfg
@pytest.fixture(scope="function")
def schema():
    """Load and return the basic JSON schema fixture."""
    with open(SCHEMA, "r") as fh:
        return json.load(fh)
def populate_credentials(config, sources, start=None):
    """Copy credential fixtures into work_dir as credentials.<epoch>.json.

    Files are timestamped one minute apart, starting 20 minutes in the
    past by default, so timestamp-ordering logic can be exercised.
    """
    start = start or arrow.utcnow().shift(minutes=-20)
    for count, path in enumerate(sources):
        new_time = start.shift(minutes=count)
        copyfile(path, os.path.join(config["work_dir"], "credentials.{}.json".format(new_time.int_timestamp)))
def no_sleep(*_args, **_kwargs):
    """Stand-in for time.sleep in tests: accept anything, wait 0 seconds."""
    return 0
# tests {{{1
def test_get_missing_task(config):
with pytest.raises(ScriptWorkerTaskException):
client.get_task(config)
def test_get_task(config):
    """get_task loads and returns the task.json found in work_dir."""
    copyfile(BASIC_TASK, os.path.join(config["work_dir"], "task.json"))
    assert client.get_task(config)["this_is_a_task"] is True
def test_validate_task(schema):
    """The basic task fixture validates cleanly against the basic schema."""
    with open(BASIC_TASK, "r") as fh:
        task = json.load(fh)
    client.validate_json_schema(task, schema)
def test_invalid_task(schema):
    """A task wrapped in an unexpected envelope fails schema validation."""
    with open(BASIC_TASK, "r") as fh:
        task = json.load(fh)
    with pytest.raises(ScriptWorkerTaskException):
        client.validate_json_schema({"foo": task}, schema)
# Minimal inline schema: a task must carry a non-empty, unique list of
# scope strings.  Used by the validate_task_schema tests below.
_TASK_SCHEMA = {
    "title": "Task minimal schema",
    "type": "object",
    "properties": {"scopes": {"type": "array", "minItems": 1, "uniqueItems": True, "items": {"type": "string"}}},
    "required": ["scopes"],
}
@pytest.mark.parametrize("raises, task", ((True, {}), (False, {"scopes": ["one:scope"]})))
def test_validate_task_schema(raises, task):
    """validate_task_schema rejects tasks that miss the required scopes."""
    context = MagicMock()
    context.task = task
    # Write the schema to a temp file so context.config can point at it.
    with tempfile.NamedTemporaryFile("w+") as f:
        json.dump(_TASK_SCHEMA, f)
        f.seek(0)
        context.config = {"schema_file": f.name}

        if raises:
            with pytest.raises(TaskVerificationError):
                client.validate_task_schema(context)
        else:
            client.validate_task_schema(context)
def test_validate_task_schema_with_deep_key():
    """schema_key may address a nested config entry via a dotted path."""
    context = MagicMock()
    context.task = {"scopes": ["one:scope"]}
    with tempfile.NamedTemporaryFile("w+") as f:
        json.dump(_TASK_SCHEMA, f)
        f.seek(0)
        context.config = {"first_layer": {"second_layer": f.name}}

        client.validate_task_schema(context, schema_key="first_layer.second_layer")
@pytest.mark.parametrize("valid_artifact_rules,valid_artifact_task_ids,url,expected", LEGAL_URLS)
def test_artifact_url(valid_artifact_rules, valid_artifact_task_ids, url, expected):
    """Legal artifact URLs resolve to the expected relative filepath."""
    value = client.validate_artifact_url(valid_artifact_rules, valid_artifact_task_ids, url)
    assert value == expected
@pytest.mark.parametrize("valid_artifact_rules,valid_artifact_task_ids,url", ILLEGAL_URLS)
def test_bad_artifact_url(valid_artifact_rules, valid_artifact_task_ids, url):
    """Illegal artifact URLs raise ScriptWorkerTaskException."""
    with pytest.raises(ScriptWorkerTaskException):
        client.validate_artifact_url(valid_artifact_rules, valid_artifact_task_ids, url)
@pytest.mark.asyncio
@pytest.mark.parametrize("should_validate_task", (True, False))
async def test_sync_main_runs_fully(config, should_validate_task):
    """sync_main must schedule async_main exactly once on the event loop."""
    copyfile(BASIC_TASK, os.path.join(config["work_dir"], "task.json"))
    async_main_calls = []
    run_until_complete_calls = []

    async def async_main(*args):
        async_main_calls.append(args)

    def count_run_until_complete(arg1):
        run_until_complete_calls.append(arg1)

    # Fake event loop so we can count run_until_complete invocations
    # instead of actually running the coroutine.
    fake_loop = MagicMock()
    fake_loop.run_until_complete = count_run_until_complete

    def loop_function():
        return fake_loop

    kwargs = {"loop_function": loop_function}

    if should_validate_task:
        schema_path = os.path.join(config["work_dir"], "schema.json")
        copyfile(SCHEMA, schema_path)
        config["schema_file"] = schema_path
    else:
        # Task is validated by default
        kwargs["should_validate_task"] = False

    with tempfile.NamedTemporaryFile("w+") as f:
        json.dump(config, f)
        f.seek(0)
        kwargs["config_path"] = f.name
        client.sync_main(async_main, **kwargs)

    for i in run_until_complete_calls:
        await i  # suppress coroutine not awaited warning
    assert len(run_until_complete_calls) == 1  # run_until_complete was called once
    assert len(async_main_calls) == 1  # async_main was called once
@pytest.mark.parametrize(
    "does_use_argv, default_config",
    (
        (True, None),
        (True, {"some_param_only_in_default": "default_value", "worker_type": "default_value"}),
        (False, None),
        # NOTE(review): this row previously duplicated the second one
        # ((True, {...})); presumably the intent was to also cover
        # default_config without argv — confirm against upstream history.
        (False, {"some_param_only_in_default": "default_value", "worker_type": "default_value"}),
    ),
)
def test_init_context(config, monkeypatch, mocker, does_use_argv, default_config):
    """_init_context loads config + task, merging (but not overriding with)
    default_config, whether the config path comes from argv or kwargs."""
    copyfile(BASIC_TASK, os.path.join(config["work_dir"], "task.json"))
    with tempfile.NamedTemporaryFile("w+") as f:
        json.dump(config, f)
        f.seek(0)
        kwargs = {"default_config": default_config}
        if does_use_argv:
            monkeypatch.setattr(sys, "argv", ["some_binary_name", f.name])
        else:
            kwargs["config_path"] = f.name

        context = client._init_context(**kwargs)

        assert isinstance(context, Context)
        assert context.task["this_is_a_task"] is True

        expected_config = deepcopy(config)
        if default_config:
            expected_config["some_param_only_in_default"] = "default_value"
        assert context.config == expected_config
        # Values in the real config must win over default_config.
        assert context.config["worker_type"] != "default_value"

        # _init_context must not re-read any file after returning.
        mock_open = mocker.patch("builtins.open")
        mock_open.assert_not_called()
def test_fail_init_context(capsys, monkeypatch):
    """_init_context must exit for every argv length except the valid one (2)."""
    for i in range(1, 10):
        if i == 2:
            # expected working case
            continue

        argv = ["argv{}".format(j) for j in range(i)]
        monkeypatch.setattr(sys, "argv", argv)
        with pytest.raises(SystemExit):
            client._init_context()

        # XXX This prevents usage from being printed out when the test is passing. Assertions are
        # done in test_usage
        capsys.readouterr()
def test_usage(capsys, monkeypatch):
    """_usage exits and prints the usage line to stderr only."""
    monkeypatch.setattr(sys, "argv", ["my_binary"])
    with pytest.raises(SystemExit):
        client._usage()

    captured = capsys.readouterr()
    assert captured.out == ""
    assert captured.err == "Usage: my_binary CONFIG_FILE\n"
@pytest.mark.parametrize("is_verbose, log_level", ((True, logging.DEBUG), (False, logging.INFO)))
def test_init_logging(monkeypatch, is_verbose, log_level):
    """verbose config toggles DEBUG vs INFO; the taskcluster logger is quieted."""
    context = MagicMock()
    context.config = {"verbose": is_verbose}
    basic_config_mock = MagicMock()
    monkeypatch.setattr(logging, "basicConfig", basic_config_mock)

    client._init_logging(context)

    basic_config_mock.assert_called_once_with(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=log_level)
    assert logging.getLogger("taskcluster").level == logging.WARNING
@pytest.mark.asyncio
async def test_handle_asyncio_loop():
    """_handle_asyncio_loop awaits async_main with an aiohttp session attached."""
    context = MagicMock()
    context.was_async_main_called = False

    async def async_main(context):
        context.was_async_main_called = True

    await client._handle_asyncio_loop(async_main, context)

    assert isinstance(context.session, aiohttp.ClientSession)
    assert context.was_async_main_called is True
@pytest.mark.asyncio
async def test_fail_handle_asyncio_loop(mocker):
    """A ScriptWorkerException from async_main becomes sys.exit(exit_code)
    and is logged via log.exception."""
    context = MagicMock()
    m = mocker.patch.object(client, "log")

    async def async_error(context):
        exception = ScriptWorkerException("async_error!")
        exception.exit_code = 42
        raise exception

    with pytest.raises(SystemExit) as excinfo:
        await client._handle_asyncio_loop(async_error, context)

    assert excinfo.value.code == 42
    m.exception.assert_called_once_with("Failed to run async_main")
| mpl-2.0 |
eyohansa/django | django/conf/locale/it/formats.py | 504 | 2079 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'  # 25 Ottobre 2006
TIME_FORMAT = 'H:i'  # 14:30
DATETIME_FORMAT = 'l d F Y H:i'  # Mercoledì 25 Ottobre 2006 14:30
YEAR_MONTH_FORMAT = 'F Y'  # Ottobre 2006
MONTH_DAY_FORMAT = 'j/F'  # 25/Ottobre ('j' = day of month, 'F' = month name)
SHORT_DATE_FORMAT = 'd/m/Y'  # 25/12/2009
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'  # 25/10/2009 14:30
FIRST_DAY_OF_WEEK = 1  # Lunedì

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d/%m/%Y', '%Y/%m/%d',  # '25/10/2006', '2008/10/25'
    '%d-%m-%Y', '%Y-%m-%d',  # '25-10-2006', '2008-10-25'
    '%d-%m-%y', '%d/%m/%y',  # '25-10-06', '25/10/06'
]
DATETIME_INPUT_FORMATS = [
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
    '%d/%m/%y %H:%M:%S',     # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M:%S.%f',  # '25/10/06 14:30:59.000200'
    '%d/%m/%y %H:%M',        # '25/10/06 14:30'
    '%d/%m/%y',              # '25/10/06'
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%d-%m-%Y %H:%M:%S',     # '25-10-2006 14:30:59'
    '%d-%m-%Y %H:%M:%S.%f',  # '25-10-2006 14:30:59.000200'
    '%d-%m-%Y %H:%M',        # '25-10-2006 14:30'
    '%d-%m-%Y',              # '25-10-2006'
    '%d-%m-%y %H:%M:%S',     # '25-10-06 14:30:59'
    '%d-%m-%y %H:%M:%S.%f',  # '25-10-06 14:30:59.000200'
    '%d-%m-%y %H:%M',        # '25-10-06 14:30'
    '%d-%m-%y',              # '25-10-06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
sassoftware/conary | conary_test/cvctest/buildtest/macrostest.py | 2 | 2983 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from conary_test import rephelp
from conary.build import macros
class TestUse(rephelp.RepositoryHelper):
    """Unit tests for conary.build.macros.Macros behavior."""

    def testMacros(self):
        """Copy independence, interpolation, change tracking, overrides,
        and literalRegex escaping."""
        m1 = macros.Macros()
        m1.a = 'foo'
        assert(m1.a == 'foo')
        # copy() yields an independent namespace
        m2 = m1.copy()
        m2.a = 'bar'
        assert(m1.a == 'foo')
        assert(m2.a == 'bar')
        # copy(False) must also be independent of its source
        m3 = m2.copy(False)
        m3.a = 'baz'
        assert(m2.a == 'bar')
        assert(m3.a == 'baz')
        # plain assignment aliases the same object
        m4 = m3
        m4.a = 'blah'
        assert(m3.a == 'blah')
        # %(name)s references are interpolated on access
        m1.b = '%(a)s/asdf'
        assert(m1.b == 'foo/asdf')
        # change tracking records only names set while tracking is enabled
        m1.trackChanges()
        m1.c = 'foo'
        assert(m1.getTrackedChanges() == ['c'])
        m1.trackChanges(False)
        m1.d = 'bar'
        assert(m1.getTrackedChanges() == ['c'])
        # _override() wins over later plain assignments
        m1.e = '1'
        m1._override('e', '2')
        m1.e = '3'
        assert(m1.e == '2')
        # .literalRegex escapes regex metacharacters in the stored value
        m1.r = 'foo++'
        assert(m1.r == 'foo++')
        assert(m1['r'] == 'foo++')
        assert(str(m1['r.literalRegex']) == 'foo\+\+')
        assert(str("%(r.literalRegex)s" % m1) == 'foo\+\+')

    def testIterItems(self):
        """Iteration must include keys inherited from the parent copy."""
        m1 = macros.Macros()
        m1.a = 'a'
        m1.b = 'b'
        m2 = m1.copy()
        m2.c = 'c'
        iterkeys = [ x for x in m2.iterkeys() ]
        iterkeys.sort()
        assert(iterkeys == ['a', 'b', 'c'])
        keys = m2.keys()
        keys.sort()
        assert(keys == ['a', 'b', 'c'])
        iteritems = [ x for x in m2.iteritems() ]
        iteritems.sort()
        assert(iteritems == [('a', 'a'), ('b', 'b'), ('c', 'c')])

    def testUpdate(self):
        """update() merges keys from another Macros, including keys the
        other Macros inherited from its own parent copy."""
        m1 = macros.Macros()
        m1.a = 'a'
        m1.b = 'b'
        m2 = m1.copy()
        m2.c = 'c'
        m3 = macros.Macros()
        m3.d = 'd'
        m3.e = 'e'
        m4 = m3.copy()
        m4.f = 'f'
        m2.update(m4)
        keys = m2.keys()
        keys.sort()
        assert(keys == ['a', 'b', 'c', 'd', 'e', 'f'])

    def testGet(self):
        """_get() returns the raw, uninterpolated value."""
        m1 = macros.Macros()
        m1.march = 'i386'
        m1.target = '%(march)s-unknown-linux'
        assert(m1.target == 'i386-unknown-linux')
        assert(m1._get('target') == '%(march)s-unknown-linux')

    def testCallback(self):
        """setCallback registers a hook for a macro name."""
        a = [1]
        m1 = macros.Macros()
        def myfun(name):
            a.append(2)
        m1.setCallback('foo', myfun)
        m1.foo = 'hello'
        assert('%(foo)s' % m1 == 'hello')
        # exactly one callback invocation is expected here — presumably
        # fired on interpolation rather than assignment; confirm in macros.py
        assert(a == [1,2])
| apache-2.0 |
dweinstein/mitmproxy | libmproxy/console/searchable.py | 29 | 2808 | import urwid
from . import signals
class Highlight(urwid.AttrMap):
    """Wrap a Text widget with the "focusfield" attribute to mark a search hit.

    The original widget is kept in ``backup`` so the highlight can be undone.
    """

    def __init__(self, t):
        urwid.AttrMap.__init__(
            self,
            urwid.Text(t.text),
            "focusfield",
        )
        self.backup = t
class Searchable(urwid.ListBox):
    """A ListBox with vi-style search ('/', 'n', 'N') and g/G navigation.

    The current match is wrapped in Highlight; the previous widget is
    restored whenever the highlight moves or the search fails.
    """

    def __init__(self, state, contents):
        self.walker = urwid.SimpleFocusListWalker(contents)
        urwid.ListBox.__init__(self, self.walker)
        self.state = state
        self.search_offset = 0
        self.current_highlight = None   # index of the Highlight-wrapped row
        self.search_term = None

    def keypress(self, size, key):
        if key == "/":
            signals.status_prompt.send(
                prompt = "Search for",
                text = "",
                callback = self.set_search
            )
        elif key == "n":
            self.find_next(False)
        elif key == "N":
            self.find_next(True)
        elif key == "g":
            self.set_focus(0)
            self.walker._modified()
        elif key == "G":
            self.set_focus(len(self.walker) - 1)
            self.walker._modified()
        else:
            # Name the class explicitly: super(self.__class__, self) would
            # recurse forever if a subclass inherited this method.
            return super(Searchable, self).keypress(size, key)

    def set_search(self, text):
        """Prompt callback: store the term and jump to the first match."""
        self.state.last_search = text
        self.search_term = text or None
        self.find_next(False)

    def set_highlight(self, offset):
        """Move the highlight to *offset* (or clear it when offset is None)."""
        if self.current_highlight is not None:
            old = self.body[self.current_highlight]
            self.body[self.current_highlight] = old.backup
        if offset is None:
            self.current_highlight = None
        else:
            self.body[offset] = Highlight(self.body[offset])
            self.current_highlight = offset

    def get_text(self, w):
        """Return the searchable text of a row, unwrapping a Highlight."""
        if isinstance(w, urwid.Text):
            return w.text
        elif isinstance(w, Highlight):
            return w.backup.text
        else:
            return None

    def find_next(self, backwards):
        """Search from focus+1 (wrapping) and highlight the next match."""
        if not self.search_term:
            # Fall back to the last search term, if any.
            if self.state.last_search:
                self.search_term = self.state.last_search
            else:
                self.set_highlight(None)
                return
        # Start search at focus + 1
        if backwards:
            rng = xrange(len(self.body) - 1, -1, -1)
        else:
            rng = xrange(1, len(self.body) + 1)
        for i in rng:
            off = (self.focus_position + i) % len(self.body)
            w = self.body[off]
            txt = self.get_text(w)
            if txt and self.search_term in txt:
                self.set_highlight(off)
                self.set_focus(off, coming_from="above")
                self.body._modified()
                return
        else:
            self.set_highlight(None)
            signals.status_message.send(message="Search not found.", expire=1)
| mit |
Lukasa/cryptography | docs/development/custom-vectors/cast5/generate_cast5.py | 3 | 2745 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import binascii
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms, base, modes
def encrypt(mode, key, iv, plaintext):
    """CAST5-encrypt hex-encoded *plaintext* and return hex ciphertext.

    *key*, *iv*, and *plaintext* are hex strings; *mode* is a cipher-mode
    class such as modes.CBC.
    """
    cipher = base.Cipher(
        algorithms.CAST5(binascii.unhexlify(key)),
        mode(binascii.unhexlify(iv)),
        default_backend()
    )
    encryptor = cipher.encryptor()
    ct = encryptor.update(binascii.unhexlify(plaintext))
    ct += encryptor.finalize()
    return binascii.hexlify(ct)
def build_vectors(mode, filename):
    """Parse an NIST-style .rsp vector file and re-encrypt it with CAST5.

    Returns the new vector file contents as a single string.  The input
    handle is now closed deterministically (the original leaked it).
    """
    count = 0
    output = []
    key = None
    iv = None
    plaintext = None
    with open(filename, "r") as vector_file:
        for line in vector_file:
            line = line.strip()
            if line.startswith("KEY"):
                # A new KEY starts the next vector: flush the previous one.
                if count != 0:
                    output.append("CIPHERTEXT = {}".format(
                        encrypt(mode, key, iv, plaintext))
                    )
                output.append("\nCOUNT = {}".format(count))
                count += 1
                name, key = line.split(" = ")
                output.append("KEY = {}".format(key))
            elif line.startswith("IV"):
                name, iv = line.split(" = ")
                # CAST5 uses a 64-bit IV: keep only the first 8 bytes
                # (16 hex chars) of the AES IV.
                iv = iv[0:16]
                output.append("IV = {}".format(iv))
            elif line.startswith("PLAINTEXT"):
                name, plaintext = line.split(" = ")
                output.append("PLAINTEXT = {}".format(plaintext))

    # Emit the ciphertext for the final vector in the file.
    output.append("CIPHERTEXT = {}".format(encrypt(mode, key, iv, plaintext)))
    return "\n".join(output)
def write_file(data, filename):
    """Write *data* (text) to *filename*, replacing any existing file."""
    out = open(filename, "w")
    try:
        out.write(data)
    finally:
        out.close()
# Module-level driver: reuse the NIST AES MMT/CTR vector files as input and
# emit CAST5 vectors for each supported mode.  Note this runs on import
# (there is no __main__ guard) and expects to be run from the repo root.
cbc_path = "tests/hazmat/primitives/vectors/ciphers/AES/CBC/CBCMMT128.rsp"
write_file(build_vectors(modes.CBC, cbc_path), "cast5-cbc.txt")
ofb_path = "tests/hazmat/primitives/vectors/ciphers/AES/OFB/OFBMMT128.rsp"
write_file(build_vectors(modes.OFB, ofb_path), "cast5-ofb.txt")
cfb_path = "tests/hazmat/primitives/vectors/ciphers/AES/CFB/CFB128MMT128.rsp"
write_file(build_vectors(modes.CFB, cfb_path), "cast5-cfb.txt")
ctr_path = "tests/hazmat/primitives/vectors/ciphers/AES/CTR/aes-128-ctr.txt"
write_file(build_vectors(modes.CTR, ctr_path), "cast5-ctr.txt")
| apache-2.0 |
uRndUsr/infinitecoin | contrib/pyminer/pyminer.py | 1257 | 6438 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15          # seconds to back off after a failed RPC call
MAX_NONCE = 1000000L    # initial nonce scan range per getwork request
settings = {}           # populated from the config file in __main__
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for bitcoind over HTTP basic auth.

    Python 2 code (httplib, long literals elsewhere in this file).
    """
    OBJID = 1  # class-wide JSON-RPC request id counter

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # 30 second timeout, no TLS
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return the result, an error dict, or None."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # NOTE: RPC errors are returned to the caller, not raised.
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        return self.rpc('getwork', data)
def uint32(x):
    # Truncate a Python long to an unsigned 32-bit value.
    return x & 0xffffffffL
def bytereverse(x):
    # Swap the byte order of a 32-bit word (endianness flip).
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
        (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    # Byte-swap each 32-bit word of a binary buffer in place-order.
    # Assumes len(in_buf) is a multiple of 4.
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    # Reverse the order of the 32-bit words in a binary buffer
    # (word contents are left untouched).
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    """One mining worker: fetches getwork jobs and scans a nonce range."""

    def __init__(self, id):
        self.id = id
        # Upper bound of nonces scanned per getwork; retuned every iteration
        # so one scan takes roughly settings['scantime'] seconds.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for one job; return (hashes_done, winning nonce or None)."""
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header (shared by every nonce)
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and send it upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # nonce occupies hex chars 152-160 of the 256-char data field
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """Fetch one getwork job, scan it, retune max_nonce, submit any win."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Retune scan range so a full scan lasts about 'scantime' seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to bitcoind and mine forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner's infinite loop."""
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse a simple key=value config file; '#' lines are comments.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for anything the config file omitted.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values arrive as strings; coerce the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Fan out one OS process per mining thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
jupierce/openshift-tools | openshift/installer/vendored/openshift-ansible-git-2016-04-18/roles/os_firewall/library/os_firewall_manage_iptables.py | 68 | 10474 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=fixme, missing-docstring
import subprocess
from subprocess import call, check_output
# Ansible module metadata consumed by ansible-doc.
DOCUMENTATION = '''
---
module: os_firewall_manage_iptables
short_description: This module manages iptables rules for a given chain
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
class IpTablesError(Exception):
    """Base error for iptables operations.

    Carries the failing command, its exit code, and its captured output so
    callers can report the failure with full context.
    """

    def __init__(self, msg, cmd, exit_code, output):
        super(IpTablesError, self).__init__(msg)
        # Preserve every detail of the failure for the caller.
        self.msg, self.cmd = msg, cmd
        self.exit_code, self.output = exit_code, output
class IpTablesAddRuleError(IpTablesError):
    # Raised when appending an ACCEPT rule to the managed chain fails.
    pass
class IpTablesRemoveRuleError(IpTablesError):
    # Raised when deleting a rule from the managed chain fails.
    pass
class IpTablesSaveError(IpTablesError):
    # Raised when persisting the iptables rules to disk fails.
    pass
class IpTablesCreateChainError(IpTablesError):
    """Raised when creating an iptables chain fails; records the chain name."""

    def __init__(self, chain, msg, cmd, exit_code, output):  # pylint: disable=too-many-arguments, line-too-long
        self.chain = chain
        super(IpTablesCreateChainError, self).__init__(msg, cmd, exit_code,
                                                       output)
class IpTablesCreateJumpRuleError(IpTablesError):
    """Raised when installing the jump rule into a chain fails."""

    def __init__(self, chain, msg, cmd, exit_code, output):  # pylint: disable=too-many-arguments, line-too-long
        self.chain = chain
        super(IpTablesCreateJumpRuleError, self).__init__(msg, cmd, exit_code,
                                                          output)
# TODO: implement rollbacks for any events that were successful and an
# exception was thrown later. For example, when the chain is created
# successfully, but the add/remove rule fails.
class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def __init__(self, module):
self.module = module
self.ip_version = module.params['ip_version']
self.check_mode = module.check_mode
self.chain = module.params['chain']
self.create_jump_rule = module.params['create_jump_rule']
self.jump_rule_chain = module.params['jump_rule_chain']
self.cmd = self.gen_cmd()
self.save_cmd = self.gen_save_cmd()
self.output = []
self.changed = False
def save(self):
try:
self.output.append(check_output(self.save_cmd,
stderr=subprocess.STDOUT))
except subprocess.CalledProcessError as ex:
raise IpTablesSaveError(
msg="Failed to save iptables rules",
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def verify_chain(self):
if not self.chain_exists():
self.create_chain()
if self.create_jump_rule and not self.jump_rule_exists():
self.create_jump()
def add_rule(self, port, proto):
rule = self.gen_rule(port, proto)
if not self.rule_exists(rule):
self.verify_chain()
if self.check_mode:
self.changed = True
self.output.append("Create rule for %s %s" % (proto, port))
else:
cmd = self.cmd + ['-A'] + rule
try:
self.output.append(check_output(cmd))
self.changed = True
self.save()
except subprocess.CalledProcessError as ex:
raise IpTablesCreateChainError(
chain=self.chain,
msg="Failed to create rule for "
"%s %s" % (proto, port),
cmd=ex.cmd, exit_code=ex.returncode,
output=ex.output)
def remove_rule(self, port, proto):
rule = self.gen_rule(port, proto)
if self.rule_exists(rule):
if self.check_mode:
self.changed = True
self.output.append("Remove rule for %s %s" % (proto, port))
else:
cmd = self.cmd + ['-D'] + rule
try:
self.output.append(check_output(cmd))
self.changed = True
self.save()
except subprocess.CalledProcessError as ex:
raise IpTablesRemoveRuleError(
chain=self.chain,
msg="Failed to remove rule for %s %s" % (proto, port),
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output)
def rule_exists(self, rule):
check_cmd = self.cmd + ['-C'] + rule
return True if call(check_cmd) == 0 else False
def gen_rule(self, port, proto):
return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
'-m', proto, '--dport', str(port), '-j', 'ACCEPT']
    def create_jump(self):
        """Insert or append a jump rule from jump_rule_chain into our chain.

        Lists the jump chain with line numbers to find the last numbered rule;
        if that rule is a REJECT, the jump is inserted just before it so our
        chain is consulted first, otherwise the jump is simply appended.

        Raises IpTablesCreateJumpRuleError on failure of either step.
        """
        if self.check_mode:
            self.changed = True
            self.output.append("Create jump rule for chain %s" % self.chain)
        else:
            try:
                cmd = self.cmd + ['-L', self.jump_rule_chain, '--line-numbers']
                output = check_output(cmd, stderr=subprocess.STDOUT)
                # break the input rules into rows and columns
                input_rules = [s.split() for s in output.split('\n')]
                # Find the last numbered rule
                last_rule_num = None
                last_rule_target = None
                for rule in input_rules[:-1]:
                    if rule:
                        try:
                            last_rule_num = int(rule[0])
                        except ValueError:
                            continue
                        last_rule_target = rule[1]
                # Naively assume that if the last row is a REJECT rule, then
                # we can add insert our rule right before it, otherwise we
                # assume that we can just append the rule.
                if (last_rule_num and last_rule_target
                        and last_rule_target == 'REJECT'):
                    # insert rule before the trailing REJECT
                    cmd = self.cmd + ['-I', self.jump_rule_chain,
                                      str(last_rule_num)]
                else:
                    # append rule
                    cmd = self.cmd + ['-A', self.jump_rule_chain]
                cmd += ['-j', self.chain]
                output = check_output(cmd, stderr=subprocess.STDOUT)
                self.changed = True
                self.output.append(output)
                self.save()
            except subprocess.CalledProcessError as ex:
                # '--line-numbers' only appears in the listing command, so its
                # presence distinguishes a query failure from a create failure.
                if '--line-numbers' in ex.cmd:
                    raise IpTablesCreateJumpRuleError(
                        chain=self.chain,
                        msg=("Failed to query existing " +
                             self.jump_rule_chain +
                             " rules to determine jump rule location"),
                        cmd=ex.cmd, exit_code=ex.returncode,
                        output=ex.output)
                else:
                    raise IpTablesCreateJumpRuleError(
                        chain=self.chain,
                        msg=("Failed to create jump rule for chain " +
                             self.chain),
                        cmd=ex.cmd, exit_code=ex.returncode,
                        output=ex.output)
def create_chain(self):
if self.check_mode:
self.changed = True
self.output.append("Create chain %s" % self.chain)
else:
try:
cmd = self.cmd + ['-N', self.chain]
self.output.append(check_output(cmd,
stderr=subprocess.STDOUT))
self.changed = True
self.output.append("Successfully created chain %s" %
self.chain)
self.save()
except subprocess.CalledProcessError as ex:
raise IpTablesCreateChainError(
chain=self.chain,
msg="Failed to create chain: %s" % self.chain,
cmd=ex.cmd, exit_code=ex.returncode, output=ex.output
)
def jump_rule_exists(self):
cmd = self.cmd + ['-C', self.jump_rule_chain, '-j', self.chain]
return True if call(cmd) == 0 else False
def chain_exists(self):
cmd = self.cmd + ['-L', self.chain]
return True if call(cmd) == 0 else False
def gen_cmd(self):
cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
return ["/usr/sbin/%s" % cmd]
def gen_save_cmd(self): # pylint: disable=no-self-use
return ['/usr/libexec/iptables/iptables.init', 'save']
def main():
    """Ansible module entry point.

    Parses the module parameters, validates that `protocol` and `port` are
    supplied for add/remove actions, then dispatches the requested action to
    IpTablesManager and reports changed/output back to Ansible.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            action=dict(required=True, choices=['add', 'remove',
                                                'verify_chain']),
            chain=dict(required=False, default='OS_FIREWALL_ALLOW'),
            create_jump_rule=dict(required=False, type='bool', default=True),
            jump_rule_chain=dict(required=False, default='INPUT'),
            protocol=dict(required=False, choices=['tcp', 'udp']),
            port=dict(required=False, type='int'),
            ip_version=dict(required=False, default='ipv4',
                            choices=['ipv4', 'ipv6']),
        ),
        supports_check_mode=True
    )
    action = module.params['action']
    protocol = module.params['protocol']
    port = module.params['port']
    # add/remove operate on a specific rule, so both protocol and port are
    # mandatory for those actions (verify_chain needs neither).
    if action in ['add', 'remove']:
        if not protocol:
            error = "protocol is required when action is %s" % action
            module.fail_json(msg=error)
        if not port:
            error = "port is required when action is %s" % action
            module.fail_json(msg=error)
    iptables_manager = IpTablesManager(module)
    try:
        if action == 'add':
            iptables_manager.add_rule(port, protocol)
        elif action == 'remove':
            iptables_manager.remove_rule(port, protocol)
        elif action == 'verify_chain':
            iptables_manager.verify_chain()
    except IpTablesError as ex:
        module.fail_json(msg=ex.msg)
    return module.exit_json(changed=iptables_manager.changed,
                            output=iptables_manager.output)
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
# NOTE: the classic (pre-Ansible-2.0) module convention requires this wildcard
# import at the bottom of the file; it provides AnsibleModule.
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| apache-2.0 |
wisechengyi/pants | src/python/pants/python/executable_pex_tool.py | 2 | 1754 | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import TYPE_CHECKING, List, Optional
from pex.pex import PEX
from pex.pex_builder import PEXBuilder
from pex.pex_info import PexInfo
from pants.python.pex_build_util import PexBuilderWrapper
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import is_executable, safe_concurrent_creation
if TYPE_CHECKING:
from pants.python.python_requirement import PythonRequirement # noqa
class ExecutablePexTool(Subsystem):
    """Subsystem that bootstraps an executable PEX from a fixed set of base
    requirements plus optional extras, caching the result on disk."""

    # Optional console entry point baked into the built PEX.
    entry_point: Optional[str] = None

    # Requirements always resolved into the PEX, before any extras.
    base_requirements: List["PythonRequirement"] = []

    @classmethod
    def subsystem_dependencies(cls):
        inherited = super().subsystem_dependencies()
        return inherited + (PexBuilderWrapper.Factory,)

    def bootstrap(
        self, interpreter, pex_file_path, extra_reqs: Optional[List["PythonRequirement"]] = None
    ) -> PEX:
        """Build (or reuse) the PEX at `pex_file_path` and return a PEX handle.

        Caching is done just by checking whether the file at the given path is
        already executable; if so, the build is skipped entirely.
        """
        if is_executable(pex_file_path):
            return PEX(pex_file_path, interpreter)

        pex_info = PexInfo.default(interpreter=interpreter)
        if self.entry_point is not None:
            pex_info.entry_point = self.entry_point

        requirements = [*self.base_requirements, *(extra_reqs or [])]
        # safe_concurrent_creation builds into a temp path and atomically
        # renames, so concurrent bootstraps cannot observe a partial file.
        with safe_concurrent_creation(pex_file_path) as safe_path:
            wrapper = PexBuilderWrapper.Factory.create(
                builder=PEXBuilder(interpreter=interpreter, pex_info=pex_info)
            )
            wrapper.add_resolved_requirements(requirements, platforms=["current"])
            wrapper.build(safe_path)
        return PEX(pex_file_path, interpreter)
| apache-2.0 |
bcharlas/mytrunk | examples/WireMatPM/wirecontacttest.py | 8 | 5316 | # -*- coding: utf-8 -*-
from yade import plot, qt
#### define parameters for the net
# wire diameter
d = 2.7/1000.
# particle radius
radius = d*4.
# define piecewise linear stress-strain curve [Pa]
strainStressValues=[(0.0019230769,2.5e8),(0.0192,3.2195e8),(0.05,3.8292e8),(0.15,5.1219e8),(0.25,5.5854e8),(0.3,5.6585e8),(0.35,5.6585e8)]
# elastic material properties
particleVolume = 4./3.*pow(radius,3)*pi
particleMass = 3.9/1000.
density = particleMass/particleVolume
young = strainStressValues[0][1] / strainStressValues[0][0]
poisson = 0.3
#### material definition
netMat = O.materials.append( WireMat( young=young,poisson=poisson,frictionAngle=radians(30),density=density,isDoubleTwist=True,diameter=d,strainStressValues=strainStressValues,lambdaEps=0.4,lambdak=0.66) )
wireMat = O.materials.append( WireMat( young=young,poisson=poisson,frictionAngle=radians(30),density=density,isDoubleTwist=False,diameter=3.4/1000,strainStressValues=strainStressValues ) )
blocMat = O.materials.append(FrictMat(young=60e3,poisson=0.15,frictionAngle=radians(30),density=44.5/((4./3.*pi*0.02**3)*1576.)))
#### define parameters for the net packing
# mesh geometry
mos = 0.08
a = 0.04
b = 0.04
# wire diameter
d = 2.7/1000.
# net dimension
cornerCoord=[0,0,0]
Lx = 2.
Ly = 2.
# properties of particles
kw = {'color':[0,1,0],'wire':True,'highlight':False,'fixed':False,'material':netMat}
##### create packing for net
[netpack,lx,ly] = hexaNet( radius=radius, cornerCoord=cornerCoord, xLength=Lx, yLength=Ly, mos=mos, a=a, b=b, startAtCorner=True, isSymmetric=False, **kw )
O.bodies.append(netpack)
#### get bodies for single wire at the boundary in y-direction and change properties
bb = uniaxialTestFeatures(axis=0)
negIds,posIds=bb['negIds'],bb['posIds']
for id in negIds:
O.bodies[id].material = O.materials[wireMat]
O.bodies[id].shape.color = [0,0,1]
for id in posIds:
O.bodies[id].material = O.materials[wireMat]
O.bodies[id].shape.color = [0,0,1]
#### define engines to create link
interactionRadius=2.8 # value has to be adjusted according to the particle size of the net and the mesh opening size of the net (check always if links are created)
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(aabbEnlargeFactor=interactionRadius,label='aabb')]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(interactionDetectionFactor=interactionRadius,label='Ig2ssGeom')],
[Ip2_WireMat_WireMat_WirePhys(linkThresholdIteration=1,label='wire_wire'),Ip2_FrictMat_FrictMat_FrictPhys(label='block_wire')],
[Law2_ScGeom_WirePhys_WirePM(linkThresholdIteration=1,label='Law_1'),Law2_ScGeom_FrictPhys_CundallStrack(label='Law_2')]
),
NewtonIntegrator(damping=0.),
]
#### define additional vertical interactions at the boundary for boundary wire
for i in range(24)[1::2]: # odd - start at second item and take every second item
createInteraction(negIds[i],negIds[i+1])
del posIds[1]
posIds.append(1)
for i in range(25)[::2]: # even - start at the beginning at take every second item
createInteraction(posIds[i],posIds[i+1])
#### time step definition for first time step to create links
O.step()
##### delete horizontal interactions for corner particles
bb = uniaxialTestFeatures(axis=1)
negIds,posIds,axis,crossSectionArea=bb['negIds'],bb['posIds'],bb['axis'],bb['area']
##### delete some interactions
O.interactions.erase(0,50)
O.interactions.erase(0,51)
O.interactions.erase(1,1250)
O.interactions.erase(1,1251)
#### time step definition for deleting some links which have been created by the Ig2 functor
O.step()
#### initializes now the interaction detection factor
aabb.aabbEnlargeFactor=-1.
Ig2ssGeom.interactionDetectionFactor=-1.
#### define boundary conditions
fixedIds=negIds
movingIds=posIds
for id in fixedIds:
O.bodies[id].shape.color = [1,0,0]
O.bodies[id].state.blockedDOFs='xyzXYZ'
for id in movingIds:
O.bodies[id].shape.color = [1,0,0]
O.bodies[id].state.blockedDOFs='xyzXYZ'
#### import block as a sphere after net has been created
bloc=O.bodies.append(sphere([1.0,1.0,0.65],radius=0.15,wire=False,highlight=False,color=[1,1,0],material=blocMat))
O.bodies[bloc].state.isDamped=False # switch damping off since free fall under gravity
#### plot some results
plot.plots={'t':['vz',None,('f_unbal','g--')]}
#plot.liveInterval=2.
plot.plot(noShow=False, subPlots=False)
def addPlotData():
plot.addData(t=O.time, vz=-O.bodies[bloc].state.vel[2], f_unbal=unbalancedForce(useMaxForce=False) )
#### define engines for simulation
v = qt.Controller()
v = qt.View()
rr = qt.Renderer()
rr.intrAllWire = True
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(label='aabb')]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(label='Ig2ssGeom')],
[Ip2_WireMat_WireMat_WirePhys(label='wire_wire'),Ip2_FrictMat_FrictMat_FrictPhys(label='block_wire')],
[Law2_ScGeom_WirePhys_WirePM(label='Law_1'),Law2_ScGeom_FrictPhys_CundallStrack(label='Law_2')]
),
NewtonIntegrator(damping=0.2,gravity=[0,0,-9.81],label='NewtonGravity'),
PyRunner(initRun=True,iterPeriod=100,command='addPlotData()'),
]
#### time step definition for simulation
## critical time step proposed by Bertrand
kn = 16115042 # stiffness of single wire from code, has to be changed if you change the stress-strain curve for the wire
O.dt = 0.2*sqrt(particleMass/(2.*kn))
O.run(200000)
| gpl-2.0 |
matthappens/taskqueue | taskqueue/venv_tq/lib/python2.7/site-packages/boto/route53/status.py | 186 | 1841 | # Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
# www.bluepines.org
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Status(object):
    """Tracks the status of a Route53 change request.

    Attribute names are derived by lower-casing the keys of the change dict;
    the 'Id' value additionally has its '/change/' prefix stripped.
    """

    def __init__(self, route53connection, change_dict):
        self.route53connection = route53connection
        for key, value in change_dict.items():
            if key == 'Id':
                setattr(self, key.lower(), value.replace('/change/', ''))
            else:
                setattr(self, key.lower(), value)

    def update(self):
        """ Update the status of this request."""
        response = self.route53connection.get_change(self.id)
        status = response['GetChangeResponse']['ChangeInfo']['Status']
        self.status = status
        return status

    def __repr__(self):
        return '<Status:%s>' % self.status
| mit |
bulldy80/gyp_unofficial | test/msvs/filters/gyptest-filters-2010.py | 101 | 1517 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that extra filters are pruned correctly for Visual Studio 2010
and later.
"""
import TestGyp
test = TestGyp.TestGyp(formats=['msvs'])
test.run_gyp('filters.gyp', '-G', 'standalone', '-G', 'msvs_version=2010')
test.must_not_exist('no_source_files.vcxproj.filters')
test.must_not_exist('one_source_file.vcxproj.filters')
test.must_not_exist('two_source_files.vcxproj.filters')
test.must_contain('three_files_in_two_folders.vcxproj.filters', '''\
<ItemGroup>
<ClCompile Include="..\\folder1\\a.c">
<Filter>folder1</Filter>
</ClCompile>
<ClCompile Include="..\\folder1\\b.c">
<Filter>folder1</Filter>
</ClCompile>
<ClCompile Include="..\\folder2\\c.c">
<Filter>folder2</Filter>
</ClCompile>
</ItemGroup>
'''.replace('\n', '\r\n'))
test.must_contain('nested_folders.vcxproj.filters', '''\
<ItemGroup>
<ClCompile Include="..\\folder1\\nested\\a.c">
<Filter>folder1\\nested</Filter>
</ClCompile>
<ClCompile Include="..\\folder2\\d.c">
<Filter>folder2</Filter>
</ClCompile>
<ClCompile Include="..\\folder1\\nested\\b.c">
<Filter>folder1\\nested</Filter>
</ClCompile>
<ClCompile Include="..\\folder1\\other\\c.c">
<Filter>folder1\\other</Filter>
</ClCompile>
</ItemGroup>
'''.replace('\n', '\r\n'))
test.pass_test()
| bsd-3-clause |
cngo-github/nupic | tests/integration/nupic/opf/opf_description_template_test/opf_description_template_test.py | 12 | 10082 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests OPF descriptionTemplate.py-based experiment/sub-experiment pair"""
import os
import pprint
import sys
import unittest2 as unittest
from pkg_resources import resource_filename
from nupic.frameworks.opf.opfhelpers import (
loadExperimentDescriptionScriptFromDir,
getExperimentDescriptionInterfaceFromModule
)
from nupic.support.unittesthelpers.testcasebase import (
TestCaseBase as HelperTestCaseBase)
# Our __main__ entry block sets this to an instance of MyTestEnvironment()
g_myEnv = None
# When True, _debugOut() traces are printed to stdout.
g_debug = False
class MyTestEnvironment(object):
  """Locates the nupic examples, the OPF runner script, and this test's
  private experiment directories, asserting that each exists on disk."""

  def __init__(self):
    examplesDir = resource_filename("nupic", os.path.join("..", "examples"))
    _debugOut("examplesDir=<%s>" % (examplesDir,))
    assert os.path.exists(examplesDir), \
        "%s is not present in filesystem" % examplesDir

    # This is where we find OPF binaries (e.g., run_opf_experiment.py, etc.)
    # In the autobuild, it is a read-only directory
    self.__opfBinDir = resource_filename("nupic", os.path.join("..", "scripts"))
    assert os.path.exists(self.__opfBinDir), \
        "%s is not present in filesystem" % self.__opfBinDir
    _debugOut("self.__opfBinDir=<%s>" % self.__opfBinDir)

    # Where this script is running from (our autotest counterpart may have
    # copied it from its original location)
    self.__testRunDir = os.path.abspath(os.path.dirname(__file__))
    _debugOut("self.__testRunDir=<%s>" % self.__testRunDir)

    # Parent directory of our private OPF experiments
    self.__opfExperimentsParentDir = os.path.join(self.__testRunDir,
                                                  "experiments")
    assert os.path.exists(self.__opfExperimentsParentDir), \
        "%s is not present in filesystem" % self.__opfExperimentsParentDir
    _debugOut("self.__opfExperimentsParentDir=<%s>"
              % self.__opfExperimentsParentDir)

  def getOpfRunExperimentPyPath(self):
    # Absolute path to the OPF experiment-runner script.
    return os.path.join(self.__opfBinDir, "run_opf_experiment.py")

  def getOpfExperimentPath(self, experimentName):
    """
    experimentName:     e.g., "gym"; this string will be used to form
                        a directory path to the experiment.

    Returns:            absolute path to the experiment directory
    """
    path = os.path.join(self.__opfExperimentsParentDir, experimentName)
    assert os.path.isdir(path), \
        "Experiment path %s doesn't exist or is not a directory" % (path,)
    return path
class MyTestCaseBase(HelperTestCaseBase):
  """Fixture/helper base class for the OPF description-template tests."""

  def setUp(self):
    """ Method called to prepare the test fixture. This is called immediately
    before calling the test method; any exception raised by this method will be
    considered an error rather than a test failure. The default implementation
    does nothing.
    """
    global g_myEnv
    if not g_myEnv:
      # Setup environment
      g_myEnv = MyTestEnvironment()

  def tearDown(self):
    """ Method called immediately after the test method has been called and the
    result recorded. This is called even if the test method raised an exception,
    so the implementation in subclasses may need to be particularly careful
    about checking internal state. Any exception raised by this method will be
    considered an error rather than a test failure. This method will only be
    called if the setUp() succeeds, regardless of the outcome of the test
    method. The default implementation does nothing.
    """
    # Reset our log items
    self.resetExtraLogItems()

  def shortDescription(self):
    """ Override to force unittest framework to use test method names instead
    of docstrings in the report.
    """
    return None

  def executePositiveOpfExperiment(self, experimentName, short=False):
    """ Executes a positive OPF RunExperiment test as a subprocess and validates
    its exit status.

    experimentName:     e.g., "gym"; this string will be used to form
                        a directory path to the experiment.

    short:              if True, attempt to run the experiment with --testMode
                        flag turned on, which causes all inference and training
                        iteration counts to be overridden with small counts.

    Returns:            result from _executeExternalCmdAndReapOutputs
    """
    opfRunner = g_myEnv.getOpfRunExperimentPyPath()
    opfExpDir = g_myEnv.getOpfExperimentPath(experimentName)
    r = self.__executePositiveRunExperimentTest(runnerPath=opfRunner,
                                                experimentDirPath=opfExpDir,
                                                short=short)
    return r

  def __executePositiveRunExperimentTest(self,
                                         runnerPath,
                                         experimentDirPath,
                                         customOptions=None,
                                         short=False):
    """ Executes a positive RunExperiment.py test and performs
    basic validation

    runnerPath:         experiment running (LPF or OPF RunExperiment.py path)

    experimentDirPath:  directory containing the description.py file of interest

    customOptions:      optional list of extra command-line arguments to pass
                        to the runner (defaults to none)

    short:              if True, attempt to run the experiment with --testMode
                        flag turned on, which causes all inference and training
                        iteration counts to be overridden with small counts.
                        NOTE: if the (possibly aggregated) dataset has fewer
                        rows than the count overrides, then an LPF experiment
                        will fail.

    Returns:            result from _executeExternalCmdAndReapOutputs
    """
    # BUGFIX: the original signature used a mutable default argument
    # (customOptions=[]); a None sentinel avoids sharing state across calls.
    if customOptions is None:
      customOptions = []

    #----------------------------------------
    # Set up args
    command = [
      "python",
      runnerPath,
      experimentDirPath,
    ]
    command.extend(customOptions)
    if short:
      command.append("--testMode")
    self.addExtraLogItem({'command':command})

    #----------------------------------------
    # Execute RunExperiment.py as subprocess and collect results
    r = _executeExternalCmdAndReapOutputs(command)
    self.addExtraLogItem({'result':r})
    _debugOut(("_executeExternalCmdAndReapOutputs(%s)=%s") % (command, r))

    #----------------------------------------
    # Check subprocess exit status
    self.assertEqual(r['exitStatus'], 0,
                     ("Expected status = 0 from %s; got: %s") % \
                     (runnerPath, r['exitStatus'],))
    self.resetExtraLogItems()
    return r
class PositiveTests(MyTestCaseBase):
  """Positive-path tests for the descriptionTemplate sub-experiment support."""

  #========================
  def test_sub_experiment_override(self):
    # The "gym" sub-experiment overrides tpParams.activationThreshold in its
    # base description; verify the override is visible via the experiment
    # description interface.
    expDir = g_myEnv.getOpfExperimentPath("gym")
    module = loadExperimentDescriptionScriptFromDir(expDir)
    expIface = getExperimentDescriptionInterfaceFromModule(module)
    modelDesc = expIface.getModelDescription()
    tpActivationThreshold = modelDesc['modelParams'] \
        ['tpParams']['activationThreshold']
    expectedValue = 12
    self.assertEqual(tpActivationThreshold, expectedValue,
                     "Expected tp activationThreshold=%s, but got %s" % (
                         expectedValue, tpActivationThreshold))

  def test_run_sub_experiment(self):
    # End-to-end: the sub-experiment must run to completion (exit status 0).
    self.executePositiveOpfExperiment(experimentName="gym", short=True)
################################################################################
# Support functions
################################################################################
def _executeExternalCmdAndReapOutputs(args):
  """Run an external command, wait for it, and collect its outputs.

  args:     Args list as defined for the args parameter in subprocess.Popen()

  Returns:  result dictionary:
              {
                'exitStatus':<exit-status-of-external-command>,
                'stdoutData':"string",
                'stderrData':"string"
              }
  """
  import subprocess

  _debugOut("Starting...\n<%s>" % (args,))

  proc = subprocess.Popen(args,
                          env=os.environ,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
  _debugOut("Process started for <%s>" % (args,))

  stdoutData, stderrData = proc.communicate()
  _debugOut("Process completed for <%s>: exit status=%s, "
            "stdoutDataType=%s, stdoutData=<%s>, stderrData=<%s>"
            % (args, proc.returncode, type(stdoutData), stdoutData,
               stderrData))

  result = {
    'exitStatus': proc.returncode,
    'stdoutData': stdoutData,
    'stderrData': stderrData,
  }
  _debugOut("args: <%s>: result:\n%s"
            % (args, pprint.pformat(result, indent=4)))
  return result
def _debugOut(msg):
  """Print a trace line tagged with the calling function and line number,
  but only when the module-level g_debug flag is enabled."""
  if g_debug:
    caller = whoisCallersCaller()
    print("OPF TestDescriptionTemplate (f=%s;line=%s): %s"
          % (caller.function, caller.lineno, msg))
    sys.stdout.flush()
def whoisCallersCaller():
  """
  Returns:      Traceback namedtuple describing our caller's caller
                (two frames up the call stack from this function).
  """
  import inspect

  callerCallerFrame = inspect.stack()[2][0]
  return inspect.getframeinfo(callerCallerFrame)
if __name__ == "__main__":
  # Build the shared test environment before handing control to unittest.
  g_myEnv = MyTestEnvironment()
  unittest.longMessage = True
  unittest.main()
| agpl-3.0 |
amitdhiman000/dais | daisadmin/urls.py | 1 | 1363 | """MyOffers URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^home$', views.index, name='index'),
url(r'^topics-view/$', views.topics_view, name='topics_view'),
url(r'^topic-create/$', views.topic_create, name='topic_create'),
url(r'^topic-update/$', views.topic_update, name='topic_update'),
url(r'^topic-delete/$', views.topic_delete, name='topic_delete'),
#url(r'^polls/view$', views.view_polls, name='view_polls'),
#url(r'^poll/create$', views.create_poll, name='create_poll'),
#url(r'^poll/edit/$', views.edit_poll, name='edit_poll'),
#url(r'^poll/edit/$', views.edit_poll, name='edit_poll'),
]
| apache-2.0 |
joeyjojo/django_offline | src/django/contrib/auth/context_processors.py | 44 | 1304 | # PermWrapper and PermLookupDict proxy the permissions system into objects that
# the template system can understand.
class PermLookupDict(object):
    """Dict-like proxy over one application's permissions for a user, so
    templates can write ``perms.app.perm_name``."""

    def __init__(self, user, module_name):
        self.user = user
        self.module_name = module_name

    def __repr__(self):
        return str(self.user.get_all_permissions())

    def __getitem__(self, perm_name):
        full_name = "%s.%s" % (self.module_name, perm_name)
        return self.user.has_perm(full_name)

    def __nonzero__(self):
        # Truthy when the user has any permission in this app (Python 2 bool
        # protocol, matching the original).
        return self.user.has_module_perms(self.module_name)
class PermWrapper(object):
    """Template-facing wrapper that yields a PermLookupDict per app name,
    enabling ``perms.app`` / ``perms.app.perm_name`` lookups."""

    def __init__(self, user):
        self.user = user

    def __getitem__(self, module_name):
        return PermLookupDict(self.user, module_name)

    def __iter__(self):
        # I am large, I contain multitudes.
        # Iterating all permissions is deliberately unsupported.
        raise TypeError("PermWrapper is not iterable.")
def auth(request):
    """
    Returns context variables required by apps that use Django's authentication
    system.

    If there is no 'user' attribute in the request, uses AnonymousUser (from
    django.contrib.auth).
    """
    if hasattr(request, 'user'):
        current_user = request.user
    else:
        from django.contrib.auth.models import AnonymousUser
        current_user = AnonymousUser()
    context = {
        'user': current_user,
        'perms': PermWrapper(current_user),
    }
    return context
| mit |
jmerkow/ITK | Wrapping/Generators/Python/Tests/StrelFromImageGrayscaleDilateImageFilter.py | 19 | 1459 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
#
# Example on the use of the GrayscaleDilateImageFilter
# and a structuring element built from an image
#
import itk
from sys import argv
itk.auto_progress(2)

# 2-D unsigned-short input, unsigned-char output.
dim = 2
IType = itk.Image[itk.US, dim]
OIType = itk.Image[itk.UC, dim]

# argv[1]: input image, argv[2]: output image, argv[3]: kernel image.
reader = itk.ImageFileReader[IType].New(FileName=argv[1])
reader2 = itk.ImageFileReader[OIType].New(FileName=argv[3])
# Build the structuring element from the third image's foreground pixels.
kernel = itk.FlatStructuringElement[dim].FromImageUC(reader2.GetOutput())
grayscaleFilter = itk.GrayscaleDilateImageFilter[IType, IType, kernel]
grayscaleFilter = grayscaleFilter.New(reader, Kernel=kernel)
cast = itk.CastImageFilter[IType, OIType].New(grayscaleFilter)
writer = itk.ImageFileWriter[OIType].New(cast, FileName=argv[2])
writer.Update()
| apache-2.0 |
rzhxeo/youtube-dl | youtube_dl/extractor/tmz.py | 30 | 1229 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class TMZIE(InfoExtractor):
    """Extractor for video pages on tmz.com."""

    _VALID_URL = r'https?://(?:www\.)?tmz\.com/videos/(?P<id>[^/]+)/?'
    _TEST = {
        'url': 'http://www.tmz.com/videos/0_okj015ty/',
        'md5': '791204e3bf790b1426cb2db0706184c0',
        'info_dict': {
            'id': '0_okj015ty',
            'url': 'http://tmz.vo.llnwd.net/o28/2014-03/13/0_okj015ty_0_rt8ro3si_2.mp4',
            'ext': 'mp4',
            'title': 'Kim Kardashian\'s Boobs Unlock a Mystery!',
            'description': 'Did Kim Kardasain try to one-up Khloe by one-upping Kylie??? Or is she just showing off her amazing boobs?',
            'thumbnail': r're:http://cdnbakmi\.kaltura\.com/.*thumbnail.*',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        return {
            'id': video_id,
            # The direct media URL is published in a <meta name="VideoURL"> tag.
            'url': self._html_search_meta('VideoURL', webpage, fatal=True),
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._html_search_meta('ThumbURL', webpage),
        }
| unlicense |
cxxgtxy/tensorflow | tensorflow/contrib/tensor_forest/python/kernel_tests/tree_predictions_op_test.py | 78 | 9018 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.tree_predictions_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class TreePredictionsDenseTest(test_util.TensorFlowTestCase):
  """Tests the tree_predictions op with dense input features."""

  def setUp(self):
    # Placeholder for the unused sparse-input arguments.
    self.nothing = []

    # Data spec: two dense float features, no sparse features.
    spec_proto = data_ops.TensorForestDataSpec()
    f1 = spec_proto.dense.add()
    f1.name = 'f1'
    f1.original_type = data_ops.DATA_FLOAT
    f1.size = 1
    f2 = spec_proto.dense.add()
    f2.name = 'f2'
    f2.original_type = data_ops.DATA_FLOAT
    f2.size = 1
    spec_proto.dense_features_size = 2
    self.data_spec = spec_proto.SerializeToString()

  def testSimple(self):
    input_data = [[-1., 0.], [-1., 2.],  # node 1
                  [1., 0.], [1., -2.]]  # node 2
    tree = [[1, 0], [-1, 0], [-1, 0]]
    tree_thresholds = [0., 0., 0.]
    node_pcw = [[1.0, 0.3, 0.4, 0.3], [1.0, 0.1, 0.1, 0.8],
                [1.0, 0.5, 0.25, 0.25]]
    with self.test_session():
      predictions = tensor_forest_ops.tree_predictions(
          input_data,
          self.nothing,
          self.nothing,
          self.nothing,
          tree,
          tree_thresholds,
          node_pcw,
          input_spec=self.data_spec,
          valid_leaf_threshold=1)
      self.assertAllClose([[0.1, 0.1, 0.8], [0.1, 0.1, 0.8],
                           [0.5, 0.25, 0.25], [0.5, 0.25, 0.25]],
                          predictions.eval())

  def testBackoffToParent(self):
    input_data = [
        [-1., 0.],
        [-1., 2.],  # node 1
        [1., 0.],
        [1., -2.]
    ]  # node 2
    tree = [[1, 0], [-1, 0], [-1, 0]]
    tree_thresholds = [0., 0., 0.]
    node_pcw = [[15.0, 3.0, 9.0, 3.0], [5.0, 1.0, 1.0, 3.0],
                [25.0, 5.0, 20.0, 0.0]]
    with self.test_session():
      predictions = tensor_forest_ops.tree_predictions(
          input_data,
          self.nothing,
          self.nothing,
          self.nothing,
          tree,
          tree_thresholds,
          node_pcw,
          valid_leaf_threshold=10,
          input_spec=self.data_spec)
      # Node 2 has enough data, but Node 1 needs to combine with the parent
      # counts.
      self.assertAllClose([[0.2, 0.4, 0.4], [0.2, 0.4, 0.4], [0.2, 0.8, 0.0],
                           [0.2, 0.8, 0.0]], predictions.eval())

  def testNoInput(self):
    # Zero rows in should produce a (0, num_classes) output.
    input_data = []
    tree = [[1, 0], [-1, 0], [-1, 0]]
    tree_thresholds = [0., 0., 0.]
    node_pcw = [[1.0, 0.3, 0.4, 0.3], [1.0, 0.1, 0.1, 0.8],
                [1.0, 0.5, 0.25, 0.25]]
    with self.test_session():
      predictions = tensor_forest_ops.tree_predictions(
          input_data,
          self.nothing,
          self.nothing,
          self.nothing,
          tree,
          tree_thresholds,
          node_pcw,
          valid_leaf_threshold=10,
          input_spec=self.data_spec)
      self.assertEquals((0, 3), predictions.eval().shape)

  def testBadInput(self):
    input_data = [
        [-1., 0.],
        [-1., 2.],  # node 1
        [1., 0.],
        [1., -2.]
    ]  # node 2
    tree = [[1, 0], [-1, 0], [-1, 0]]
    tree_thresholds = [0., 0.]  # not enough nodes.
    node_pcw = [[1.0, 0.3, 0.4, 0.3], [1.0, 0.1, 0.1, 0.8],
                [1.0, 0.5, 0.25, 0.25]]
    with self.test_session():
      # The op must reject mismatched node counts with a descriptive error.
      with self.assertRaisesOpError(
          'Number of nodes should be the same in tree, tree_thresholds '
          'and node_pcw.'):
        predictions = tensor_forest_ops.tree_predictions(
            input_data,
            self.nothing,
            self.nothing,
            self.nothing,
            tree,
            tree_thresholds,
            node_pcw,
            valid_leaf_threshold=10,
            input_spec=self.data_spec)
        self.assertEquals((0, 3), predictions.eval().shape)
class TreePredictionsSparseTest(test_util.TensorFlowTestCase):
  """tree_predictions tests where all input features are sparse."""

  def setUp(self):
    self.nothing = []

    # Data spec: two sparse float features (f1 of size 1, f2 of size 9) and
    # no dense features at all.
    spec_proto = data_ops.TensorForestDataSpec()
    f1 = spec_proto.sparse.add()
    f1.name = 'f1'
    f1.original_type = data_ops.DATA_FLOAT
    f1.size = 1

    f2 = spec_proto.sparse.add()
    f2.name = 'f2'
    f2.original_type = data_ops.DATA_FLOAT
    f2.size = 9
    spec_proto.dense_features_size = 0
    self.data_spec = spec_proto.SerializeToString()

  def testSparseInput(self):
    # Three input rows given in SparseTensor (indices/values/shape) form.
    sparse_shape = [3, 10]
    sparse_indices = [[0, 0], [0, 4], [0, 9],
                      [1, 0], [1, 7],
                      [2, 0]]
    sparse_values = [3.0, -1.0, 0.5,
                     1.5, 6.0,
                     -2.0]

    tree = [[1, 0], [-1, 0], [-1, 0]]
    tree_thresholds = [0., 0., 0.]
    node_pcw = [[1.0, 0.3, 0.4, 0.3], [1.0, 0.1, 0.1, 0.8],
                [1.0, 0.5, 0.25, 0.25]]

    with self.test_session():
      predictions = tensor_forest_ops.tree_predictions(
          self.nothing,
          sparse_indices,
          sparse_values,
          sparse_shape,
          tree,
          tree_thresholds,
          node_pcw,
          valid_leaf_threshold=1,
          input_spec=self.data_spec)

      self.assertAllClose([[0.5, 0.25, 0.25],
                           [0.5, 0.25, 0.25],
                           [0.1, 0.1, 0.8]],
                          predictions.eval())

  def testSparseInputDefaultIsZero(self):
    # The root splits on feature 7 with threshold 3.0; rows that never set
    # feature 7 presumably see an implicit default of zero (hence the test
    # name) -- TODO confirm against the op's documentation.
    sparse_shape = [3, 10]
    sparse_indices = [[0, 0], [0, 4], [0, 9],
                      [1, 0], [1, 7],
                      [2, 0]]
    sparse_values = [3.0, -1.0, 0.5,
                     1.5, 6.0,
                     -2.0]

    tree = [[1, 7], [-1, 0], [-1, 0]]
    tree_thresholds = [3.0, 0., 0.]
    node_pcw = [[1.0, 0.3, 0.4, 0.3], [1.0, 0.1, 0.1, 0.8],
                [1.0, 0.5, 0.25, 0.25]]

    with self.test_session():
      predictions = tensor_forest_ops.tree_predictions(
          self.nothing,
          sparse_indices,
          sparse_values,
          sparse_shape,
          tree,
          tree_thresholds,
          node_pcw,
          valid_leaf_threshold=1,
          input_spec=self.data_spec)

      self.assertAllClose([[0.1, 0.1, 0.8],
                           [0.5, 0.25, 0.25],
                           [0.1, 0.1, 0.8]],
                          predictions.eval())
class TreePredictionsMixedTest(test_util.TensorFlowTestCase):
  """tree_predictions tests mixing dense and sparse input features."""

  def setUp(self):
    self.nothing = []

    # Data spec: dense float f1 (size 2), dense categorical f2 (size 1),
    # plus a variable-size (-1) sparse float feature f3.
    spec_proto = data_ops.TensorForestDataSpec()
    f1 = spec_proto.dense.add()
    f1.name = 'f1'
    f1.original_type = data_ops.DATA_FLOAT
    f1.size = 2

    f2 = spec_proto.dense.add()
    f2.name = 'f2'
    f2.original_type = data_ops.DATA_CATEGORICAL
    f2.size = 1

    f3 = spec_proto.sparse.add()
    f3.name = 'f3'
    f3.original_type = data_ops.DATA_FLOAT
    f3.size = -1

    spec_proto.dense_features_size = 3
    self.data_spec = spec_proto.SerializeToString()

  def testSimpleMixed(self):
    # Node indices:
    #        0       1       2       3        4        5        6
    tree = [[1, 0], [3, 2], [5, 5], [-1, 0], [-1, 0], [-1, 0], [-1, 0]]
    tree_thresholds = [0., 15., 1., 0., 0., 0., 0.]
    node_pcw = [[1.0, 0., 1.0, 0.4, 0.3], [1.0, 0., 0.1, 0.1, 0.8],
                [1.0, 0., 0.5, 0.25, 0.25], [1.0, 1., 0., 0., 0.],
                [1.0, 0., 1., 0., 0.], [1.0, 0., 0., 1., 0.],
                [1.0, 0., 0., 0., 1.]]

    input_data = [
        [-1., 0., 15.],  # node 3
        [-1., 2., 11.],  # node 4
        [1., 0., 11.],
        [1., -2., 30.]
    ]
    sparse_shape = [4, 5]
    sparse_indices = [
        [0, 0],
        [0, 1],
        [0, 4],
        [1, 0],
        [1, 2],
        [2, 1],  # node 5
        [3, 2]
    ]  # node 6
    sparse_values = [3.0, -1.0, 0.5, 1.5, 6.0, -2.0, 2.0]

    with self.test_session():
      predictions = tensor_forest_ops.tree_predictions(
          input_data,
          sparse_indices,
          sparse_values,
          sparse_shape,
          tree,
          tree_thresholds,
          node_pcw,
          valid_leaf_threshold=1,
          input_spec=self.data_spec)

      # Each row lands in a distinct leaf, so predictions are one-hot.
      self.assertAllClose([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.],
                           [0., 0., 0., 1.]], predictions.eval())
# Allow running this test file directly.
if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
mikekestemont/keras | tests/auto/keras/layers/test_core.py | 48 | 5211 | import unittest
import numpy as np
from numpy.testing import assert_allclose
import theano
from keras.layers import core
class TestLayerBase(unittest.TestCase):
    """Tests for the input/output plumbing of the base core.Layer."""

    def test_input_output(self):
        nb_samples = 10
        input_dim = 5
        layer = core.Layer()

        # As long as there is no input, an error should be raised.
        for train in [True, False]:
            self.assertRaises(AttributeError, layer.get_input, train)
            self.assertRaises(AttributeError, layer.get_output, train)

        # Once an input is provided, it should be reachable through the
        # appropriate getters.
        # NOTE(review): `input` shadows the builtin of the same name.
        input = np.ones((nb_samples, input_dim))
        layer.input = theano.shared(value=input)
        for train in [True, False]:
            assert_allclose(layer.get_input(train).eval(), input)
            assert_allclose(layer.get_output(train).eval(), input)

    def test_connections(self):
        nb_samples = 10
        input_dim = 5
        layer1 = core.Layer()
        layer2 = core.Layer()

        input = np.ones((nb_samples, input_dim))
        layer1.input = theano.shared(value=input)

        # As long as there is no previous layer, an error should be raised.
        for train in [True, False]:
            self.assertRaises(AttributeError, layer2.get_input, train)

        # After connecting, input of layer1 should be passed through.
        layer2.set_previous(layer1)
        for train in [True, False]:
            assert_allclose(layer2.get_input(train).eval(), input)
            assert_allclose(layer2.get_output(train).eval(), input)
class TestConfigParams(unittest.TestCase):
    """
    Test the constructor, config and params functions of all layers in core.
    """

    def _check_layer(self, layer):
        """Shared assertions: config must be a dict, params an iterable."""
        config = layer.get_config()
        assert (type(config) == dict)
        params = layer.get_params()
        # Typically a list or a tuple, but may be any iterable
        assert hasattr(params, '__iter__')

    def test_base(self):
        self._check_layer(core.Layer())

    def test_masked(self):
        self._check_layer(core.MaskedLayer())

    def test_merge(self):
        branch_a = core.Layer()
        branch_b = core.Layer()
        self._check_layer(core.Merge([branch_a, branch_b]))

    def test_dropout(self):
        self._check_layer(core.Dropout(0.5))

    def test_activation(self):
        self._check_layer(core.Activation('linear'))

    def test_reshape(self):
        self._check_layer(core.Reshape(10, 10))

    def test_flatten(self):
        self._check_layer(core.Flatten())

    def test_repeat_vector(self):
        self._check_layer(core.RepeatVector(10))

    def test_dense(self):
        self._check_layer(core.Dense(10, 10))

    def test_act_reg(self):
        self._check_layer(core.ActivityRegularization(0.5, 0.5))

    def test_time_dist_dense(self):
        self._check_layer(core.TimeDistributedDense(10, 10))

    def test_autoencoder(self):
        encoder = core.Layer()
        decoder = core.Layer()
        self._check_layer(core.AutoEncoder(encoder, decoder))

    def test_maxout_dense(self):
        self._check_layer(core.MaxoutDense(10, 10))
class TestMasking(unittest.TestCase):
    """Test the Masking class"""

    def test_sequences(self):
        """Test masking sequences with zeroes as padding"""
        # integer inputs, one per timestep, like embeddings
        layer = core.Masking()
        func = theano.function([layer.input], layer.get_output_mask())
        self.assertTrue(np.all(
            # get mask for this input
            func(np.array(
                [[[1], [2], [3], [0]],
                 [[0], [4], [5], [0]]], dtype=np.int32)) ==
            # This is the expected output mask, one dimension less
            np.array([[1, 1, 1, 0], [0, 1, 1, 0]])))

    def test_non_zero(self):
        """Test masking with non-zero mask value"""
        layer = core.Masking(5)
        func = theano.function([layer.input], layer.get_output_mask())
        self.assertTrue(np.all(
            # A timestep is masked only when every one of its values equals
            # the mask value (5); otherwise it remains visible.
            func(np.array(
                [[[1, 1], [2, 1], [3, 1], [5, 5]],
                 [[1, 5], [5, 0], [0, 0], [0, 0]]], dtype=np.int32)) ==
            # This is the expected output mask, one dimension less
            np.array([[1, 1, 1, 0], [1, 1, 1, 1]])))

    def test_non_zero_output(self):
        """Test output of masking layer with non-zero mask value"""
        layer = core.Masking(5)
        func = theano.function([layer.input], layer.get_output())
        self.assertTrue(np.all(
            # get output for this input, replace padding with 0
            func(np.array(
                [[[1, 1], [2, 1], [3, 1], [5, 5]],
                 [[1, 5], [5, 0], [0, 0], [0, 0]]], dtype=np.int32)) ==
            # This is the expected output
            np.array([[[1, 1], [2, 1], [3, 1], [0, 0]],
                      [[1, 5], [5, 0], [0, 0], [0, 0]]])))
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| mit |
peterfpeterson/mantid | Framework/PythonInterface/plugins/algorithms/ExtractMonitors.py | 3 | 4216 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.simpleapi import *
from mantid.kernel import *
from mantid.api import (MatrixWorkspaceProperty, DataProcessorAlgorithm, PropertyMode)
class ExtractMonitors(DataProcessorAlgorithm):
    """Splits a workspace's spectra into detector-only and monitor-only
    output workspaces. At least one of the two outputs must be requested.
    """

    def category(self):
        """Return the algorithm's category shown in the GUI."""
        return 'Utility\\Workspaces'

    def summary(self):
        """Return a one-line description of the algorithm."""
        return 'Separates the monitors and/or detectors into separate workspaces.'

    def seeAlso(self):
        """Return related algorithms for documentation cross-links."""
        return ["ExtractMonitorWorkspace"]

    def PyInit(self):
        """Declare the input workspace and the two optional outputs."""
        self.declareProperty(MatrixWorkspaceProperty('InputWorkspace', '',
                                                     direction=Direction.Input),
                             doc='A workspace with detectors and monitors')

        self.declareProperty(MatrixWorkspaceProperty('DetectorWorkspace', '',
                                                     direction=Direction.Output,
                                                     optional=PropertyMode.Optional),
                             doc='The output workspace with detectors only')

        self.declareProperty(MatrixWorkspaceProperty('MonitorWorkspace', '',
                                                     direction=Direction.Output,
                                                     optional=PropertyMode.Optional),
                             doc='The output workspace with monitors only')

    def validateInputs(self):
        """Require at least one output property, with distinct names."""
        issues = {}

        detector_ws_name = self.getProperty("DetectorWorkspace").valueAsStr
        monitor_ws_name = self.getProperty("MonitorWorkspace").valueAsStr

        if not detector_ws_name and not monitor_ws_name:
            # Fixed: the message previously referred to non-existent
            # 'DetectorsWorkspace'/'MonitorsWorkspace' property names.
            msg = "Must specify one of DetectorWorkspace or MonitorWorkspace"
            issues["DetectorWorkspace"] = msg
            issues["MonitorWorkspace"] = msg
        elif detector_ws_name == monitor_ws_name:
            msg = "DetectorWorkspace and MonitorWorkspace must be different"
            issues["DetectorWorkspace"] = msg
            issues["MonitorWorkspace"] = msg

        return issues

    def PyExec(self):
        """Partition spectra by isMonitor() and emit the requested outputs."""
        in_ws = self.getProperty("InputWorkspace").value
        detector_ws_name = self.getProperty("DetectorWorkspace").valueAsStr
        monitor_ws_name = self.getProperty("MonitorWorkspace").valueAsStr

        # getMonitorWorkspace raises RuntimeError when no monitor workspace
        # is attached, which is the expected state here; finding one attached
        # already is an error.
        try:
            mon = in_ws.getMonitorWorkspace()
            raise ValueError("Monitor workspace already exists, called: " + mon.name() + ".")
        except RuntimeError:
            pass

        # Partition workspace indices into monitor and detector spectra.
        monitors = []
        detectors = []
        spectrumInfo = in_ws.spectrumInfo()
        for i in range(in_ws.getNumberHistograms()):
            try:
                monitors.append(i) if spectrumInfo.isMonitor(i) else detectors.append(i)
            except RuntimeError:
                self.log().warning("Missing detector at " + str(i))

        if detector_ws_name:
            if detectors:
                detector_ws = ExtractSpectra(InputWorkspace=in_ws, WorkspaceIndexList=detectors, StoreInADS=False,
                                             OutputWorkspace=self.getPropertyValue("DetectorWorkspace"))
                self.setProperty("DetectorWorkspace", detector_ws)
            else:
                self.log().error("No detectors found in input workspace. No detector output workspace created.")

        if monitor_ws_name:
            if monitors:
                monitor_ws = ExtractSpectra(InputWorkspace=in_ws, WorkspaceIndexList=monitors, StoreInADS=False,
                                            OutputWorkspace=self.getPropertyValue("MonitorWorkspace"))
                self.setProperty("MonitorWorkspace", monitor_ws)
            else:
                self.log().error("No monitors found in input workspace. No monitor output workspace created.")

        # Link the two outputs so the detector workspace knows its monitors.
        if detector_ws_name and detectors and monitor_ws_name and monitors:
            detector_ws.setMonitorWorkspace(monitor_ws)
# Register the algorithm so the Mantid framework can instantiate it by name.
AlgorithmFactory.subscribe(ExtractMonitors)
| gpl-3.0 |
chouseknecht/ansible | lib/ansible/modules/network/avi/avi_snmptrapprofile.py | 28 | 3563 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Standard Ansible module metadata consumed by the ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# Module documentation (YAML in a string) rendered by ansible-doc.
DOCUMENTATION = '''
---
module: avi_snmptrapprofile
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>

short_description: Module for setup of SnmpTrapProfile Avi RESTful Object
description:
    - This module is used to configure SnmpTrapProfile object
    - more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
    state:
        description:
            - The state that should be applied on the entity.
        default: present
        choices: ["absent", "present"]
    avi_api_update_method:
        description:
            - Default method for object update is HTTP PUT.
            - Setting to patch will override that behavior to use HTTP PATCH.
        version_added: "2.5"
        default: put
        choices: ["put", "patch"]
    avi_api_patch_op:
        description:
            - Patch operation to use when using avi_api_update_method as patch.
        version_added: "2.5"
        choices: ["add", "replace", "delete"]
    name:
        description:
            - A user-friendly name of the snmp trap configuration.
        required: true
    tenant_ref:
        description:
            - It is a reference to an object of type tenant.
    trap_servers:
        description:
            - The ip address or hostname of the snmp trap destination server.
    url:
        description:
            - Avi controller URL of the object.
    uuid:
        description:
            - Uuid of the snmp trap profile object.
extends_documentation_fragment:
    - avi
'''

# Usage example shown by ansible-doc.
EXAMPLES = """
- name: Example to create SnmpTrapProfile object
  avi_snmptrapprofile:
    controller: 10.10.25.42
    username: admin
    password: something
    state: present
    name: sample_snmptrapprofile
"""

# Return-value documentation shown by ansible-doc.
RETURN = '''
obj:
    description: SnmpTrapProfile (api/snmptrapprofile) object
    returned: success, changed
    type: dict
'''

from ansible.module_utils.basic import AnsibleModule
try:
    from ansible.module_utils.network.avi.avi import (
        avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
    # SDK missing: flag it so main() can fail with a helpful message.
    HAS_AVI = False
def main():
    """Module entry point.

    Declares the SnmpTrapProfile argument spec, merges in the common Avi
    connection options, and hands control to the shared Avi Ansible API
    helper. Fails early with a helpful message if the Avi SDK is missing.
    """
    specs = {
        'state': dict(default='present', choices=['absent', 'present']),
        'avi_api_update_method': dict(default='put', choices=['put', 'patch']),
        'avi_api_patch_op': dict(choices=['add', 'replace', 'delete']),
        'name': dict(type='str', required=True),
        'tenant_ref': dict(type='str'),
        'trap_servers': dict(type='list'),
        'url': dict(type='str'),
        'uuid': dict(type='str'),
    }
    # Common options (controller, username, password, ...) shared by all
    # Avi modules; none of their keys overlap with the spec above.
    specs.update(avi_common_argument_spec())

    module = AnsibleModule(argument_spec=specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'snmptrapprofile',
                           set([]))
# Ansible executes the module file directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
z1gm4/desarrollo_web_udp | condominioseguro/urls.py | 1 | 1345 | """condominioseguro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import *
from django.contrib import admin
from condominioseguro.views import current_datetime,hours_ahead, login, logout_view, login_view,bet, betmatch, check_bet, base_view,profile
from desarrolloweb.models import Apuesta, Usuario
from django.template import RequestContext
#django.contrib.auth.views.login
# URL routing table: maps request paths to the view callables imported above.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^time/$', current_datetime),
    url(r'^time/plus/(\d+)/$', hours_ahead),  # captured digits passed to the view
    url(r'^$', base_view),
    url(r'^logins/$', login_view),
    url(r'^logouts/$', logout_view),
    url(r'^bet/$', bet),
    url(r'^betlist/$', check_bet),
    url(r'^match/$', betmatch),
    url(r'^profile/$', profile)
]
| gpl-3.0 |
brianrodri/oppia | core/storage/subtopic/gae_models.py | 2 | 6435 | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for subtopics and related constructs."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.platform import models
# Load only the base model module; the subtopic models below extend it.
(base_models,) = models.Registry.import_models([models.NAMES.base_model])

datastore_services = models.Registry.import_datastore_services()
class SubtopicPageSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
    """Storage model for the metadata for a subtopic page snapshot."""

    # No fields or behavior beyond what BaseSnapshotMetadataModel provides.
    pass
class SubtopicPageCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
    """Log of commits to subtopic pages.

    A new instance of this model is created and saved every time a commit to
    SubtopicPageModel occurs.

    The id for this model is of the form
    'subtopicpage-[subtopic_page_id]-[version]'.
    """

    # The id of the subtopic page being edited.
    subtopic_page_id = (
        datastore_services.StringProperty(indexed=True, required=True))

    @classmethod
    def get_instance_id(cls, subtopic_page_id, version):
        """This function returns the generated id for the get_commit function
        in the parent class.

        Args:
            subtopic_page_id: str. The id of the subtopic page being edited.
            version: int. The version number of the subtopic page after the
                commit.

        Returns:
            str. The commit id with the subtopic page id and version number.
        """
        return 'subtopicpage-%s-%s' % (subtopic_page_id, version)

    @staticmethod
    def get_model_association_to_user():
        """The history of commits is not relevant for the purposes of Takeout
        since commits don't contain relevant data corresponding to users.
        """
        return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER

    @classmethod
    def get_export_policy(cls):
        """Model contains data corresponding to a user, but this isn't exported
        because the history of commits isn't deemed as useful for users since
        commit logs don't contain relevant data corresponding to those users.
        """
        # Only the field added by this subclass needs a policy entry; the
        # base class covers its own fields.
        return dict(super(cls, cls).get_export_policy(), **{
            'subtopic_page_id': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })
class SubtopicPageSnapshotContentModel(base_models.BaseSnapshotContentModel):
    """Storage model for the content of a subtopic page snapshot."""

    @staticmethod
    def get_deletion_policy():
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE
class SubtopicPageModel(base_models.VersionedModel):
    """Model for storing Subtopic pages.

    This stores the HTML data for a subtopic page.
    """

    SNAPSHOT_METADATA_CLASS = SubtopicPageSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = SubtopicPageSnapshotContentModel
    COMMIT_LOG_ENTRY_CLASS = SubtopicPageCommitLogEntryModel
    # Reverting to an older snapshot is not supported for subtopic pages.
    ALLOW_REVERT = False

    # The topic id that this subtopic is a part of.
    topic_id = datastore_services.StringProperty(required=True, indexed=True)
    # The json data of the subtopic consisting of subtitled_html,
    # recorded_voiceovers and written_translations fields.
    page_contents = datastore_services.JsonProperty(required=True)
    # The schema version for the page_contents field.
    page_contents_schema_version = datastore_services.IntegerProperty(
        required=True, indexed=True)
    # The ISO 639-1 code for the language this subtopic page is written in.
    language_code = (
        datastore_services.StringProperty(required=True, indexed=True))

    @staticmethod
    def get_deletion_policy():
        """Model doesn't contain any data directly corresponding to a user."""
        return base_models.DELETION_POLICY.NOT_APPLICABLE

    def _trusted_commit(
            self, committer_id, commit_type, commit_message, commit_cmds):
        """Record the event to the commit log after the model commit.

        Note that this extends the superclass method.

        Args:
            committer_id: str. The user_id of the user who committed the
                change.
            commit_type: str. The type of commit. Possible values are in
                core.storage.base_models.COMMIT_TYPE_CHOICES.
            commit_message: str. The commit description message.
            commit_cmds: list(dict). A list of commands, describing changes
                made in this model, which should give sufficient information to
                reconstruct the commit. Each dict always contains:
                    cmd: str. Unique command.
                and then additional arguments for that command.
        """
        super(SubtopicPageModel, self)._trusted_commit(
            committer_id, commit_type, commit_message, commit_cmds)

        # Mirror every commit into the per-page commit log so page history
        # can be reconstructed.
        subtopic_page_commit_log_entry = SubtopicPageCommitLogEntryModel.create(
            self.id, self.version, committer_id, commit_type, commit_message,
            commit_cmds, constants.ACTIVITY_STATUS_PUBLIC, False
        )
        subtopic_page_commit_log_entry.subtopic_page_id = self.id
        subtopic_page_commit_log_entry.update_timestamps()
        subtopic_page_commit_log_entry.put()

    @classmethod
    def get_export_policy(cls):
        """Model doesn't contain any data directly corresponding to a user."""
        return dict(super(cls, cls).get_export_policy(), **{
            'topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'page_contents': base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'page_contents_schema_version':
                base_models.EXPORT_POLICY.NOT_APPLICABLE,
            'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE
        })
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.