repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
dimagol/trex-core
|
scripts/external_libs/scapy-2.3.1/python3/scapy/layers/llmnr.py
|
22
|
2396
|
from scapy.fields import *
from scapy.packet import *
from scapy.layers.inet import UDP
from scapy.layers.dns import DNSQRField, DNSRRField, DNSRRCountField
"""
LLMNR (Link Local Multicast Node Resolution).
[RFC 4795]
"""
#############################################################################
### LLMNR (RFC4795) ###
#############################################################################
# LLMNR is based on the DNS packet format (RFC1035 Section 4)
# RFC also envisions LLMNR over TCP. Like vista, we don't support it -- arno
# Well-known LLMNR multicast destination addresses (RFC 4795, section 2).
_LLMNR_IPv6_mcast_Addr = "FF02:0:0:0:0:0:1:3"
_LLMNR_IPv4_mcast_addr = "224.0.0.252"
class LLMNRQuery(Packet):
    """LLMNR query packet (RFC 4795).

    The wire format mirrors the DNS header (RFC 1035 section 4): a 16-bit
    transaction id, a 16-bit flags word, four record counts, and the
    question / answer / authority / additional record sections.
    """
    name = "Link Local Multicast Node Resolution - Query"
    fields_desc = [ ShortField("id", 0),
                    BitField("qr", 0, 1),                        # 0 = query, 1 = response
                    BitEnumField("opcode", 0, 4, { 0:"QUERY" }),
                    BitField("c", 0, 1),                         # conflict bit
                    # 2 bits covering the TC and T positions of the RFC 4795 header
                    BitField("tc", 0, 2),
                    BitField("z", 0, 4),                         # reserved, must be zero
                    BitEnumField("rcode", 0, 4, { 0:"ok" }),
                    # Counts default to None so scapy computes them from the lists.
                    DNSRRCountField("qdcount", None, "qd"),
                    DNSRRCountField("ancount", None, "an"),
                    DNSRRCountField("nscount", None, "ns"),
                    DNSRRCountField("arcount", None, "ar"),
                    DNSQRField("qd", "qdcount"),
                    DNSRRField("an", "ancount"),
                    DNSRRField("ns", "nscount"),
                    DNSRRField("ar", "arcount",0)]
    overload_fields = {UDP: {"sport": 5355, "dport": 5355 }}

    def hashret(self):
        # Pair queries with responses by transaction id.
        return struct.pack("!H", self.id)
class LLMNRResponse(LLMNRQuery):
    """LLMNR response: same wire format as a query, with qr defaulting to 1."""
    name = "Link Local Multicast Node Resolution - Response"
    qr = 1

    def answers(self, other):
        # A response answers a query when the transaction ids match and the
        # qr bits are set the right way around.
        return (isinstance(other, LLMNRQuery) and
                self.id == other.id and
                self.qr == 1 and
                other.qr == 0)
def _llmnr_dispatcher(x, *args, **kargs):
    """Guess LLMNRQuery vs LLMNRResponse from the QR bit of the raw payload.

    Falls back to ``conf.raw_layer`` for payloads too short to carry the
    header flags.
    """
    cls = conf.raw_layer
    if len(x) >= 3:
        # The flags word starts at byte 2 (after the 16-bit id); its high
        # bit is QR (1 = response).  The original code read x[4], which is
        # past the flags field and raises IndexError on 3-4 byte payloads.
        flags_hi = x[2]
        if not isinstance(flags_hi, int):
            # py2 str indexing yields a 1-char string; py3 bytes yields int.
            flags_hi = ord(flags_hi)
        if flags_hi & 0x80:  # Response
            cls = LLMNRResponse
        else:                # Query
            cls = LLMNRQuery
    return cls(x, *args, **kargs)
# Route UDP port 5355 traffic through the dispatcher above (instead of a
# fixed class binding) so both queries and responses dissect correctly.
bind_bottom_up(UDP, _llmnr_dispatcher, { "dport": 5355 })
bind_bottom_up(UDP, _llmnr_dispatcher, { "sport": 5355 })

# Example usage:
# LLMNRQuery(id=RandShort(), qd=DNSQR(qname="vista.")))
|
apache-2.0
|
awkspace/ansible
|
lib/ansible/plugins/terminal/cnos.py
|
86
|
2824
|
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Contains terminal Plugin methods for CNOS Config Module
# Lenovo Networking
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
    """Terminal plugin for Lenovo CNOS network devices.

    Supplies the prompt/error regexes the network_cli connection uses to
    delimit device output, plus hooks to prepare the shell and to enter and
    leave privileged (enable) mode.
    """

    # Patterns that mark the end of a device prompt on stdout.
    terminal_stdout_re = [
        re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
        re.compile(br">[\r\n]?")
    ]

    # Patterns indicating the device reported an error for the last command.
    terminal_stderr_re = [
        re.compile(br"% ?Error"),
        re.compile(br"% ?Bad secret"),
        re.compile(br"invalid input", re.I),
        re.compile(br"(?:incomplete|ambiguous) command", re.I),
        re.compile(br"connection timed out", re.I),
        re.compile(br"[^\r\n]+ not found"),
        re.compile(br"'[^']' +returned error code: ?\d+"),
    ]

    def on_open_shell(self):
        """Disable output paging so long command output is not interrupted."""
        try:
            for cmd in (b'\n', b'terminal length 0\n'):
                self._exec_cli_command(cmd)
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')

    def on_become(self, passwd=None):
        """Enter privileged (enable) mode, answering the password prompt
        with *passwd* when one is supplied.

        No-op when the prompt already ends with ``#`` (already privileged).
        """
        if self._get_prompt().endswith(b'#'):
            return

        cmd = {u'command': u'enable'}
        if passwd:
            # Note: python-3.5 cannot combine u"" and r"" together.  Thus make
            # an r string and use to_text to ensure it's text
            # on both py2 and py3.
            cmd[u'prompt'] = to_text(r"[\r\n]?password: $",
                                     errors='surrogate_or_strict')
            cmd[u'answer'] = passwd

        try:
            self._exec_cli_command(to_bytes(json.dumps(cmd),
                                            errors='surrogate_or_strict'))
        except AnsibleConnectionFailure:
            msg = 'unable to elevate privilege to enable mode'
            raise AnsibleConnectionFailure(msg)

    def on_unbecome(self):
        """Drop out of privileged mode, backing out of config mode first."""
        prompt = self._get_prompt()
        if prompt is None:
            # if prompt is None most likely the terminal is hung up at a prompt
            return

        if b'(config' in prompt:
            self._exec_cli_command(b'end')
            self._exec_cli_command(b'disable')
        elif prompt.endswith(b'#'):
            self._exec_cli_command(b'disable')
|
gpl-3.0
|
alsoicode/django-filer
|
filer/utils/filer_easy_thumbnails.py
|
12
|
3444
|
#-*- coding: utf-8 -*-
from easy_thumbnails.files import Thumbnailer
import os
import re
from filer import settings as filer_settings
# match the source filename using `__` as the seperator. ``opts_and_ext`` is non
# greedy so it should match the last occurence of `__`.
# in ``ThumbnailerNameMixin.get_thumbnail_name`` we ensure that there is no `__`
# in the opts part.
RE_ORIGINAL_FILENAME = re.compile(r"^(?P<source_filename>.*)__(?P<opts_and_ext>.*?)$")
def thumbnail_to_original_filename(thumbnail_name):
    """Recover the source filename encoded in a thumbnail name.

    Returns None when *thumbnail_name* does not follow the
    ``<source>__<opts>.<ext>`` convention produced by ThumbnailerNameMixin.
    """
    match = RE_ORIGINAL_FILENAME.match(thumbnail_name)
    return match.group('source_filename') if match else None
class ThumbnailerNameMixin(object):
    """Mixin that generates reversible thumbnail names.

    Names take the form ``<source>__<opts>.<ext>`` so that
    ``thumbnail_to_original_filename`` can recover the source filename
    (the opts part is guaranteed not to contain ``__``).
    """
    thumbnail_basedir = ''
    thumbnail_subdir = ''
    thumbnail_prefix = ''

    def get_thumbnail_name(self, thumbnail_options, transparent=False,
                           high_resolution=False):
        """
        A version of ``Thumbnailer.get_thumbnail_name`` that produces a
        reproducible thumbnail name that can be converted back to the original
        filename.
        """
        path, source_filename = os.path.split(self.name)
        source_extension = os.path.splitext(source_filename)[1][1:]
        # Pick the output extension: keep the source extension when
        # configured to, otherwise fall back to the transparency/default one.
        if self.thumbnail_preserve_extensions is True or \
            (self.thumbnail_preserve_extensions and
             source_extension.lower() in self.thumbnail_preserve_extensions):
            extension = source_extension
        elif transparent:
            extension = self.thumbnail_transparency_extension
        else:
            extension = self.thumbnail_extension
        extension = extension or 'jpg'

        thumbnail_options = thumbnail_options.copy()
        size = tuple(thumbnail_options.pop('size'))
        quality = thumbnail_options.pop('quality', self.thumbnail_quality)
        initial_opts = ['%sx%s' % size, 'q%s' % quality]

        # Sort the options so the file name is consistent.
        # BUGFIX: the original did ``opts = thumbnail_options.items();
        # opts.sort()`` which crashes on Python 3 (dict views have no
        # ``.sort()``); ``sorted()`` works on both Python 2 and 3.
        opts = sorted(thumbnail_options.items())
        opts = ['%s' % (v is not True and '%s-%s' % (k, v) or k)
                for k, v in opts if v]
        all_opts = '_'.join(initial_opts + opts)

        basedir = self.thumbnail_basedir
        subdir = self.thumbnail_subdir
        # make sure our magic delimiter is not used in all_opts
        all_opts = all_opts.replace('__', '_')
        if high_resolution:
            all_opts += '@2x'
        filename = u'%s__%s.%s' % (source_filename, all_opts, extension)
        return os.path.join(basedir, path, subdir, filename)
class ActionThumbnailerMixin(object):
    """Mixin for bulk actions that resize the original file in place."""
    thumbnail_basedir = ''
    thumbnail_subdir = ''
    thumbnail_prefix = ''

    def get_thumbnail_name(self, thumbnail_options, transparent=False,
                           high_resolution=False):
        """
        A version of ``Thumbnailer.get_thumbnail_name`` that returns the original
        filename to resize.
        """
        directory, filename = os.path.split(self.name)
        return os.path.join(self.thumbnail_basedir, directory,
                            self.thumbnail_subdir, filename)

    def thumbnail_exists(self, thumbnail_name):
        # Always report "missing" so the thumbnail is (re)generated.
        return False
class FilerThumbnailer(ThumbnailerNameMixin, Thumbnailer):
    """Thumbnailer whose generated names map back to the source file."""

    def __init__(self, *args, **kwargs):
        # Consume our optional ``thumbnail_basedir`` keyword before handing
        # the remaining arguments to easy-thumbnails' Thumbnailer.
        basedir = kwargs.pop('thumbnail_basedir', '')
        self.thumbnail_basedir = basedir
        super(FilerThumbnailer, self).__init__(*args, **kwargs)
class FilerActionThumbnailer(ActionThumbnailerMixin, Thumbnailer):
    # Thumbnailer used for bulk actions: ActionThumbnailerMixin makes
    # get_thumbnail_name() return the original file path, so the "thumbnail"
    # overwrites the source image in place.
    pass
|
bsd-3-clause
|
harshilasu/LinkurApp
|
y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/unit/cloudfront/test_invalidation_list.py
|
22
|
4037
|
#!/usr/bin/env python
import random
import string
from tests.unit import unittest
import mock
import boto
RESPONSE_TEMPLATE = r"""
<InvalidationList>
<Marker/>
<NextMarker>%(next_marker)s</NextMarker>
<MaxItems>%(max_items)s</MaxItems>
<IsTruncated>%(is_truncated)s</IsTruncated>
%(inval_summaries)s
</InvalidationList>
"""
INVAL_SUMMARY_TEMPLATE = r"""
<InvalidationSummary>
<Id>%(cfid)s</Id>
<Status>%(status)s</Status>
</InvalidationSummary>
"""
class CFInvalidationListTest(unittest.TestCase):
    """Tests for paginating CloudFront invalidation-request listings."""

    cloudfront = True

    def setUp(self):
        # Credentials are dummies; every HTTP round-trip is mocked below.
        self.cf = boto.connect_cloudfront('aws.aws_access_key_id',
                                          'aws.aws_secret_access_key')

    def _get_random_id(self, length=14):
        # Random alphabetic string standing in for an invalidation id.
        return ''.join([random.choice(string.ascii_letters) for i in
                        range(length)])

    def _group_iter(self, iterator, n):
        # Yield items from *iterator* in lists of at most *n* elements;
        # the final list may be shorter.
        accumulator = []
        for item in iterator:
            accumulator.append(item)
            if len(accumulator) == n:
                yield accumulator
                accumulator = []
        if len(accumulator) != 0:
            yield accumulator

    def _get_mock_responses(self, num, max_items):
        # Build one mocked HTTP response per page, paging *num* invalidation
        # summaries *max_items* at a time (capped at CloudFront's 100).
        max_items = min(max_items, 100)
        cfid_groups = list(self._group_iter([self._get_random_id() for i in
                                             range(num)], max_items))
        cfg = dict(status='Completed', max_items=max_items, next_marker='')
        responses = []
        is_truncated = 'true'
        for i, group in enumerate(cfid_groups):
            next_marker = group[-1]
            if (i + 1) == len(cfid_groups):
                # Last page: no marker, not truncated.
                is_truncated = 'false'
                next_marker = ''
            invals = ''
            cfg.update(dict(next_marker=next_marker,
                            is_truncated=is_truncated))
            for cfid in group:
                cfg.update(dict(cfid=cfid))
                invals += INVAL_SUMMARY_TEMPLATE % cfg
            cfg.update(dict(inval_summaries=invals))
            mock_response = mock.Mock()
            mock_response.read.return_value = RESPONSE_TEMPLATE % cfg
            mock_response.status = 200
            responses.append(mock_response)
        return responses

    def test_manual_pagination(self, num_invals=30, max_items=4):
        """
        Test that paginating manually works properly
        """
        self.assertGreater(num_invals, max_items)
        responses = self._get_mock_responses(num=num_invals,
                                             max_items=max_items)
        self.cf.make_request = mock.Mock(side_effect=responses)
        ir = self.cf.get_invalidation_requests('dist-id-here',
                                               max_items=max_items)
        all_invals = list(ir)
        self.assertEqual(len(all_invals), max_items)
        # Follow next_marker page by page until the listing is exhausted.
        while ir.is_truncated:
            ir = self.cf.get_invalidation_requests('dist-id-here',
                                                   marker=ir.next_marker,
                                                   max_items=max_items)
            invals = list(ir)
            self.assertLessEqual(len(invals), max_items)
            all_invals.extend(invals)
        remainder = num_invals % max_items
        if remainder != 0:
            # The final partial page holds exactly the leftover items.
            self.assertEqual(len(invals), remainder)
        self.assertEqual(len(all_invals), num_invals)

    def test_auto_pagination(self, num_invals=1024):
        """
        Test that auto-pagination works properly
        """
        max_items = 100  # CloudFront's per-page maximum
        self.assertGreaterEqual(num_invals, max_items)
        responses = self._get_mock_responses(num=num_invals,
                                             max_items=max_items)
        self.cf.make_request = mock.Mock(side_effect=responses)
        ir = self.cf.get_invalidation_requests('dist-id-here')
        # Only the first page is fetched eagerly; iterating pulls the rest.
        self.assertEqual(len(ir._inval_cache), max_items)
        self.assertEqual(len(list(ir)), num_invals)
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
jaor/bigmler
|
bigmler/options/timeseries.py
|
1
|
10871
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Options for BigMLer time series
"""
import sys
def int_or_none(value):
    """Return ``int(value)``, passing ``None`` through unchanged."""
    return None if value is None else int(value)
def range(value):
    """Parse ``"min,max"`` into a two-integer list.

    Exits the program with an error message when *value* is not a
    comma-separated pair of integers.

    NOTE: the function keeps its historical name, which shadows the
    ``range`` builtin at module level (it is wired up as the argparse
    ``type`` for ``--range`` below), but the locals no longer shadow the
    ``min``/``max`` builtins.
    """
    if "," not in value:
        sys.exit("Failed to parse a comma-separated pair of integers")
    try:
        # Tuple unpacking raises ValueError for more than two parts, and
        # int() raises it for non-numeric parts; both exit with the message.
        lower, upper = value.split(",")
        return [int(lower), int(upper)]
    except ValueError:
        sys.exit("Failed to parse a comma-separated pair of integers")
def get_time_series_options(defaults=None):
    """Return the argparse option definitions for the time series subcommand.

    Args:
        defaults: optional dict of stored default values; missing keys fall
            back to the built-in defaults below.

    Returns:
        dict mapping each option flag to the kwargs for
        ``ArgumentParser.add_argument``.
    """
    if defaults is None:
        defaults = {}

    options = {
        # If a BigML time series is provided, the script will
        # use it to generate forecasts
        '--time-series': {
            'action': 'store',
            'dest': 'time_series',
            'default': defaults.get('time_series', None),
            'help': "BigML time series Id."},

        # The path to a file containing time series ids.
        '--time-series-set': {
            'action': 'store',
            'dest': 'time_series_set',
            'default': defaults.get('time_series_set', None),
            'help': ("Path to a file containing timeseries/ids."
                     " One timeseries"
                     " per line (e.g., "
                     "timeseries/50a206a8035d0706dc000376"
                     ").")},

        # If a BigML json file containing a time series
        # structure is provided,
        # the script will use it.
        '--time-series-file': {
            'action': 'store',
            'dest': 'time_series_file',
            'default': defaults.get('time_series_file', None),
            'help': "BigML time series JSON structure file."},

        # Setting as objectives all the numeric fields.
        '--all-numeric-objectives': {
            "action": 'store_true',
            "dest": 'all_numeric_objectives',
            "default": defaults.get('all_numeric_objectives', False),
            "help": ("Setting as objective fields all the existing"
                     "numeric fields.")},

        # Not using damping in trends
        '--no-damped-trend': {
            "action": 'store_false',
            "dest": 'damped_trend',
            "default": defaults.get('damped_trend', False),
            "help": ("Use no damping for models with additive or"
                     " multiplicative trend.")},

        # The default numeric value when missing.
        '--default-numeric-value': {
            'action': 'store',
            'dest': 'default_numeric_value',
            'default': defaults.get('default_numeric_value',
                                    None),
            'choices': [None, "mean", "median", "minimum",
                        "maximum", "zero"],
            'help': ("The default numeric value to be used when missing."
                     " Possible values are: mean, "
                     " median, minimum, maximum or zero. The default "
                     "when not used will be spline interpolation.")},

        # Type or submodel according to error
        '--error': {
            'action': 'store',
            'dest': 'error',
            'type': int_or_none,
            'default': defaults.get('error', None),
            'choices': [1, 2],
            'help': ("The error type of the submodel: 1 (additive), 2 "
                     "(multiplicative) or not set.")},

        # Field especific parameters
        '--field-parameters': {
            'action': 'store',
            'dest': 'field_parameters',
            'default': defaults.get('field_parameters', None),
            'help': ("Path to a JSON file describing the field-specific"
                     " parameters to override the top-level ones.")},

        # Number of points to forecast
        '--horizon': {
            'action': 'store',
            'dest': 'horizon',
            'type': int,
            'default': defaults.get('horizon', None),
            'help': ("The number of forecast points to compute for the "
                     "objective field.")},

        # Whether to produce a forecast
        '--forecast': {
            'action': 'store_true',
            'dest': 'forecast',
            'default': defaults.get('forecast', False),
            'help': ("Whether to produce forecasts using the time series.")},

        # objective fields
        '--objectives': {
            'action': 'store',
            'dest': 'objectives',
            'default': defaults.get('objectives', None),
            'help': ("Comma-separated list of fields to be used "
                     "as objective in the time series.")},

        # period
        '--period': {
            'action': 'store',
            'dest': 'period',
            'default': defaults.get('period', 1),
            'help': ("Seasonal period length. The default is 1 and creates"
                     " non-seasonal models")},

        # range of values to be used in the time series
        '--range': {
            'action': 'store',
            'dest': 'range',
            'type': range,
            'default': defaults.get('range', None),
            'metavar': 'min,max',
            'nargs': '?',
            'help': ("Comma-separated list of two values that define the range"
                     " of rows used in the time series.")},

        # Type or submodel according to seasonality
        '--seasonality': {
            'action': 'store',
            'dest': 'seasonality',
            'type': int,
            'default': defaults.get('seasonality', 0),
            'choices': [0, 1, 2],
            'help': ("The seasonality used in the time series: 0 (none),"
                     "1 (additive), 2 "
                     "(multiplicative).")},

        # Type or submodel according to trend
        '--trend': {
            'action': 'store',
            'dest': 'trend',
            'type': int,
            'default': defaults.get('trend', 0),
            'choices': [0, 1, 2],
            'help': ("The trend used in the time series: 0 (none),"
                     "1 (additive), 2 "
                     "(multiplicative).")},

        # Defining the time-range of the data: start
        '--time-start': {
            'action': 'store',
            'dest': 'time_start',
            'type': int_or_none,
            'default': defaults.get('time_start', None),
            'help': ("The epoch, in milliseconds, to be assigned to the"
                     " first objective value. Default is 0.")},

        # Defining the time-range of the data: end
        '--time-end': {
            'action': 'store',
            'dest': 'time_end',
            'type': int_or_none,
            'default': defaults.get('time_end', None),
            'help': ("The epoch, in milliseconds, to be assigned to the"
                     " last objective value.")},

        # Defining the time-range of the data: interval
        '--time-interval': {
            'action': 'store',
            'dest': 'time_interval',
            'type': int_or_none,
            'default': defaults.get('time_interval', None),
            'help': ("Lapse, in milliseconds, between two consecutive values"
                     " of the objective field. Default is 1.")},

        # Defining the time-range of the data: interval unit
        '--time-interval-unit': {
            'action': 'store',
            'dest': 'time_interval_unit',
            'choices': ["milliseconds", "millisecond", "ms", "MS", "seconds",
                        "second", "s", "S", "minutes", "minute", "m", "hours",
                        "hour", "h", "H", "days", "day", "d", "D", "weeks",
                        "week", "w", "W", "months", "month", "M", "years",
                        "year", "y", "Y"],
            'default': defaults.get('time_interval_unit', None),
            'help': ('One of the available time units: "milliseconds", '
                     '"millisecond", "ms", "MS", "seconds", "second", "s", '
                     '"S", "minutes", "minute", "m", "hours", "hour", "h", '
                     '"H", "days", "day", "d", "D", "weeks", "week", "w", "W",'
                     ' "months", "month", "M", "years", "year", "y", "Y".'
                     ' Default is milliseconds.')},

        # Does not create a time series, just a dataset.
        '--no-time-series': {
            'action': 'store_true',
            'dest': 'no_time_series',
            'default': defaults.get('no_time_series', False),
            # BUGFIX: help text used to say "logistic regression"
            # (copy-paste from another options module).
            'help': "Do not create a time series, just a dataset."},

        # The path to a file containing time series attributes.
        '--time-series-attributes': {
            'action': 'store',
            'dest': 'time_series_attributes',
            'default': defaults.get('time_series_attributes', None),
            'help': ("Path to a JSON file describing time series"
                     " attributes.")},

        # Unsetting as objectives all the numeric fields.
        '--no-numeric-objectives': {
            "action": 'store_false',
            "dest": 'all_numeric_objectives',
            "default": defaults.get('all_numeric_objectives', False),
            "help": ("Unsetting as objective fields all the existing"
                     "numeric fields.")},

        # Using damping in trends
        '--damped-trend': {
            "action": 'store_true',
            "dest": 'damped_trend',
            "default": defaults.get('damped_trend', False),
            "help": ("Use damping for models with additive or"
                     " multiplicative trend.")},

        # Create a time series, not just a dataset.
        '--no-no-time-series': {
            'action': 'store_false',
            'dest': 'no_time_series',
            'default': defaults.get('no_time_series', False),
            'help': "Create a time series."},

        # Whether to use intervals in forecasts
        '--no-intervals': {
            'action': 'store_false',
            'dest': 'intervals',
            'default': defaults.get('intervals', True),
            # BUGFIX: help text was a copy-paste of --forecast's; it now
            # describes what the flag actually does.
            'help': ("Do not include confidence intervals in the "
                     "forecasts.")}}

    return options
|
apache-2.0
|
VapourApps/va_master
|
va_master/host_drivers/libvirt_driver.py
|
1
|
28404
|
try:
from . import base
from .base import Step, StepResult
except:
import base
from base import Step, StepResult
from base import int_to_bytes
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
import tornado.gen
import json, yaml
import subprocess
import libvirt
import uuid
import os
from xml.etree import ElementTree as ET
# This is a dictionary which I used to parse with yaml to write a config
# drive.  We ended up using a template instead, but we might need this
# sometime.  (Currently unused; kept for reference.)
users_dict = {
    'fqdn' : 'some.fqdn',
    'users' : [
        {
            'name' : 'root',
            'ssh-authorized-keys': [
                'some_rsa_key'
            ]
        }],
    'salt-minion' : {
        'conf' : {
            'master' : '192.168.80.39'
        },
        'public_key' : 'some_public_key',
        'private_key' : 'some_private_key',
    }
}
# cloud-init user-data template written to the config drive of new
# instances; VAR_* placeholders are substituted before use.
# NOTE(review): the YAML nesting below appears flattened (cloud-config
# requires indented nesting under users/salt_minion) -- verify against the
# original template before relying on it.
BASE_CONFIG_DRIVE="""#cloud-config
hostname: VAR_INSTANCE_NAME
users:
- name: root
ssh-authorized-keys:
- VAR_SSH_KEY
salt_minion:
conf:
startup_states: highstate
master: VAR_IP
grains:
role: VAR_ROLE
private_key: |
VAR_PRIVATE_KEY
public_key: |
VAR_PUBLIC_KEY
"""

# Salt-cloud provider/profile templates are unused for libvirt: everything
# is done through the python API instead (see LibVirtDriver.__init__).
PROVIDER_TEMPLATE = ''
PROFILE_TEMPLATE = ''

# Older cloud-config template, kept alongside BASE_CONFIG_DRIVE.
# NOTE(review): same flattened-YAML caveat as above.
CONFIG_DRIVE = """#cloud-config
fqdn: VAR_INSTANCE_FQDN
users:
- name: root
ssh-authorized-keys:
- VAR_SSH_AUTH
salt_minion:
conf:
master: VAR_MASTER_FQDN
public_key: |
VAR_PUBLIC_KEY
private_key: |
VAR_PRIVATE_KEY
"""

# XML snippet for attaching a qcow2 disk to a domain.
DISK_XML = """<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/var/lib/libvirt/images/va-master.local.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
"""

# Template domain definition used when creating new servers.
# NOTE(review): the <devices> section defines the same qcow2 disk twice,
# both with target dev='vda' -- this looks like an accidental duplicate;
# confirm before reusing this template as-is.
DOMAIN_XML = """<domain type='kvm'>
<name>va-master.local</name>
<memory unit='KiB'>1048576</memory>
<currentMemory unit='KiB'>1048576</currentMemory>
<vcpu placement='static'>1</vcpu>
<os>
<type arch='x86_64' machine='pc'>hvm</type>
<boot dev='hd'/>
</os>
<cpu mode='host-model'>
<model fallback='allow'/>
</cpu>
<devices>
<!-- <emulator>/usr/sbin/qemu-system-x86_64</emulator> -->
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/var/lib/libvirt/images/va-master.local.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/var/lib/libvirt/images/va-master.local.qcow2'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='file' device='cdrom'>
<driver name='qemu' type='raw'/>
<source file='/var/lib/libvirt/images/va-master.local-config.iso'/>
<target dev='hda' bus='ide'/>
<readonly/>
</disk>
<interface type='network'>
<source network='default'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
</interface>
<!-- <interface type='direct'>
<source dev='HOSTNETWORKINTERFACE' mode='bridge'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
</interface> -->
<serial type='pty'>
<target port='0'/>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<input type='mouse' bus='ps2'/>
<input type='keyboard' bus='ps2'/>
<graphics type='vnc' port='-1' autoport='yes'>
<listen type='address'/>
</graphics>
<sound model='ich6'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</sound>
<video>
<model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1' primary='yes'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
</video>
<redirdev bus='usb' type='spicevmc'>
<address type='usb' bus='0' port='1'/>
</redirdev>
<redirdev bus='usb' type='spicevmc'>
<address type='usb' bus='0' port='2'/>
</redirdev>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</memballoon>
</devices>
</domain>"""

# Template for creating a new storage volume in the default pool.
BASE_VOLUME_XML = """
<volume type='file'>
<name>VAR_NAME</name>
<key>/var/lib/libvirt/images/VAR_NAME</key>
<source>
</source>
<capacity unit='bytes'>VAR_SIZE</capacity>
<target>
<path>/var/lib/libvirt/images/VAR_NAME</path>
<format type='VAR_FORMAT'/>
<permissions>
<mode>0600</mode>
<owner>0</owner>
<group>0</group>
</permissions>
</target>
</volume>"""
class LibVirtDriver(base.DriverBase):
    def __init__(self, flavours, provider_name = 'libvirt_provider', profile_name = 'libvirt_profile', host_ip = '192.168.80.39', path_to_images = '/etc/libvirt/qemu/', config_path = '/etc/salt/libvirt_configs/', key_name = 'va_master_key', key_path = '/root/va_master_key', datastore_handler = None):
        """
        Custom init for libvirt. Does not work with saltstack, so a lot of things have to be done manually.

        Arguments:
        flavours -- A list of "flavours" defined so it can work similar to
            OpenStack. A flavour is just a dictionary with some values which
            are used to create servers. Flavours are saved in the
            datastore_handler, and the deploy_handler manages them.
        The rest are similar to the Base driver arguments.

        The LibVirt driver defines a property libvirt_states, which maps
        LibVirt state indices (virDomain.info()[0]) to OpenStack-style state
        names where possible.
        """
        kwargs = {
            'driver_name' : 'libvirt',
            'provider_template' : PROVIDER_TEMPLATE,
            'profile_template' : PROFILE_TEMPLATE,
            'provider_name' : provider_name,
            'profile_name' : profile_name,
            'host_ip' : host_ip,
            'key_name' : key_name,
            'key_path' : key_path,
            'datastore_handler' : datastore_handler
        }
        # The libvirt connection is opened lazily (get_provider_status /
        # validate_field_values), not here.
        self.conn = None
        self.config_path = config_path
        self.path_to_images = path_to_images
        self.flavours = flavours
        self.config_drive = BASE_CONFIG_DRIVE
        # Index in this list == libvirt domain state constant.
        self.libvirt_states = ['no_state', 'ACTIVE', 'blocked', 'PAUSED', 'shutdown', 'SHUTOFF', 'crashed', 'SUSPENDED']
        super(LibVirtDriver, self).__init__(**kwargs)
@tornado.gen.coroutine
def driver_id(self):
""" Pretty simple. """
raise tornado.gen.Return('libvirt')
@tornado.gen.coroutine
def friendly_name(self):
""" Pretty simple. """
raise tornado.gen.Return('LibVirt')
    @tornado.gen.coroutine
    def get_steps(self):
        """ Works like the Base get_steps, but adds the provider_ip and provider_protocol fields. Also, there are no security groups in LibVirt, so that field is removed. """
        steps = yield super(LibVirtDriver, self).get_steps()
        # Extend the first (provider) step with libvirt connection fields.
        steps[0].add_fields([
            ('provider_ip', 'Provider ip', 'str'),
            ('provider_protocol', 'Protocol; use qemu with Cert or qemu+tcp for no auth', 'options'),
        ])
        # Drop the security-group field from the second step; libvirt has
        # no security-group concept (see get_sec_groups).
        del steps[1].fields[2]
        # self.steps = steps
        raise tornado.gen.Return(steps)
@tornado.gen.coroutine
def get_networks(self):
""" Networks are retrieved via the python api. """
try:
networks = self.conn.listAllNetworks()
networks = [x.name() for x in networks]
except:
import traceback
print ('Error in get_networks in libvirt provider. ')
traceback.print_exc()
raise Exception("There was an error getting networks for the libvirt provider. ")
raise tornado.gen.Return(networks)
@tornado.gen.coroutine
def get_sec_groups(self):
""" The list of security groups is empty. """
sec_groups = ['Libvirt has no security groups']
raise tornado.gen.Return(sec_groups)
@tornado.gen.coroutine
def get_images(self):
""" Lists all volumes from the default storage pool. """
try:
images = [x for x in self.conn.listAllStoragePools() if x.name() == 'default'][0]
images = images.listAllVolumes()
images = [x.name() for x in images]
except:
import traceback
print ('Error in get_images in libvirt provider')
traceback.print_exc()
raise Exception("There was an error getting images for the libvirt provider. ")
raise tornado.gen.Return(images)
@tornado.gen.coroutine
def get_sizes(self):
""" Returns the flavours received from the datastore_handler. """
raise tornado.gen.Return(self.flavours.keys())
@tornado.gen.coroutine
def get_provider_status(self, provider):
""" Tries to open a connection to a provider so as to get the status. """
try:
provider_url = provider['provider_protocol'] + '://' + provider['provider_ip'] + '/system'
self.conn = libvirt.open(provider_url)
except Exception as e:
raise tornado.gen.Return({'success' : False, 'message' : 'Error connecting to libvirt provider. ' + e.message})
raise tornado.gen.Return({'success' : True, 'message' : ''})
    @tornado.gen.coroutine
    def get_servers(self, provider, get_servers = True, get_billing = True):
        """ Gets servers in the specified format so they can be used in get_provider_data() """
        provider_url = provider['provider_protocol'] + '://' + provider['provider_ip'] + '/system'
        try:
            conn = libvirt.open(provider_url)
        except Exception as e:
            # Unreachable provider: report no servers rather than failing.
            raise tornado.gen.Return([])
        servers = []
        if not get_servers: return servers
        for x in conn.listAllDomains():
            print ('Trying to get ', x.name())
            server = {
                'hostname' : x.name(),
                'ip' : 'n/a',
                'size' : 'va-small',
                # info()[0] is the domain state index; mapped via the
                # libvirt_states list set up in __init__.
                'status' : self.libvirt_states[x.info()[0]],
                'provider' : provider['provider_name'],
                # info()[2] / info()[3] -- presumably memory (KiB, here
                # converted to MiB) and vCPU count; confirm against the
                # libvirt virDomain.info() docs.
                'used_ram' : x.info()[2] / 2.0**10,
                'used_cpu': x.info()[3],
                'used_disk' : 'n/a',
            }
            try:
                # blockInfo()[1] is converted from bytes to GiB here;
                # best-effort, falls back to 0 below.
                server['used_disk'] = x.blockInfo('hda')[1] / 2.0**30
            except:
                server['used_disk'] = 0
                # import traceback
                # print ('Cannot get used disk for server : ', x.name())
                # traceback.print_exc()
            servers.append(server)
        raise tornado.gen.Return(servers)
    @tornado.gen.coroutine
    def get_provider_data(self, provider, get_servers = True, get_billing = True):
        """ Gets provider data as specified per the Base driver. """
        provider_url = provider['provider_protocol'] + '://' + provider['provider_ip'] + '/system'
        try:
            conn = libvirt.open(provider_url)
        except Exception as e:
            # Unreachable provider: return an empty payload with the error
            # in the status instead of raising.
            provider_data = {
                'servers' : [],
                'limits' : {},
                'provider_usage' : {},
                'status' : {'success' : False, 'message' : 'Could not connect to the libvirt provider. ' + str(e)}
            }
            raise tornado.gen.Return(provider_data)
        if get_servers:
            servers = yield self.get_servers(provider)
        else:
            servers = []
        try:
            storage = [x for x in conn.listAllStoragePools() if x.name() == 'default'][0]
        except:
            import traceback
            print ('Error getting storage in get_provider_data()')
            traceback.print_exc()
            raise Exception('Error getting storage for the libvirt provider. ')
        info = conn.getInfo()
        storage_info = storage.info()
        try:
            # Per-volume info: index 1/2 are presumably capacity/allocation
            # in bytes -- confirm against virStorageVol.info() docs.
            used_disk = sum([x.info()[1] for x in storage.listAllVolumes()])
            total_disk = sum([x.info()[2] for x in storage.listAllVolumes()])
        except:
            import traceback
            print ('Error getting volumes for the default storage in get_provider_data()')
            traceback.print_exc()
            raise Exception('Error getting volumes for the default storage from the libvirt provider. ')
        print ('My servers are : ', servers)
        provider_usage = {
            'max_cpus' : conn.getMaxVcpus(None),
            'used_cpus' : sum([x['used_cpu'] for x in servers]),
            # Domain info()[1] is memory in KiB; converted to MiB here.
            'max_ram' : sum([x.info()[1] for x in conn.listAllDomains()]) / 2.0**10,
            'used_ram' : sum([x['used_ram'] for x in servers]),
            # Pool info indices 1-3: bytes, converted to GiB.
            'max_disk' : storage_info[1] / 2.0**30,
            'used_disk' : storage_info[2] / 2.0**30,
            'free_disk' : storage_info[3] / 2.0**30,
            'max_servers' : 'n/a',
            'used_servers' : len(servers),
        }
        provider_usage['free_cpus'] = provider_usage['max_cpus'] - provider_usage['used_cpus']
        provider_usage['free_ram'] = provider_usage['max_ram'] - provider_usage['used_ram']
        print ('And my usage : ', provider_usage)
        provider_info = {
            'servers' : servers,
            'provider_usage' : provider_usage,
            'status' : {'success' : True, 'message': ''}
        }
        raise tornado.gen.Return(provider_info)
    @tornado.gen.coroutine
    def get_provider_billing(self, provider):
        """Return billing data for the provider's servers.

        All costs are currently hard-coded to 0; only memory usage is
        filled in (converted to human-readable form via int_to_bytes).
        """
        #TODO provide should have some sort of costing mechanism, and we multiply used stuff by some price.
        total_cost = 0
        servers = yield self.get_servers(provider)
        for s in servers:
            s['cost'] = 0
            s['estimated_cost'] = 0
        # servers.append({
        #     'hostname' : 'Other Costs',
        #     'ip' : '',
        #     'size' : '',
        #     'used_disk' : 0,
        #     'used_ram' : 0,
        #     'used_cpu' : 0,
        #     'status' : '',
        #     'cost' : total_cost,
        #     'estimated_cost' : 0,
        #     'provider' : provider['provider_name'],
        # })
        # used_ram comes back in MiB from get_servers; * 2**20 converts to bytes.
        total_memory = sum([x['used_ram'] for x in servers]) * 2**20
        total_memory = int_to_bytes(total_memory)
        provider['memory'] = total_memory
        for server in servers:
            server['used_ram'] = int_to_bytes(server['used_ram'] * (2 ** 20))
        billing_data = {
            'provider' : provider,
            'servers' : servers,
            'total_cost' : total_cost
        }
        raise tornado.gen.Return(billing_data)
@tornado.gen.coroutine
def server_action(self, provider, server_name, action):
""" Performs an action via the python api. """
provider_url = provider['provider_protocol'] + '://' + provider['provider_ip'] + '/system'
try:
conn = libvirt.open(provider_url)
server = conn.lookupByName(server_name)
except Exception as e:
raise tornado.gen.Return({'success' : False, 'message' : 'Could not connect to provider. ' + e.message})
server_action = {
'delete' : server.undefine,
'reboot' : server.reboot,
'start' : server.create,
'stop' : server.shutdown,
'suspend' : server.suspend,
'resume' : server.resume,
}
if action not in server_action:
raise tornado.gen.Return({'success' : False, 'message' : 'Action not supported : ' + action})
try:
success = server_action[action]()
except Exception as e:
raise tornado.gen.Return({'success' : False, 'message' : 'Action was not performed. ' + e.message, 'data' : {}})
raise tornado.gen.Return({'success' : True, 'message' : ''})
    @tornado.gen.coroutine
    def validate_field_values(self, step_index, field_values):
        """ Adds the provider_protocol field, and opens a connection libvirt.conn to get info about the provider. """
        print ('Validating on step : ', step_index)
        if step_index < 0:
            # Initial call: only provide the protocol choices for step 0.
            protocols = ['qemu', 'qemu+tcp', 'qemu+tls']
            raise tornado.gen.Return(StepResult(
                errors=[], new_step_index=0, option_choices={'provider_protocol' : protocols}
            ))
        elif step_index == 0:
            # Connection step: open the libvirt connection and cache the
            # provider's networks/images/sizes for the next step.
            provider_url = field_values['provider_protocol'] + '://' + field_values['provider_ip'] + '/system'
            self.field_values['provider_ip'] = field_values['provider_ip']
            try:
                self.conn = libvirt.open(provider_url)
                print ('Opened connection to ', provider_url)
                self.field_values['provider_protocol'] = field_values['provider_protocol']
            except:
                import traceback
                print ('Error connecting to libvirt in validate_field_values()')
                traceback.print_exc()
                raise Exception('Could not connect to libvirt using the parameters - protocol: %s, provider_ip: %s. ' % (field_values['provider_protocol'], field_values['provider_ip']))
            self.field_values['networks'] = yield self.get_networks()
            self.field_values['images'] = yield self.get_images()
            self.field_values['sizes']= self.flavours.keys()
            self.field_values['sec_groups'] = []
        elif step_index == 1:
            # Libvirt has no security groups; blank the field before the
            # base class validates it.
            field_values['sec_group'] = None
        step_result = yield super(LibVirtDriver, self).validate_field_values(step_index, field_values)
        raise tornado.gen.Return(step_result)
@tornado.gen.coroutine
def create_server(self, provider, data):
"""
Instances are created manually, as there is no saltstack support. This happens by following these steps:
1. Open a connection to the libvirt provider.
2. Create a config drive for cloud init. What's needed for this is the salt master fqdn and the salt keys.
3. Clone the libvirt volume selected when adding a provider.
4. If a certain storage is defined when creating an server, create a new disk for it.
5. Create an iso image from the config drive.
6. Create an xml for the new server.
7. Define the image with the xml.
8. Create permanent server.
"""
print ('Creating libvirt server. ')
data.update(self.app_fields)
provider_url = provider['provider_protocol'] + '://' + provider['provider_ip'] + '/system'
try:
conn = libvirt.open(provider_url)
except:
import traceback
print ('Error connecting to libvirt at %s in create_server()' % provider_url)
traceback.print_exc()
raise Exception('Error connecting to libvirt with url : %s' % (provider_url))
try:
storage = [x for x in conn.listAllStoragePools() if x.name() == 'default'][0]
except:
import traceback
print ('Error getting the default storage pool for the libvirt provider. ')
traceback.print_exc()
raise Exception('Error getting the default storage pool at %s' % (provider_url))
flavour = self.flavours[data['size']]
storage_disk = data.get('storage_disk', 0)
try:
config_drive = yield self.create_config_drive(provider, data)
except:
import traceback
print ('Error creating the config drive. ')
traceback.print_exc()
raise Exception('Error creating the config drive for the libvirt provider. ')
try:
old_vol = [x for x in storage.listAllVolumes() if x.name() == data['image']][0]
new_vol = yield self.clone_libvirt_volume(storage, flavour['vol_capacity'], old_vol, data['server_name'] + '-volume.qcow2')
disks = [new_vol.name()]
except:
import traceback
print ('Error cloning the libvirt volume. ')
traceback.print_exc()
raise Exception('Error cloning the libvirt volume for the new minion. ')
if storage_disk:
try:
new_disk = yield self.create_libvirt_volume(storage, storage_disk, data['server_name'] + '-disk.qcow2')
disks.append(new_disk.name())
except:
import traceback
print ('Error creating libvirt volume with parameters storage_disk: %s, server_name: %s' % (storage_disk, data['server_name']))
traceback.print_exc()
raise Exception('Error creating additional libvirt volume. ')
else:
disks.append(None)
try:
iso_image = yield self.create_iso_image(provider_url, conn, data['server_name'], config_drive, old_vol)
except:
import traceback
print ('Error creating an iso image. ')
traceback.print_exc()
raise Exception('Error creating the iso image for the new minion. ')
try:
new_xml = yield self.create_domain_xml(data['server_name'], disks, iso_image)
except:
import traceback
print ('Error generating the xml for the new minion. ')
traceback.print_exc()
raise Exception('Error generating the xml for the new minion. ')
try:
new_img = conn.defineXML(new_xml)
new_img.setMemory = flavour['memory']
new_img.setMaxMemory = flavour['max_memory']
new_img.setVcpus = flavour['num_cpus']
new_img.create()
except:
import traceback
print ('Error creating a minion from the defined XML')
traceback.print_exc()
raise Exception('Error creating a minion with - XML was defined but the minion was not created. ')
self.config_drive = BASE_CONFIG_DRIVE
@tornado.gen.coroutine
def create_domain_xml(self, server_name, disks, iso_name):
old_xml = DOMAIN_XML
print ('Generating domain xml')
tree = ET.fromstring(old_xml)
tree.find('name').text = server_name
devices = tree.find('devices')
domain_disks = [x for x in devices.findall('disk') if x.get('device') == 'disk']
domain_disks[0].find('source').attrib['file'] = '/var/lib/libvirt/images/' + disks[0]
if disks[1]:
domain_disks[1].find('source').attrib['file'] = '/var/lib/libvirt/images/' + disks[1]
else:
devices.remove(devices[1])
domain_iso_disk = [x for x in tree.find('devices').findall('disk') if x.get('device') == 'cdrom'][0]
#Patekata mu e kaj pool-ot kaj sto e uploadiran volume 08.02.2017
domain_iso_disk.find('source').attrib['file'] = '/var/lib/libvirt/images/' + iso_name #self.config_path + iso_name
mac = tree.find('devices').find('interface').find('mac')
print ('Success, result is : ', ET.tostring(tree))
raise tornado.gen.Return(ET.tostring(tree))
    @tornado.gen.coroutine
    def create_iso_image(self, provider_url, conn, vol_name, config_drive, base_volume):
        """Build a config-drive iso from *config_drive* and upload it to the
        provider's default storage pool.

        :param provider_url: libvirt connection URL, passed to the virsh upload.
        :param conn: open libvirt connection.
        :param vol_name: server name; the iso is named '<vol_name>.iso'.
        :param config_drive: directory containing the config drive layout.
        :param base_volume: unused here -- TODO confirm it can be dropped.
        :return: the iso volume name ('<vol_name>.iso') via tornado.gen.Return.
        """
        print ('Trying to create iso from dir: ', config_drive)
        try:
            iso_name = vol_name + '.iso'
            iso_path = self.config_path + iso_name
            # xorrisofs packs the config drive directory into an iso9660 image.
            iso_command = ['xorrisofs', '-J', '-r', '-V', 'config_drive', '-o', iso_path, config_drive]
            storage = [x for x in conn.listAllStoragePools() if x.name() == 'default'][0]
            # The upload goes through the virsh CLI rather than the python
            # stream API (see the dead code below).
            upload_command = ['virsh', '-c', provider_url, 'vol-upload', '--pool', storage.name(), iso_name, iso_path]
            iso_volume = yield self.create_libvirt_volume(storage, 1, iso_name)
            subprocess.call(iso_command)
            subprocess.call(upload_command)
            with open(iso_path, 'r') as f:
                #Libvirt documentation is terrible and I don't really know how this works.
                # Dead code: an abandoned attempt at the libvirt stream upload
                # API; the virsh call above performs the actual upload.
                def handler(stream, data, file_):
                    return file_.read(data)
                st = conn.newStream(0)
                # st.sendAll(handler, f)
        except:
            # NOTE(review): bare except swallows every error; the method still
            # returns the iso name even if creation/upload failed, and the
            # print below can NameError if iso_path was never assigned.
            import traceback
            traceback.print_exc()
        print ('Created at : ', iso_path)
        raise tornado.gen.Return(vol_name + '.iso')
@tornado.gen.coroutine
def create_salt_key(self, server_name, config_dir):
print 'Creating salt key'
salt_command = ['salt-key', '--gen-keys=' + server_name, '--gen-keys-dir', config_dir]
result = subprocess.call(salt_command)
print ('Created with result ', result)
raise tornado.gen.Return(None)
    @tornado.gen.coroutine
    def clone_libvirt_volume(self, storage, vol_capacity, old_vol, vol_name, resize = True):
        """Clone *old_vol* inside *storage* into a new volume named *vol_name*.

        :param storage: libvirt storage pool.
        :param vol_capacity: desired capacity, apparently in GiB (resize
            multiplies by 2**30) -- but the XML <capacity> below is set to the
            raw number; TODO confirm the unit mismatch is intended.
        :param old_vol: source libvirt volume to copy from.
        :param vol_name: name for the new volume.
        :param resize: when True, grow the clone to vol_capacity GiB.
        :return: the new libvirt volume via tornado.gen.Return.
        """
        # Reuse the source volume's XML definition, changing only name/capacity.
        new_vol = ET.fromstring(old_vol.XMLDesc())
        print ('Creating volume ', vol_name)
        new_vol.find('name').text = vol_name
        new_vol.find('capacity').text = str(vol_capacity)
        new_vol = storage.createXMLFrom(ET.tostring(new_vol), old_vol)
        if resize:
            new_vol.resize(vol_capacity * (2**30))
        raise tornado.gen.Return(new_vol)
@tornado.gen.coroutine
def create_libvirt_volume(self, storage, vol_size, vol_name):
print ('Creating disk ', vol_name)
try:
vol_xml = BASE_VOLUME_XML
vol_values = {
'VAR_SIZE' : str(vol_size * (2 ** 30)),
'VAR_NAME' : vol_name,
'VAR_FORMAT' : 'raw'
}
for key in vol_values:
vol_xml = vol_xml.replace(key, vol_values[key])
new_vol = storage.createXML(vol_xml)
except:
import traceback
traceback.print_exc()
print ('Success!', new_vol.XMLDesc())
raise tornado.gen.Return(new_vol)
    @tornado.gen.coroutine
    def create_config_drive(self, provider, data):
        """Build a cloud-init config drive directory for the new minion.

        Generates a salt key pair, installs the public key on the salt master,
        renders the config-drive template with the keys and master IP, and
        writes the OpenStack-style meta_data.json / user_data layout.

        :param provider: provider dict (not read here -- TODO confirm unused).
        :param data: server fields; must contain 'server_name' and 'role'.
        :return: path of the config drive directory via tornado.gen.Return.
        """
        print ('Creating config with ', data)
        minion_dir = self.config_path + data['server_name']
        config_dir = minion_dir + '/config_drive'
        # Cloud-init expects the OpenStack config-drive directory layout.
        server_dir = config_dir + '/openstack/2012-08-10'
        os.makedirs(config_dir)
        os.makedirs(server_dir)
        yield self.create_salt_key(data['server_name'], minion_dir)
        pub_key = ''
        pub_key_path = minion_dir + '/' + data['server_name']
        with open(pub_key_path + '.pub', 'r') as f:
            pub_key = f.read()
        # Pre-accept the minion's key on the salt master.
        pub_key_cp_cmd = ['cp',pub_key_path + '.pub', '/etc/salt/pki/minion/' + data['server_name']]
        pub_key_cp_master_cmd = ['cp',pub_key_path + '.pub', '/etc/salt/pki/master/minions/' + data['server_name']]
        subprocess.call(pub_key_cp_cmd)
        subprocess.call(pub_key_cp_master_cmd)
        pri_key = ''
        with open(minion_dir + '/' + data['server_name'] + '.pem', 'r') as f:
            pri_key = f.read()
        auth_key = ''
        with open(self.key_path + '.pub') as f:
            auth_key = f.read()
        # Values substituted into the config drive template; the key material
        # is indented four spaces, presumably to stay valid YAML inside the
        # template -- TODO confirm against BASE_CONFIG_DRIVE.
        config_dict = {
            'VAR_INSTANCE_NAME' : data['server_name'],
            'VAR_IP' : self.host_ip,
            'VAR_SSH_KEY' : auth_key,
            'VAR_PUBLIC_KEY' : '\n'.join([' ' * 4 + line for line in pub_key.split('\n')]),
            'VAR_PRIVATE_KEY' : '\n'.join([' ' * 4 + line for line in pri_key.split('\n')]),
            'VAR_ROLE' : data['role'],
            # 'VAR_INSTANCE_FQDN' : data['server_name'],
        }
        # NOTE(review): self.config_drive is mutated in place here;
        # create_server resets it to BASE_CONFIG_DRIVE afterwards.
        for key in config_dict:
            self.config_drive = self.config_drive.replace(key, config_dict[key])
        # Alternative cloud-config payload; currently unused (see the
        # commented-out safe_dump below).
        users_dict = {
            'fqdn' : data['server_name'],
            'users' : [
                {
                    'name' : 'root',
                    'ssh-authorized-keys': [
                        auth_key
                    ]
                }],
            'salt-minion' : {
                'conf' : {
                    'master' : self.host_ip
                },
                'public_key' : pub_key,
                'private_key' : pri_key,
            }
        }
        # self.config_drive = yaml.safe_dump(users_dict)
        with open(server_dir + '/meta_data.json', 'w') as f:
            f.write(json.dumps({'uuid' : data['server_name']}))
        with open(server_dir + '/user_data', 'w') as f:
            f.write(self.config_drive)
        # minion_dir = self.config_path + data['server_name']
        # config_dir = minion_dir + '/config_drive'
        # server_dir = config_dir + '/openstack/2012-08-10'
        os.symlink('../openstack/2012-08-10', config_dir + '/openstack/latest')
        raise tornado.gen.Return(config_dir)
|
gpl-3.0
|
GdZ/scriptfile
|
software/googleAppEngine/lib/django_1_3/django/contrib/localflavor/ca/ca_provinces.py
|
237
|
1603
|
"""
An alphabetical list of provinces and territories for use as `choices`
in a formfield, and a mapping of province misspellings/abbreviations to
normalized abbreviations.
Source: http://www.canada.gc.ca/othergov/prov_e.html
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
# Emit a notice at import time so downstream users see that the CA
# localflavor data recently changed.
import warnings
warnings.warn(
    'There have been recent changes to the CA localflavor. See the release notes for details',
    RuntimeWarning
)
# Alphabetical (abbreviation, full name) pairs for Canadian provinces and
# territories, suitable for a form field's `choices`.
PROVINCE_CHOICES = (
    ('AB', 'Alberta'),
    ('BC', 'British Columbia'),
    ('MB', 'Manitoba'),
    ('NB', 'New Brunswick'),
    ('NL', 'Newfoundland and Labrador'),
    ('NT', 'Northwest Territories'),
    ('NS', 'Nova Scotia'),
    ('NU', 'Nunavut'),
    ('ON', 'Ontario'),
    ('PE', 'Prince Edward Island'),
    ('QC', 'Quebec'),
    ('SK', 'Saskatchewan'),
    ('YT', 'Yukon')
)
# Maps lowercase spellings, misspellings and legacy abbreviations (e.g. 'nf',
# 'yk', 'pei') to the normalized two-letter province code.
PROVINCES_NORMALIZED = {
    'ab': 'AB',
    'alberta': 'AB',
    'bc': 'BC',
    'b.c.': 'BC',
    'british columbia': 'BC',
    'mb': 'MB',
    'manitoba': 'MB',
    'nb': 'NB',
    'new brunswick': 'NB',
    'nf': 'NL',
    'nl': 'NL',
    'newfoundland': 'NL',
    'newfoundland and labrador': 'NL',
    'nt': 'NT',
    'northwest territories': 'NT',
    'ns': 'NS',
    'nova scotia': 'NS',
    'nu': 'NU',
    'nunavut': 'NU',
    'on': 'ON',
    'ontario': 'ON',
    'pe': 'PE',
    'pei': 'PE',
    'p.e.i.': 'PE',
    'prince edward island': 'PE',
    'qc': 'QC',
    'quebec': 'QC',
    'sk': 'SK',
    'saskatchewan': 'SK',
    'yk': 'YT',
    'yt': 'YT',
    'yukon': 'YT',
    'yukon territory': 'YT',
}
|
mit
|
kmunve/TSanalysis
|
Plotting/meteo_plots.py
|
1
|
1137
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from matplotlib import pyplot as plt
from numpy import arange
"""
__author__: 'kmunve'
"""
def _temperature_plot(values, xticks=None, p_title=None, p_xlabel='Time', p_ylabel='Temperature'):
    """
    Draw a temperature line plot on a new matplotlib figure.

    Fix: the original assigned to ``plt.title``/``plt.xlabel``/``plt.ylabel``
    (clobbering the pyplot functions) instead of calling them, so the title
    and axis labels never appeared on the plot.

    TODO: add a check if the values are in Kelvin, Fahrenheit, or Celsius and adjust plot parameters accordingly.
    TODO: rotate xlabels and change format to YYYY-MM-DD:HH
    :param values: sequence of temperature values (the fixed -10..25 y-limits
        suggest Celsius -- TODO confirm).
    :param xticks: optional x coordinates; defaults to 0..len(values)-1.
    :param p_title: plot title.
    :param p_xlabel: x-axis label.
    :param p_ylabel: y-axis label.
    :return: None
    """
    y = values
    if xticks is None:
        x = arange(len(y))
    else:
        x = xticks
    # Create figure
    plt.figure(figsize=(14, 6))
    ax = plt.axes()
    # Set y limits
    ax.set_ylim(-10, 25)
    plt.plot(x, y, color='green', linewidth=2)
    # Reference line at the freezing point.
    plt.axhline(0.0, color='grey', linestyle='--')
    plt.title(p_title)
    plt.xlabel(p_xlabel)
    plt.ylabel(p_ylabel)
def temperature_plot(values, xticks=None, p_title=None, p_xlabel='Time', p_ylabel='Temperature'):
    """
    Render the temperature plot and display it with plt.show(); use this
    wrapper (rather than _temperature_plot) from external code.
    """
    _temperature_plot(values, xticks=xticks, p_title=p_title,
                      p_xlabel=p_xlabel, p_ylabel=p_ylabel)
    plt.show()
|
mit
|
yunify/qingcloud-cli
|
qingcloud/cli/iaas_client/actions/sg/describe_security_group_rules.py
|
1
|
2198
|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class DescribeSecurityGroupRulesAction(BaseAction):
    """CLI action wrapping the DescribeSecurityGroupRules IaaS API call."""
    action = 'DescribeSecurityGroupRules'
    command = 'describe-security-group-rules'
    usage = '%(prog)s -s <security_group_id> -r <security_group_rules> [-f <conf_file>]'
    @classmethod
    def add_ext_arguments(cls, parser):
        # Action-specific flags only; build_directive also reads
        # options.offset/options.limit, which are presumably registered by
        # BaseAction -- TODO confirm.
        parser.add_argument('-s', '--security_group', dest='security_group',
                action='store', type=str, default='',
                help='ID of security_group whose rules you want to list. ')
        parser.add_argument('-r', '--security_group_rules', dest='security_group_rules',
                action='store', type=str, default='',
                help='ID of security group rule you want to list. ')
        parser.add_argument('-d', '--direction', dest='direction',
                action='store', type=int, default=None,
                help='0 for inbound; 1 for outbound.')
    @classmethod
    def build_directive(cls, options):
        # Map parsed CLI options onto the API request parameters; the
        # comma-separated rule IDs become a list.
        return {
            'security_group': options.security_group,
            'security_group_rules': explode_array(options.security_group_rules),
            'direction': options.direction,
            'offset':options.offset,
            'limit': options.limit,
        }
|
apache-2.0
|
tushevorg/namebench
|
nb_third_party/dns/tsigkeyring.py
|
248
|
1658
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""A place to store TSIG keys."""
import base64
import dns.name
def from_text(textring):
    """Convert a dictionary containing (textual DNS name, base64 secret) pairs
    into a binary keyring which has (dns.name.Name, binary secret) pairs.
    @rtype: dict"""
    keyring = {}
    for (keytext, secret_text) in textring.items():
        # Parse the textual name and decode the base64 secret in one step.
        keyring[dns.name.from_text(keytext)] = base64.decodestring(secret_text)
    return keyring
def to_text(keyring):
    """Convert a dictionary containing (dns.name.Name, binary secret) pairs
    into a text keyring which has (textual DNS name, base64 secret) pairs.
    @rtype: dict"""
    textring = {}
    for (keyname, secret) in keyring.items():
        # Render the name as text and base64-encode the binary secret.
        textring[dns.name.to_text(keyname)] = base64.encodestring(secret)
    return textring
|
apache-2.0
|
mzdaniel/oh-mainline
|
vendor/packages/Django/django/db/models/manager.py
|
306
|
7872
|
from django.utils import copycompat as copy
from django.conf import settings
from django.db import router
from django.db.models.query import QuerySet, EmptyQuerySet, insert_query, RawQuerySet
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
def ensure_default_manager(sender, **kwargs):
    """
    Ensures that a Model subclass contains a default manager and sets the
    _default_manager attribute on the class. Also sets up the _base_manager
    points to a plain Manager instance (which could be the same as
    _default_manager if it's not a subclass of Manager).

    Connected to the class_prepared signal, so *sender* is the model class.
    """
    cls = sender
    # Abstract models never get managers of their own.
    if cls._meta.abstract:
        return
    if not getattr(cls, '_default_manager', None):
        # Create the default manager, if needed.
        try:
            # A field named 'objects' would collide with the auto-added manager.
            cls._meta.get_field('objects')
            raise ValueError("Model %s must specify a custom Manager, because it has a field named 'objects'" % cls.__name__)
        except FieldDoesNotExist:
            pass
        cls.add_to_class('objects', Manager())
        cls._base_manager = cls.objects
    elif not getattr(cls, '_base_manager', None):
        default_mgr = cls._default_manager.__class__
        if (default_mgr is Manager or
                getattr(default_mgr, "use_for_related_fields", False)):
            cls._base_manager = cls._default_manager
        else:
            # Default manager isn't a plain Manager class, or a suitable
            # replacement, so we walk up the base class hierarchy until we hit
            # something appropriate.
            for base_class in default_mgr.mro()[1:]:
                if (base_class is Manager or
                        getattr(base_class, "use_for_related_fields", False)):
                    cls.add_to_class('_base_manager', base_class())
                    return
            raise AssertionError("Should never get here. Please report a bug, including your model and model manager setup.")
# Run the manager setup above each time a model class finishes preparation.
signals.class_prepared.connect(ensure_default_manager)
class Manager(object):
    """Table-level interface for database queries on a model.

    Almost every public method below delegates to a fresh QuerySet, so
    subclasses can change behavior everywhere by overriding get_query_set()
    alone.
    """
    # Tracks each time a Manager instance is created. Used to retain order.
    creation_counter = 0
    def __init__(self):
        super(Manager, self).__init__()
        self._set_creation_counter()
        self.model = None          # set by contribute_to_class()
        self._inherited = False    # True when copied onto a child model
        self._db = None            # forced DB alias; see db_manager()/db
    def contribute_to_class(self, model, name):
        # TODO: Use weakref because of possible memory leak / circular reference.
        self.model = model
        # Expose via a descriptor so the manager is class-only access.
        setattr(model, name, ManagerDescriptor(self))
        # The manager created first (lowest counter) becomes the default.
        if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
            model._default_manager = self
        if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
            model._meta.abstract_managers.append((self.creation_counter, name,
                    self))
        else:
            model._meta.concrete_managers.append((self.creation_counter, name,
                    self))
    def _set_creation_counter(self):
        """
        Sets the creation counter value for this instance and increments the
        class-level copy.
        """
        self.creation_counter = Manager.creation_counter
        Manager.creation_counter += 1
    def _copy_to_model(self, model):
        """
        Makes a copy of the manager and assigns it to 'model', which should be
        a child of the existing model (used when inheriting a manager from an
        abstract base class).
        """
        assert issubclass(model, self.model)
        mgr = copy.copy(self)
        mgr._set_creation_counter()
        mgr.model = model
        mgr._inherited = True
        return mgr
    def db_manager(self, using):
        # Return a copy of this manager pinned to the given database alias.
        obj = copy.copy(self)
        obj._db = using
        return obj
    @property
    def db(self):
        # Explicit alias wins; otherwise ask the router per read.
        return self._db or router.db_for_read(self.model)
    #######################
    # PROXIES TO QUERYSET #
    #######################
    def get_empty_query_set(self):
        return EmptyQuerySet(self.model, using=self._db)
    def get_query_set(self):
        """Returns a new QuerySet object.  Subclasses can override this method
        to easily customize the behavior of the Manager.
        """
        return QuerySet(self.model, using=self._db)
    def none(self):
        return self.get_empty_query_set()
    def all(self):
        return self.get_query_set()
    def count(self):
        return self.get_query_set().count()
    def dates(self, *args, **kwargs):
        return self.get_query_set().dates(*args, **kwargs)
    def distinct(self, *args, **kwargs):
        return self.get_query_set().distinct(*args, **kwargs)
    def extra(self, *args, **kwargs):
        return self.get_query_set().extra(*args, **kwargs)
    def get(self, *args, **kwargs):
        return self.get_query_set().get(*args, **kwargs)
    def get_or_create(self, **kwargs):
        return self.get_query_set().get_or_create(**kwargs)
    def create(self, **kwargs):
        return self.get_query_set().create(**kwargs)
    def filter(self, *args, **kwargs):
        return self.get_query_set().filter(*args, **kwargs)
    def aggregate(self, *args, **kwargs):
        return self.get_query_set().aggregate(*args, **kwargs)
    def annotate(self, *args, **kwargs):
        return self.get_query_set().annotate(*args, **kwargs)
    def complex_filter(self, *args, **kwargs):
        return self.get_query_set().complex_filter(*args, **kwargs)
    def exclude(self, *args, **kwargs):
        return self.get_query_set().exclude(*args, **kwargs)
    def in_bulk(self, *args, **kwargs):
        return self.get_query_set().in_bulk(*args, **kwargs)
    def iterator(self, *args, **kwargs):
        return self.get_query_set().iterator(*args, **kwargs)
    def latest(self, *args, **kwargs):
        return self.get_query_set().latest(*args, **kwargs)
    def order_by(self, *args, **kwargs):
        return self.get_query_set().order_by(*args, **kwargs)
    def select_related(self, *args, **kwargs):
        return self.get_query_set().select_related(*args, **kwargs)
    def values(self, *args, **kwargs):
        return self.get_query_set().values(*args, **kwargs)
    def values_list(self, *args, **kwargs):
        return self.get_query_set().values_list(*args, **kwargs)
    def update(self, *args, **kwargs):
        return self.get_query_set().update(*args, **kwargs)
    def reverse(self, *args, **kwargs):
        return self.get_query_set().reverse(*args, **kwargs)
    def defer(self, *args, **kwargs):
        return self.get_query_set().defer(*args, **kwargs)
    def only(self, *args, **kwargs):
        return self.get_query_set().only(*args, **kwargs)
    def using(self, *args, **kwargs):
        return self.get_query_set().using(*args, **kwargs)
    def exists(self, *args, **kwargs):
        return self.get_query_set().exists(*args, **kwargs)
    # Internal helpers used by the model layer; not part of the public API.
    def _insert(self, values, **kwargs):
        return insert_query(self.model, values, **kwargs)
    def _update(self, values, **kwargs):
        return self.get_query_set()._update(values, **kwargs)
    def raw(self, raw_query, params=None, *args, **kwargs):
        return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)
class ManagerDescriptor(object):
    """Descriptor that makes managers accessible only via the model class.

    For example, ``Poll.objects`` works, but ``poll_obj.objects`` raises
    AttributeError.
    """
    def __init__(self, manager):
        self.manager = manager
    def __get__(self, instance, type=None):
        # Fix: identity check instead of ``!= None`` -- equality may invoke a
        # custom __eq__/__ne__ on the instance, and PEP 8 mandates ``is not``.
        if instance is not None:
            raise AttributeError("Manager isn't accessible via %s instances" % type.__name__)
        return self.manager
class EmptyManager(Manager):
    """Manager whose queries always operate on an empty queryset."""
    def get_query_set(self):
        # Every proxy method on Manager builds from get_query_set(), so
        # returning the empty queryset here makes all of them return nothing.
        return self.get_empty_query_set()
|
agpl-3.0
|
dstufft/html5lib-python
|
html5lib/filters/optionaltags.py
|
1727
|
10500
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
    """Token-stream filter that drops start/end tags the HTML5 spec marks
    optional (e.g. </li> before another <li>), shrinking serialized output
    without changing how a conforming parser reads it."""
    def slider(self):
        """Yield (previous, current, next) triples over self.source, padding
        the ends with None."""
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        yield previous2, previous1, None
    def __iter__(self):
        """Re-emit the token stream, skipping omissible start/end tags."""
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # A start tag with attributes can never be omitted.
                if (token["data"] or
                    not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token
    def is_optional_start(self, tagname, previous, next):
        """Return True when *tagname*'s start tag may be omitted given the
        surrounding tokens, per the HTML5 optional-tags rules."""
        type = next and next["type"] or None
        # NOTE(review): ``in`` on a string matches substrings ('ht', 'ml',
        # ...), not just 'html' -- probably intended as ``== 'html'``;
        # harmless only while no such tag names occur in the stream.
        if tagname in 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceeded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceeded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                  previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False
    def is_optional_end(self, tagname, next):
        """Return True when *tagname*'s end tag may be omitted given the next
        token, per the HTML5 optional-tags rules."""
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li element's end tag may be omitted if the li element is
            # immediately followed by another li element or if there is
            # no more content in the parent element.
            # An optgroup element's end tag may be omitted if the optgroup
            # element is immediately followed by another optgroup element,
            # or if there is no more content in the parent element.
            # A tr element's end tag may be omitted if the tr element is
            # immediately followed by another tr element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by an address, article, aside,
            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
            # nav, ol, p, pre, section, table, or ul, element, or if
            # there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an <code>optgroup</code>
            # element, or if there is no more content in the parent
            # element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt element's end tag may be omitted if the rt element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            # An rp element's end tag may be omitted if the rp element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td element's end tag may be omitted if the td element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            # A th element's end tag may be omitted if the th element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
|
mit
|
meziti/bigbullions-test
|
p2pool/bitcoin/p2p.py
|
12
|
6325
|
'''
Implementation of Bitcoin's p2p protocol
'''
import random
import sys
import time
from twisted.internet import protocol
import p2pool
from . import data as bitcoin_data
from p2pool.util import deferral, p2protocol, pack, variable
class Protocol(p2protocol.Protocol):
    """Bitcoin p2p wire-protocol client (NOTE: this is Python 2 code).
    Each ``message_*`` class attribute is a pack.ComposedType describing the
    payload of one protocol message; the base class (p2protocol.Protocol,
    defined elsewhere) presumably uses these to decode incoming payloads into
    keyword arguments for the matching ``handle_*`` method and to provide the
    corresponding ``send_*`` methods -- TODO confirm against p2protocol.
    """
    def __init__(self, net):
        # 1000000 = maximum payload size; trailing bytes after the parsed
        # structure are tolerated rather than treated as an error.
        p2protocol.Protocol.__init__(self, net.P2P_PREFIX, 1000000, ignore_trailing_payload=True)
        self.net = net
    def connectionMade(self):
        # Start the handshake: advertise protocol version 70002, our and the
        # peer's address info, and a random nonce.
        self.send_version(
            version=70002,
            services=1,
            time=int(time.time()),
            addr_to=dict(
                services=1,
                address=self.transport.getPeer().host,
                port=self.transport.getPeer().port,
            ),
            addr_from=dict(
                services=1,
                address=self.transport.getHost().host,
                port=self.transport.getHost().port,
            ),
            nonce=random.randrange(2**64),
            sub_version_num='/P2Pool:%s/' % (p2pool.__version__,),
            start_height=0,
        )
    # Payload layout of the "version" handshake message.
    message_version = pack.ComposedType([
        ('version', pack.IntType(32)),
        ('services', pack.IntType(64)),
        ('time', pack.IntType(64)),
        ('addr_to', bitcoin_data.address_type),
        ('addr_from', bitcoin_data.address_type),
        ('nonce', pack.IntType(64)),
        ('sub_version_num', pack.VarStrType()),
        ('start_height', pack.IntType(32)),
    ])
    def handle_version(self, version, services, time, addr_to, addr_from, nonce, sub_version_num, start_height):
        # Acknowledge the peer's version. (The `time` parameter shadows the
        # module-level `time` import within this method body.)
        self.send_verack()
    message_verack = pack.ComposedType([])
    def handle_verack(self):
        # Handshake complete: wire up request/reply matchers for block and
        # header fetches, notify the factory, and start a 30s keepalive ping.
        self.get_block = deferral.ReplyMatcher(lambda hash: self.send_getdata(requests=[dict(type='block', hash=hash)]))
        self.get_block_header = deferral.ReplyMatcher(lambda hash: self.send_getheaders(version=1, have=[], last=hash))
        if hasattr(self.factory, 'resetDelay'):
            self.factory.resetDelay()
        if hasattr(self.factory, 'gotConnection'):
            self.factory.gotConnection(self)
        self.pinger = deferral.RobustLoopingCall(self.send_ping, nonce=1234)
        self.pinger.start(30)
    message_inv = pack.ComposedType([
        ('invs', pack.ListType(pack.ComposedType([
            ('type', pack.EnumType(pack.IntType(32), {1: 'tx', 2: 'block'})),
            ('hash', pack.IntType(256)),
        ]))),
    ])
    def handle_inv(self, invs):
        # Inventory announcements: fetch transactions immediately; for blocks,
        # only signal the factory's new_block event with the announced hash.
        for inv in invs:
            if inv['type'] == 'tx':
                self.send_getdata(requests=[inv])
            elif inv['type'] == 'block':
                self.factory.new_block.happened(inv['hash'])
            else:
                print 'Unknown inv type', inv
    message_getdata = pack.ComposedType([
        ('requests', pack.ListType(pack.ComposedType([
            ('type', pack.EnumType(pack.IntType(32), {1: 'tx', 2: 'block'})),
            ('hash', pack.IntType(256)),
        ]))),
    ])
    message_getblocks = pack.ComposedType([
        ('version', pack.IntType(32)),
        ('have', pack.ListType(pack.IntType(256))),
        ('last', pack.PossiblyNoneType(0, pack.IntType(256))),
    ])
    message_getheaders = pack.ComposedType([
        ('version', pack.IntType(32)),
        ('have', pack.ListType(pack.IntType(256))),
        ('last', pack.PossiblyNoneType(0, pack.IntType(256))),
    ])
    message_getaddr = pack.ComposedType([])
    message_addr = pack.ComposedType([
        ('addrs', pack.ListType(pack.ComposedType([
            ('timestamp', pack.IntType(32)),
            ('address', bitcoin_data.address_type),
        ]))),
    ])
    def handle_addr(self, addrs):
        # Peer address gossip is intentionally ignored.
        for addr in addrs:
            pass
    message_tx = pack.ComposedType([
        ('tx', bitcoin_data.tx_type),
    ])
    def handle_tx(self, tx):
        self.factory.new_tx.happened(tx)
    message_block = pack.ComposedType([
        ('block', bitcoin_data.block_type),
    ])
    def handle_block(self, block):
        # Hash the header with the network-specific function rather than
        # plain hash256 (see the commented-out original line below).
        #block_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(block['header']))
        block_hash = self.net.BLOCKHASH_FUNC(bitcoin_data.block_header_type.pack(block['header']))
        # A full block also answers any pending header request for this hash.
        self.get_block.got_response(block_hash, block)
        self.get_block_header.got_response(block_hash, block['header'])
    message_headers = pack.ComposedType([
        ('headers', pack.ListType(bitcoin_data.block_type)),
    ])
    def handle_headers(self, headers):
        # Each entry is parsed with block_type; the actual header is its
        # 'header' field (presumably the tx list is empty -- TODO confirm).
        for header in headers:
            header = header['header']
            header_hash = self.net.BLOCKHASH_FUNC(bitcoin_data.block_header_type.pack(header))
            self.get_block_header.got_response(header_hash, header)
            #self.get_block_header.got_response(bitcoin_data.hash256(bitcoin_data.block_header_type.pack(header)), header)
        self.factory.new_headers.happened([header['header'] for header in headers])
    message_ping = pack.ComposedType([
        ('nonce', pack.IntType(64)),
    ])
    def handle_ping(self, nonce):
        # Echo the nonce back so the peer knows we are alive.
        self.send_pong(nonce=nonce)
    message_pong = pack.ComposedType([
        ('nonce', pack.IntType(64)),
    ])
    def handle_pong(self, nonce):
        pass
    message_alert = pack.ComposedType([
        ('message', pack.VarStrType()),
        ('signature', pack.VarStrType()),
    ])
    def handle_alert(self, message, signature):
        pass # print 'ALERT:', (message, signature)
    def connectionLost(self, reason):
        # Tell the factory the link is gone and stop the keepalive timer.
        if hasattr(self.factory, 'gotConnection'):
            self.factory.gotConnection(None)
        if hasattr(self, 'pinger'):
            self.pinger.stop()
        if p2pool.DEBUG:
            print >>sys.stderr, 'Bitcoin connection lost. Reason:', reason.getErrorMessage()
class ClientFactory(protocol.ReconnectingClientFactory):
    """Reconnecting Twisted factory that maintains a single bitcoind link.

    Exposes the current connection through a Variable and broadcasts new
    blocks, transactions and headers through Event objects.
    """
    protocol = Protocol
    maxDelay = 1

    def __init__(self, net):
        self.net = net
        # Observable event streams fired by the Protocol handlers.
        self.new_headers = variable.Event()
        self.new_tx = variable.Event()
        self.new_block = variable.Event()
        # Current live connection, or None while disconnected.
        self.conn = variable.Variable(None)

    def buildProtocol(self, addr):
        proto = self.protocol(self.net)
        proto.factory = self
        return proto

    def gotConnection(self, conn):
        # Called by Protocol with the connection on verack, and with None
        # when the connection is lost.
        self.conn.set(conn)

    def getProtocol(self):
        # Presumably waits until a non-None connection is available -- see
        # variable.Variable.get_not_none.
        return self.conn.get_not_none()
|
gpl-3.0
|
diorcety/intellij-community
|
python/testData/inspections/PyNumpyType/Dtype.py
|
79
|
1189
|
# NOTE(review): this block is a trimmed copy of numpy.ma.core.maximum_fill_value
# used as IDE-inspection test data; the names it references
# (_recursive_extremum_fill_value, max_filler, ntypes, np, and the Python 2
# builtin `long`) are not defined in this file.
def maximum_fill_value(obj):
    """
    Return the minimum value that can be represented by the dtype of an object.
    This function is useful for calculating a fill value suitable for
    taking the maximum of an array with a given dtype.
    Parameters
    ----------
    obj : dtype
        An object that can be queried for it's numeric type.
    Returns
    -------
    val : scalar
        The minimum representable value.
    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.
    """
    errmsg = "Unsuitable type for calculating maximum."
    if hasattr(obj, 'dtype'):
        # Anything carrying a dtype (arrays, numpy scalars) is delegated to
        # the recursive helper, which handles structured dtypes.
        return _recursive_extremum_fill_value(obj.dtype, max_filler)
    elif isinstance(obj, float):
        return max_filler[ntypes.typeDict['float_']]
    elif isinstance(obj, int):
        return max_filler[ntypes.typeDict['int_']]
    elif isinstance(obj, long):
        # Python 2 only: `long` maps to the unsigned-int filler.
        return max_filler[ntypes.typeDict['uint']]
    elif isinstance(obj, np.dtype):
        return max_filler[obj]
    else:
        raise TypeError(errmsg)
# Example invocations used by the inspection test; not runnable as-is here.
a = np.int8()
maximum_fill_value(a)
maximum_fill_value(('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
maximum_fill_value("i8,f8,S5")
|
apache-2.0
|
gilbert-yuan/gooderp_addons
|
buy/wizard/buy_summary_goods_wizard.py
|
6
|
2109
|
# -*- coding: utf-8 -*-
from datetime import date
from odoo import models, fields, api
from odoo.exceptions import UserError
class BuySummaryGoodsWizard(models.TransientModel):
    """Wizard collecting filters for the purchase summary (by goods) report.

    A transient (non-persisted) Odoo model: the user picks a date range and
    optional partner/goods/category/warehouse filters, then ``button_ok``
    opens the ``buy.summary.goods`` report with those filters in its context.
    """
    _name = 'buy.summary.goods.wizard'
    _description = u'采购汇总表(按商品)向导'
    @api.model
    def _default_date_start(self):
        # Default report start date: the company's configured start date.
        return self.env.user.company_id.start_date
    @api.model
    def _default_date_end(self):
        # Default report end date: today.
        return date.today()
    # Date range of the summary; defaults span company start date .. today.
    date_start = fields.Date(u'开始日期', default=_default_date_start,
                             help=u'报表汇总的开始日期,默认为公司启用日期')
    date_end = fields.Date(u'结束日期', default=_default_date_end,
                           help=u'报表汇总的结束日期,默认为当前日期')
    # Optional filters: supplier, goods, goods category, warehouse.
    partner_id = fields.Many2one('partner', u'供应商',
                                 help=u'只统计选定的供应商')
    goods_id = fields.Many2one('goods', u'商品',
                               help=u'只统计选定的商品')
    goods_categ_id = fields.Many2one('core.category', u'商品类别',
                                     help=u'只统计选定的商品类别')
    warehouse_dest_id = fields.Many2one('warehouse', u'仓库',
                                        help=u'只统计选定的仓库')
    company_id = fields.Many2one(
        'res.company',
        string=u'公司',
        change_default=True,
        default=lambda self: self.env['res.company']._company_default_get())
    @api.multi
    def button_ok(self):
        """Validate the date range and return the report window action.

        Raises UserError if the start date is after the end date.
        """
        self.ensure_one()
        if self.date_end < self.date_start:
            raise UserError(u'开始日期不能大于结束日期!')
        read_field = ['date_start', 'date_end', 'partner_id',
                      'goods_id', 'goods_categ_id', 'warehouse_dest_id']
        # Pass the chosen filter values to the report via the action context.
        return {
            'name': u'采购汇总表(按商品)',
            'view_mode': 'tree',
            'res_model': 'buy.summary.goods',
            'type': 'ir.actions.act_window',
            'context': self.read(read_field)[0],
            'limit': 65535,
        }
|
agpl-3.0
|
nwjs/chromium.src
|
tools/grit/grit/format/gen_predetermined_ids.py
|
3
|
4798
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A tool to generate a predetermined resource ids file that can be used as an
input to grit via the -p option. This is meant to be run manually every once in
a while and its output checked in. See tools/gritsettings/README.md for details.
"""
from __future__ import print_function
import os
import re
import sys
# Regular expression for parsing the #define macro format. Matches both the
# version of the macro with whitelist support and the one without:
#   #define IDS_FOO_MESSAGE 1234
#   #define IDS_FOO_MESSAGE (::ui::WhitelistedResource<1234>(), 1234)
RESOURCE_EXTRACT_REGEX = re.compile(r'^#define (\S*).* (\d+)\)?$', re.MULTILINE)
# Matches "Resource=<id>" lines produced by Chrome's --print-resource-ids.
ORDERED_RESOURCE_IDS_REGEX = re.compile(r'^Resource=(\d*)$', re.MULTILINE)
def _GetResourceNameIdPairsIter(string_to_scan):
  """Iterate over the (resource name, id string) pairs in a GRIT header.

  Scans the input for lines of the form "#define NAME ID".

  Args:
    string_to_scan: The input string to scan.

  Returns:
    An iterator of (name, id) tuples of strings.
  """
  return (match.group(1, 2)
          for match in RESOURCE_EXTRACT_REGEX.finditer(string_to_scan))
def _ReadOrderedResourceIds(path):
  """Reads ordered resource ids from the given file.

  The file is expected to contain "Resource=<id>" lines in the format
  produced by running Chrome with the --print-resource-ids command line.

  Args:
    path: File path to read resource ids from.

  Returns:
    A list of resource ids, in file order.
  """
  with open(path, "r") as f:
    contents = f.read()
  return [int(match.group(1))
          for match in ORDERED_RESOURCE_IDS_REGEX.finditer(contents)]
def GenerateResourceMapping(original_resources, ordered_resource_ids):
  """Generates a resource mapping from the ordered ids and the original mapping.

  The returned dict assigns new ids to ordered_resource_ids, numerically
  increasing from 101.

  Args:
    original_resources: A dict of original resource ids to resource names.
    ordered_resource_ids: An array of ordered resource ids.

  Returns:
    A dict of new resource ids to resource names.

  Raises:
    KeyError: If an ordered id is not present in original_resources.
  """
  # 101 is used as the starting value since other parts of GRIT require it to
  # be the minimum (e.g. rc_header.py) based on Windows resource numbering.
  # enumerate(..., start=101) replaces the hand-rolled next_id counter.
  return {
      new_id: original_resources[original_id]
      for new_id, original_id in enumerate(ordered_resource_ids, start=101)
  }
def ReadResourceIdsFromFile(file, original_resources):
  """Reads resource ids from a GRIT-produced header file.

  Args:
    file: An open GRIT-produced header file to read from.
    original_resources: Dict of resource ids to resource names, updated
        in place.
  """
  contents = file.read()
  for name, id_string in _GetResourceNameIdPairsIter(contents):
    original_resources[int(id_string)] = name
def _ReadOriginalResourceIds(out_dir):
  """Reads resource ids from GRIT header files in the specified directory.

  Args:
    out_dir: A Chrome build output directory (e.g. out/gn) to scan.

  Returns:
    A dict of resource ids to resource names.
  """
  original_resources = {}
  # Generated headers live under <out_dir>/gen; os.path.join keeps the path
  # portable instead of hard-coding the '/' separator. Directory names from
  # os.walk are unused, hence the `_` placeholder.
  for root, _, filenames in os.walk(os.path.join(out_dir, 'gen')):
    for filename in filenames:
      if filename.endswith(('_resources.h', '_settings.h', '_strings.h')):
        with open(os.path.join(root, filename), "r") as f:
          ReadResourceIdsFromFile(f, original_resources)
  return original_resources
def _GeneratePredeterminedIdsFile(ordered_resources_file, out_dir):
  """Prints a predetermined ids file (name/id pairs) to stdout.

  Args:
    ordered_resources_file: File path to read ordered resource ids from.
    out_dir: A Chrome build output directory (e.g. out/gn) to scan.
  """
  mapping = GenerateResourceMapping(
      _ReadOriginalResourceIds(out_dir),
      _ReadOrderedResourceIds(ordered_resources_file))
  # Emit "<name> <id>" lines in increasing id order.
  for res_id in sorted(mapping):
    print(mapping[res_id], res_id)
def main(argv):
  """Entry point; argv is [ordered_resources_file, out_dir]."""
  if len(argv) != 2:
    print("usage: gen_predetermined_ids.py <ordered_resources_file> <out_dir>")
    sys.exit(1)
  _GeneratePredeterminedIdsFile(argv[0], argv[1])
# Standard entry-point guard (runs only when executed as a script).
if __name__ == '__main__':
  main(sys.argv[1:])
|
bsd-3-clause
|
moio/spacewalk
|
proxy/proxy/rhnConstants.py
|
3
|
1463
|
#!/usr/bin/python
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
##
# rhnDefines.py - Constants used throughout the Spacewalk Proxy.
#-----------------------------------------------------------------------------
#
"""Constants used by the Spacewalk Proxy"""
# HTTP Headers -- custom X-RHN-* header names exchanged between the proxy
# components, plus the standard headers the proxy inspects/rewrites.
HEADER_ACTUAL_URI = 'X-RHN-ActualURI'
HEADER_EFFECTIVE_URI = 'X-RHN-EffectiveURI'
HEADER_CHECKSUM = 'X-RHN-Checksum'
HEADER_LOCATION = 'Location'
HEADER_CONTENT_LENGTH = 'Content-Length'
HEADER_RHN_REDIRECT = 'X-RHN-Redirect'
HEADER_RHN_ORIG_LOC = 'X-RHN-OriginalLocation'
# HTTP Schemes
SCHEME_HTTP = 'http'
SCHEME_HTTPS = 'https'
# These help us match URIs when kickstarting through a Proxy.
# (The -cksm variant presumably carries a checksum component -- see the
# broker code that consumes these prefixes to confirm.)
URI_PREFIX_KS = '/ty/'
URI_PREFIX_KS_CHECKSUM = '/ty-cksm/'
# Component Constants -- identifiers for the two proxy personalities.
COMPONENT_BROKER = 'proxy.broker'
COMPONENT_REDIRECT = 'proxy.redirect'
gpl-2.0
|
jakevdp/altair
|
tools/schemapi/codegen.py
|
1
|
7382
|
"""Code generation utilities"""
from .utils import SchemaInfo, is_valid_identifier, indent_docstring, indent_arglist
import textwrap
import re
class CodeSnippet(object):
    """Wrapper whose repr() evaluates to the wrapped source text.

    Useful for injecting a bare identifier or expression into generated
    code where a normal object would be rendered as a quoted literal.
    """
    def __init__(self, code):
        # Stored verbatim; repr() must return it without quoting.
        self.code = code

    def __repr__(self):
        return self.code
def _get_args(info):
    """Return the argument spec for building the generated __init__ function.

    Given a SchemaInfo, returns a 5-tuple
    ``(nonkeyword, required, kwds, invalid_kwds, additional)``:
    whether positional *args are accepted, the required keyword names, the
    optional keyword names, property names that are not valid Python
    identifiers, and whether arbitrary extra keywords are allowed.
    """
    # TODO: - set additional properties correctly
    #       - handle patternProperties etc.
    # TODO: specialize for anyOf/oneOf?
    required = set()
    kwds = set()
    invalid_kwds = set()
    if info.is_allOf():
        # Recursively merge the argument specs of every child schema.
        child_specs = [_get_args(child) for child in info.allOf]
        nonkeyword = all(spec[0] for spec in child_specs)
        required = set().union(*(spec[1] for spec in child_specs))
        kwds = set().union(*(spec[2] for spec in child_specs)) - required
        invalid_kwds = set().union(*(spec[3] for spec in child_specs))
        additional = all(spec[4] for spec in child_specs)
    elif info.is_empty() or info.is_compound():
        nonkeyword = True
        additional = True
    elif info.is_value():
        nonkeyword = True
        additional = False
    elif info.is_object():
        # Properties that are not valid identifiers can only be passed
        # through **kwds, so they are reported separately.
        invalid_kwds = ({p for p in info.required if not is_valid_identifier(p)} |
                        {p for p in info.properties if not is_valid_identifier(p)})
        required = {p for p in info.required if is_valid_identifier(p)}
        kwds = {p for p in info.properties if is_valid_identifier(p)} - required
        nonkeyword = False
        additional = True
        #additional = info.additionalProperties or info.patternProperties
    else:
        raise ValueError("Schema object not understood")
    return (nonkeyword, required, kwds, invalid_kwds, additional)
class SchemaGenerator(object):
    """Class that defines methods for generating code from schemas
    Parameters
    ----------
    classname : string
        The name of the class to generate
    schema : dict
        The dictionary defining the schema class
    rootschema : dict (optional)
        The root schema for the class
    basename : string (default: "SchemaBase")
        The name of the base class to use in the class definition
    schemarepr : CodeSnippet or object, optional
        An object whose repr will be used in the place of the explicit schema.
        This can be useful, for example, when the generated code should reference
        a predefined schema object. The user must ensure that the schema within
        the evaluated code is identical to the schema used to generate the code.
    rootschemarepr : CodeSnippet or object, optional
        An object whose repr will be used in the place of the explicit root
        schema.
    **kwargs : dict
        Additional keywords for derived classes.
    """
    # Template for the emitted class. {schema!r}/{rootschema!r} insert the
    # repr of the schema objects, so passing a CodeSnippet makes the output
    # reference a name instead of an inline dict literal.
    schema_class_template = textwrap.dedent('''
    class {classname}({basename}):
        """{docstring}"""
        _schema = {schema!r}
        _rootschema = {rootschema!r}
        {init_code}
    ''')
    # Template for the generated __init__; argument-list wrapping is handled
    # by indent_arglist() in init_code() below.
    init_template = textwrap.dedent("""
    def __init__({arglist}):
        super({classname}, self).__init__({super_arglist})
    """).lstrip()
    def _process_description(self, description):
        # Hook point: subclasses may override to rewrite schema descriptions.
        return description
    def __init__(self, classname, schema, rootschema=None,
                 basename='SchemaBase', schemarepr=None, rootschemarepr=None,
                 nodefault=(), **kwargs):
        self.classname = classname
        self.schema = schema
        self.rootschema = rootschema
        self.basename = basename
        self.schemarepr = schemarepr
        self.rootschemarepr = rootschemarepr
        # Property names emitted as positional (no `=Undefined`) arguments.
        self.nodefault = nodefault
        self.kwargs = kwargs
    def schema_class(self):
        """Generate code for a schema class"""
        # Fall back to the schema itself when no explicit root is given.
        rootschema = self.rootschema if self.rootschema is not None else self.schema
        schemarepr = self.schemarepr if self.schemarepr is not None else self.schema
        rootschemarepr = self.rootschemarepr
        if rootschemarepr is None:
            if rootschema is self.schema:
                # Reference the class's own _schema instead of repeating it.
                rootschemarepr = CodeSnippet('_schema')
            else:
                rootschemarepr = rootschema
        return self.schema_class_template.format(
            classname=self.classname,
            basename=self.basename,
            schema=schemarepr,
            rootschema=rootschemarepr,
            docstring=self.docstring(indent=4),
            init_code=self.init_code(indent=4),
            **self.kwargs
        )
    def docstring(self, indent=0):
        """Build the generated class's numpydoc-style docstring."""
        # TODO: add a general description at the top, derived from the schema.
        # for example, a non-object definition should list valid type, enum
        # values, etc.
        # TODO: use _get_args here for more information on allOf objects
        info = SchemaInfo(self.schema, self.rootschema)
        doc = ["{} schema wrapper".format(self.classname),
               '',
               info.medium_description]
        if info.description:
            doc += self._process_description( #remove condition description
                re.sub(r"\n\{\n(\n|.)*\n\}",'',info.description)).splitlines()
        if info.properties:
            # Document required args first, then keywords, then properties
            # that are not valid identifiers.
            nonkeyword, required, kwds, invalid_kwds, additional = _get_args(info)
            doc += ['',
                    'Attributes',
                    '----------',
                    '']
            for prop in sorted(required) + sorted(kwds) + sorted(invalid_kwds):
                propinfo = info.properties[prop]
                doc += ["{} : {}".format(prop, propinfo.short_description),
                        " {}".format(self._process_description(propinfo.description))]
        if len(doc) > 1:
            doc += ['']
        return indent_docstring(doc, indent_level=indent, width=100, lstrip=True)
    def init_code(self, indent=0):
        """Return code suitable for the __init__ function of a Schema class"""
        info = SchemaInfo(self.schema, rootschema=self.rootschema)
        nonkeyword, required, kwds, invalid_kwds, additional =_get_args(info)
        nodefault=set(self.nodefault)
        # Names forced positional via `nodefault` must not also be emitted
        # as `name=Undefined` keywords.
        required -= nodefault
        kwds -= nodefault
        args = ['self']
        super_args = []
        if nodefault:
            args.extend(sorted(nodefault))
        elif nonkeyword:
            args.append('*args')
            super_args.append('*args')
        args.extend('{}=Undefined'.format(p)
                    for p in sorted(required) + sorted(kwds))
        super_args.extend('{0}={0}'.format(p)
                          for p in sorted(nodefault) + sorted(required) + sorted(kwds))
        if additional:
            args.append('**kwds')
            super_args.append('**kwds')
        # Magic alignment offsets for the wrapped argument lists in
        # init_template -- TODO: derive from the template instead of
        # hard-coding.
        arg_indent_level = 9 + indent
        super_arg_indent_level = 23 + len(self.classname) + indent
        initfunc = self.init_template.format(classname=self.classname,
                                             arglist=indent_arglist(args, indent_level=arg_indent_level),
                                             super_arglist=indent_arglist(super_args, indent_level=super_arg_indent_level))
        if indent:
            # Re-indent the whole generated function to the requested level.
            initfunc = ('\n' + indent * ' ').join(initfunc.splitlines())
        return initfunc
|
bsd-3-clause
|
Samweli/inasafe
|
realtime/earthquake/shake_data.py
|
4
|
7459
|
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **Getting shake data from local storage**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'akbargumbira@gmail.com'
__version__ = '3.0'
__date__ = '14/12/2014'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import os
import shutil
from datetime import datetime
import logging
import filecmp
from realtime.utilities import is_event_id
from realtime.utilities import (
shakemap_extract_dir,
make_directory,
realtime_logger_name)
from realtime.exceptions import (
FileNotFoundError,
EventIdError,
EventValidationError,
CopyError,
EmptyShakeDirectoryError)
LOGGER = logging.getLogger(realtime_logger_name())
class ShakeData(object):
    """A class for reading data from shake files.
    The shake files currently located under BASE_PATH directory in a folder
    named by the event id (which represent the timestamp of the event of the
    shake)
    There are numerous files in that directory but there is only really one
    that we are interested in:
    * grid.xml - which contains all the metadata pertaining to the event
    It's located at output/grid.xml under each event directory
    """
    def __init__(self,
                 working_dir,
                 event=None,
                 force_flag=False):
        """Constructor for the LocalShakeData class.
        :param working_dir: The working directory where all the shakemaps are
            located.
        :type working_dir: str
        :param event: A string representing the event id that this raster is
            associated with. e.g. 20110413170148 (Optional).
            **If no event id is supplied, the latest event id will be
            assigned.**
        :type event: str
        :param force_flag: The flag if we want to force move data from
            working_dir to extracted dir.
        :type force_flag: bool
        """
        self.event_id = event
        self.working_dir = working_dir
        self.force_flag = force_flag
        self.input_file_name = 'grid.xml'
        if self.event_id is None:
            # The try/except that just re-raises is redundant but documents
            # that these exceptions are expected to propagate to the caller.
            try:
                self.get_latest_event_id()
            except (EmptyShakeDirectoryError, EventIdError):
                raise
        else:
            # If we fetched it above using get_latest_event_id we assume it is
            # already validated.
            # NOTE(review): validate_event() returns a bool and its result is
            # discarded here; as written it never raises EventValidationError,
            # so an invalid explicit event id passes silently -- confirm intent.
            try:
                self.validate_event()
            except EventValidationError:
                raise
        # If event_id is still None after all the above, moan....
        if self.event_id is None:
            message = ('No id was passed to the constructor and the latest '
                       'id could not be retrieved from the server.')
            LOGGER.exception('ShakeData initialisation failed')
            raise EventIdError(message)
    def validate_event(self):
        """Check that the event exists in working dir.
        :return: True if valid, False if not.
        :rtype: bool
        """
        event_path = os.path.join(
            self.working_dir, self.event_id)
        return os.path.exists(event_path)
    @staticmethod
    def get_list_event_ids_from_folder(working_dir):
        """Get all event id indicated by folder in working dir."""
        if os.path.exists(working_dir):
            directories = os.listdir(working_dir)
        else:
            LOGGER.debug(
                'Directory %s does not exist, return None' % working_dir)
            return None
        # Filter the dirs to only contain valid event dirs
        valid_dirs = []
        for directory in directories:
            if is_event_id(directory):
                valid_dirs.append(directory)
        if len(valid_dirs) == 0:
            raise EmptyShakeDirectoryError(
                'The directory %s does not contain any shakemaps.' %
                working_dir)
        return valid_dirs
    def get_list_event_ids(self):
        # Convenience wrapper over the static folder scan for this instance's
        # working directory.
        return ShakeData.get_list_event_ids_from_folder(self.working_dir)
    def get_latest_event_id(self):
        """Return latest event id."""
        try:
            event_ids = self.get_list_event_ids()
        except EmptyShakeDirectoryError:
            raise
        # Current time encoded the same way as event ids (YYYYMMDDHHMMSS),
        # so ids can be compared numerically against "now".
        now = datetime.now()
        now = int(
            '%04d%02d%02d%02d%02d%02d' %
            (now.year, now.month, now.day, now.hour, now.minute, now.second))
        if event_ids is not None:
            event_ids.sort()
        # Pop from the (ascending) list until we find an id that is not in
        # the future; raise if the list is exhausted.
        latest_event_id = now + 1
        while int(latest_event_id) > now:
            if len(event_ids) < 1:
                raise EventIdError('Latest Event Id could not be obtained')
            latest_event_id = event_ids.pop()
        self.event_id = latest_event_id
        return self.event_id
    def extract_dir(self):
        """A helper method to get the path to the extracted datasets.
        :return: A string representing the absolute local filesystem path to
            the unzipped shake event dir. e.g.
            :file:`/tmp/inasafe/realtime/shakemaps-extracted/20131105060809`
        :rtype: str
        :raises: Any exceptions will be propagated.
        """
        return os.path.join(shakemap_extract_dir(), self.event_id)
    def extract(self, force_flag=False):
        """Checking the grid.xml file in the machine, if found use it.
        :param force_flag: force flag to extract.
        :type force_flag: bool
        :return: a string containing the grid.xml paths e.g.::
            grid_xml = local_shake_data.extract()
            print grid_xml
            /tmp/inasafe/realtime/shakemaps-extracted/20131105060809/grid.xml
        """
        final_grid_xml_file = os.path.join(self.extract_dir(), 'grid.xml')
        # move grid.xml from working dir to the extracted dir
        local_path = os.path.join(self.working_dir, self.event_id)
        source_grid_xml = os.path.join(local_path, 'output', 'grid.xml')
        if force_flag or self.force_flag:
            # Forced: discard any previously extracted copy first.
            self.remove_extracted_files()
        elif os.path.exists(final_grid_xml_file):
            # Reuse an existing extracted copy only if it is byte-identical
            # to the source; otherwise fall through and copy again.
            if filecmp.cmp(final_grid_xml_file, source_grid_xml):
                return final_grid_xml_file
        # if it is not identical, copy again
        if not os.path.exists(self.extract_dir()):
            make_directory(self.extract_dir())
        if not os.path.exists(source_grid_xml):
            # NOTE: FileNotFoundError here is the project-specific exception
            # from realtime.exceptions, not the Python 3 builtin.
            raise FileNotFoundError(
                'The output does not contain %s file.' %
                source_grid_xml)
        # move the file we care about to the top of the extract dir
        shutil.copyfile(source_grid_xml, final_grid_xml_file)
        if not os.path.exists(final_grid_xml_file):
            raise CopyError('Error copying grid.xml')
        return final_grid_xml_file
    def remove_extracted_files(self):
        """Tidy up the filesystem by removing all extracted files
        for the given event instance.
        :raises: Any error e.g. file permission error will be raised.
        """
        extracted_dir = self.extract_dir()
        if os.path.isdir(extracted_dir):
            shutil.rmtree(extracted_dir)
|
gpl-3.0
|
jamalex/kolibri
|
kolibri/deployment/default/urls.py
|
2
|
1495
|
# -*- coding: utf-8 -*-
"""kolibri URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
.. moduleauthor:: Learning Equality <info@learningequality.org>
"""
from __future__ import absolute_import, print_function, unicode_literals
from django.conf.urls import include, url
from django.contrib import admin
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
urlpatterns = [
    # Site root: send users straight to the Learn plugin's landing page.
    url(r'^$', RedirectView.as_view(url=reverse_lazy('kolibri:learnplugin:learn'))),
    url(r'^admin/', include(admin.site.urls)),
    # Core and content URLconfs are mounted at the site root.
    url(r'', include('kolibri.core.urls')),
    url(r'', include('kolibri.content.urls')),
    # REST API endpoints, all namespaced under /api/.
    url(r'^api/', include('kolibri.auth.api_urls')),
    url(r'^api/', include('kolibri.content.api_urls')),
    url(r'^api/', include('kolibri.logger.api_urls')),
    url(r'^api/', include('kolibri.tasks.api_urls')),
    # Browsable-API login/logout views from Django REST framework.
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
|
mit
|
lindamar/ecclesi
|
env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py
|
515
|
5599
|
from __future__ import absolute_import
# Base Exceptions
class HTTPError(Exception):
    "Base exception used by this module."
    pass
class HTTPWarning(Warning):
    "Base warning used by this module."
    pass
class PoolError(HTTPError):
    "Base exception for errors caused within a pool."
    def __init__(self, pool, message):
        self.pool = pool
        HTTPError.__init__(self, "%s: %s" % (pool, message))
    def __reduce__(self):
        # For pickling purposes.
        # Reconstructs as cls(None, None): neither the pool nor the original
        # message survives a pickle round-trip.
        return self.__class__, (None, None)
class RequestError(PoolError):
    "Base exception for PoolErrors that have associated URLs."
    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)
    def __reduce__(self):
        # For pickling purposes.
        # Only the URL is preserved; pool and message are dropped.
        return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
    "Raised when SSL certificate fails in an HTTPS connection."
    pass
class ProxyError(HTTPError):
    "Raised when the connection to a proxy fails."
    pass
class DecodeError(HTTPError):
    "Raised when automatic decoding based on Content-Type fails."
    pass
class ProtocolError(HTTPError):
    "Raised when something unexpected happens mid-request/response."
    pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
# Leaf Exceptions
class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.
    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error
    """
    def __init__(self, pool, url, reason=None):
        self.reason = reason
        message = "Max retries exceeded with url: %s (Caused by %r)" % (
            url, reason)
        RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
    "Raised when an existing pool gets a request for a foreign host."
    def __init__(self, pool, url, retries=3):
        message = "Tried to open a foreign host with url: %s" % url
        RequestError.__init__(self, pool, url, message)
        self.retries = retries
class TimeoutStateError(HTTPError):
    """ Raised when passing an invalid state to a timeout """
    pass
class TimeoutError(HTTPError):
    """ Raised when a socket timeout error occurs.
    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
    pass
class ReadTimeoutError(TimeoutError, RequestError):
    "Raised when a socket timeout occurs while receiving data from a server"
    pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
    "Raised when a socket timeout occurs while connecting to a server"
    pass
class NewConnectionError(ConnectTimeoutError, PoolError):
    "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
    pass
class EmptyPoolError(PoolError):
    "Raised when a pool runs out of connections and no more are allowed."
    pass
class ClosedPoolError(PoolError):
    "Raised when a request enters a pool after the pool has been closed."
    pass
class LocationValueError(ValueError, HTTPError):
    "Raised when there is something wrong with a given URL input."
    pass
class LocationParseError(LocationValueError):
    "Raised when get_host or similar fails to parse the URL input."
    def __init__(self, location):
        message = "Failed to parse: %s" % location
        HTTPError.__init__(self, message)
        self.location = location
class ResponseError(HTTPError):
    "Used as a container for an error reason supplied in a MaxRetryError."
    # Message templates; SPECIFIC_ERROR is formatted with a status_code.
    GENERIC_ERROR = 'too many error responses'
    SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
    "Warned when perfoming security reducing actions"
    pass
class SubjectAltNameWarning(SecurityWarning):
    "Warned when connecting to a host with a certificate missing a SAN."
    pass
class InsecureRequestWarning(SecurityWarning):
    "Warned when making an unverified HTTPS request."
    pass
class SystemTimeWarning(SecurityWarning):
    "Warned when system time is suspected to be wrong"
    pass
class InsecurePlatformWarning(SecurityWarning):
    "Warned when certain SSL configuration is not available on a platform."
    pass
class SNIMissingWarning(HTTPWarning):
    "Warned when making a HTTPS request without SNI available."
    pass
class DependencyWarning(HTTPWarning):
    """
    Warned when an attempt is made to import a module with missing optional
    dependencies.
    """
    pass
class ResponseNotChunked(ProtocolError, ValueError):
    "Response needs to be chunked in order to read it as chunks."
    pass
class ProxySchemeUnknown(AssertionError, ValueError):
    "ProxyManager does not support the supplied scheme"
    # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
    def __init__(self, scheme):
        message = "Not supported proxy scheme %s" % scheme
        super(ProxySchemeUnknown, self).__init__(message)
class HeaderParsingError(HTTPError):
    "Raised by assert_header_parsing, but we convert it to a log.warning statement."
    def __init__(self, defects, unparsed_data):
        message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
        super(HeaderParsingError, self).__init__(message)
|
mit
|
WSULib/combine
|
tests/test_forms/test_field_mapper_form.py
|
1
|
1300
|
from django.test import TestCase
from core.forms import FieldMapperForm
from core.models import get_field_mapper_choices
class FieldMapperFormTestCase(TestCase):
    """Tests for FieldMapperForm and get_field_mapper_choices.

    As exercised below, python-type field mappers are only accepted when the
    ENABLE_PYTHON setting is 'true'; both sides of that switch are covered.
    """
    def test_python_prohibited(self):
        # Without ENABLE_PYTHON, a 'python' mapper must fail form validation.
        test_body = {
            'name': 'Test Field Mapper',
            'field_mapper_type': 'python',
        }
        form = FieldMapperForm(test_body)
        self.assertFalse(form.is_valid())
    def test_python_permitted(self):
        # With ENABLE_PYTHON='true', the same submission validates.
        with self.settings(ENABLE_PYTHON='true'):
            test_body = {
                'name': 'Test Field Mapper',
                'field_mapper_type': 'python',
            }
            form = FieldMapperForm(test_body)
            self.assertTrue(form.is_valid())
    def test_get_type_choices(self):
        # Default choices exclude the python snippet option.
        choices = get_field_mapper_choices()
        self.assertEqual(choices, [
            ('xml2kvp', 'XML to Key/Value Pair (XML2kvp)'),
            ('xslt', 'XSL Stylesheet')
        ])
    def test_get_python_type_choices(self):
        # With ENABLE_PYTHON='true', the python option is appended.
        with self.settings(ENABLE_PYTHON='true'):
            choices = get_field_mapper_choices()
            self.assertEqual(choices, [
                ('xml2kvp', 'XML to Key/Value Pair (XML2kvp)'),
                ('xslt', 'XSL Stylesheet'),
                ('python', 'Python Code Snippet')
            ])
|
mit
|
EvaSDK/sqlalchemy
|
lib/sqlalchemy/orm/__init__.py
|
70
|
7976
|
# orm/__init__.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Functional constructs for ORM configuration.
See the SQLAlchemy object relational tutorial and mapper configuration
documentation for an overview of how this module is used.
"""
from . import exc
from .mapper import (
Mapper,
_mapper_registry,
class_mapper,
configure_mappers,
reconstructor,
validates
)
from .interfaces import (
EXT_CONTINUE,
EXT_STOP,
PropComparator,
)
from .deprecated_interfaces import (
MapperExtension,
SessionExtension,
AttributeExtension,
)
from .util import (
aliased,
join,
object_mapper,
outerjoin,
polymorphic_union,
was_deleted,
with_parent,
with_polymorphic,
)
from .properties import ColumnProperty
from .relationships import RelationshipProperty
from .descriptor_props import (
ComparableProperty,
CompositeProperty,
SynonymProperty,
)
from .relationships import (
foreign,
remote,
)
from .session import (
Session,
object_session,
sessionmaker,
make_transient,
make_transient_to_detached
)
from .scoping import (
scoped_session
)
from . import mapper as mapperlib
from .query import AliasOption, Query, Bundle
from ..util.langhelpers import public_factory
from .. import util as _sa_util
from . import strategies as _strategies
def create_session(bind=None, **kwargs):
    r"""Create a new :class:`.Session`
    with no automation enabled by default.

    This function is used primarily for testing.   The usual
    route to :class:`.Session` creation is via its constructor
    or the :func:`.sessionmaker` function.

    :param bind: optional, a single Connectable to use for all
      database access in the created
      :class:`~sqlalchemy.orm.session.Session`.

    :param \*\*kwargs: optional, passed through to the
      :class:`.Session` constructor.

    :returns: an :class:`~sqlalchemy.orm.session.Session` instance

    The defaults of create_session() are the opposite of that of
    :func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are
    False, ``autocommit`` is True.  In this sense the session acts
    more like the "classic" SQLAlchemy 0.3 session with these.

    Usage::

      >>> from sqlalchemy.orm import create_session
      >>> session = create_session()

    It is recommended to use :func:`sessionmaker` instead of
    create_session().
    """
    # BUG FIX: docstring is now a raw string -- the ``\*`` sequences above
    # are invalid escape sequences in a regular string literal and raise
    # DeprecationWarning/SyntaxWarning on modern Pythons.
    kwargs.setdefault('autoflush', False)
    kwargs.setdefault('autocommit', True)
    kwargs.setdefault('expire_on_commit', False)
    return Session(bind=bind, **kwargs)
# Public factory for RelationshipProperty, exposed as sqlalchemy.orm.relationship().
relationship = public_factory(RelationshipProperty, ".orm.relationship")
def relation(*args, **kwargs):
    """Legacy synonym for :func:`relationship`."""
    return relationship(*args, **kwargs)
def dynamic_loader(argument, **kw):
    """Construct a dynamically-loading mapper property.

    Shorthand for calling :func:`relationship` with ``lazy='dynamic'``::

        dynamic_loader(SomeClass)
        # is the same as
        relationship(SomeClass, lazy="dynamic")

    See the section :ref:`dynamic_relationship` for more details
    on dynamic loading.
    """
    # Force the dynamic strategy, overriding any caller-supplied ``lazy``.
    options = dict(kw, lazy='dynamic')
    return relationship(argument, **options)
# Public factories for column-based and composite mapped properties.
column_property = public_factory(ColumnProperty, ".orm.column_property")
composite = public_factory(CompositeProperty, ".orm.composite")
def backref(name, **kwargs):
    """Create a back reference with explicit keyword arguments.

    The keyword arguments are the same ones accepted by
    :func:`relationship`.  Use the result as the ``backref`` argument of
    :func:`relationship` in place of a plain string, e.g.::

        'items':relationship(
            SomeItem, backref=backref('parent', lazy='subquery'))
    """
    # The ORM consumes this as a (name, options-dict) pair.
    return name, kwargs
def deferred(*columns, **kw):
    r"""Indicate a column-based mapped attribute that by default will
    not load unless accessed.

    :param \*columns: columns to be mapped.  This is typically a single
     :class:`.Column` object, however a collection is supported in order
     to support multiple columns mapped under the same attribute.

    :param \**kw: additional keyword arguments passed to
     :class:`.ColumnProperty`.

    .. seealso::

        :ref:`deferred`

    """
    # BUG FIX: raw docstring -- ``\*`` is an invalid escape sequence in a
    # regular string literal (DeprecationWarning/SyntaxWarning on 3.6+).
    return ColumnProperty(deferred=True, *columns, **kw)
# Public factories for the Mapper itself and the synonym/comparable
# descriptor property types.
mapper = public_factory(Mapper, ".orm.mapper")
synonym = public_factory(SynonymProperty, ".orm.synonym")
comparable_property = public_factory(ComparableProperty,
                                     ".orm.comparable_property")
@_sa_util.deprecated("0.7", message=":func:`.compile_mappers` "
                     "is renamed to :func:`.configure_mappers`")
def compile_mappers():
    """Initialize the inter-mapper relationships of all mappers that have
    been defined.
    """
    # Deprecated alias kept for backwards compatibility; simply delegates.
    configure_mappers()
def clear_mappers():
    """Remove all mappers from all classes.

    This function removes all instrumentation from classes and disposes
    of their associated mappers.  Once called, the classes are unmapped
    and can be later re-mapped with new mappers.

    :func:`.clear_mappers` is *not* for normal use, as there is literally no
    valid usage for it outside of very specific testing scenarios. Normally,
    mappers are permanent structural components of user-defined classes, and
    are never discarded independently of their class.  If a mapped class
    itself is garbage collected, its mapper is automatically disposed of as
    well. As such, :func:`.clear_mappers` is only for usage in test suites
    that re-use the same classes with different mappings, which is itself an
    extremely rare use case - the only such use case is in fact SQLAlchemy's
    own test suite, and possibly the test suites of other ORM extension
    libraries which intend to test various combinations of mapper construction
    upon a fixed set of classes.
    """
    # Serialize against concurrent mapper configuration.  Explicit
    # acquire/release rather than a ``with`` block, since _CONFIGURE_MUTEX
    # is only assumed to expose the lock protocol.
    mapperlib._CONFIGURE_MUTEX.acquire()
    try:
        while _mapper_registry:
            try:
                # can't even reliably call list(weakdict) in jython
                mapper, b = _mapper_registry.popitem()
                mapper.dispose()
            except KeyError:
                # Entry vanished concurrently (weak registry); keep draining.
                pass
    finally:
        mapperlib._CONFIGURE_MUTEX.release()
from . import strategy_options
# Public, per-query loader option functions: the "unbound" callables from
# strategy_options, re-exported under their traditional module-level names.
joinedload = strategy_options.joinedload._unbound_fn
joinedload_all = strategy_options.joinedload._unbound_all_fn
contains_eager = strategy_options.contains_eager._unbound_fn
defer = strategy_options.defer._unbound_fn
undefer = strategy_options.undefer._unbound_fn
undefer_group = strategy_options.undefer_group._unbound_fn
load_only = strategy_options.load_only._unbound_fn
lazyload = strategy_options.lazyload._unbound_fn
lazyload_all = strategy_options.lazyload_all._unbound_all_fn
subqueryload = strategy_options.subqueryload._unbound_fn
subqueryload_all = strategy_options.subqueryload_all._unbound_all_fn
immediateload = strategy_options.immediateload._unbound_fn
noload = strategy_options.noload._unbound_fn
defaultload = strategy_options.defaultload._unbound_fn
# Load is the generative, bound counterpart of the functions above.
from .strategy_options import Load
def eagerload(*args, **kwargs):
    """Legacy alias for :func:`joinedload()`."""
    return joinedload(*args, **kwargs)
def eagerload_all(*args, **kwargs):
    """Legacy alias for :func:`joinedload_all()`."""
    return joinedload_all(*args, **kwargs)
# Query option mapping an alias onto a query's main entity (AliasOption).
contains_alias = public_factory(AliasOption, ".orm.contains_alias")
def __go(lcls):
    # Module finalization.  Builds __all__ from this module's namespace and
    # resolves deferred cross-module dependencies.  Invoked exactly once,
    # immediately below, with this module's locals().
    global __all__
    # dynamic/events are imported for their registration side effects;
    # sa_util is bound but otherwise unused here.
    from .. import util as sa_util
    from . import dynamic
    from . import events
    import inspect as _inspect
    # Export every public (non-underscore), non-module name defined above.
    __all__ = sorted(name for name, obj in lcls.items()
                     if not (name.startswith('_') or _inspect.ismodule(obj)))
    _sa_util.dependencies.resolve_all("sqlalchemy.orm")
__go(locals())
|
mit
|
kallewoof/bitcoin
|
test/functional/p2p_message_capture.py
|
22
|
2883
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test per-peer message capture capability.
Additionally, the output of contrib/message-capture/message-capture-parser.py should be verified manually.
"""
import glob
from io import BytesIO
import os
from test_framework.p2p import P2PDataStore, MESSAGEMAP
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
TIME_SIZE = 8
LENGTH_SIZE = 4
MSGTYPE_SIZE = 12
def mini_parser(dat_file):
    """Parse a data file created by CaptureMessage.

    From the data file we'll only check the structure.
    We won't care about things like:
    - Deserializing the payload of the message
        - This is managed by the deserialize methods in test_framework.messages
    - The order of the messages
        - There's no reason why we can't, say, change the order of the messages in the handshake
    - Message Type
        - We can add new message types

    We're ignoring these because they're simply too brittle to test here.
    """
    # Fixed-size record header: timestamp | msgtype | payload length.
    header_size = TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE
    with open(dat_file, 'rb') as f_in:
        # This should have at least one message in it
        assert os.fstat(f_in.fileno()).st_size >= header_size
        while True:
            tmp_header_raw = f_in.read(header_size)
            if not tmp_header_raw:
                break
            tmp_header = BytesIO(tmp_header_raw)
            tmp_header.read(TIME_SIZE)  # skip the timestamp field
            raw_msgtype = tmp_header.read(MSGTYPE_SIZE)
            # BUG FIX: the original called split(b'\x00', 1) twice and
            # indexed [1], which recomputed the split and raised IndexError
            # for a message type occupying all 12 bytes (no NUL padding).
            # partition() does one pass and yields an empty remainder then.
            msgtype, _, remainder = raw_msgtype.partition(b'\x00')
            assert len(msgtype) > 0
            assert msgtype in MESSAGEMAP
            # Padding after the type must be all NUL (i.e. unprintable).
            assert len(remainder) == 0 or not remainder.decode().isprintable()
            length: int = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little")
            data = f_in.read(length)
            assert_equal(len(data), length)
class MessageCaptureTest(BitcoinTestFramework):
    """Check that -capturemessages produces parseable msgs_recv/msgs_sent files."""
    def set_test_params(self):
        # One node, message capture on, starting from a clean chain.
        self.num_nodes = 1
        self.extra_args = [["-capturemessages"]]
        self.setup_clean_chain = True
    def run_test(self):
        capturedir = os.path.join(self.nodes[0].datadir, "regtest/message_capture")
        # Connect a node so that the handshake occurs
        self.nodes[0].add_p2p_connection(P2PDataStore())
        self.nodes[0].disconnect_p2ps()
        # Validate both directions of the captured traffic.
        recv_file = glob.glob(os.path.join(capturedir, "*/msgs_recv.dat"))[0]
        mini_parser(recv_file)
        sent_file = glob.glob(os.path.join(capturedir, "*/msgs_sent.dat"))[0]
        mini_parser(sent_file)
if __name__ == '__main__':
MessageCaptureTest().main()
|
mit
|
yfried/ansible
|
test/units/compat/unittest.py
|
156
|
1302
|
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat module for Python2.7's unittest module
'''
import sys
# Allow wildcard import because we really do want to import all of
# unittests's symbols into this compat shim
# pylint: disable=wildcard-import,unused-wildcard-import
# Re-export the full unittest API, substituting the unittest2 backport on
# Python 2.6 (which lacks 2.7's assert helpers and discovery features).
if sys.version_info < (2, 7):
    try:
        # Need unittest2 on python2.6
        from unittest2 import *
    except ImportError:
        # Warn only; importing this shim then leaves no unittest names bound.
        print('You need unittest2 installed on python2.6.x to run tests')
else:
    from unittest import *
|
gpl-3.0
|
agaldona/odoomrp-wip-1
|
product_variants_no_automatic_creation/models/product_configurator.py
|
8
|
12757
|
# -*- coding: utf-8 -*-
# © 2015 Oihane Crucelaegui - AvanzOSC
# © 2016 Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3
from openerp import api, fields, models, exceptions, _
from openerp.addons import decimal_precision as dp
class ProductConfigurator(models.AbstractModel):
    """Abstract mixin letting a record select a product template, fill in
    attribute values, and resolve (or later create) the matching
    ``product.product`` variant.
    """
    _name = 'product.configurator'

    product_tmpl_id = fields.Many2one(
        comodel_name='product.template', string='Product Template',
        auto_join=True)
    product_attribute_ids = fields.One2many(
        comodel_name='product.configurator.attribute',
        domain=lambda self: [("owner_model", "=", self._name)],
        inverse_name='owner_id', string='Product attributes', copy=True)
    price_extra = fields.Float(
        compute='_compute_price_extra',
        digits=dp.get_precision('Product Price'),
        help="Price Extra: Extra price for the variant with the currently "
             "selected attributes values on sale price. eg. 200 price extra, "
             "1000 + 200 = 1200.")
    product_id = fields.Many2one(
        comodel_name="product.product", string="Product")
    name = fields.Char()

    @api.multi
    @api.depends('product_attribute_ids', 'product_attribute_ids.price_extra')
    def _compute_price_extra(self):
        """Total price extra = sum over the selected attribute values."""
        for record in self:
            record.price_extra = sum(
                record.mapped('product_attribute_ids.price_extra'))

    @api.multi
    @api.onchange('product_tmpl_id')
    def onchange_product_tmpl_id(self):
        """Rebuild the attribute lines whenever the template changes."""
        # First, empty current list
        self.product_attribute_ids = [
            (2, x.id) for x in self.product_attribute_ids]
        if not self.product_tmpl_id.attribute_line_ids:
            # Template has no attributes: there is a single variant.
            # NOTE(review): product_variant_ids may hold several records;
            # presumably only one exists here - confirm against data.
            self.product_id = self.product_tmpl_id.product_variant_ids
        else:
            if not self.env.context.get('not_reset_product'):
                self.product_id = False
            attribute_list = []
            for attribute_line in self.product_tmpl_id.attribute_line_ids:
                attribute_list.append({
                    'attribute_id': attribute_line.attribute_id.id,
                    'product_tmpl_id': self.product_tmpl_id.id,
                    'owner_model': self._name,
                    'owner_id': self.id,
                })
            self.product_attribute_ids = [(0, 0, x) for x in attribute_list]
            # Needed because the compute method is not triggered
            self.product_attribute_ids._compute_possible_value_ids()
        # Restrict product possible values to current selection
        domain = [('product_tmpl_id', '=', self.product_tmpl_id.id)]
        return {'domain': {'product_id': domain}}

    @api.multi
    @api.onchange('product_attribute_ids')
    def onchange_product_attribute_ids(self):
        """Try to resolve the exact variant matching the chosen values;
        otherwise build a descriptive name from the partial selection."""
        product_obj = self.env['product.product']
        domain, cont = product_obj._build_attributes_domain(
            self.product_tmpl_id, self.product_attribute_ids)
        self.product_id = False
        if cont:
            products = product_obj.search(domain)
            # Filter the product with the exact number of attributes values
            for product in products:
                if len(product.attribute_value_ids) == cont:
                    self.product_id = product.id
                    break
        if not self.product_id:
            product_tmpl = self.product_tmpl_id
            values = self.product_attribute_ids.mapped('value_id')
            if self._fields.get('partner_id'):
                # If our model has a partner_id field, language is got from it
                obj = self.env['product.attribute.value'].with_context(
                    lang=self.partner_id.lang)
                values = obj.browse(
                    self.product_attribute_ids.mapped('value_id').ids)
                obj = self.env['product.template'].with_context(
                    lang=self.partner_id.lang)
                product_tmpl = obj.browse(self.product_tmpl_id.id)
            self.name = self._get_product_description(
                product_tmpl, False, values)
        return {'domain': {'product_id': domain}}

    @api.multi
    @api.onchange('product_id')
    def onchange_product_id_product_configurator(self):
        """Fill the attribute lines from the selected variant."""
        # First, empty current list
        self.product_attribute_ids = [
            (2, x.id) for x in self.product_attribute_ids]
        if self.product_id:
            attribute_list = (
                self.product_id._get_product_attributes_values_dict())
            for val in attribute_list:
                # BUG FIX: use the raw id for the Many2one value, consistent
                # with the old-API variant of this onchange below (which
                # uses product.product_tmpl_id.id).
                val['product_tmpl_id'] = self.product_id.product_tmpl_id.id
                val['owner_model'] = self._name
                val['owner_id'] = self.id
            product = self.product_id
            if self._fields.get('partner_id'):
                # If our model has a partner_id field, language is got from it
                product = self.env['product.product'].with_context(
                    lang=self.partner_id.lang).browse(self.product_id.id)
            self.product_attribute_ids = [(0, 0, x) for x in attribute_list]
            self.name = self._get_product_description(
                product.product_tmpl_id, product, product.attribute_value_ids)

    @api.multi
    def onchange_product_id_product_configurator_old_api(self, product_id,
                                                         partner_id=None):
        """Method to be called in case inherited model use old API on_change.

        The returned result has to be merged with current 'value' key in the
        regular on_change method, not with the complete dictionary.

        :param product_id: ID of the changed product.
        :return: Dictionary with the changed values.
        """
        res = {}
        if product_id:
            product_obj = self.env['product.product']
            if partner_id:
                partner = self.env['res.partner'].browse(partner_id)
                product_obj = product_obj.with_context(lang=partner.lang)
            product = product_obj.browse(product_id)
            attr_values_dict = product._get_product_attributes_values_dict()
            for val in attr_values_dict:
                val['product_tmpl_id'] = product.product_tmpl_id.id
                val['owner_model'] = self._name
                val['owner_id'] = self.id
            attr_values = [(0, 0, values) for values in attr_values_dict]
            res['product_attribute_ids'] = attr_values
            res['name'] = self._get_product_description(
                product.product_tmpl_id, product,
                product.attribute_value_ids)
        return res

    @api.model
    def _order_attributes(self, template, product_attribute_values):
        """Return the template's attribute dicts (template order), each
        completed with the matching selected value."""
        res = template._get_product_attributes_dict()
        res2 = []
        for val in res:
            value = product_attribute_values.filtered(
                lambda x: x.attribute_id.id == val['attribute_id'])
            if value:
                val['value_id'] = value
                res2.append(val)
        return res2

    @api.model
    def _get_product_description(self, template, product, product_attributes):
        """Build a display name from the template/product name plus the
        selected attribute values (multi-line when the 'extended
        description' group is active, inline parentheses otherwise)."""
        name = product and product.name or template.name
        extended = self.user_has_groups(
            'product_variants_no_automatic_creation.'
            'group_product_variant_extended_description')
        if not product_attributes and product:
            product_attributes = product.attribute_value_ids
        values = self._order_attributes(template, product_attributes)
        if extended:
            description = "\n".join(
                "%s: %s" %
                (x['value_id'].attribute_id.name, x['value_id'].name)
                for x in values)
        else:
            description = ", ".join([x['value_id'].name for x in values])
        if not description:
            return name
        return ("%s\n%s" if extended else "%s (%s)") % (name, description)

    @api.multi
    def unlink(self):
        """Mimic `ondelete="cascade"`."""
        attributes = self.mapped("product_attribute_ids")
        result = super(ProductConfigurator, self).unlink()
        if result:
            attributes.unlink()
        return result

    @api.model
    def check_configuration_validity(self, vals):
        """This method checks that the current selection values are correct
        according rules. As default, the validity means that all the attributes
        values are set. This can be overridden to set another rules.

        :param vals: Dictionary of values that creates the record
        :type vals: dict
        :raises: exceptions.ValidationError: If the check is not valid.
        """
        # NOTE(review): assumes every command in product_attribute_ids is a
        # (0, 0, vals) triple - confirm for callers passing link commands.
        if any(not x[2].get('value_id') for
               x in vals.get('product_attribute_ids', [])):
            raise exceptions.ValidationError(
                _("You have to fill all the attributes values."))

    @api.multi
    def _create_variant_from_vals(self, vals):
        """This method creates a product variant extracting the needed values
        from the values dictionary passed to the ORM methods create/write. It
        also takes the rest of the values from the associated recordset in self
        if needed, or raise an ensure_one exception if not provided.

        :param vals: Dictionary of values for the record creation/update.
        :return: The same values dictionary with the ID of the created product
          in it under the key `product_id`.
        """
        attribute_obj = self.env['product.configurator.attribute']
        product_attributes_dict = vals.get('product_attribute_ids')
        if not vals.get('product_attribute_ids'):
            self.ensure_one()
            product_attributes_dict = self._convert_to_write(self._cache)
        product_tmpl_id = vals.get('product_tmpl_id', self.product_tmpl_id.id)
        value_ids = []
        # Collect the selected value ids from the x2many commands.
        for op in product_attributes_dict:
            if op[0] == 4:
                # Link to an existing attribute line.
                attribute = attribute_obj.browse(op[1])
                if attribute.value_id:
                    value_ids.append(attribute.value_id.id)
            elif op[0] in (0, 1):
                # Create/update: prefer the value in the command payload.
                if 'value_id' in op[2]:
                    if op[2]['value_id']:
                        value_ids.append(op[2]['value_id'])
                else:
                    attribute = attribute_obj.browse(op[1])
                    if attribute.value_id:
                        value_ids.append(attribute.value_id.id)
            elif op[0] == 6:
                # Replace the whole set.
                value_ids = []
                for attribute_id in op[2]:
                    attribute = attribute_obj.browse(attribute_id)
                    if attribute.value_id:
                        value_ids.append(attribute.value_id.id)
        product = self.env['product.product'].create({
            'product_tmpl_id': product_tmpl_id,
            'attribute_value_ids': [(6, 0, value_ids)],
        })
        vals['product_id'] = product.id
        return vals
class ProductConfiguratorAttribute(models.Model):
    """One attribute/value selection line belonging to a configurator record.

    The owner is referenced generically through (owner_model, owner_id)
    because product.configurator is an abstract mixin.
    """
    _name = 'product.configurator.attribute'
    # Generic reference to the owning configurator record.
    owner_id = fields.Integer(string="Owner", required=True)
    owner_model = fields.Char(required=True)
    product_tmpl_id = fields.Many2one(
        comodel_name='product.template', string='Product Template',
        required=True)
    attribute_id = fields.Many2one(
        comodel_name='product.attribute', string='Attribute', readonly=True)
    value_id = fields.Many2one(
        comodel_name='product.attribute.value',
        domain="[('attribute_id', '=', attribute_id), "
               "  ('id', 'in', possible_value_ids[0][2])]",
        string='Value')
    possible_value_ids = fields.Many2many(
        comodel_name='product.attribute.value',
        compute='_compute_possible_value_ids')
    price_extra = fields.Float(
        compute='_compute_price_extra',
        digits=dp.get_precision('Product Price'),
        help="Price Extra: Extra price for the variant with this attribute "
             "value on sale price. eg. 200 price extra, 1000 + 200 = 1200.")
    @api.multi
    @api.depends('attribute_id')
    def _compute_possible_value_ids(self):
        """Limit selectable values to those on the template's matching
        attribute line."""
        for record in self:
            # This should be unique due to the new constraint added
            attribute = record.product_tmpl_id.attribute_line_ids.filtered(
                lambda x: x.attribute_id == record.attribute_id)
            record.possible_value_ids = attribute.value_ids.sorted()
    @api.multi
    @api.depends('value_id')
    def _compute_price_extra(self):
        """Price extra = the value's price rule for this template, if any."""
        for record in self:
            record.price_extra = sum(
                record.value_id.price_ids.filtered(
                    lambda x: (
                        x.product_tmpl_id == record.product_tmpl_id)
                ).mapped('price_extra'))
|
agpl-3.0
|
biorack/metatlas
|
jaws/jaws_wdls/scripts/mzml_loader_jaws.py
|
2
|
6958
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import pwd
import datetime
import sys
import traceback
import tables
import warnings
warnings.filterwarnings("ignore", module="plotly")
# pymzml has plotly in it and if you don't have a setup in your
# home directory, you get a warning.
from pymzml.run import Reader
from metatlas import __version__
DEBUG = False
FORMAT_VERSION = 5
# time.time() when the latest version of the format was created
VERSION_TIMESTAMP = 1451062032
class MS1Data(tables.IsDescription):
    """PyTables row schema for MS1 peaks: m/z, intensity, retention time."""
    mz = tables.Float32Col(pos=0)
    i = tables.Float32Col(pos=1)
    rt = tables.Float32Col(pos=2)
class MS2Data(MS1Data):
    """MS2 peak schema: MS1 columns plus precursor/collision metadata."""
    # the rest are only relevant for ms2 spectra
    precursor_MZ = tables.Float32Col(pos=5)
    precursor_intensity = tables.Float32Col(pos=6)
    collision_energy = tables.Float32Col(pos=7)
class ScanInfo(tables.IsDescription):
    """Per-scan metadata row; column order matches the info tuple built by
    read_spectrum()."""
    rt = tables.Float32Col(pos=0)
    polarity = tables.Int16Col(pos=1)
    ms_level = tables.Int16Col(pos=2)
    max_mz = tables.Float32Col(pos=3)
    min_mz = tables.Float32Col(pos=4)
    # the rest are only relevant for ms2 spectra
    precursor_MZ = tables.Float32Col(pos=5)
    precursor_intensity = tables.Float32Col(pos=6)
    collision_energy = tables.Float32Col(pos=7)
def read_spectrum(spectrum, index):
    """Read a single spectrum.

    Parameters
    ----------
    spectrum : mzML spectrum
        The mzML spectrum to parse.

    Returns
    -------
    out : (list of tuples, tuple)
        Per-peak rows plus the scan-level info tuple
        (rt, polarity, ms_level, min_mz, max_mz, precursor_MZ,
        precursor_intensity, collision_energy).
    """
    # MS:1000130 marks a positive scan; retention time is normalized to minutes.
    is_positive = 'MS:1000130' in spectrum
    ms_level = spectrum['ms level']
    rt, units = spectrum['MS:1000016']
    if units != 'minute':
        rt /= 60
    collision_energy = spectrum.get('MS:1000045', 0)
    precursor_intensity = spectrum.get('MS:1000042', 0)
    precursor_mz = spectrum.get('MS:1000744', 0)
    info = (
        rt,
        is_positive,
        ms_level,
        spectrum.get('lowest m/z value', 0),
        spectrum.get('highest m/z value', 0),
        precursor_mz,
        precursor_intensity,
        collision_energy,
    )
    # MS2 rows carry the precursor columns; MS1 rows do not.
    extra = () if ms_level == 1 else (
        precursor_mz, precursor_intensity, collision_energy)
    data = [(mz, intensity, rt) + extra for (mz, intensity) in spectrum.peaks]
    return data, info
def mzml_to_hdf(in_file_name, out_file_name=None, debug=False):
    """Converts in_file (mzml) to binary and stores it in out_file.

    Parameters
    ----------
    in_file_name : str
        Path to the input .mzML file.
    out_file_name : str, optional
        Output HDF5 path; defaults to the input path with '.mzML'
        replaced by '.h5'.
    debug : bool, optional
        Emit progress to stdout (also forced by the module DEBUG flag).

    Returns
    -------
    str
        The output file name.

    Raises
    ------
    TypeError
        If pymzml cannot open the input as mzML.
    """
    debug = debug or DEBUG
    if debug:
        sys.stdout.write("STATUS: Converting %s to %s (mzML to HDF)" %
                         (in_file_name, out_file_name))
        sys.stdout.flush()
    # Extra accessions for pymzml to read
    extraAccessions = [
        ('MS:1000016', ['value', 'unitName']),  # scan start time
        ('MS:1000129', ['name']),  # negative scan
        ('MS:1000130', ['name']),  # positive scan
        ('MS:1000744', ['value']),  # selected ion m/z
        ('MS:1000042', ['value']),  # peak intensity
        ('MS:1000045', ['value']),  # collision energy
        ('MS:1000528', ['value']),  # lowest observed m/z
        ('MS:1000527', ['value']),  # highest observed m/z
        ('MS:1000529', ['value']),  # serial number
    ]
    try:
        mzml_reader = Reader(in_file_name,
                             extraAccessions=extraAccessions)
    except Exception as e:
        sys.stderr.write('\nMzml error: %s\n' % e)
        sys.stderr.flush()
        raise TypeError('Not a valid mzML file: "%s"' % in_file_name)
    if not out_file_name:
        out_file_name = in_file_name.replace('.mzML', '.h5')
    # Blosc compression keeps the file small without much CPU cost.
    FILTERS = tables.Filters(complib='blosc', complevel=1)
    out_file = tables.open_file(out_file_name, "w", filters=FILTERS)
    try:
        _convert(out_file, mzml_reader, debug)
    except Exception as e:
        sys.stderr.write('\nConversion error:\n')
        traceback.print_exception(*sys.exc_info())
        sys.stderr.flush()
        sys.stdout.flush()
        raise
    finally:
        # _convert also closes on success; closing twice is tolerated.
        out_file.close()
    return out_file_name
def _convert(out_file, mzml_reader, debug):
    """Stream spectra from mzml_reader into the open PyTables file,
    splitting rows into ms1/ms2 x pos/neg tables, then build m/z-sorted
    copies and record file-level metadata attributes."""
    ms1_neg = out_file.create_table('/', 'ms1_neg', description=MS1Data)
    ms1_pos = out_file.create_table('/', 'ms1_pos', description=MS1Data)
    ms2_neg = out_file.create_table('/', 'ms2_neg', description=MS2Data)
    ms2_pos = out_file.create_table('/', 'ms2_pos', description=MS2Data)
    info_table = out_file.create_table('/', 'info', description=ScanInfo)
    got_first = False
    for (ind, spectrum) in enumerate(mzml_reader):
        if got_first and spectrum['id'] == 1:
            # check for a repeat
            break
        try:
            data, info = read_spectrum(spectrum, ind)
        except (KeyError, TypeError):
            # Non-data spectra (missing required accessions) are skipped.
            continue
        except Exception as e:
            sys.stdout.write('Read spectrum error: %s\n' % e)
            sys.stdout.flush()
            continue
        if not data:
            continue
        got_first = True
        if info[2] == 1:  # ms level
            if not info[1]:  # polarity
                table = ms1_neg
            else:
                table = ms1_pos
        elif not info[1]:
            table = ms2_neg
        else:
            table = ms2_pos
        table.append(data)
        # BUG FIX: the original read ``table.flush`` (no parentheses), so
        # the flush was never actually called.
        table.flush()
        info_table.append([info])
        if debug and not (ind % 100):
            sys.stdout.write('.')
            sys.stdout.flush()
    info_table.flush()
    # Build m/z-sorted copies of each peak table for fast range queries.
    for name in ['ms1_neg', 'ms2_neg', 'ms1_pos', 'ms2_pos']:
        table = out_file.get_node('/' + name)
        table.cols.mz.create_csindex()
        table.copy(sortby='mz', newname=name + '_mz')
        table.cols.mz.remove_index()
    out_file.set_node_attr('/', "upload_date", datetime.datetime.utcnow())
    out_file.set_node_attr('/', "uploaded_by",
                           pwd.getpwuid(os.getuid())[0])
    serial = mzml_reader.param.get('MS:1000529', 'Unknown')
    out_file.set_node_attr('/', 'instrument_serial_number', serial)
    out_file.set_node_attr('/', 'format_version', FORMAT_VERSION)
    out_file.set_node_attr('/', 'metatlas_version', __version__)
    if debug:
        sys.stdout.write('\nSaving file\n')
        sys.stdout.flush()
    # NOTE(review): the caller's ``finally`` also closes out_file; the
    # double close is tolerated by PyTables.
    out_file.close()
    if debug:
        sys.stdout.write("STATUS: Finished mzML to HDF conversion\n")
        sys.stdout.flush()
def main():
    """Command-line entry point: convert one mzML file to HDF5."""
    arg_parser = argparse.ArgumentParser(description="Load mzml files to HDF")
    arg_parser.add_argument("-o", "--output", type=str,
                            help="Output file name", required=False)
    arg_parser.add_argument("-i", "--input", type=str,
                            help="Input mzML file", required=True)
    arg_parser.add_argument("-d", "--debug", help="Sets debug mode",
                            action="store_true")
    parsed = arg_parser.parse_args()
    mzml_to_hdf(parsed.input, parsed.output, parsed.debug)
if __name__ == '__main__': # pragma : no cover
main()
|
bsd-3-clause
|
iDTLabssl/hr
|
hr_unported/hr_payroll_register_report/report/payroll_register.py
|
1
|
9329
|
# -*- coding:utf-8 -*-
#
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import time
from openerp.report import report_sxw
class Parser(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(Parser, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'get_no': self.get_no,
'get_basic': self.get_basic,
'get_ot': self.get_ot,
'get_transportation': self.get_transportation,
'get_allowances': self.get_allowances,
'get_gross': self.get_gross,
'get_taxable_gross': self.get_taxable_gross,
'get_ded_fit': self.get_ded_fit,
'get_ded_pf_ee': self.get_ded_pf_ee,
'get_deduct': self.get_deduct,
'get_total_deduct': self.get_total_deduct,
'get_net': self.get_net,
'get_er_contributions': self.get_er_contributions,
'get_details_by_payslip': self.get_details_by_payslip,
})
self.no = 0
self.salary = 0.0
self.ot = 0.0
self.transportation = 0.0
self.allowances = 0.0
self.gross = 0.0
self.taxable_gross = 0.0
self.ded_fit = 0.0
self.ded_pf_ee = 0.0
self.deduct = 0.0
self.total_deduct = 0.0
self.net = 0.0
self.er_contributions = 0.0
self.saved_run_id = -1
@api.model
def _reset_values(self, run_id):
self.no = 0
self.salary = 0.0
self.ot = 0.0
self.transportation = 0.0
self.allowances = 0.0
self.gross = 0.0
self.taxable_gross = 0.0
self.ded_fit = 0.0
self.ded_pf_ee = 0.0
self.deduct = 0.0
self.total_deduct = 0.0
self.net = 0.0
self.er_contributions = 0.0
self.saved_run_id = run_id
@api.model
def get_details_by_payslip(self, payslips):
res = []
for slip in payslips:
if self.saved_run_id != slip.payslip_run_id.id:
self._reset_values(slip.payslip_run_id.id)
tmp = self.get_details_by_rule_category(
slip.details_by_salary_rule_category)
tmp['name'] = slip.employee_id.name
tmp['id_no'] = slip.employee_id.f_employee_no
res.append(tmp)
return res
# Most of this function (except at the end) is copied verbatim from
# the Pay Slip Details Report
#
@api.model
def get_details_by_rule_category(self, obj):
payslip_line = self.pool.get('hr.payslip.line')
rule_cate_obj = self.pool.get('hr.salary.rule.category')
def get_recursive_parent(rule_categories):
if not rule_categories:
return []
if rule_categories[0].parent_id:
rule_categories.insert(0, rule_categories[0].parent_id)
get_recursive_parent(rule_categories)
return rule_categories
res = []
result = {}
ids = []
# Choose only the categories (or rules) that we want to
# show in the report.
#
regline = {
'name': '',
'id_no': '',
'salary': 0,
'ot': 0,
'transportation': 0,
'allowances': 0,
'taxable_gross': 0,
'gross': 0,
'fit': 0,
'ee_pension': 0,
'deductions': 0,
'deductions_total': 0,
'net': 0,
'er_contributions': 0,
}
# Arrange the Pay Slip Lines by category
#
for id in range(len(obj)):
ids.append(obj[id].id)
if ids:
self.cr.execute('''\
SELECT pl.id, pl.category_id
FROM hr_payslip_line as pl
LEFT JOIN hr_salary_rule_category AS rc on (pl.category_id = rc.id)
WHERE pl.id in %s
GROUP BY rc.parent_id, pl.sequence, pl.id, pl.category_id
ORDER BY pl.sequence, rc.parent_id''', (tuple(ids), ))
for x in self.cr.fetchall():
result.setdefault(x[1], [])
result[x[1]].append(x[0])
for key, value in result.iteritems():
rule_categories = rule_cate_obj.browse(
self.cr, self.uid, [key])
parents = get_recursive_parent(rule_categories)
category_total = 0
for line in payslip_line.browse(self.cr, self.uid, value):
category_total += line.total
level = 0
for parent in parents:
res.append({
'rule_category': parent.name,
'name': parent.name,
'code': parent.code,
'level': level,
'total': category_total,
})
level += 1
for line in payslip_line.browse(self.cr, self.uid, value):
res.append({
'rule_category': line.name,
'name': line.name,
'code': line.code,
'total': line.total,
'level': level
})
for r in res:
# Level 0 is the category
if r['code'] == 'BASIC' and r['level'] == 0:
regline['salary'] = r['total']
elif r['code'] == 'OT':
regline['ot'] = r['total']
elif r['code'] == 'TRA' or r['code'] == 'TRVA':
regline['transportation'] = r['total']
elif r['code'] == 'ALW':
regline['allowances'] = r['total']
elif r['code'] == 'TXBL':
regline['taxable_gross'] = r['total']
elif r['code'] == 'GROSS':
regline['gross'] = r['total']
elif r['code'] == 'FITCALC':
regline['fit'] = r['total']
elif r['code'] == 'PENFEE':
regline['ee_pension'] = r['total']
elif r['code'] == 'DED':
regline['deductions'] = r['total']
elif r['code'] == 'DEDTOTAL':
regline['deductions_total'] = r['total']
elif r['code'] == 'NET':
regline['net'] = r['total']
elif r['code'] == 'ER':
regline['er_contributions'] = r['total']
# Make adjustments to subtract from the parent category's total the
# amount of individual rules that we show separately on the sheet.
#
regline['allowances'] -= regline['transportation']
regline['deductions'] -= regline['ee_pension']
# Increase running totals
#
self.salary += regline['salary']
self.ot += regline['ot']
self.transportation += regline['transportation']
self.allowances += regline['allowances']
self.gross += regline['gross']
self.taxable_gross += regline['taxable_gross']
self.ded_fit += regline['fit']
self.ded_pf_ee += regline['ee_pension']
self.deduct += regline['deductions']
self.total_deduct += regline['deductions_total']
self.net += regline['net']
self.er_contributions += regline['er_contributions']
return regline
    @api.one
    def get_basic(self, obj):
        """Return the running basic-salary total accumulated while rendering."""
        return self.salary
    @api.one
    def get_ot(self, obj):
        """Return the running overtime total accumulated while rendering."""
        return self.ot
    @api.one
    def get_transportation(self, obj):
        """Return the running transportation-allowance total."""
        return self.transportation
    @api.one
    def get_allowances(self, obj):
        """Return the running total of other allowances (transportation excluded)."""
        return self.allowances
    @api.one
    def get_gross(self, obj):
        """Return the running gross-pay total."""
        return self.gross
    @api.one
    def get_taxable_gross(self, obj):
        """Return the running taxable-gross total."""
        return self.taxable_gross
    @api.one
    def get_ded_fit(self, obj):
        """Return the running federal-income-tax deduction total."""
        return self.ded_fit
    @api.one
    def get_ded_pf_ee(self, obj):
        """Return the running employee pension-contribution total."""
        return self.ded_pf_ee
    @api.one
    def get_deduct(self, obj):
        """Return the running total of other deductions (pension excluded)."""
        return self.deduct
    @api.one
    def get_total_deduct(self, obj):
        """Return the running grand total of all deductions."""
        return self.total_deduct
    @api.one
    def get_net(self, obj):
        """Return the running net-pay total."""
        return self.net
    @api.one
    def get_er_contributions(self, obj):
        """Return the running employer-contributions total."""
        return self.er_contributions
    @api.one
    def get_no(self):
        """Increment and return the report's running row counter.

        Side effect: bumps ``self.no`` on every call, so each report line
        gets the next sequence number.
        """
        self.no += 1
        return self.no
|
agpl-3.0
|
40223231/2015-cdb-g4-final-test-by-6-22
|
static/Brython3.1.1-20150328-091302/Lib/sre_parse.py
|
630
|
29657
|
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
from _sre import MAXREPEAT
# Characters with special meaning outside a character class.
SPECIAL_CHARS = ".\\[{()*+?^$|"
# Characters that can introduce a repeat operator.
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
# Simple one-character escapes that map directly to a literal.
ESCAPES = {
    r"\a": (LITERAL, ord("\a")),
    r"\b": (LITERAL, ord("\b")),
    r"\f": (LITERAL, ord("\f")),
    r"\n": (LITERAL, ord("\n")),
    r"\r": (LITERAL, ord("\r")),
    r"\t": (LITERAL, ord("\t")),
    r"\v": (LITERAL, ord("\v")),
    r"\\": (LITERAL, ord("\\"))
}
# Escapes that stand for a position assertion or a character category.
CATEGORIES = {
    r"\A": (AT, AT_BEGINNING_STRING), # start of string
    r"\b": (AT, AT_BOUNDARY),
    r"\B": (AT, AT_NON_BOUNDARY),
    r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
    r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
    r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
    r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
    r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
    r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
    r"\Z": (AT, AT_END_STRING), # end of string
}
# Inline flag letters, as used in "(?aiLmsux)" groups.
FLAGS = {
    # standard flags
    "i": SRE_FLAG_IGNORECASE,
    "L": SRE_FLAG_LOCALE,
    "m": SRE_FLAG_MULTILINE,
    "s": SRE_FLAG_DOTALL,
    "x": SRE_FLAG_VERBOSE,
    # extensions
    "a": SRE_FLAG_ASCII,
    "t": SRE_FLAG_TEMPLATE,
    "u": SRE_FLAG_UNICODE,
}
class Pattern:
    """Master pattern object.

    Tracks global attributes of a parse: the compile flags, the group
    numbering, the name-to-group mapping, and which groups are still
    open (their closing parenthesis not yet seen).
    """
    def __init__(self):
        self.flags = 0
        self.open = []
        self.groups = 1
        self.groupdict = {}
    def opengroup(self, name=None):
        """Allocate the next group number, optionally under *name*."""
        gid = self.groups
        self.groups += 1
        if name is not None:
            previous = self.groupdict.get(name, None)
            if previous is not None:
                raise error("redefinition of group name %s as group %d; "
                            "was group %d" % (repr(name), gid, previous))
            self.groupdict[name] = gid
        self.open.append(gid)
        return gid
    def closegroup(self, gid):
        """Mark group *gid* as closed."""
        self.open.remove(gid)
    def checkgroup(self, gid):
        """True if *gid* exists and is closed, i.e. safe to backreference."""
        return gid < self.groups and gid not in self.open
class SubPattern:
    # A subpattern, in intermediate form: a mutable list of
    # (opcode, argument) pairs with list-like access plus width analysis.
    def __init__(self, pattern, data=None):
        self.pattern = pattern  # the owning Pattern object
        if data is None:
            data = []
        self.data = data
        self.width = None  # cached (min, max) width, filled in by getwidth()
    def __iter__(self):
        return iter(self.data)
    def dump(self, level=0):
        # Pretty-print the parsed form for debugging (re.DEBUG).
        nl = 1
        seqtypes = (tuple, list)
        for op, av in self.data:
            print(level*" " + op, end=' '); nl = 0
            if op == "in":
                # member sublanguage
                print(); nl = 1
                for op, a in av:
                    print((level+1)*" " + op, a)
            elif op == "branch":
                print(); nl = 1
                i = 0
                for a in av[1]:
                    if i > 0:
                        print(level*" " + "or")
                    a.dump(level+1); nl = 1
                    i = i + 1
            elif isinstance(av, seqtypes):
                for a in av:
                    if isinstance(a, SubPattern):
                        if not nl: print()
                        a.dump(level+1); nl = 1
                    else:
                        print(a, end=' ') ; nl = 0
            else:
                print(av, end=' ') ; nl = 0
        if not nl: print()
    def __repr__(self):
        return repr(self.data)
    def __len__(self):
        return len(self.data)
    def __delitem__(self, index):
        del self.data[index]
    def __getitem__(self, index):
        # Slicing yields a new SubPattern sharing the same Pattern.
        if isinstance(index, slice):
            return SubPattern(self.pattern, self.data[index])
        return self.data[index]
    def __setitem__(self, index, code):
        self.data[index] = code
    def insert(self, index, code):
        self.data.insert(index, code)
    def append(self, code):
        self.data.append(code)
    def getwidth(self):
        # determine the width (min, max) for this subpattern
        if self.width:
            return self.width
        lo = hi = 0
        UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
        REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
        for op, av in self.data:
            if op is BRANCH:
                # a branch is as narrow as its narrowest alternative and
                # as wide as its widest one
                i = sys.maxsize
                j = 0
                for av in av[1]:
                    l, h = av.getwidth()
                    i = min(i, l)
                    j = max(j, h)
                lo = lo + i
                hi = hi + j
            elif op is CALL:
                i, j = av.getwidth()
                lo = lo + i
                hi = hi + j
            elif op is SUBPATTERN:
                i, j = av[1].getwidth()
                lo = lo + i
                hi = hi + j
            elif op in REPEATCODES:
                # av is (min, max, item): scale the item width by the bounds
                i, j = av[2].getwidth()
                lo = lo + int(i) * av[0]
                hi = hi + int(j) * av[1]
            elif op in UNITCODES:
                lo = lo + 1
                hi = hi + 1
            elif op == SUCCESS:
                break
        self.width = int(min(lo, sys.maxsize)), int(min(hi, sys.maxsize))
        return self.width
class Tokenizer:
    # Splits the pattern into single-character tokens, keeping a
    # backslash escape ("\" plus the following character) together as
    # one two-character token.  Works on both str and bytes patterns.
    def __init__(self, string):
        self.istext = isinstance(string, str)
        self.string = string
        self.index = 0
        self.__next()
    def __next(self):
        # Advance and cache the lookahead token in self.next (None at end).
        if self.index >= len(self.string):
            self.next = None
            return
        char = self.string[self.index:self.index+1]
        # Special case for the str8, since indexing returns a integer
        # XXX This is only needed for test_bug_926075 in test_re.py
        if char and not self.istext:
            char = chr(char[0])
        if char == "\\":
            try:
                c = self.string[self.index + 1]
            except IndexError:
                raise error("bogus escape (end of line)")
            if not self.istext:
                c = chr(c)
            char = char + c
        self.index = self.index + len(char)
        self.next = char
    def match(self, char, skip=1):
        # Return 1 if the lookahead equals *char*, consuming it when
        # skip is true; return 0 otherwise.
        if char == self.next:
            if skip:
                self.__next()
            return 1
        return 0
    def get(self):
        # Consume and return the current token (None at end of input).
        this = self.next
        self.__next()
        return this
    def getwhile(self, n, charset):
        # Consume and return up to *n* tokens while they are in *charset*.
        result = ''
        for _ in range(n):
            c = self.next
            if c not in charset:
                break
            result += c
            self.__next()
        return result
    def tell(self):
        # Snapshot (index, lookahead) for a later seek().
        return self.index, self.next
    def seek(self, index):
        # Restore a position previously returned by tell().
        self.index, self.next = index
def isident(char):
    # True when *char* may start an identifier-like group name:
    # an ASCII letter or an underscore.
    return char == "_" or "a" <= char <= "z" or "A" <= char <= "Z"
def isdigit(char):
    # True when *char* sorts inside the ASCII decimal digit range.
    return not (char < "0" or char > "9")
def isname(name):
    # Check that a group name is a valid identifier-like string: a
    # leading letter/underscore followed by letters, underscores or
    # digits.  (An empty name raises IndexError, as before.)
    first = name[0]
    if not ("a" <= first <= "z" or "A" <= first <= "Z" or first == "_"):
        return False
    for char in name[1:]:
        if not ("a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
                or "0" <= char <= "9"):
            return False
    return True
def _class_escape(source, escape):
    # handle escape code inside character class
    code = ESCAPES.get(escape)
    if code:
        return code
    code = CATEGORIES.get(escape)
    # only category escapes (\d, \s, ...) are valid in a class, not
    # position assertions like \b (which means backspace here)
    if code and code[0] == IN:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape (exactly two digits)
            escape += source.getwhile(2, HEXDIGITS)
            if len(escape) != 4:
                raise ValueError
            return LITERAL, int(escape[2:], 16) & 0xff
        elif c == "u" and source.istext:
            # unicode escape (exactly four digits)
            escape += source.getwhile(4, HEXDIGITS)
            if len(escape) != 6:
                raise ValueError
            return LITERAL, int(escape[2:], 16)
        elif c == "U" and source.istext:
            # unicode escape (exactly eight digits)
            escape += source.getwhile(8, HEXDIGITS)
            if len(escape) != 10:
                raise ValueError
            c = int(escape[2:], 16)
            chr(c) # raise ValueError for invalid code
            return LITERAL, c
        elif c in OCTDIGITS:
            # octal escape (up to three digits)
            escape += source.getwhile(2, OCTDIGITS)
            return LITERAL, int(escape[1:], 8) & 0xff
        elif c in DIGITS:
            # \8 and \9 are not valid inside a character class
            raise ValueError
        if len(escape) == 2:
            # any other two-character escape is the literal character
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
    # handle escape code in expression
    code = CATEGORIES.get(escape)
    if code:
        return code
    code = ESCAPES.get(escape)
    if code:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape
            escape += source.getwhile(2, HEXDIGITS)
            if len(escape) != 4:
                raise ValueError
            return LITERAL, int(escape[2:], 16) & 0xff
        elif c == "u" and source.istext:
            # unicode escape (exactly four digits)
            escape += source.getwhile(4, HEXDIGITS)
            if len(escape) != 6:
                raise ValueError
            return LITERAL, int(escape[2:], 16)
        elif c == "U" and source.istext:
            # unicode escape (exactly eight digits)
            escape += source.getwhile(8, HEXDIGITS)
            if len(escape) != 10:
                raise ValueError
            c = int(escape[2:], 16)
            chr(c) # raise ValueError for invalid code
            return LITERAL, c
        elif c == "0":
            # octal escape
            escape += source.getwhile(2, OCTDIGITS)
            return LITERAL, int(escape[1:], 8) & 0xff
        elif c in DIGITS:
            # octal escape *or* decimal group reference (sigh)
            if source.next in DIGITS:
                escape = escape + source.get()
                if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
                    source.next in OCTDIGITS):
                    # got three octal digits; this is an octal escape
                    escape = escape + source.get()
                    return LITERAL, int(escape[1:], 8) & 0xff
            # not an octal escape, so this is a group reference
            group = int(escape[1:])
            if group < state.groups:
                # only closed groups may be referenced
                if not state.checkgroup(group):
                    raise error("cannot refer to open group")
                return GROUPREF, group
            raise ValueError
        if len(escape) == 2:
            # any other two-character escape is the literal character
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
    # parse an alternation: a|b|c
    items = []
    itemsappend = items.append
    sourcematch = source.match
    while 1:
        itemsappend(_parse(source, state))
        if sourcematch("|"):
            continue
        if not nested:
            break
        if not source.next or sourcematch(")", 0):
            break
        else:
            raise error("pattern not properly closed")
    if len(items) == 1:
        # no alternation at all -- return the single branch unchanged
        return items[0]
    subpattern = SubPattern(state)
    subpatternappend = subpattern.append
    # check if all items share a common prefix
    while 1:
        prefix = None
        for item in items:
            if not item:
                break
            if prefix is None:
                prefix = item[0]
            elif item[0] != prefix:
                break
        else:
            # all subitems start with a common "prefix".
            # move it out of the branch
            for item in items:
                del item[0]
            subpatternappend(prefix)
            continue # check next one
        break
    # check if the branch can be replaced by a character set
    for item in items:
        if len(item) != 1 or item[0][0] != LITERAL:
            break
    else:
        # we can store this as a character set instead of a
        # branch (the compiler may optimize this even more)
        set = []
        setappend = set.append
        for item in items:
            setappend(item[0])
        subpatternappend((IN, set))
        return subpattern
    subpattern.append((BRANCH, (None, items)))
    return subpattern
def _parse_sub_cond(source, state, condgroup):
    # Parse the body of a conditional group (?(id)yes-pattern|no-pattern);
    # at most two alternatives are permitted.
    item_yes = _parse(source, state)
    item_no = _parse(source, state) if source.match("|") else None
    if item_no is not None and source.match("|"):
        raise error("conditional backref with more than two branches")
    if source.next and not source.match(")", 0):
        raise error("pattern not properly closed")
    result = SubPattern(state)
    result.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
    return result
# Precomputed sets used by _parse, hoisted to module level so the hot
# parser loop only pays for fast local lookups.
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
    # parse a simple pattern (one branch of an alternation); stops at
    # "|" or ")" and returns the accumulated SubPattern
    subpattern = SubPattern(state)
    # precompute constants into local variables
    subpatternappend = subpattern.append
    sourceget = source.get
    sourcematch = source.match
    _len = len
    PATTERNENDERS = _PATTERNENDERS
    ASSERTCHARS = _ASSERTCHARS
    LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
    REPEATCODES = _REPEATCODES
    while 1:
        if source.next in PATTERNENDERS:
            break # end of subpattern
        this = sourceget()
        if this is None:
            break # end of pattern
        if state.flags & SRE_FLAG_VERBOSE:
            # skip whitespace and comments
            if this in WHITESPACE:
                continue
            if this == "#":
                while 1:
                    this = sourceget()
                    if this in (None, "\n"):
                        break
                continue
        if this and this[0] not in SPECIAL_CHARS:
            subpatternappend((LITERAL, ord(this)))
        elif this == "[":
            # character set
            set = []
            setappend = set.append
##          if sourcematch(":"):
##              pass # handle character classes
            if sourcematch("^"):
                setappend((NEGATE, None))
            # check remaining characters
            start = set[:]
            while 1:
                this = sourceget()
                # "]" terminates the class unless it is its first member
                if this == "]" and set != start:
                    break
                elif this and this[0] == "\\":
                    code1 = _class_escape(source, this)
                elif this:
                    code1 = LITERAL, ord(this)
                else:
                    raise error("unexpected end of regular expression")
                if sourcematch("-"):
                    # potential range
                    this = sourceget()
                    if this == "]":
                        # trailing "-" before "]" is a literal dash
                        if code1[0] is IN:
                            code1 = code1[1][0]
                        setappend(code1)
                        setappend((LITERAL, ord("-")))
                        break
                    elif this:
                        if this[0] == "\\":
                            code2 = _class_escape(source, this)
                        else:
                            code2 = LITERAL, ord(this)
                        if code1[0] != LITERAL or code2[0] != LITERAL:
                            raise error("bad character range")
                        lo = code1[1]
                        hi = code2[1]
                        if hi < lo:
                            raise error("bad character range")
                        setappend((RANGE, (lo, hi)))
                    else:
                        raise error("unexpected end of regular expression")
                else:
                    if code1[0] is IN:
                        code1 = code1[1][0]
                    setappend(code1)
            # XXX: <fl> should move set optimization to compiler!
            if _len(set)==1 and set[0][0] is LITERAL:
                subpatternappend(set[0]) # optimization
            elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
                subpatternappend((NOT_LITERAL, set[1][1])) # optimization
            else:
                # XXX: <fl> should add charmap optimization here
                subpatternappend((IN, set))
        elif this and this[0] in REPEAT_CHARS:
            # repeat previous item
            if this == "?":
                min, max = 0, 1
            elif this == "*":
                min, max = 0, MAXREPEAT
            elif this == "+":
                min, max = 1, MAXREPEAT
            elif this == "{":
                if source.next == "}":
                    # "{}" is a literal brace, not a repeat
                    subpatternappend((LITERAL, ord(this)))
                    continue
                here = source.tell()
                min, max = 0, MAXREPEAT
                lo = hi = ""
                while source.next in DIGITS:
                    lo = lo + source.get()
                if sourcematch(","):
                    while source.next in DIGITS:
                        hi = hi + sourceget()
                else:
                    hi = lo
                if not sourcematch("}"):
                    # malformed {m,n}: treat "{" literally and rewind
                    subpatternappend((LITERAL, ord(this)))
                    source.seek(here)
                    continue
                if lo:
                    min = int(lo)
                    if min >= MAXREPEAT:
                        raise OverflowError("the repetition number is too large")
                if hi:
                    max = int(hi)
                    if max >= MAXREPEAT:
                        raise OverflowError("the repetition number is too large")
                if max < min:
                    raise error("bad repeat interval")
            else:
                raise error("not supported")
            # figure out which item to repeat
            if subpattern:
                item = subpattern[-1:]
            else:
                item = None
            if not item or (_len(item) == 1 and item[0][0] == AT):
                raise error("nothing to repeat")
            if item[0][0] in REPEATCODES:
                raise error("multiple repeat")
            if sourcematch("?"):
                # trailing "?" makes the repeat non-greedy
                subpattern[-1] = (MIN_REPEAT, (min, max, item))
            else:
                subpattern[-1] = (MAX_REPEAT, (min, max, item))
        elif this == ".":
            subpatternappend((ANY, None))
        elif this == "(":
            # group: 1 = capturing, 2 = non-capturing, 0 = not a group
            group = 1
            name = None
            condgroup = None
            if sourcematch("?"):
                group = 0
                # options
                if sourcematch("P"):
                    # python extensions
                    if sourcematch("<"):
                        # named group: skip forward to end of name
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error("unterminated name")
                            if char == ">":
                                break
                            name = name + char
                        group = 1
                        if not name:
                            raise error("missing group name")
                        if not isname(name):
                            raise error("bad character in group name")
                    elif sourcematch("="):
                        # named backreference
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error("unterminated name")
                            if char == ")":
                                break
                            name = name + char
                        if not name:
                            raise error("missing group name")
                        if not isname(name):
                            raise error("bad character in group name")
                        gid = state.groupdict.get(name)
                        if gid is None:
                            raise error("unknown group name")
                        subpatternappend((GROUPREF, gid))
                        continue
                    else:
                        char = sourceget()
                        if char is None:
                            raise error("unexpected end of pattern")
                        raise error("unknown specifier: ?P%s" % char)
                elif sourcematch(":"):
                    # non-capturing group
                    group = 2
                elif sourcematch("#"):
                    # comment
                    while 1:
                        if source.next is None or source.next == ")":
                            break
                        sourceget()
                    if not sourcematch(")"):
                        raise error("unbalanced parenthesis")
                    continue
                elif source.next in ASSERTCHARS:
                    # lookahead assertions
                    char = sourceget()
                    dir = 1
                    if char == "<":
                        if source.next not in LOOKBEHINDASSERTCHARS:
                            raise error("syntax error")
                        dir = -1 # lookbehind
                        char = sourceget()
                    p = _parse_sub(source, state)
                    if not sourcematch(")"):
                        raise error("unbalanced parenthesis")
                    if char == "=":
                        subpatternappend((ASSERT, (dir, p)))
                    else:
                        subpatternappend((ASSERT_NOT, (dir, p)))
                    continue
                elif sourcematch("("):
                    # conditional backreference group
                    condname = ""
                    while 1:
                        char = sourceget()
                        if char is None:
                            raise error("unterminated name")
                        if char == ")":
                            break
                        condname = condname + char
                    group = 2
                    if not condname:
                        raise error("missing group name")
                    if isname(condname):
                        condgroup = state.groupdict.get(condname)
                        if condgroup is None:
                            raise error("unknown group name")
                    else:
                        try:
                            condgroup = int(condname)
                        except ValueError:
                            raise error("bad character in group name")
                else:
                    # flags
                    if not source.next in FLAGS:
                        raise error("unexpected end of pattern")
                    while source.next in FLAGS:
                        state.flags = state.flags | FLAGS[sourceget()]
            if group:
                # parse group contents
                if group == 2:
                    # anonymous group
                    group = None
                else:
                    group = state.opengroup(name)
                if condgroup:
                    p = _parse_sub_cond(source, state, condgroup)
                else:
                    p = _parse_sub(source, state)
                if not sourcematch(")"):
                    raise error("unbalanced parenthesis")
                if group is not None:
                    state.closegroup(group)
                subpatternappend((SUBPATTERN, (group, p)))
            else:
                # flags-only group "(?aiLmsux)": just consume up to ")"
                while 1:
                    char = sourceget()
                    if char is None:
                        raise error("unexpected end of pattern")
                    if char == ")":
                        break
                    raise error("unknown extension")
        elif this == "^":
            subpatternappend((AT, AT_BEGINNING))
        elif this == "$":
            subpattern.append((AT, AT_END))
        elif this and this[0] == "\\":
            code = _escape(source, this, state)
            subpatternappend(code)
        else:
            raise error("parser error")
    return subpattern
def fix_flags(src, flags):
    # Reconcile the UNICODE/ASCII flags with the pattern type: str
    # patterns default to UNICODE unless ASCII is given; bytes patterns
    # may never use UNICODE.
    if isinstance(src, str):
        if flags & SRE_FLAG_ASCII:
            if flags & SRE_FLAG_UNICODE:
                raise ValueError("ASCII and UNICODE flags are incompatible")
        else:
            flags |= SRE_FLAG_UNICODE
    elif flags & SRE_FLAG_UNICODE:
        raise ValueError("can't use UNICODE flag with a bytes pattern")
    return flags
def parse(str, flags=0, pattern=None):
    # parse 're' pattern into list of (opcode, argument) tuples
    source = Tokenizer(str)
    if pattern is None:
        pattern = Pattern()
    pattern.flags = flags
    pattern.str = str
    p = _parse_sub(source, pattern, 0)
    p.pattern.flags = fix_flags(str, p.pattern.flags)
    # anything left over means a syntax error
    tail = source.get()
    if tail == ")":
        raise error("unbalanced parenthesis")
    elif tail:
        raise error("bogus characters at end of regular expression")
    if flags & SRE_FLAG_DEBUG:
        p.dump()
    if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
        # the VERBOSE flag was switched on inside the pattern. to be
        # on the safe side, we'll parse the whole thing again...
        return parse(str, p.pattern.flags)
    return p
def parse_template(source, pattern):
    # parse 're' replacement string into list of literals and
    # group references
    s = Tokenizer(source)
    sget = s.get
    p = []
    a = p.append
    def literal(literal, p=p, pappend=a):
        # append a literal, merging it with a preceding literal if any
        if p and p[-1][0] is LITERAL:
            p[-1] = LITERAL, p[-1][1] + literal
        else:
            pappend((LITERAL, literal))
    sep = source[:0]
    # NOTE(review): both branches assign chr -- the tokenizer works on
    # (latin-1-decoded) text and the encode step below restores bytes,
    # so a single makechar suffices here.
    if isinstance(sep, str):
        makechar = chr
    else:
        makechar = chr
    while 1:
        this = sget()
        if this is None:
            break # end of replacement string
        if this and this[0] == "\\":
            # group
            c = this[1:2]
            if c == "g":
                # \g<name> or \g<number> reference
                name = ""
                if s.match("<"):
                    while 1:
                        char = sget()
                        if char is None:
                            raise error("unterminated group name")
                        if char == ">":
                            break
                        name = name + char
                if not name:
                    raise error("missing group name")
                try:
                    index = int(name)
                    if index < 0:
                        raise error("negative group number")
                except ValueError:
                    if not isname(name):
                        raise error("bad character in group name")
                    try:
                        index = pattern.groupindex[name]
                    except KeyError:
                        raise IndexError("unknown group name")
                a((MARK, index))
            elif c == "0":
                # octal escape (up to three digits)
                if s.next in OCTDIGITS:
                    this = this + sget()
                    if s.next in OCTDIGITS:
                        this = this + sget()
                literal(makechar(int(this[1:], 8) & 0xff))
            elif c in DIGITS:
                # octal escape *or* decimal group reference
                isoctal = False
                if s.next in DIGITS:
                    this = this + sget()
                    if (c in OCTDIGITS and this[2] in OCTDIGITS and
                        s.next in OCTDIGITS):
                        this = this + sget()
                        isoctal = True
                        literal(makechar(int(this[1:], 8) & 0xff))
                if not isoctal:
                    a((MARK, int(this[1:])))
            else:
                try:
                    this = makechar(ESCAPES[this][1])
                except KeyError:
                    pass
                literal(this)
        else:
            literal(this)
    # convert template to groups and literals lists
    i = 0
    groups = []
    groupsappend = groups.append
    literals = [None] * len(p)
    if isinstance(source, str):
        encode = lambda x: x
    else:
        # The tokenizer implicitly decodes bytes objects as latin-1, we must
        # therefore re-encode the final representation.
        encode = lambda x: x.encode('latin-1')
    for c, s in p:
        if c is MARK:
            groupsappend((i, s))
            # literal[i] is already None
        else:
            literals[i] = encode(s)
        i = i + 1
    return groups, literals
def expand_template(template, match):
    # Fill a parsed replacement template with the groups of *match* and
    # join the pieces with an empty separator of the subject's type.
    groups, literals = template
    pieces = literals[:]
    g = match.group
    try:
        for index, group in groups:
            s = g(group)
            if s is None:
                raise error("unmatched group")
            pieces[index] = s
    except IndexError:
        raise error("invalid group reference")
    return match.string[:0].join(pieces)
|
gpl-3.0
|
j-faria/OPEN
|
scons/scons-local-2.3.4/SCons/Tool/MSCommon/netframework.py
|
9
|
2754
|
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/MSCommon/netframework.py 2014/09/27 12:51:43 garyo"
__doc__ = """
"""
import os
import re
from common import read_reg, debug
# Original value recorded by dcournapeau
_FRAMEWORKDIR_HKEY_ROOT = r'Software\Microsoft\.NETFramework\InstallRoot'
# On SGK's system
# NOTE(review): this second assignment overrides the value above, so only
# the SDK "InstallationFolder" key is ever queried.
_FRAMEWORKDIR_HKEY_ROOT = r'Software\Microsoft\Microsoft SDKs\.NETFramework\v2.0\InstallationFolder'
def find_framework_root():
    """Return the .NET framework install root recorded in the registry.

    Returns None when the registry key cannot be read or when the
    directory it points at does not exist on the filesystem.
    """
    # XXX: find it from environment (FrameworkDir)
    try:
        froot = read_reg(_FRAMEWORKDIR_HKEY_ROOT)
        debug("Found framework install root in registry: %s" % froot)
    # Fix: the legacy "except WindowsError, e" comma syntax is invalid in
    # Python 3 and bound an unused variable; "except WindowsError:" is
    # equivalent and also valid Python 2.6+ syntax.
    except WindowsError:
        debug("Could not read reg key %s" % _FRAMEWORKDIR_HKEY_ROOT)
        return None
    if not os.path.exists(froot):
        debug("%s not found on fs" % froot)
        return None
    return froot
def query_versions():
    """Return installed .NET framework version directory names, newest first.

    Scans the framework root for entries matching 'v<digits>...' and
    returns them sorted highest-version-first; returns an empty list when
    no framework root can be located.
    """
    froot = find_framework_root()
    if not froot:
        return []
    contents = os.listdir(froot)
    matcher = re.compile('v[0-9]+.*')
    versions = [e for e in contents if matcher.match(e)]
    # Fix: the original used cmp() and list.sort(comparator), both removed
    # in Python 3.  The comparator ordered names by lexicographic comparison
    # of their dot-separated components (minus the leading 'v'), highest
    # first; a key function expresses exactly the same ordering.
    versions.sort(key=lambda v: v[1:].split('.'), reverse=True)
    return versions
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
|
40223138/2015cd_0505
|
static/Brython3.1.1-20150328-091302/Lib/collections/__init__.py
|
625
|
25849
|
#__all__ = ['deque', 'defaultdict', 'Counter']
from _collections import deque, defaultdict
#from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
# fixme brython.. there is an issue with _abcoll
#from _abcoll import *
#from _abcoll import Set
from _abcoll import MutableMapping
#import _abcoll
#__all__ += _abcoll.__all__
from collections.abc import *
import collections.abc
__all__ += collections.abc.__all__
from _collections import deque, defaultdict, namedtuple
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
#fixme brython
#from weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
class Set(set):
    # Minimal stand-in for the Set ABC: the _abcoll/collections.abc import
    # is partly disabled above (Brython fixme), so a plain subclass of the
    # builtin set is exported instead.
    pass
class Sequence(list):
    # Minimal stand-in for the Sequence ABC (see the Brython fixme notes
    # above): a plain subclass of the builtin list.
    pass
def _proxy(obj):
    # Identity replacement for weakref.proxy, whose import is commented
    # out above (Brython fixme); OrderedDict's "weak" links are therefore
    # ordinary strong references in this port.
    return obj
################################################################################
### OrderedDict
################################################################################
class _Link(object):
    # Doubly-linked-list node used by OrderedDict; __slots__ keeps the
    # per-key overhead small while still allowing weak references.
    __slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.

    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
    # The prev links are weakref proxies (to prevent circular references).
    # Individual links are kept alive by the hard reference in self.__map.
    # Those hard references disappear when a key is deleted from an OrderedDict.
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # reuse the existing sentinel when __init__ is called twice
            self.__root
        except AttributeError:
            self.__hardroot = _Link()
            self.__root = root = _proxy(self.__hardroot)
            root.prev = root.next = root
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value,
                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            self.__map[key] = link = Link()
            root = self.__root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            last.next = link
            root.prev = proxy(link)
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root.next
        while curr is not root:
            yield curr.key
            curr = curr.next
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root.prev
        while curr is not root:
            yield curr.key
            curr = curr.prev
    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        root = self.__root
        root.prev = root.next = root
        self.__map.clear()
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # unlink the tail element
            link = root.prev
            link_prev = link.prev
            link_prev.next = root
            root.prev = link_prev
        else:
            # unlink the head element
            link = root.next
            link_next = link.next
            root.next = link_next
            link_next.prev = root
        key = link.key
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    def move_to_end(self, key, last=True):
        '''Move an existing element to the end (or beginning if last==False).
        Raises KeyError if the element does not exist.
        When last=True, acts like a fast version of self[key]=self.pop(key).
        '''
        link = self.__map[key]
        link_prev = link.prev
        link_next = link.next
        # unlink from current position...
        link_prev.next = link_next
        link_next.prev = link_prev
        root = self.__root
        # ...and re-link at the requested end
        if last:
            last = root.prev
            link.prev = last
            link.next = root
            last.next = root.prev = link
        else:
            first = root.next
            link.prev = root
            link.next = first
            root.next = first.prev = link
    def __sizeof__(self):
        sizeof = _sys.getsizeof
        n = len(self) + 1                       # number of links including root
        size = sizeof(self.__dict__)            # instance dictionary
        size += sizeof(self.__map) * 2          # internal dict and inherited dict
        size += sizeof(self.__hardroot) * n     # link objects
        size += sizeof(self.__root) * n         # proxy objects
        return size
    #fixme brython..  Issue with _abcoll, which contains MutableMapping
    update = __update = MutableMapping.update
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    __ne__ = MutableMapping.__ne__
    # sentinel distinguishing "no default given" from an explicit None
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value.  If key is not found, d is returned if given, otherwise KeyError
        is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    #fixme, brython issue
    #@_recursive_repr()
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # drop the bookkeeping attributes; they are rebuilt on unpickle
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.
        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                   all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
#try: # Load C helper function if available
# from _collections import _count_elements
#except ImportError:
# pass
class Counter(dict):
    '''Dict subclass for counting hashable items.  Sometimes called a bag
    or multiset.  Elements are stored as dictionary keys and their counts
    are stored as dictionary values.

    >>> c = Counter('abcdeabcdabcaba')  # count elements from a string
    >>> c.most_common(3)                # three most common elements
    [('a', 5), ('b', 4), ('c', 3)]
    >>> sorted(c)                       # list all unique elements
    ['a', 'b', 'c', 'd', 'e']
    >>> ''.join(sorted(c.elements()))   # list elements with repetitions
    'aaaaabbbbcccdde'
    >>> sum(c.values())                 # total of all counts
    15
    >>> c['a']                          # count of letter 'a'
    5
    >>> for elem in 'shazam':           # update counts from an iterable
    ...     c[elem] += 1                # by adding 1 to each element's count
    >>> c['a']                          # now there are seven 'a'
    7
    >>> del c['b']                      # remove all 'b'
    >>> c['b']                          # now there are zero 'b'
    0
    >>> d = Counter('simsalabim')       # make another counter
    >>> c.update(d)                     # add in the second counter
    >>> c['a']                          # now there are nine 'a'
    9
    >>> c.clear()                       # empty the counter
    >>> c
    Counter()

    Note:  If a count is set to zero or reduced to zero, it will remain
    in the counter until the entry is deleted or the counter is cleared:

    >>> c = Counter('aaabbc')
    >>> c['b'] -= 2                     # reduce the count of 'b' by two
    >>> c.most_common()                 # 'b' is still in, but its count is zero
    [('a', 3), ('c', 1), ('b', 0)]
    '''
    # References:
    #   http://en.wikipedia.org/wiki/Multiset
    #   http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
    #   http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
    #   http://code.activestate.com/recipes/259174/
    #   Knuth, TAOCP Vol. II section 4.6.3

    def __init__(self, iterable=None, **kwds):
        '''Create a new, empty Counter object.  And if given, count elements
        from an input iterable.  Or, initialize the count from another mapping
        of elements to their counts.

        >>> c = Counter()                    # a new, empty counter
        >>> c = Counter('gallahad')          # a new counter from an iterable
        >>> c = Counter({'a': 4, 'b': 2})    # a new counter from a mapping
        >>> c = Counter(a=4, b=2)            # a new counter from keyword args
        '''
        # This port deliberately avoids zero-argument super() (its original
        # target runtime did not support it), so explicit dict.* calls are
        # used here and throughout the class.
        dict.__init__(self)
        self.update(iterable, **kwds)

    def __missing__(self, key):
        'The count of elements not in the Counter is zero.'
        # Needed so that self[missing_item] does not raise KeyError
        return 0

    def most_common(self, n=None):
        '''List the n most common elements and their counts from the most
        common to the least.  If n is None, then list all element counts.

        >>> Counter('abcdeabcdabcaba').most_common(3)
        [('a', 5), ('b', 4), ('c', 3)]
        '''
        # Emulate Bag.sortedByCount from Smalltalk
        if n is None:
            return sorted(self.items(), key=_itemgetter(1), reverse=True)
        return _heapq.nlargest(n, self.items(), key=_itemgetter(1))

    def elements(self):
        '''Iterator over elements repeating each as many times as its count.

        >>> c = Counter('ABCABC')
        >>> sorted(c.elements())
        ['A', 'A', 'B', 'B', 'C', 'C']

        # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
        >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
        >>> product = 1
        >>> for factor in prime_factors.elements():     # loop over factors
        ...     product *= factor                       # and multiply them
        >>> product
        1836

        Note, if an element's count has been set to zero or is a negative
        number, elements() will ignore it.
        '''
        # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
        # repeat(elem, count) yields nothing for zero/negative counts.
        return _chain.from_iterable(_starmap(_repeat, self.items()))

    # Override dict methods where necessary

    @classmethod
    def fromkeys(cls, iterable, v=None):
        # There is no equivalent method for counters because setting v=1
        # means that no element can have a count greater than one.
        raise NotImplementedError(
            'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')

    def update(self, iterable=None, **kwds):
        '''Like dict.update() but add counts instead of replacing them.

        Source can be an iterable, a dictionary, or another Counter instance.

        >>> c = Counter('which')
        >>> c.update('witch')           # add elements from another iterable
        >>> d = Counter('watch')
        >>> c.update(d)                 # add elements from another counter
        >>> c['h']                      # four 'h' in which, witch, and watch
        4
        '''
        # The regular dict.update() operation makes no sense here because the
        # replace behavior results in some of the original untouched counts
        # being mixed-in with all of the other counts for a mishmash that
        # doesn't have a straight-forward interpretation in most counting
        # contexts.  Instead, we implement straight-addition.  Both the inputs
        # and outputs are allowed to contain zero and negative counts.
        if iterable is not None:
            if isinstance(iterable, Mapping):
                if self:
                    self_get = self.get
                    for elem, count in iterable.items():
                        self[elem] = count + self_get(elem, 0)
                else:
                    # Fast path when the counter is empty.  Fixed to call
                    # dict.update() explicitly: the rest of this port avoids
                    # zero-argument super() (see __init__), and this line was
                    # the lone inconsistency.
                    dict.update(self, iterable)
            else:
                _count_elements(self, iterable)
        if kwds:
            self.update(kwds)

    def subtract(self, iterable=None, **kwds):
        '''Like dict.update() but subtracts counts instead of replacing them.
        Counts can be reduced below zero.  Both the inputs and outputs are
        allowed to contain zero and negative counts.

        Source can be an iterable, a dictionary, or another Counter instance.

        >>> c = Counter('which')
        >>> c.subtract('witch')             # subtract elements from another iterable
        >>> c.subtract(Counter('watch'))    # subtract elements from another counter
        >>> c['h']                          # 2 in which, minus 1 in witch, minus 1 in watch
        0
        >>> c['w']                          # 1 in which, minus 1 in witch, minus 1 in watch
        -1
        '''
        if iterable is not None:
            self_get = self.get
            if isinstance(iterable, Mapping):
                for elem, count in iterable.items():
                    self[elem] = self_get(elem, 0) - count
            else:
                for elem in iterable:
                    self[elem] = self_get(elem, 0) - 1
        if kwds:
            self.subtract(kwds)

    def copy(self):
        'Return a shallow copy.'
        return self.__class__(self)

    def __reduce__(self):
        # Pickle/copy support: rebuild from a plain dict of the counts.
        return self.__class__, (dict(self),)

    def __delitem__(self, elem):
        'Like dict.__delitem__() but does not raise KeyError for missing values.'
        if elem in self:
            # Explicit dict call instead of zero-argument super() -- kept
            # consistent with the rest of this port (see __init__).
            dict.__delitem__(self, elem)

    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        try:
            items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
            return '%s({%s})' % (self.__class__.__name__, items)
        except TypeError:
            # handle case where values are not orderable
            return '{0}({1!r})'.format(self.__class__.__name__, dict(self))

    # Multiset-style mathematical operations discussed in:
    #       Knuth TAOCP Volume II section 4.6.3 exercise 19
    #       and at http://en.wikipedia.org/wiki/Multiset
    #
    # Outputs guaranteed to only include positive counts.
    #
    # To strip negative and zero counts, add-in an empty counter:
    #       c += Counter()

    def __add__(self, other):
        '''Add counts from two counters.

        >>> Counter('abbb') + Counter('bcc')
        Counter({'b': 4, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count + other[elem]
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            # Pick up positive counts for elements only present in `other`.
            if elem not in self and count > 0:
                result[elem] = count
        return result

    def __sub__(self, other):
        ''' Subtract count, but keep only results with positive counts.

        >>> Counter('abbbc') - Counter('bccd')
        Counter({'b': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            newcount = count - other[elem]
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            # A negative count only in `other` becomes a positive result.
            if elem not in self and count < 0:
                result[elem] = 0 - count
        return result

    def __or__(self, other):
        '''Union is the maximum of value in either of the input counters.

        >>> Counter('abbb') | Counter('bcc')
        Counter({'b': 3, 'c': 2, 'a': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = other_count if count < other_count else count
            if newcount > 0:
                result[elem] = newcount
        for elem, count in other.items():
            if elem not in self and count > 0:
                result[elem] = count
        return result

    def __and__(self, other):
        ''' Intersection is the minimum of corresponding counts.

        >>> Counter('abbb') & Counter('bcc')
        Counter({'b': 1})
        '''
        if not isinstance(other, Counter):
            return NotImplemented
        result = Counter()
        for elem, count in self.items():
            other_count = other[elem]
            newcount = count if count < other_count else other_count
            if newcount > 0:
                result[elem] = newcount
        return result
########################################################################
### ChainMap (helper for configparser)
########################################################################
class ChainMap(MutableMapping):
    ''' A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updateable view.

    The underlying mappings are stored in a list.  That list is public and can
    accessed or updated using the *maps* attribute.  There is no other state.

    Lookups search the underlying mappings successively until a key is found.
    In contrast, writes, updates, and deletions only operate on the first
    mapping.
    '''

    def __init__(self, *maps):
        '''Initialize a ChainMap by setting *maps* to the given mappings.
        If no mappings are provided, a single empty dictionary is used.
        '''
        self.maps = list(maps) or [{}]          # always at least one map

    def __missing__(self, key):
        raise KeyError(key)

    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                return mapping[key]             # can't use 'key in mapping' with defaultdict
            except KeyError:
                pass
        return self.__missing__(key)            # support subclasses that define __missing__

    def get(self, key, default=None):
        return self[key] if key in self else default

    def __len__(self):
        return len(set().union(*self.maps))     # reuses stored hash values if possible

    def __iter__(self):
        return iter(set().union(*self.maps))

    def __contains__(self, key):
        return any(key in m for m in self.maps)

    def __bool__(self):
        return any(self.maps)

    def __repr__(self):
        # Fix: __repr__ used to be defined twice in this class.  The first,
        # CPython-style "ClassName(map1, map2)" definition was dead code,
        # silently shadowed by this comma-joined form.  The dead duplicate is
        # removed and the previously *active* behavior is kept, so output
        # seen by existing callers is unchanged.
        return ','.join(str(_map) for _map in self.maps)

    @classmethod
    def fromkeys(cls, iterable, *args):
        'Create a ChainMap with a single dict created from the iterable.'
        return cls(dict.fromkeys(iterable, *args))

    def copy(self):
        'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
        return self.__class__(self.maps[0].copy(), *self.maps[1:])

    __copy__ = copy

    def new_child(self):                        # like Django's Context.push()
        'New ChainMap with a new dict followed by all previous maps.'
        return self.__class__({}, *self.maps)

    @property
    def parents(self):                          # like Django's Context.pop()
        'New ChainMap from maps[1:].'
        return self.__class__(*self.maps[1:])

    def __setitem__(self, key, value):
        self.maps[0][key] = value

    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')

    def pop(self, key, *args):
        'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            #raise KeyError('Key not found in the first mapping: {!r}'.format(key))
            raise KeyError('Key not found in the first mapping: %s' % key)

    def clear(self):
        'Clear maps[0], leaving maps[1:] intact.'
        self.maps[0].clear()
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
    """A dict work-alike that keeps its contents in the ``data`` attribute,
    making it convenient to subclass and override individual operations."""

    # Start by filling-out the abstract methods
    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            self.update(dict)
        if kwargs:
            self.update(kwargs)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        try:
            return self.data[key]
        except KeyError:
            # Fall back to a subclass-provided __missing__, mirroring dict.
            missing = getattr(self.__class__, "__missing__", None)
            if missing is None:
                raise
            return missing(self, key)

    def __setitem__(self, key, item):
        self.data[key] = item

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    # Membership consults only the real storage, so __missing__ (if any)
    # never makes absent keys look present.
    def __contains__(self, key):
        return key in self.data

    # Methods that dicts have but MutableMapping does not supply.
    def __repr__(self):
        return repr(self.data)

    def copy(self):
        """Return a shallow copy.  Subclass instances are copied via
        copy.copy() with ``data`` temporarily detached so that the copy
        ends up with its own storage dict."""
        if self.__class__ is UserDict:
            return UserDict(self.data.copy())
        import copy
        saved = self.data
        try:
            self.data = {}
            duplicate = copy.copy(self)
        finally:
            self.data = saved
        duplicate.update(self)
        return duplicate

    @classmethod
    def fromkeys(cls, iterable, value=None):
        new = cls()
        for key in iterable:
            new[key] = value
        return new
################################################################################
### UserList
################################################################################
################################################################################
### UserString
################################################################################
|
agpl-3.0
|
fccagou/pynotif
|
usb/_objfinalizer.py
|
23
|
5287
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 André Erdmann
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
import sys
__all__ = ['AutoFinalizedObject']
class _AutoFinalizedObjectBase(object):
"""
Base class for objects that get automatically
finalized on delete or at exit.
"""
def _finalize_object(self):
"""Actually finalizes the object (frees allocated resources etc.).
Returns: None
Derived classes should implement this.
"""
pass
def __new__(cls, *args, **kwargs):
"""Creates a new object instance and adds the private finalizer
attributes to it.
Returns: new object instance
Arguments:
* *args, **kwargs -- ignored
"""
instance = super(_AutoFinalizedObjectBase, cls).__new__(cls)
instance._finalize_called = False
return instance
def _do_finalize_object(self):
"""Helper method that finalizes the object if not already done.
Returns: None
"""
if not self._finalize_called: # race-free?
self._finalize_called = True
self._finalize_object()
def finalize(self):
"""Finalizes the object if not already done.
Returns: None
"""
# this is the "public" finalize method
raise NotImplementedError(
"finalize() must be implemented by AutoFinalizedObject."
)
def __del__(self):
self.finalize()
if sys.hexversion >= 0x3040000:
    # python >= 3.4: use weakref.finalize
    import weakref

    def _do_finalize_object_ref(obj_ref):
        """Helper function for weakref.finalize() that dereferences a weakref
        to an object and calls its _do_finalize_object() method if the object
        is still alive. Does nothing otherwise.

        Returns: None (implicit)

        Arguments:
        * obj_ref -- weakref to an object
        """
        obj = obj_ref()
        if obj is not None:
            # else object disappeared
            obj._do_finalize_object()

    class AutoFinalizedObject(_AutoFinalizedObjectBase):
        # Variant for Python >= 3.4: registers a weakref.finalize() callback
        # so resources are released at garbage collection or interpreter exit.

        def __new__(cls, *args, **kwargs):
            """Creates a new object instance and adds the private finalizer
            attributes to it.

            Returns: new object instance

            Arguments:
            * *args, **kwargs -- passed to the parent instance creator
            (which ignores them)
            """
            # Note: Do not pass a (hard) reference to instance to the
            # finalizer as func/args/kwargs, it'd keep the object
            # alive until the program terminates.
            # A weak reference is fine.
            #
            # Note 2: When using weakrefs and not calling finalize() in
            # __del__, the object may already have disappeared
            # when weakref.finalize() kicks in.
            # Make sure that _finalizer() gets called,
            # i.e. keep __del__() from the base class.
            #
            # Note 3: the _finalize_called attribute is (probably) useless
            # for this class
            instance = super(AutoFinalizedObject, cls).__new__(
                cls, *args, **kwargs
            )
            # The finalizer holds only a weakref to the instance (see Note 1),
            # so it does not extend the instance's lifetime.
            instance._finalizer = weakref.finalize(
                instance, _do_finalize_object_ref, weakref.ref(instance)
            )
            return instance

        def finalize(self):
            """Finalizes the object if not already done."""
            # Calling the finalizer marks it dead, so it runs at most once.
            self._finalizer()
else:
    # python < 3.4: keep the old behavior (rely on __del__),
    # but don't call _finalize_object() more than once
    class AutoFinalizedObject(_AutoFinalizedObjectBase):

        def finalize(self):
            """Finalizes the object if not already done."""
            # _do_finalize_object() guards against double finalization.
            self._do_finalize_object()
|
gpl-2.0
|
wanglongqi/sympy
|
sympy/utilities/tests/test_pytest.py
|
105
|
1601
|
from sympy.utilities.pytest import raises, USE_PYTEST

if USE_PYTEST:
    import py.test
    # Skip this entire module when py.test is in use (skipif(USE_PYTEST) is
    # always true inside this branch) -- presumably because these tests
    # target sympy's own raises() fallback; confirm in sympy.utilities.pytest.
    pytestmark = py.test.mark.skipif(USE_PYTEST,
                                     reason=("using py.test"))
# Test callables
def test_expected_exception_is_silent_callable():
    """raises() must succeed quietly when the callable raises the expected type."""
    def boom():
        raise ValueError()
    raises(ValueError, boom)
def test_lack_of_exception_triggers_AssertionError_callable():
    # If the callable raises nothing, raises() itself must raise
    # AssertionError("DID NOT RAISE").  The bare `assert False` guards
    # against raises() returning silently.
    try:
        raises(Exception, lambda: 1 + 1)
        assert False
    except AssertionError as e:
        assert str(e) == "DID NOT RAISE"
def test_unexpected_exception_is_passed_through_callable():
    # An exception of a type other than the expected one must propagate
    # out of raises() unchanged (message intact).
    def f():
        raise ValueError("some error message")
    try:
        raises(TypeError, f)
        assert False
    except ValueError as e:
        assert str(e) == "some error message"
# Test with statement
def test_expected_exception_is_silent_with():
    # Context-manager form: the expected exception type is swallowed.
    with raises(ValueError):
        raise ValueError()
def test_lack_of_exception_triggers_AssertionError_with():
    # Context-manager form: if the body raises nothing, leaving the
    # `with raises(...)` block must raise AssertionError("DID NOT RAISE").
    try:
        with raises(Exception):
            1 + 1
        assert False
    except AssertionError as e:
        assert str(e) == "DID NOT RAISE"
def test_unexpected_exception_is_passed_through_with():
    # Context-manager form: a non-matching exception type must propagate
    # out of the with-block unchanged (message intact).
    try:
        with raises(TypeError):
            raise ValueError("some error message")
        assert False
    except ValueError as e:
        assert str(e) == "some error message"
# Now we can use raises() instead of try/catch
# to test that a specific exception class is raised
def test_second_argument_should_be_callable_or_string():
    """Passing a non-callable, non-string second argument (42) is a TypeError."""
    def invalid_use():
        raises("irrelevant", 42)
    raises(TypeError, invalid_use)
|
bsd-3-clause
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/names/cache.py
|
55
|
2752
|
# -*- test-case-name: twisted.names.test -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import time
from zope.interface import implements
from twisted.names import dns
from twisted.python import failure, log
from twisted.internet import interfaces, defer
import common
class CacheResolver(common.ResolverBase):
    """A resolver that serves records from a local, memory cache."""

    # Python 2 only: uses zope.interface's implements() class advice and
    # dict.has_key() below.
    implements(interfaces.IResolver)

    cache = None

    def __init__(self, cache = None, verbose = 0):
        # cache maps dns.Query -> (insertion time, (answers, authority,
        # additional)); verbose > 0 logs hits, > 1 also logs misses.
        common.ResolverBase.__init__(self)
        if cache is None:
            cache = {}
        self.cache = cache
        self.verbose = verbose
        # Maps query -> delayed call that evicts the entry when its TTL expires.
        self.cancel = {}

    def __setstate__(self, state):
        # After unpickling, drop any cache entry containing a record whose
        # TTL has already expired relative to its insertion time.
        self.__dict__ = state
        now = time.time()
        # NOTE(review): deleting while iterating is safe here only because
        # Python 2's dict.items() returns a list snapshot.
        for (k, (when, (ans, add, ns))) in self.cache.items():
            diff = now - when
            for rec in ans + add + ns:
                if rec.ttl < diff:
                    del self.cache[k]
                    break

    def __getstate__(self):
        # Pending eviction timers are not picklable: cancel and drop them
        # before handing out the instance dict.
        for c in self.cancel.values():
            c.cancel()
        self.cancel.clear()
        return self.__dict__

    def _lookup(self, name, cls, type, timeout):
        # Return a Deferred firing with cached (answers, authority,
        # additional) -- TTLs reduced by the entry's age -- or failing with
        # dns.DomainError on a miss.
        now = time.time()
        q = dns.Query(name, type, cls)
        try:
            when, (ans, auth, add) = self.cache[q]
        except KeyError:
            if self.verbose > 1:
                log.msg('Cache miss for ' + repr(name))
            return defer.fail(failure.Failure(dns.DomainError(name)))
        else:
            if self.verbose:
                log.msg('Cache hit for ' + repr(name))
            diff = now - when
            # Rebuild the headers so TTLs reflect time spent in the cache.
            return defer.succeed((
                [dns.RRHeader(str(r.name), r.type, r.cls, r.ttl - diff, r.payload) for r in ans],
                [dns.RRHeader(str(r.name), r.type, r.cls, r.ttl - diff, r.payload) for r in auth],
                [dns.RRHeader(str(r.name), r.type, r.cls, r.ttl - diff, r.payload) for r in add]
            ))

    def lookupAllRecords(self, name, timeout = None):
        # ALL_RECORDS queries are never served from this cache.
        return defer.fail(failure.Failure(dns.DomainError(name)))

    def cacheResult(self, query, payload):
        # Store payload under query and schedule eviction after the smallest
        # TTL found in any of its record sections.
        if self.verbose > 1:
            log.msg('Adding %r to cache' % query)
        self.cache[query] = (time.time(), payload)
        if self.cancel.has_key(query):
            self.cancel[query].cancel()
        s = list(payload[0]) + list(payload[1]) + list(payload[2])
        # NOTE(review): s[0] raises IndexError if all three sections are
        # empty -- presumably callers never cache an empty result; confirm.
        m = s[0].ttl
        for r in s:
            m = min(m, r.ttl)
        from twisted.internet import reactor
        self.cancel[query] = reactor.callLater(m, self.clearEntry, query)

    def clearEntry(self, query):
        # Eviction callback: drop the cached entry and its timer bookkeeping.
        del self.cache[query]
        del self.cancel[query]
|
bsd-3-clause
|
yonglehou/pybrain
|
examples/supervised/neuralnets+svm/example_mixturedensity.py
|
26
|
3548
|
# $Id$
# Train a mixture of Gaussians to approximate a multi-mode dataset.
# It seems fairly easy to fall into some local minimum. Good solutions
# have errors around -200.
# This example reproduces Fig. 5.21 from Bishop (2006).
__author__ = 'Martin Felder'
import pylab as p
import numpy as np
from pybrain.structure.modules import LinearLayer, BiasUnit, SigmoidLayer
from pybrain.structure import FullConnection, FeedForwardNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers.mixturedensity import RPropMinusTrainerMix, BackpropTrainerMix
from pybrain.structure.modules.mixturedensity import MixtureDensityLayer
def multigaussian(x, mean, stddev):
    """Evaluate uncorrelated Gaussian densities at a given scalar point.

    x: scalar evaluation point
    mean: vector of per-component means
    stddev: vector of per-component standard deviations

    Returns a vector holding one normal density value per component.
    """
    z = (x - mean) / stddev
    normalizer = np.sqrt(2. * np.pi) * stddev
    return np.exp(-0.5 * z ** 2) / normalizer
if __name__ == '__main__':
    # build a network
    n = FeedForwardNetwork()
    # linear input layer
    n.addInputModule(LinearLayer(1, name='in'))
    # output layer of type 'outclass'
    N_GAUSSIANS = 3
    n.addOutputModule(MixtureDensityLayer(dim=1, name='out', mix=N_GAUSSIANS))
    # add bias module and connection to out module
    n.addModule(BiasUnit(name = 'bias'))
    n.addConnection(FullConnection(n['bias'], n['out']))
    # arbitrary number of hidden layers of type 'hiddenclass'
    n.addModule(SigmoidLayer(5, name='hidden'))
    n.addConnection(FullConnection(n['bias'], n['hidden']))
    # network with hidden layer(s), connections
    # from in to first hidden and last hidden to out
    n.addConnection(FullConnection(n['in'], n['hidden']))
    n.addConnection(FullConnection(n['hidden'], n['out']))
    n.sortModules()
    # start from small random weights
    n._setParameters(np.random.uniform(-0.1, 0.1, size=n.paramdim))
    # build some data: a noisy, multi-valued inverse mapping x -> y
    y = np.arange(0.0, 1.0, 0.005).reshape(200,1)
    x = (
        y +
        0.3 * np.sin(2 * np.pi * y) +
        np.random.uniform(-0.1, 0.1, y.size).reshape(y.size, 1)
    )
    dataset = SupervisedDataSet(1, 1)
    dataset.setField('input', x)
    dataset.setField('target', y)
    # train the network
    trainer = RPropMinusTrainerMix(n, dataset=dataset, verbose=True,
                                   weightdecay=0.05)
    trainer.trainEpochs(200)
    # plot the density and other stuff
    p.subplot(2, 2, 3)
    dens = []
    newx = np.arange(0.0, 1.0, 0.01)
    newx = newx.reshape(newx.size, 1)
    dataset.setField('input', newx)
    out = n.activateOnDataset(dataset)
    for pars in out:
        # per-sample output layout (as sliced here): first N_GAUSSIANS are
        # mixing coefficients, then stddevs, then means
        stds = pars[N_GAUSSIANS:N_GAUSSIANS*2]
        means = pars[N_GAUSSIANS*2:N_GAUSSIANS*3]
        line = multigaussian(newx, means, stds)
        # mixture density = component densities weighted by mixing coeffs
        density = line[:,0] * pars[0]
        for gaussian in range(1, N_GAUSSIANS):
            density += line[:, gaussian] * pars[gaussian]
        dens.append(density)
    newx = newx.flatten()
    dens = np.array(dens).transpose()
    p.contourf(newx, newx, dens, 30)
    p.title("cond. probab. dens.")
    p.subplot(221)
    out = np.array(out)
    p.plot(newx, out[:,0:3])
    p.title("mixing coefficient")
    p.subplot(222)
    p.plot(newx, out[:,6:9])
    p.title("means of Gaussians")
    p.subplot(224)
    p.scatter(x.flatten(), y.flatten(),
              marker='o', edgecolor='g', facecolors='none')
    p.hold(True)
    # conditional mode: the most probable y for each x (Bishop Fig. 5.21)
    cmode = dens.argmax(axis=0)
    p.plot(newx, newx[cmode], "or", markersize=3)
    p.xlim(0, 1)
    p.ylim(0, 1)
    p.title("data and cond. mode")
    p.show()
|
bsd-3-clause
|
backmari/moose
|
python/chigger/tests/new_files/old_files.py
|
6
|
1873
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import chigger
import os
import time
import shutil
import glob
# Remove old temporary files left by previous runs of this test.
files = glob.glob('old_file_adapt*.e*')
for f in files:
    os.remove(f)

# Copy the files to the local directory under a temporary working name.
base_file = '../input/step10_micro_out.e'
shutil.copy(base_file, 'old_file_adapt.e')

# Copy each adaptivity step file (-s002 .. -s009) alongside the base file.
suffix = ['-s002', '-s003', '-s004', '-s005', '-s006', '-s007', '-s008', '-s009']
n = len(suffix)
for i in range(n):
    shutil.copy(base_file + suffix[i], 'old_file_adapt.e' + suffix[i])

# Wait a second and then touch a few files so there are some that are old
# relative to the untouched ones (the reader distinguishes by mtime).
time.sleep(1.2)
os.utime('old_file_adapt.e', None) # 0, 0.5
os.utime('old_file_adapt.e-s002', None) # 1.0
os.utime('old_file_adapt.e-s004', None) # 2.0
os.utime('old_file_adapt.e-s007', None) # 3.5
os.utime('old_file_adapt.e-s009', None) # 4.5

# Read the file and show which timesteps the reader picked up.
# NOTE: Python 2 print statement -- this script is Python 2 only.
reader = chigger.exodus.ExodusReader('old_file_adapt.e')
reader.update()
print reader.getTimes()
|
lgpl-2.1
|
Nowheresly/odoo
|
addons/auth_signup/controllers/main.py
|
26
|
6356
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
import werkzeug
import openerp
from openerp.addons.auth_signup.res_users import SignupError
from openerp.addons.web.controllers.main import ensure_db
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class AuthSignupHome(openerp.addons.web.controllers.main.Home):
    # NOTE: Python 2 only -- uses the `except ExcType, e` syntax below.

    @http.route()
    def web_login(self, *args, **kw):
        """Extend the standard login page with signup/reset-password flags."""
        ensure_db()
        response = super(AuthSignupHome, self).web_login(*args, **kw)
        response.qcontext.update(self.get_auth_signup_config())
        if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
            # Redirect if already logged in and redirect param is present
            return http.redirect_with_hash(request.params.get('redirect'))
        return response

    @http.route('/web/signup', type='http', auth='public', website=True)
    def web_auth_signup(self, *args, **kw):
        """Render the signup form, or create the account on POST."""
        qcontext = self.get_auth_signup_qcontext()
        # Signup is only reachable with an invitation token or when open
        # (uninvited) signup is enabled.
        if not qcontext.get('token') and not qcontext.get('signup_enabled'):
            raise werkzeug.exceptions.NotFound()
        if 'error' not in qcontext and request.httprequest.method == 'POST':
            try:
                self.do_signup(qcontext)
                # Account created: fall through to the normal login flow.
                return super(AuthSignupHome, self).web_login(*args, **kw)
            except (SignupError, AssertionError), e:
                if request.env["res.users"].sudo().search([("login", "=", qcontext.get("login"))]):
                    qcontext["error"] = _("Another user is already registered using this email address.")
                else:
                    _logger.error(e.message)
                    qcontext['error'] = _("Could not create a new account.")
        return request.render('auth_signup.signup', qcontext)

    @http.route('/web/reset_password', type='http', auth='public', website=True)
    def web_auth_reset_password(self, *args, **kw):
        """Render the reset form; on POST either finish a token-based reset
        (which behaves like signup) or email reset credentials."""
        qcontext = self.get_auth_signup_qcontext()
        if not qcontext.get('token') and not qcontext.get('reset_password_enabled'):
            raise werkzeug.exceptions.NotFound()
        if 'error' not in qcontext and request.httprequest.method == 'POST':
            try:
                if qcontext.get('token'):
                    # A token means the user sets the new password directly.
                    self.do_signup(qcontext)
                    return super(AuthSignupHome, self).web_login(*args, **kw)
                else:
                    login = qcontext.get('login')
                    assert login, "No login provided."
                    res_users = request.registry.get('res.users')
                    res_users.reset_password(request.cr, openerp.SUPERUSER_ID, login)
                    qcontext['message'] = _("An email has been sent with credentials to reset your password")
            except SignupError:
                qcontext['error'] = _("Could not reset your password")
                _logger.exception('error when resetting password')
            except Exception, e:
                qcontext['error'] = e.message or e.name
        return request.render('auth_signup.reset_password', qcontext)

    def get_auth_signup_config(self):
        """retrieve the module config (which features are enabled) for the login page"""
        icp = request.registry.get('ir.config_parameter')
        return {
            'signup_enabled': icp.get_param(request.cr, openerp.SUPERUSER_ID, 'auth_signup.allow_uninvited') == 'True',
            'reset_password_enabled': icp.get_param(request.cr, openerp.SUPERUSER_ID, 'auth_signup.reset_password') == 'True',
        }

    def get_auth_signup_qcontext(self):
        """ Shared helper returning the rendering context for signup and reset password """
        qcontext = request.params.copy()
        qcontext.update(self.get_auth_signup_config())
        if qcontext.get('token'):
            try:
                # retrieve the user info (name, login or email) corresponding to a signup token
                res_partner = request.registry.get('res.partner')
                token_infos = res_partner.signup_retrieve_info(request.cr, openerp.SUPERUSER_ID, qcontext.get('token'))
                for k, v in token_infos.items():
                    qcontext.setdefault(k, v)
            except:
                # NOTE(review): bare except treats *any* failure as a bad
                # token, hiding unrelated errors -- consider narrowing.
                qcontext['error'] = _("Invalid signup token")
        return qcontext

    def do_signup(self, qcontext):
        """ Shared helper that creates a res.partner out of a token """
        values = dict((key, qcontext.get(key)) for key in ('login', 'name', 'password'))
        assert any([k for k in values.values()]), "The form was not properly filled in."
        assert values.get('password') == qcontext.get('confirm_password'), "Passwords do not match; please retype them."
        values['lang'] = request.lang
        self._signup_with_values(qcontext.get('token'), values)
        request.cr.commit()

    def _signup_with_values(self, token, values):
        db, login, password = request.registry['res.users'].signup(request.cr, openerp.SUPERUSER_ID, values, token)
        request.cr.commit()     # as authenticate will use its own cursor we need to commit the current transaction
        uid = request.session.authenticate(db, login, password)
        if not uid:
            raise SignupError(_('Authentification Failed.'))
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jarn0ld/gnuradio
|
gr-uhd/examples/python/freq_hopping.py
|
36
|
9337
|
#!/usr/bin/env python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
TXs a waveform (either from a file, or a sinusoid) in a frequency-hopping manner.
"""
import numpy
import argparse
import pmt
from gnuradio import gr
from gnuradio import blocks
from gnuradio import uhd
def setup_parser():
    """ Setup the parser for the frequency hopper. """
    parser = argparse.ArgumentParser(
        description="Transmit a signal in a frequency-hopping manner, using tx_freq tags."
    )
    # NOTE(review): type=file relies on the Python 2 built-in `file`;
    # as written this script is Python 2 only.
    parser.add_argument('-i', '--input-file', type=file, default=None,
                        help="File with samples to transmit. If left out, will transmit a sinusoid.")
    parser.add_argument("-a", "--args", default="",
                        help="UHD device address args.")
    parser.add_argument("--spec", default="",
                        help="UHD subdev spec.")
    parser.add_argument("--antenna", default="",
                        help="UHD antenna settings.")
    parser.add_argument("--gain", default=None, type=float,
                        help="USRP gain (defaults to mid-point in dB).")
    parser.add_argument("-r", "--rate", type=float, default=1e6,
                        help="Sampling rate")
    parser.add_argument("-N", "--samp-per-burst", type=int, default=10000,
                        help="Samples per burst")
    # Hop timing constraint: hop-time must cover at least one burst duration.
    parser.add_argument("-t", "--hop-time", type=float, default=1000,
                        help="Time between hops in milliseconds. This must be larger than or equal to the burst duration as set by --samp-per-burst")
    parser.add_argument("-f", "--freq", type=float, default=2.45e9,
                        help="Base frequency. This is the middle channel frequency at which the USRP will Tx.")
    parser.add_argument("--dsp", action='store_true',
                        help="DSP tuning only.")
    parser.add_argument("-d", "--freq-delta", type=float, default=1e6,
                        help="Channel spacing.")
    parser.add_argument("-c", "--num-channels", type=int, default=5,
                        help="Number of channels.")
    parser.add_argument("-B", "--num-bursts", type=int, default=30,
                        help="Number of bursts to transmit before terminating.")
    parser.add_argument("-p", "--post-tuning", action='count',
                        help="Tune after transmitting. Default is to tune immediately before transmitting.")
    parser.add_argument("-v", "--verbose", action='count',
                        help="Print more information. The morer the printier.")
    return parser
class FrequencyHopperSrc(gr.hier_block2):
    """ Provides tags for frequency hopping.

    Passes the input stream through unchanged (multiplied by a constant-1.0
    tag-carrying source) while attaching the stream tags (tx_command/tx_freq,
    packet_len, tx_time) that tell a downstream UHD sink when, and on which
    channel, to transmit each burst.
    NOTE(review): this file uses Python-2-only constructs (xrange, print
    statements).
    """
    def __init__(
            self,
            n_bursts, n_channels,
            freq_delta, base_freq, dsp_tuning,
            burst_length, base_time, hop_time,
            post_tuning=False,
            tx_gain=0,
            verbose=False
        ):
        gr.hier_block2.__init__(self,
            "FrequencyHopperSrc",
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
        )
        n_samples_total = n_bursts * burst_length
        # Channels are laid out symmetrically around base_freq, freq_delta apart.
        lowest_frequency = base_freq - numpy.floor(n_channels/2) * freq_delta
        self.hop_sequence = [lowest_frequency + n * freq_delta for n in xrange(n_channels)]
        # Shuffle the channel order once, then cycle through it for n_bursts.
        numpy.random.shuffle(self.hop_sequence)
        # Repeat that:
        self.hop_sequence = [self.hop_sequence[x % n_channels] for x in xrange(n_bursts)]
        if verbose:
            print "Hop Frequencies | Hop Pattern"
            print "=================|================================"
            for f in self.hop_sequence:
                print "{:6.3f} MHz | ".format(f/1e6),
                if n_channels < 50:
                    print " " * int((f - base_freq) / freq_delta) + "#"
                else:
                    print "\n"
            print "=================|================================"
        # There's no real point in setting the gain via tag for this application,
        # but this is an example to show you how to do it.
        gain_tag = gr.tag_t()
        gain_tag.offset = 0
        gain_tag.key = pmt.string_to_symbol('tx_command')
        gain_tag.value = pmt.to_pmt({'gain': tx_gain})
        tag_list = [gain_tag,]
        # One (tune, packet_len, tx_time) tag triple per burst.
        for i in xrange(len(self.hop_sequence)):
            tune_tag = gr.tag_t()
            tune_tag.offset = i * burst_length
            if i > 0 and post_tuning and not dsp_tuning: # TODO dsp_tuning should also be able to do post_tuning
                tune_tag.offset -= 1 # Move it to last sample of previous burst
            if dsp_tuning:
                # DSP-only tuning: keep the LO fixed and shift digitally.
                tune_tag.key = pmt.string_to_symbol('tx_command')
                tune_tag.value = pmt.to_pmt({'lo_freq': base_freq, 'dsp_freq': base_freq - self.hop_sequence[i]})
            else:
                tune_tag.key = pmt.string_to_symbol('tx_freq')
                tune_tag.value = pmt.to_pmt(self.hop_sequence[i])
            tag_list.append(tune_tag)
            length_tag = gr.tag_t()
            length_tag.offset = i * burst_length
            length_tag.key = pmt.string_to_symbol('packet_len')
            length_tag.value = pmt.from_long(burst_length)
            tag_list.append(length_tag)
            # Absolute transmit time as a (full seconds, fractional) PMT tuple.
            time_tag = gr.tag_t()
            time_tag.offset = i * burst_length
            time_tag.key = pmt.string_to_symbol('tx_time')
            time_tag.value = pmt.make_tuple(
                pmt.from_uint64(int(base_time + i * hop_time)),
                pmt.from_double((base_time + i * hop_time) % 1),
            )
            tag_list.append(time_tag)
        tag_source = blocks.vector_source_c((1.0,) * n_samples_total, repeat=False, tags=tag_list)
        mult = blocks.multiply_cc()
        # Input -> multiplier -> output; the tag source rides on port 1 so its
        # tags propagate onto the payload stream.
        self.connect(self, mult, self)
        self.connect(tag_source, (mult, 1))
class FlowGraph(gr.top_block):
""" Flow graph that does the frequency hopping. """
def __init__(self, options):
gr.top_block.__init__(self)
if options.input_file is not None:
src = blocks.file_source(gr.sizeof_gr_complex, options.filename, repeat=True)
else:
src = blocks.vector_source_c((.5,) * int(1e6) * 2, repeat=True)
# Setup USRP
self.u = uhd.usrp_sink(options.args, uhd.stream_args('fc32'), "packet_len")
if(options.spec):
self.u.set_subdev_spec(options.spec, 0)
if(options.antenna):
self.u.set_antenna(options.antenna, 0)
self.u.set_samp_rate(options.rate)
# Gain is set in the hopper block
if options.gain is None:
g = self.u.get_gain_range()
options.gain = float(g.start()+g.stop())/2.0
print "-- Setting gain to {} dB".format(options.gain)
r = self.u.set_center_freq(options.freq)
if not r:
print '[ERROR] Failed to set base frequency.'
raise SystemExit, 1
hopper_block = FrequencyHopperSrc(
options.num_bursts, options.num_channels,
options.freq_delta, options.freq, options.dsp,
options.samp_per_burst, 1.0, options.hop_time / 1000.,
options.post_tuning,
options.gain,
options.verbose,
)
self.connect(src, hopper_block, self.u)
def print_hopper_stats(args):
    """ Nothing to do with Grace Hopper -- pretty-print the hop setup. """
    # NOTE: Python 2 print statement; the template below is runtime output
    # and is left exactly as-is.
    print """
    Parameter | Value
    ===================+=========================
    Hop Interval | {hop_time} ms
    Burst duration | {hop_duration} ms
    Lowest Frequency | {lowest_freq:6.3f} MHz
    Highest Frequency | {highest_freq:6.3f} MHz
    Frequency spacing | {freq_delta:6.4f} MHz
    Number of channels | {num_channels}
    Sampling rate | {rate} Msps
    Transmit Gain | {gain} dB
    ===================+=========================
    """.format(
        hop_time=args.hop_time,
        hop_duration=1000.0/args.rate*args.samp_per_burst,
        gain=args.gain,
        lowest_freq=args.freq/1e6,
        highest_freq=(args.freq + (args.num_channels-1) * args.freq_delta)/1e6,
        freq_delta=args.freq_delta/1e6,
        num_channels=args.num_channels,
        rate=args.rate/1e6,
    )
def main():
    """ Go, go, go! """
    args = setup_parser().parse_args()
    # Sanity check: one burst must fit inside one hop interval.
    if (1.0 * args.samp_per_burst / args.rate) > args.hop_time * 1e-3:
        print "Burst duration must be smaller than hop time."
        exit(1)
    if args.verbose:
        print_hopper_stats(args)
    top_block = FlowGraph(args)
    print "Starting to hop, skip and jump... press Ctrl+C to exit."
    # Reset the device clock so the absolute tx_time tags (base_time 1.0s)
    # lie in the near future.
    top_block.u.set_time_now(uhd.time_spec(0.0))
    top_block.run()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
|
gpl-3.0
|
gannetson/sportschooldeopenlucht
|
env/lib/python2.7/site-packages/django/core/files/images.py
|
178
|
1982
|
"""
Utility functions for handling images.
Requires PIL, as you might imagine.
"""
from django.core.files import File
class ImageFile(File):
    """
    A mixin for use alongside django.core.files.base.File, which provides
    additional features for dealing with images.

    Adds lazily-computed, cached `width` and `height` properties.
    """
    def _get_width(self):
        # Width is the first element of the (width, height) pair.
        return self._get_image_dimensions()[0]
    width = property(_get_width)
    def _get_height(self):
        # Height is the second element of the (width, height) pair.
        return self._get_image_dimensions()[1]
    height = property(_get_height)
    def _get_image_dimensions(self):
        # Cache the result: computing dimensions reads (part of) the file.
        if not hasattr(self, '_dimensions_cache'):
            # Remember whether the file was closed so get_image_dimensions
            # can restore that state when it is done.
            close = self.closed
            self.open()
            self._dimensions_cache = get_image_dimensions(self, close=close)
        return self._dimensions_cache
def get_image_dimensions(file_or_path, close=False):
    """
    Returns the (width, height) of an image, given an open file or a path. Set
    'close' to True to close the file at the end if it is initially in an open
    state.

    Returns None if PIL cannot determine the dimensions.
    """
    # Try to import PIL in either of the two ways it can end up installed.
    try:
        from PIL import ImageFile as PILImageFile
    except ImportError:
        import ImageFile as PILImageFile

    p = PILImageFile.Parser()
    # `file_obj` (renamed from `file`, which shadowed the builtin) is the
    # handle we actually read from; file_pos lets us restore the caller's
    # position when we were handed an already-open file.
    file_pos = None
    if hasattr(file_or_path, 'read'):
        file_obj = file_or_path
        file_pos = file_obj.tell()
        file_obj.seek(0)
    else:
        # A filesystem path: we opened it, so we are responsible for closing.
        file_obj = open(file_or_path, 'rb')
        close = True
    try:
        # Most of the time PIL only needs a small chunk to parse the image and
        # get the dimensions, but with some TIFF files PIL needs to parse the
        # whole file.  Doubling the chunk size bounds the number of feed()
        # calls on such files.
        chunk_size = 1024
        while 1:
            data = file_obj.read(chunk_size)
            if not data:
                break
            p.feed(data)
            if p.image:
                return p.image.size
            chunk_size = chunk_size * 2
        return None
    finally:
        if close:
            file_obj.close()
        else:
            file_obj.seek(file_pos)
|
bsd-3-clause
|
bdauvergne/python-oath
|
oath/_ocra.py
|
1
|
14944
|
import hmac
import hashlib
import re
import random
import string
from . import _hotp as hotp, _utils
'''
Implementation of OCRA
See also http://tools.ietf.org/html/draft-mraihi-mutual-oath-hotp-variants-14
'''
__all__ = (
'str2ocrasuite',
'StateException',
'OCRAChallengeResponseServer',
'OCRAChallengeResponseClient',
'OCRAMutualChallengeResponseServer',
'OCRAMutualChallengeResponseClient',
)
def is_int(v):
    """Return True if `v` can be converted to an int, False otherwise.

    Also returns False (instead of letting TypeError escape) for values
    such as None or lists; the previous version only caught ValueError,
    so e.g. `is_int(None)` -- reachable via DataInput.__call__ with its
    default `T_precomputed=None` -- crashed.
    """
    try:
        int(v)
        return True
    except (ValueError, TypeError):
        return False
# Constants
PERIODS = {'H': 3600, 'M': 60, 'S': 1}
HOTP = 'HOTP'
OCRA_1 = 'OCRA-1'
class CryptoFunction(object):
    '''Represents an OCRA CryptoFunction specification.

    :attribute hash_algo:
        an object implementing the digest interface as given by PEP 247 and
        the hashlib package
    :attribute truncation_length:
        the length to truncate the decimal representation; None means no
        truncation is applied.
    '''
    def __init__(self, hash_algo, truncation_length):
        assert hash_algo
        assert is_int(truncation_length) or truncation_length is None
        self.hash_algo = hash_algo
        self.truncation_length = truncation_length

    def __call__(self, key, data_input):
        '''Compute an HOTP digest for the given key and data input.

        :param key:
            a byte string containing the HMAC key
        :param data_input:
            the data input assembled as a byte-string as described by the
            OCRA specification
        :returns:
            the computed digest
        :rtype: str
        '''
        digest = hmac.new(key, data_input, self.hash_algo).digest()
        if not self.truncation_length:
            return str(hotp.truncated_value(digest))
        return hotp.dec(digest, self.truncation_length)

    def __str__(self):
        '''Return the standard textual form, e.g. HOTP-sha1-6.'''
        return 'HOTP-%s-%s' % (self.hash_algo.__name__, self.truncation_length)
def str2hashalgo(description):
    '''Look up a hash algorithm by its OATH specification name.

    :param description:
        the name of the hash algorithm, e.g. 'SHA1'
    :rtype: a hash algorithm class constructor
    :raises ValueError: when hashlib provides no matching algorithm
    '''
    candidate = getattr(hashlib, description.lower(), None)
    if not callable(candidate):
        raise ValueError('Unknown hash algorithm %s' % description)
    return candidate
def str2cryptofunction(crypto_function_description):
    '''
    Convert an OCRA crypto function description (e.g. 'HOTP-SHA1-6') into a
    CryptoFunction instance.

    :param crypto_function_description:
        the middle field of an OCRA suite string
    :returns:
        the CryptoFunction object
    :rtype: CryptoFunction
    :raises ValueError: on any malformed description
    '''
    parts = crypto_function_description.split('-')
    if len(parts) != 3:
        raise ValueError('CryptoFunction description must be triplet separated by -')
    kind, algo_name, trunc = parts
    if kind != HOTP:
        raise ValueError('Unknown CryptoFunction kind %s' % kind)
    algo = str2hashalgo(algo_name)
    try:
        truncation_length = int(trunc)
        if not (0 <= truncation_length <= 10):
            raise ValueError()
    except ValueError:
        raise ValueError('Invalid truncation length %s' % trunc)
    return CryptoFunction(algo, truncation_length)
class DataInput(object):
    '''
    OCRA data input description.

    Calling an instance with the parameters matching its descriptors
    assembles the binary data-input string that is fed to the HMAC
    implemented by a CryptoFunction object.
    '''
    # One slot per RFC 6287 data-input field:
    #   C: counter flag, Q: (kind, length) challenge descriptor,
    #   P: pin/password hash constructor, S: session-info length,
    #   T: timestamp period in seconds.
    __slots__ = ['C', 'Q', 'P', 'S', 'T']
    def __init__(self, C=None, Q=None, P=None, S=None, T=None):
        self.C = C
        self.Q = Q
        self.P = P
        self.S = S
        self.T = T
    def __call__(self, C=None, Q=None, P=None, P_digest=None, S=None, T=None, T_precomputed=None, Qsc=None):
        datainput = b''
        if self.C:
            # Counter: validated then encoded as an 8-byte big-endian integer.
            try:
                C = int(C)
                if C < 0 or C > 2 ** 64:
                    raise Exception()
            except:
                # NOTE(review): bare except also masks unrelated errors
                # (e.g. TypeError from int(None)) as 'invalid counter'.
                raise ValueError('Invalid counter value %s' % C)
            datainput += hotp.int2beint64(int(C))
        if self.Q:
            max_length = self.Q[1]
            if Qsc is not None:
                # Mutual Challenge-Response: Qsc carries the concatenated
                # challenge pair, so twice the single-challenge length.
                Q = Qsc
                max_length *= 2
            if Q is None or not isinstance(Q, str) or len(Q) > max_length:
                raise ValueError('challenge')
            if self.Q[0] == 'N' and not Q.isdigit():
                raise ValueError('challenge')
            if self.Q[0] == 'A' and not Q.isalnum():
                raise ValueError('challenge')
            if self.Q[0] == 'H':
                try:
                    int(Q, 16)
                except ValueError:
                    raise ValueError('challenge')
            # Encoding: numeric challenges as big-endian hex bytes,
            # alphanumeric as-is, hex as raw bytes; padded to 128 bytes.
            if self.Q[0] == 'N':
                Q = '%x' % int(Q)
                Q += '0' * (len(Q) % 2)
                Q = _utils.fromhex(Q)
            if self.Q[0] == 'A':
                pass
            if self.Q[0] == 'H':
                Q = _utils.fromhex(Q)
            datainput += _utils.tobytes(Q)
            datainput += _utils.tobytes('\0' * (128 - len(Q)))
        if self.P:
            if P_digest:
                # Accept a raw digest or its hex representation.
                if len(P_digest) == self.P.digest_size:
                    datainput += _utils.tobytes(P_digest)
                elif len(P_digest) == 2 * self.P.digest_size:
                    datainput += _utils.fromhex(_utils.tobytes(P_digest))
                else:
                    raise ValueError('Pin/Password digest invalid %r' % P_digest)
            elif P is None:
                raise ValueError('Pin/Password missing')
            else:
                # Hash the plaintext pin/password with the declared algorithm.
                datainput += self.P(_utils.tobytes(P)).digest()
        if self.S:
            # Session information must match the declared length exactly.
            if S is None or len(S) != self.S:
                raise ValueError('session')
            datainput += _utils.tobytes(S)
        if self.T:
            # Timestamp: number of T-second periods, as 8-byte big-endian.
            if is_int(T_precomputed):
                datainput += hotp.int2beint64(int(T_precomputed))
            elif is_int(T):
                datainput += hotp.int2beint64(int(T / self.T))
            else:
                raise ValueError('timestamp')
        return datainput
    def __str__(self):
        values = []
        for slot in DataInput.__slots__:
            value = getattr(self, slot, None)
            if value is not None:
                values.append('{0}={1}'.format(slot, value))
        # NOTE(review): DataInput.__class__.__name__ evaluates to 'type',
        # not 'DataInput' -- likely meant type(self).__name__; left as-is.
        return '<{0} {1}>'.format(DataInput.__class__.__name__, ', '.join(values))
def str2datainput(datainput_description):
    # Parse the third field of an OCRA suite (e.g. 'QN08-PSHA1-T1M') into a
    # DataInput instance. Raises ValueError on any malformed descriptor or
    # on a duplicated field letter.
    elements = datainput_description.split('-')
    datainputs = {}
    for element in elements:
        letter = element[0]
        if letter in datainputs:
            raise ValueError('DataInput already present %s %s' % (element, datainput_description))
        if letter == 'C':
            # Counter flag (no parameters).
            datainputs[letter] = 1
        elif letter == 'Q':
            # Challenge: kind in {N,A,H}, length 4..64; bare 'Q' means QN08.
            if len(element) == 1:
                datainputs[letter] = ('N', 8)
            else:
                second_letter = element[1]
                try:
                    if second_letter not in 'ANH':
                        raise ValueError()
                    length = int(element[2:])
                    if length < 4 or length > 64:
                        raise ValueError()
                except ValueError:
                    raise ValueError('Invalid challenge descriptor %s' % element)
                datainputs[letter] = (second_letter, length)
        elif letter == 'P':
            # Pin/password hash algorithm; defaults to SHA1.
            algo = str2hashalgo(element[1:] or 'SHA1')
            datainputs[letter] = algo
        elif letter == 'S':
            # Session-information length; defaults to 64 bytes.
            length = 64
            if element[1:]:
                try:
                    length = int(element[1:])
                except ValueError:
                    raise ValueError('Invalid session data descriptor %s' % element)
            datainputs[letter] = length
        elif letter == 'T':
            # Timestamp period, e.g. '1M' or '2H30M'; stored in seconds.
            complement = element[1:] or '1M'
            try:
                length = 0
                if not re.match(r'^(\d+[HMS])+$', complement):
                    raise ValueError()
                parts = re.findall(r'\d+[HMS]', complement)
                for part in parts:
                    period = part[-1]
                    quantity = int(part[:-1])
                    length += quantity * PERIODS[period]
                datainputs[letter] = length
            except ValueError:
                raise ValueError('Invalid timestamp descriptor %s' % element)
        else:
            raise ValueError('Invalid datainput descriptor %s' % element)
    return DataInput(**datainputs)
class OcraSuite(object):
    """A parsed OCRA suite: a crypto function plus a data-input layout."""

    def __init__(self, ocrasuite_description, crypto_function, data_input):
        # The textual description is kept because it is prepended (with a
        # NUL separator) to every HMAC input.
        self.ocrasuite_description = ocrasuite_description
        self.crypto_function = crypto_function
        self.data_input = data_input

    def __call__(self, key, **kwargs):
        """Compute the OCRA value for `key` and the given data-input params."""
        prefix = self.ocrasuite_description.encode('ascii') + b'\0'
        return self.crypto_function(key, prefix + self.data_input(**kwargs))

    def accept(self, response, key, **kwargs):
        """Constant-time check of `response` against the expected value."""
        expected = self(key, **kwargs)
        return _utils.compare_digest(str(response), expected)

    def __str__(self):
        return '<OcraSuite crypto_function:%s data_input:%s>' % (
            self.crypto_function, self.data_input)
def str2ocrasuite(ocrasuite_description):
    """Parse a full OCRA suite string ('OCRA-1:<crypto>:<datainput>') into
    an OcraSuite instance; raises ValueError on malformed input."""
    parts = ocrasuite_description.split(':')
    if len(parts) != 3:
        raise ValueError('Bad OcraSuite description %s' % ocrasuite_description)
    identifier, crypto_desc, datainput_desc = parts
    if identifier != OCRA_1:
        raise ValueError('Unsupported OCRA identifier %s' % identifier)
    return OcraSuite(ocrasuite_description,
                     str2cryptofunction(crypto_desc),
                     str2datainput(datainput_desc))
class StateException(Exception):
    """Signals that a challenge-response method was called out of order."""
    pass
DEFAULT_LENGTH = 20
class OCRAChallengeResponse(object):
    """Common state shared by the client/server protocol classes below."""
    # All subclasses start in state 1 (their first protocol step).
    state = 1
    def __init__(self, key, ocrasuite_description, remote_ocrasuite_description=None):
        self.key = key
        self.ocrasuite = str2ocrasuite(ocrasuite_description)
        # Either False (no distinct remote suite) or a parsed OcraSuite;
        # consumers rely on its truthiness (`self.remote_ocrasuite or ...`).
        self.remote_ocrasuite = remote_ocrasuite_description is not None and str2ocrasuite(
            remote_ocrasuite_description
        )
        if not self.ocrasuite.data_input.Q:
            raise ValueError('Ocrasuite must have a Q descriptor')
def compute_challenge(Q):
    """Return a random challenge string for the challenge descriptor `Q`.

    :param Q:
        a (kind, length) pair where kind is 'N' (digits), 'A'
        (alphanumeric) or 'H' (hex digits)
    :raises ValueError: when the kind is unknown
    """
    kind, length = Q
    # SECURITY: challenges are protocol security material, so draw them from
    # the OS CSPRNG (random.SystemRandom / os.urandom) instead of the
    # predictable default Mersenne-Twister PRNG.
    rng = random.SystemRandom()
    try:
        r = xrange(0, length)
    except NameError:
        # Python 3 has no xrange.
        r = range(0, length)
    if kind == 'N':
        alphabet = string.digits
    elif kind == 'A':
        alphabet = string.digits + string.ascii_letters
    elif kind == 'H':
        alphabet = string.hexdigits
    else:
        raise ValueError('Q kind is unknown: %s' % kind)
    return ''.join([rng.choice(alphabet) for i in r])
class OCRAChallengeResponseServer(OCRAChallengeResponse):
    """Server side of the plain (one-way) OCRA challenge-response protocol.

    State machine: compute_challenge() -> verify_response() -> finished.
    """
    SERVER_STATE_COMPUTE_CHALLENGE = 1
    SERVER_STATE_VERIFY_RESPONSE = 2
    SERVER_STATE_FINISHED = 3

    def compute_challenge(self):
        """Generate, remember and return the challenge for the client."""
        if self.state != self.SERVER_STATE_COMPUTE_CHALLENGE:
            raise StateException()
        ocrasuite = self.remote_ocrasuite or self.ocrasuite
        self.challenge = compute_challenge(ocrasuite.data_input.Q)
        self.state = self.SERVER_STATE_VERIFY_RESPONSE
        return self.challenge

    def verify_response(self, response, **kwargs):
        """Constant-time check of the client's response.

        Returns a truthy value (and finishes the exchange) on success.

        :raises StateException: when called out of protocol order.
        """
        if self.state != self.SERVER_STATE_VERIFY_RESPONSE:
            # BUGFIX: this used to `return StateException()`, handing the
            # caller a truthy exception *instance* instead of signalling the
            # protocol error; raise it, as compute_challenge() does.
            raise StateException()
        ocrasuite = self.remote_ocrasuite or self.ocrasuite
        c = _utils.compare_digest(ocrasuite(self.key, Q=self.challenge, **kwargs), response)
        if c:
            self.state = self.SERVER_STATE_FINISHED
        return c
class OCRAChallengeResponseClient(OCRAChallengeResponse):
    """Client side of the plain (one-way) OCRA challenge-response protocol."""

    def compute_response(self, challenge, **kwargs):
        """Compute the OCRA response for a server-supplied challenge."""
        ocrasuite = self.ocrasuite
        return ocrasuite(self.key, Q=challenge, **kwargs)
class OCRAMutualChallengeResponseClient(OCRAChallengeResponse):
    """Client side of the OCRA mutual challenge-response protocol.

    State machine: compute_client_challenge() ->
    verify_server_response() -> compute_client_response() -> finished.
    """
    CLIENT_STATE_COMPUTE_CLIENT_CHALLENGE = 1
    CLIENT_STATE_VERIFY_SERVER_RESPONSE = 2
    CLIENT_STATE_COMPUTE_CLIENT_RESPONSE = 3
    CLIENT_STATE_FINISHED = 4

    def compute_client_challenge(self, Qc=None):
        """Generate (or accept, via Qc) the client challenge to send."""
        if self.state != self.CLIENT_STATE_COMPUTE_CLIENT_CHALLENGE:
            raise StateException()
        ocrasuite = self.remote_ocrasuite or self.ocrasuite
        self.client_challenge = Qc or compute_challenge(ocrasuite.data_input.Q)
        self.state = self.CLIENT_STATE_VERIFY_SERVER_RESPONSE
        return self.client_challenge

    def verify_server_response(self, response, challenge, **kwargs):
        """Check the server's response over client||server challenges.

        :raises StateException: when called out of protocol order.
        """
        if self.state != self.CLIENT_STATE_VERIFY_SERVER_RESPONSE:
            # BUGFIX: previously `return StateException()` -- a truthy
            # return value that callers could mistake for success.
            raise StateException()
        self.server_challenge = challenge
        q = self.client_challenge + self.server_challenge
        ocrasuite = self.remote_ocrasuite or self.ocrasuite
        c = _utils.compare_digest(ocrasuite(self.key, Qsc=q, **kwargs), response)
        if c:
            self.state = self.CLIENT_STATE_COMPUTE_CLIENT_RESPONSE
        return c

    def compute_client_response(self, **kwargs):
        """Compute the final client response over server||client challenges.

        :raises StateException: when called out of protocol order.
        """
        if self.state != self.CLIENT_STATE_COMPUTE_CLIENT_RESPONSE:
            # BUGFIX: same return-instead-of-raise defect as above.
            raise StateException()
        q = self.server_challenge + self.client_challenge
        rc = self.ocrasuite(self.key, Qsc=q, **kwargs)
        self.state = self.CLIENT_STATE_FINISHED
        return rc
class OCRAMutualChallengeResponseServer(OCRAChallengeResponse):
    """Server side of the OCRA mutual challenge-response protocol."""
    SERVER_STATE_COMPUTE_SERVER_RESPONSE = 1
    SERVER_STATE_VERIFY_CLIENT_RESPONSE = 2
    SERVER_STATE_FINISHED = 3
    def compute_server_response(self, challenge, Qs=None, **kwargs):
        # Respond to the client's challenge; Qs lets the caller fix the
        # server challenge (e.g. for test vectors), otherwise one is drawn.
        if self.state != self.SERVER_STATE_COMPUTE_SERVER_RESPONSE:
            raise StateException()
        self.client_challenge = challenge
        self.server_challenge = Qs or compute_challenge(self.ocrasuite.data_input.Q)
        # Server signs client-challenge || server-challenge.
        q = self.client_challenge + self.server_challenge
        # no need for pin with server mode
        kwargs.pop('P', None)
        kwargs.pop('P_digest', None)
        rs = self.ocrasuite(self.key, Qsc=q, **kwargs)
        self.state = self.SERVER_STATE_VERIFY_CLIENT_RESPONSE
        return rs, self.server_challenge
    def verify_client_response(self, response, **kwargs):
        # The client signs the concatenation in the opposite order.
        if self.state != self.SERVER_STATE_VERIFY_CLIENT_RESPONSE:
            raise StateException()
        q = self.server_challenge + self.client_challenge
        ocrasuite = self.remote_ocrasuite or self.ocrasuite
        c = _utils.compare_digest(ocrasuite(self.key, Qsc=q, **kwargs), response)
        if c:
            self.state = self.SERVER_STATE_FINISHED
        return c
|
bsd-3-clause
|
Denisolt/Tensorflow_Chat_Bot
|
local/lib/python2.7/site-packages/numpy/f2py/auxfuncs.py
|
85
|
21818
|
#!/usr/bin/env python
"""
Auxiliary functions for f2py2e.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) LICENSE.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/24 19:01:55 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
__all__ = [
'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle',
'getargs2', 'getcallprotoargument', 'getcallstatement',
'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode',
'getusercode1', 'hasbody', 'hascallstatement', 'hascommon',
'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote',
'isallocatable', 'isarray', 'isarrayofstrings', 'iscomplex',
'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn',
'isdouble', 'isdummyroutine', 'isexternal', 'isfunction',
'isfunction_wrap', 'isint1array', 'isinteger', 'isintent_aux',
'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict',
'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace',
'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical',
'islogicalfunction', 'islong_complex', 'islong_double',
'islong_doublefunction', 'islong_long', 'islong_longfunction',
'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired',
'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring',
'isstringarray', 'isstringfunction', 'issubroutine',
'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char',
'isunsigned_chararray', 'isunsigned_long_long',
'isunsigned_long_longarray', 'isunsigned_short',
'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess',
'replace', 'show', 'stripcomma', 'throw_error',
]
f2py_version = __version__.version
errmess = sys.stderr.write
show = pprint.pprint
options = {}
debugoptions = []
wrapfuncs = 1
def outmess(t):
    # Informational output; silenced when the module-level options dict has
    # a falsy 'verbose' entry.
    if options.get('verbose', 1):
        sys.stdout.write(t)
def debugcapi(var):
    # NOTE(review): `var` is unused -- the decision is global (module-level
    # debugoptions); kept for rule-callback signature compatibility.
    return 'capi' in debugoptions
def _isstring(var):
    # Fortran CHARACTER typespec, excluding EXTERNAL procedures.
    return 'typespec' in var and var['typespec'] == 'character' and \
        not isexternal(var)
def isstring(var):
    # Scalar character variable.
    return _isstring(var) and not isarray(var)
def ischaracter(var):
    # A single character: a string without a CHARACTER length selector.
    return isstring(var) and 'charselector' not in var
def isstringarray(var):
    return isarray(var) and _isstring(var)
def isarrayofstrings(var):
    # leaving out '*' for now so that `character*(*) a(m)` and `character
    # a(m,*)` are treated differently. Luckily `character**` is illegal.
    return isstringarray(var) and var['dimension'][-1] == '(*)'
def isarray(var):
    return 'dimension' in var and not isexternal(var)
def isscalar(var):
    # Scalar = neither array, string nor external procedure.
    return not (isarray(var) or isstring(var) or isexternal(var))
def iscomplex(var):
    return isscalar(var) and \
        var.get('typespec') in ['complex', 'double complex']
def islogical(var):
    return isscalar(var) and var.get('typespec') == 'logical'
def isinteger(var):
    return isscalar(var) and var.get('typespec') == 'integer'
def isreal(var):
    return isscalar(var) and var.get('typespec') == 'real'
def get_kind(var):
    """Return the KIND of `var`: the '*' entry of its kindselector when
    present, else the 'kind' entry, else None."""
    selector = var.get('kindselector', {})
    for field in ('*', 'kind'):
        if field in selector:
            return selector[field]
    return None
# --- Kind-based type predicates ---------------------------------------------
# f2py encodes C-level width/signedness in the Fortran KIND value: positive
# kinds ('1', '2', '4', '8', '16', '32') are byte widths; negative kinds
# ('-1', '-2', '-4', '-8') mark the unsigned variant of that width.
def islong_long(var):
    if not isscalar(var):
        return 0
    if var.get('typespec') not in ['integer', 'logical']:
        return 0
    return get_kind(var) == '8'
def isunsigned_char(var):
    if not isscalar(var):
        return 0
    if var.get('typespec') != 'integer':
        return 0
    return get_kind(var) == '-1'
def isunsigned_short(var):
    if not isscalar(var):
        return 0
    if var.get('typespec') != 'integer':
        return 0
    return get_kind(var) == '-2'
def isunsigned(var):
    if not isscalar(var):
        return 0
    if var.get('typespec') != 'integer':
        return 0
    return get_kind(var) == '-4'
def isunsigned_long_long(var):
    if not isscalar(var):
        return 0
    if var.get('typespec') != 'integer':
        return 0
    return get_kind(var) == '-8'
def isdouble(var):
    if not isscalar(var):
        return 0
    if not var.get('typespec') == 'real':
        return 0
    return get_kind(var) == '8'
def islong_double(var):
    if not isscalar(var):
        return 0
    if not var.get('typespec') == 'real':
        return 0
    return get_kind(var) == '16'
def islong_complex(var):
    if not iscomplex(var):
        return 0
    return get_kind(var) == '32'
def iscomplexarray(var):
    return isarray(var) and \
        var.get('typespec') in ['complex', 'double complex']
def isint1array(var):
    return isarray(var) and var.get('typespec') == 'integer' \
        and get_kind(var) == '1'
def isunsigned_chararray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '-1'
def isunsigned_shortarray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '-2'
def isunsignedarray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '-4'
def isunsigned_long_longarray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '-8'
def issigned_chararray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '1'
def issigned_shortarray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '2'
def issigned_array(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '4'
def issigned_long_longarray(var):
    return isarray(var) and var.get('typespec') in ['integer', 'logical']\
        and get_kind(var) == '8'
def isallocatable(var):
    return 'attrspec' in var and 'allocatable' in var['attrspec']
def ismutable(var):
    # Mutable from Python's point of view: dimensioned and not a string.
    return not ('dimension' not in var or isstring(var))
def ismoduleroutine(rout):
    return 'modulename' in rout
def ismodule(rout):
    return 'block' in rout and 'module' == rout['block']
def isfunction(rout):
    return 'block' in rout and 'function' == rout['block']
def isfunction_wrap(rout):
    # Functions get a Fortran wrapper unless marked intent(c) or external.
    if isintent_c(rout):
        return 0
    return wrapfuncs and isfunction(rout) and (not isexternal(rout))
def issubroutine(rout):
    return 'block' in rout and 'subroutine' == rout['block']
def issubroutine_wrap(rout):
    # Subroutines need a wrapper only when they take assumed-shape arrays.
    if isintent_c(rout):
        return 0
    return issubroutine(rout) and hasassumedshape(rout)
def hasassumedshape(rout):
    """Return True if any dummy argument of `rout` has an assumed-shape
    dimension (':'); caches a positive result in rout['hasassumedshape']."""
    if rout.get('hasassumedshape'):
        return True
    for arg in rout['args']:
        dims = rout['vars'].get(arg, {}).get('dimension', [])
        if ':' in dims:
            rout['hasassumedshape'] = True
            return True
    return False
def isroutine(rout):
    """Return True if `rout` is a Fortran function or subroutine block."""
    return isfunction(rout) or issubroutine(rout)
def _result_name(rout):
    # The variable holding a function's result: the explicit RESULT name
    # when given, otherwise the function name itself.
    return rout['result'] if 'result' in rout else rout['name']
def _result_var_is(rout, predicate):
    # Shared skeleton of the is*function predicates below (previously
    # duplicated five times): apply `predicate` to the routine's result
    # variable; 0 when not a function or the result variable is unknown.
    if not isfunction(rout):
        return 0
    a = _result_name(rout)
    if a in rout['vars']:
        return predicate(rout['vars'][a])
    return 0
def islogicalfunction(rout):
    return _result_var_is(rout, islogical)
def islong_longfunction(rout):
    return _result_var_is(rout, islong_long)
def islong_doublefunction(rout):
    return _result_var_is(rout, islong_double)
def iscomplexfunction(rout):
    return _result_var_is(rout, iscomplex)
def iscomplexfunction_warn(rout):
    # Like iscomplexfunction, but emits a compiler-compatibility warning.
    if iscomplexfunction(rout):
        outmess("""\
    **************************************************************
    Warning: code with a function returning complex value
    may not work correctly with your Fortran compiler.
    Run the following test before using it in your applications:
    $(f2py install dir)/test-site/{b/runme_scalar,e/runme}
    When using GNU gcc/g77 compilers, codes should work correctly.
    **************************************************************\n""")
        return 1
    return 0
def isstringfunction(rout):
    return _result_var_is(rout, isstring)
def hasexternals(rout):
    return 'externals' in rout and rout['externals']
def isthreadsafe(rout):
    return 'f2pyenhancements' in rout and \
        'threadsafe' in rout['f2pyenhancements']
def hasvariables(rout):
    return 'vars' in rout and rout['vars']
def isoptional(var):
    # Optional unless also marked required; hidden args are never optional.
    return ('attrspec' in var and 'optional' in var['attrspec'] and
            'required' not in var['attrspec']) and isintent_nothide(var)
def isexternal(var):
    return 'attrspec' in var and 'external' in var['attrspec']
def isrequired(var):
    return not isoptional(var) and isintent_nothide(var)
# --- intent() predicates -----------------------------------------------------
def isintent_in(var):
    # Default intent is in; hide/inplace/out/inout/outin negate it unless
    # 'in' is listed explicitly (which wins over out/inout/outin below).
    if 'intent' not in var:
        return 1
    if 'hide' in var['intent']:
        return 0
    if 'inplace' in var['intent']:
        return 0
    if 'in' in var['intent']:
        return 1
    if 'out' in var['intent']:
        return 0
    if 'inout' in var['intent']:
        return 0
    if 'outin' in var['intent']:
        return 0
    return 1
def isintent_inout(var):
    return ('intent' in var and ('inout' in var['intent'] or
            'outin' in var['intent']) and 'in' not in var['intent'] and
            'hide' not in var['intent'] and 'inplace' not in var['intent'])
def isintent_out(var):
    return 'out' in var.get('intent', [])
def isintent_hide(var):
    # Hidden: explicitly intent(hide), or pure intent(out) without in/inout/
    # inplace (the value is produced, never consumed).
    return ('intent' in var and ('hide' in var['intent'] or
            ('out' in var['intent'] and 'in' not in var['intent'] and
            (not l_or(isintent_inout, isintent_inplace)(var)))))
def isintent_nothide(var):
    return not isintent_hide(var)
def isintent_c(var):
    return 'c' in var.get('intent', [])
def isintent_cache(var):
    return 'cache' in var.get('intent', [])
def isintent_copy(var):
    return 'copy' in var.get('intent', [])
def isintent_overwrite(var):
    return 'overwrite' in var.get('intent', [])
def isintent_callback(var):
    return 'callback' in var.get('intent', [])
def isintent_inplace(var):
    return 'inplace' in var.get('intent', [])
def isintent_aux(var):
    return 'aux' in var.get('intent', [])
def isintent_aligned4(var):
    return 'aligned4' in var.get('intent', [])
def isintent_aligned8(var):
    return 'aligned8' in var.get('intent', [])
def isintent_aligned16(var):
    return 'aligned16' in var.get('intent', [])
# Map of intent predicates to the C-level F2PY_INTENT_* macro names.
isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT',
                 isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE',
                 isintent_cache: 'INTENT_CACHE',
                 isintent_c: 'INTENT_C', isoptional: 'OPTIONAL',
                 isintent_inplace: 'INTENT_INPLACE',
                 isintent_aligned4: 'INTENT_ALIGNED4',
                 isintent_aligned8: 'INTENT_ALIGNED8',
                 isintent_aligned16: 'INTENT_ALIGNED16',
                 }
def isprivate(var):
    return 'attrspec' in var and 'private' in var['attrspec']
def hasinitvalue(var):
    # The initializer expression is stored under the '=' key.
    return '=' in var
def hasinitvalueasstring(var):
    if not hasinitvalue(var):
        return 0
    return var['='][0] in ['"', "'"]
def hasnote(var):
    return 'note' in var
def hasresultnote(rout):
    if not isfunction(rout):
        return 0
    # Result variable: explicit RESULT name, else the function name.
    if 'result' in rout:
        a = rout['result']
    else:
        a = rout['name']
    if a in rout['vars']:
        return hasnote(rout['vars'][a])
    return 0
def hascommon(rout):
    """True if this block declares a COMMON section of its own."""
    return 'common' in rout
def containscommon(rout):
    """Return 1 if `rout` or any block nested in its body declares COMMON."""
    if hascommon(rout):
        return 1
    if not hasbody(rout):
        return 0
    for inner in rout['body']:
        if containscommon(inner):
            return 1
    return 0
def containsmodule(block):
    """Return 1 if `block` is, or recursively contains, a module block."""
    if ismodule(block):
        return 1
    if not hasbody(block):
        return 0
    for inner in block['body']:
        if containsmodule(inner):
            return 1
    return 0
def hasbody(rout):
    return 'body' in rout
def hascallstatement(rout):
    """True if a `callstatement` f2py enhancement is defined for `rout`."""
    return getcallstatement(rout) is not None
def istrue(var):
    # Constant-true rule predicate.
    return 1
def isfalse(var):
    # Constant-false rule predicate.
    return 0
class F2PYError(Exception):
    """Base error raised while applying f2py rules."""
    pass
class throw_error:
    """Rule predicate that must never match: applying it to a variable
    raises F2PYError carrying the configured message."""
    def __init__(self, mess):
        self.mess = mess
    def __call__(self, var):
        mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess)
        raise F2PYError(mess)
def l_and(*f):
    """Combine predicates with short-circuiting `and`.

    Returns a callable v -> f[0](v) and f[1](v) and ...; like the `and`
    operator it yields the first falsy intermediate result, or the last
    result when all are truthy.  Replaces the previous eval()-generated
    lambda with an ordinary closure: same semantics, no dynamic code
    generation.
    """
    def conjunction(v):
        result = True
        for func in f:
            result = result and func(v)
            if not result:
                break
        return result
    return conjunction
def l_or(*f):
    """Combine predicates with short-circuiting `or`.

    Returns a callable yielding the first truthy f[i](v), or the last
    (falsy) result when none is truthy.
    """
    def disjunction(v):
        result = False
        for func in f:
            result = result or func(v)
            if result:
                break
        return result
    return disjunction
def l_not(f):
    """Return a predicate computing the boolean negation of f(v)."""
    return lambda v: not f(v)
def isdummyroutine(rout):
    # A routine whose `fortranname` f2py enhancement is the empty string is
    # a dummy (placeholder) routine.
    try:
        return rout['f2pyenhancements']['fortranname'] == ''
    except KeyError:
        return 0
def getfortranname(rout):
    # Prefer the explicit `fortranname` enhancement; fall back to the block
    # name when it is absent, empty, or otherwise unusable.
    try:
        name = rout['f2pyenhancements']['fortranname']
        if name == '':
            raise KeyError
        if not name:
            errmess('Failed to use fortranname from %s\n' %
                    (rout['f2pyenhancements']))
            raise KeyError
    except KeyError:
        name = rout['name']
    return name
def getmultilineblock(rout, blockname, comment=1, counter=0):
    """Fetch the `counter`-th multiline f2py-enhancement block named
    `blockname` from `rout`.

    Triple-quoted blocks are unwrapped; with `comment` true they are
    framed by C comments marking their origin.  Returns None when the
    block is absent, empty, or `counter` is out of range.
    """
    enhancements = rout.get('f2pyenhancements', {})
    block = enhancements.get(blockname)
    if not block:
        return
    if counter > 0 and isinstance(block, str):
        # A plain string holds a single block; only counter 0 exists.
        return
    if isinstance(block, list):
        if counter >= len(block):
            return
        block = block[counter]
    if block[:3] != "'''":
        return block
    # Strip the leading quote triple, optionally annotating with C comments.
    if comment:
        block = ('\t/* start ' + blockname + ' multiline (' +
                 repr(counter) + ') */\n' + block[3:])
    else:
        block = block[3:]
    if block[-3:] == "'''":
        if comment:
            block = block[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/'
        else:
            block = block[:-3]
    else:
        errmess("%s multiline block should end with `'''`: %s\n"
                % (blockname, repr(block)))
    return block
def getcallstatement(rout):
    """Return the `callstatement` block of `rout`, or None."""
    return getmultilineblock(rout, 'callstatement')
def getcallprotoargument(rout, cb_map={}):
    """Build the C prototype argument string for calling the routine.

    Uses an explicit ``callprotoargument`` block when present; otherwise
    derives argument types from the routine's variables.  Callback
    argument names are mapped through *cb_map* to their typedef names.
    String arguments contribute trailing ``size_t`` length parameters.
    Returns 'void' when there are no arguments, or None when a
    ``callstatement`` exists without a ``callprotoargument``.

    Note: the mutable default ``cb_map={}`` is only ever read, never
    mutated, so the shared-default pitfall does not apply here.
    """
    r = getmultilineblock(rout, 'callprotoargument', comment=0)
    if r:
        return r
    if hascallstatement(rout):
        outmess(
            'warning: callstatement is defined without callprotoargument\n')
        return
    from .capi_maps import getctype
    arg_types, arg_types2 = [], []
    if l_and(isstringfunction, l_not(isfunction_wrap))(rout):
        # String-valued functions return through (char*, size_t) out-args.
        arg_types.extend(['char*', 'size_t'])
    for n in rout['args']:
        var = rout['vars'][n]
        if isintent_callback(var):
            continue
        if n in cb_map:
            ctype = cb_map[n] + '_typedef'
        else:
            ctype = getctype(var)
        if l_and(isintent_c, l_or(isscalar, iscomplex))(var):
            pass
        elif isstring(var):
            pass
        else:
            # Everything else is passed by reference.
            ctype = ctype + '*'
        if isstring(var) or isarrayofstrings(var):
            # Hidden string-length argument, appended after regular args.
            arg_types2.append('size_t')
        arg_types.append(ctype)
    proto_args = ','.join(arg_types + arg_types2)
    if not proto_args:
        proto_args = 'void'
    return proto_args
def getusercode(rout):
    """Return the routine's first ``usercode`` block, or None."""
    return getmultilineblock(rout, 'usercode')
def getusercode1(rout):
    """Return the routine's second ``usercode`` block, or None."""
    return getmultilineblock(rout, 'usercode', counter=1)
def getpymethoddef(rout):
    """Return the routine's ``pymethoddef`` block, or None."""
    return getmultilineblock(rout, 'pymethoddef')
def getargs(rout):
    """Return ``(args, sortargs)`` for a routine block.

    ``args`` is the declared argument list (empty when absent);
    ``sortargs`` is the same names reordered by the routine's
    ``sortvars`` hint, with unmentioned arguments appended in
    declaration order.
    """
    if 'args' not in rout:
        return [], []
    args = rout['args']
    if 'sortvars' not in rout:
        return args, args
    sortargs = [a for a in rout['sortvars'] if a in args]
    for a in args:
        if a not in sortargs:
            sortargs.append(a)
    return args, sortargs
def getargs2(rout):
    """Like ``getargs``, but prepends intent(aux) variables that are not
    already among the routine's declared arguments.

    Returns ``(args, sortargs)`` where both lists start with the aux
    variables.
    """
    sortargs, args = [], rout.get('args', [])
    # Aux variables participate in the wrapper even though they are not
    # formal arguments of the Fortran routine.
    auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])
               and a not in args]
    args = auxvars + args
    if 'sortvars' in rout:
        for a in rout['sortvars']:
            if a in args:
                sortargs.append(a)
        for a in args:
            if a not in sortargs:
                sortargs.append(a)
    else:
        sortargs = auxvars + rout['args']
    return args, sortargs
def getrestdoc(rout):
    """Return the free-form documentation attached to *rout*, or None.

    ``python module`` blocks key their docs by ``(block, name)``; all
    other blocks use the plain ``None`` key.
    """
    if 'f2pymultilines' not in rout:
        return None
    is_pymod = rout['block'] == 'python module'
    key = (rout['block'], rout['name']) if is_pymod else None
    return rout['f2pymultilines'].get(key, None)
def gentitle(name):
    """Return *name* centered in a star-padded C comment about 80 wide."""
    pad = '*' * ((80 - len(name) - 6) // 2)
    return '/*%s %s %s*/' % (pad, name, pad)
def flatlist(l):
    """Recursively flatten nested lists; a non-list becomes a
    one-element list."""
    if not isinstance(l, list):
        return [l]
    flattened = []
    for item in l:
        flattened.extend(flatlist(item))
    return flattened
def stripcomma(s):
    """Drop a single trailing comma from *s*, if present."""
    return s[:-1] if s and s.endswith(',') else s
def replace(str, d, defaultsep=''):
    """Substitute ``#key#`` patterns in *str* using mapping *d*.

    List replacement values are flattened and joined with the key's
    separator from ``d['separatorsfor']`` (or *defaultsep*).  A list of
    mappings or a list of templates is processed element-wise.  The key
    set is walked twice so a substituted value may itself introduce
    ``#key#`` patterns.

    (The parameter name ``str`` shadows the builtin but is kept for
    backward compatibility with keyword callers.)
    """
    if isinstance(d, list):
        return [replace(str, mapping, defaultsep) for mapping in d]
    if isinstance(str, list):
        return [replace(template, d, defaultsep) for template in str]
    separators = d.get('separatorsfor', {})
    for k in 2 * list(d.keys()):
        if k == 'separatorsfor':
            continue
        sep = separators[k] if k in separators else defaultsep
        value = d[k]
        if isinstance(value, list):
            value = sep.join(flatlist(value))
        str = str.replace('#%s#' % (k), value)
    return str
def dictappend(rd, ar):
    """Merge rule dictionary *ar* (or a list of them) into *rd* in place.

    Existing string values are promoted to lists before more content is
    appended; dict values are merged recursively, except that
    ``separatorsfor`` entries are only added, never overwritten.  Keys
    starting with ``_`` are control keys and are skipped.  Returns the
    updated *rd*.
    """
    if isinstance(ar, list):
        for a in ar:
            rd = dictappend(rd, a)
        return rd
    for k in ar.keys():
        if k[0] == '_':
            continue
        if k in rd:
            if isinstance(rd[k], str):
                # Promote scalar to list so further values can accumulate.
                rd[k] = [rd[k]]
            if isinstance(rd[k], list):
                if isinstance(ar[k], list):
                    rd[k] = rd[k] + ar[k]
                else:
                    rd[k].append(ar[k])
            elif isinstance(rd[k], dict):
                if isinstance(ar[k], dict):
                    if k == 'separatorsfor':
                        # Keep existing separators; only add missing ones.
                        for k1 in ar[k].keys():
                            if k1 not in rd[k]:
                                rd[k][k1] = ar[k][k1]
                    else:
                        rd[k] = dictappend(rd[k], ar[k])
        else:
            rd[k] = ar[k]
    return rd
def applyrules(rules, d, var={}):
    """Apply a rule dictionary (or list of them) using replacement map *d*.

    String rule values are run through ``replace``; list values are
    applied element-wise; dict values map predicate functions (tested
    against *var*) to template strings or ``supertext`` sub-rule dicts.
    Control keys: ``_check`` gates the whole rule set, ``_break`` stops
    processing of a rule list, ``need`` registers C helper dependencies
    via ``cfuncs.append_needs``.  Returns the accumulated result dict.

    Note: the mutable default ``var={}`` is only ever read here, so the
    shared-default pitfall does not apply.
    """
    ret = {}
    if isinstance(rules, list):
        for r in rules:
            rr = applyrules(r, d, var)
            ret = dictappend(ret, rr)
            if '_break' in rr:
                break
        return ret
    if '_check' in rules and (not rules['_check'](var)):
        return ret
    if 'need' in rules:
        res = applyrules({'needs': rules['need']}, d, var)
        if 'needs' in res:
            cfuncs.append_needs(res['needs'])
    for k in rules.keys():
        if k == 'separatorsfor':
            # Passed through untouched for later use by `replace`.
            ret[k] = rules[k]
            continue
        if isinstance(rules[k], str):
            ret[k] = replace(rules[k], d)
        elif isinstance(rules[k], list):
            ret[k] = []
            for i in rules[k]:
                ar = applyrules({k: i}, d, var)
                if k in ar:
                    ret[k].append(ar[k])
        elif k[0] == '_':
            continue
        elif isinstance(rules[k], dict):
            ret[k] = []
            for k1 in rules[k].keys():
                # Keys that are predicate functions select their value
                # only when the predicate holds for `var`.
                if isinstance(k1, types.FunctionType) and k1(var):
                    if isinstance(rules[k][k1], list):
                        for i in rules[k][k1]:
                            if isinstance(i, dict):
                                res = applyrules({'supertext': i}, d, var)
                                if 'supertext' in res:
                                    i = res['supertext']
                                else:
                                    i = ''
                            ret[k].append(replace(i, d))
                    else:
                        i = rules[k][k1]
                        if isinstance(i, dict):
                            res = applyrules({'supertext': i}, d)
                            if 'supertext' in res:
                                i = res['supertext']
                            else:
                                i = ''
                        ret[k].append(replace(i, d))
        else:
            errmess('applyrules: ignoring rule %s.\n' % repr(rules[k]))
        if isinstance(ret[k], list):
            # Collapse singleton lists; drop keys that produced nothing.
            if len(ret[k]) == 1:
                ret[k] = ret[k][0]
            if ret[k] == []:
                del ret[k]
    return ret
|
gpl-3.0
|
ydm/eecont
|
eecont/tests.py
|
1
|
24177
|
#-*- coding: utf-8 -*-
# TODO: use fixtures instead of hard coded dictionaries
from __future__ import unicode_literals
import copy
import datetime
from django.test import TestCase
from django.conf import settings
from django.utils import six
from django.utils import timezone
import pytz
from eecont import inserter
from eecont import models
from eecont import transform
def _dt(*args, **kwargs):
    """Build a datetime from *args*; when settings.USE_TZ is on and a
    ``tz`` keyword is given (e.g. tz='Europe/Sofia'), make it aware in
    that timezone. Otherwise a naive datetime is returned."""
    tz = kwargs.get('tz')
    dt = datetime.datetime(*args)
    if settings.USE_TZ and tz:
        dt = timezone.make_aware(dt, pytz.timezone(tz))
    return dt
########################
# Test transformations #
########################
class TransformTestMixin(object):
    """Shared scaffolding for transform.* tests.

    Subclasses provide ``_input`` (raw payload dicts), ``_expected``
    (transformed dicts) and ``_func`` (the transform under test, wrapped
    in staticmethod)."""
    _expected = None
    _input = None
    # NOTE(review): placeholder only -- accessed through an instance this
    # lambda would receive `self` as `a`; subclasses must override it with
    # a staticmethod for the tests to work.
    _func = lambda a: a
    def _time(self, *args):
        # Shorthand for building datetime.time values in fixtures.
        return datetime.time(*args)
    @property
    def expected(self):
        return self._expected
    @property
    def input(self):
        # Deep copy so individual tests may mutate their input freely.
        return copy.deepcopy(self._input)
    def test_filter(self):
        # Unknown keys in the payload must be ignored by the transform.
        inp = self.input[0]
        inp['something1'] = 'something else 1'
        inp['something2'] = 'something else 2'
        inp['something3'] = 'something else 3'
        actual = self._func(inp)
        self.assertEqual([self._expected[0]], actual)
    def test_single(self):
        # A single dict input still yields a one-element list.
        actual = self._func(self.input[0])
        self.assertEqual([self.expected[0]], actual)
    def test_multiple(self):
        actual = self._func(self.input)
        self.assertEqual(self.expected, actual)
class CityTransformTest(TransformTestMixin, TestCase):
    """Checks transform.cities: nested per-shipment-type office-code
    structures are flattened to *_from_door/*_to_office int lists,
    '1'/'0' day flags become booleans, and naive timestamp strings are
    parsed (the second input exercises missing/empty sub-structures)."""
    _func = staticmethod(transform.cities)
    _input = [
        {'attach_offices':
         {'cargo_expres_shipments':
          {'from_door': {'office_code': ['1000', '1001']},
           'from_office': {'office_code': ['1002', '1003']},
           'to_door': {'office_code': ['1004', '1005']},
           'to_office': {'office_code': ['1006', '1007']}},
          'cargo_palet_shipments':
          {'from_door': {'office_code': ['1008', '1009']},
           'from_office': {'office_code': ['1010', '1011']},
           'to_door': {'office_code': ['1012', '1013']},
           'to_office': {'office_code': ['1014', '1015']}},
          'courier_shipments':
          {'from_door': {'office_code': ['1016', '1017']},
           'from_office': {'office_code': ['1018', '1019']},
           'to_door': {'office_code': ['1020', '1021']},
           'to_office': {'office_code': ['1022', '1023']}},
          'post_shipments':
          {'from_door': {'office_code': ['1024', '1025']},
           'from_office': {'office_code': ['1026', '1027']},
           'to_door': {'office_code': ['1028', '1029']},
           'to_office': {'office_code': ['1030', '1031']}}},
         'id': '2000',
         'id_country': '2001',
         'id_office': '2003',
         'id_zone': '2004',
         'name': 'Град №1',
         'name_en': 'City #1',
         'post_code': '2005',
         'region': 'Планета Земя',
         'region_en': 'Planet Earth',
         'service_days': {'day1': '1', 'day2': '1', 'day3': '1', 'day4': '1',
                          'day5': '0', 'day6': '0', 'day7': '0'},
         'type': 'гр.',
         'updated_time': '2012-10-03 09:07:09'},
        {'attach_offices':
         {'cargo_expres_shipments':
          {'from_door': {'office_code': ''},
           'from_office': '',
           'to_door': {'office_code': ['2000', '2001']},
           'to_office': {'office_code': ['2002', '2003']}},
          # 'cargo_palet_shipments':
          # {'from_door': {'office_code': ['2004', '2005']},
          #  'from_office': {'office_code': ['2006', '2007']},
          #  'to_door': {'office_code': ['2008', '2009']},
          #  'to_office': {'office_code': ['2010', '2011']}},
          'courier_shipments': '',
          # {'from_door': {'office_code': ['2012', '2013']},
          #  'from_office': {'office_code': ['2014', '2015']},
          #  'to_door': {'office_code': ['2016', '2017']},
          #  'to_office': {'office_code': ['2018', '2019']}},
          'post_shipments':
          {'from_door': {'office_code': ['2020', '2021']},
           # 'from_office': {'office_code': ['2022', '2023']},
           'to_door': {'office_code': []},
           'to_office': {'office_code': ['2025', '2026', '2027']}}},
         'id': '3000',
         'id_country': '3001',
         'id_office': '3002',
         'id_zone': '3003',
         'name': 'Град №2',
         'name_en': 'City #2',
         'post_code': '3004',
         'region': 'Планета Земя',
         'region_en': 'Planet Earth',
         'service_days': {'day1': '0', 'day2': '0', 'day3': '0', 'day4': '0',
                          'day5': '1', 'day6': '1', 'day7': '1'},
         'type': 'с.',
         'updated_time': '2009-09-09 23:59:59'}]
    def setUp(self):
        # Expected values are built in setUp because _dt depends on
        # runtime settings (USE_TZ).
        self._expected = [
            {'eid': 2000,
             # we can't match a country by its id
             # office
             'zone': 2004,
             'name': 'Град №1',
             'name_en': 'City #1',
             'post_code': 2005,
             # TODO: region?
             'service_days': [True, True, True, True, False, False, False],
             'is_village': False,
             'updated_time': _dt(2012, 10, 3, 9, 7, 9, tz='Europe/Sofia'),
             'ces_from_door': [1000, 1001], 'ces_from_office': [1002, 1003],
             'ces_to_door': [1004, 1005], 'ces_to_office': [1006, 1007],
             'cps_from_door': [1008, 1009], 'cps_from_office': [1010, 1011],
             'cps_to_door': [1012, 1013], 'cps_to_office': [1014, 1015],
             'cs_from_door': [1016, 1017], 'cs_from_office': [1018, 1019],
             'cs_to_door': [1020, 1021], 'cs_to_office': [1022, 1023],
             'ps_from_door': [1024, 1025], 'ps_from_office': [1026, 1027],
             'ps_to_door': [1028, 1029], 'ps_to_office': [1030, 1031]},
            {'eid': 3000,
             # we can't match a country by its id
             # office
             'zone': 3003,
             'name': 'Град №2',
             'name_en': 'City #2',
             'post_code': 3004,
             # TODO: region?
             'service_days': [False, False, False, False, True, True, True],
             'is_village': True,
             'updated_time': _dt(2009, 9, 9, 23, 59, 59, tz='Europe/Sofia'),
             'ces_from_door': [], 'ces_from_office': [],
             'ces_to_door': [2000, 2001], 'ces_to_office': [2002, 2003],
             'cps_from_door': [], 'cps_from_office': [],
             'cps_to_door': [], 'cps_to_office': [],
             'cs_from_door': [], 'cs_from_office': [],
             'cs_to_door': [], 'cs_to_office': [],
             'ps_from_door': [2020, 2021], 'ps_from_office': [],
             'ps_to_door': [], 'ps_to_office': [2025, 2026, 2027]}]
# NOTE(review): base order differs from CityTransformTest (mixin listed
# last here); it works because the names don't clash, but mixin-first is
# the usual convention -- consider unifying.
class CountryTransformTest(TestCase, TransformTestMixin):
    """Checks transform.countries: country/zone name pairs pass through
    and id_zone strings become ints."""
    _func = staticmethod(transform.countries)
    _input = [
        {'country_name': 'Държава №1',
         'country_name_en': 'Country #1',
         'id_zone': '1000',
         'zone_name': 'Зона №1',
         'zone_name_en': 'Zone #1'},
        {'country_name': 'Държава №2',
         'country_name_en': 'Country #2',
         'id_zone': '2000',
         'zone_name': 'Зона №2',
         'zone_name_en': 'Zone #2'}]
    _expected = [
        {'name': 'Държава №1',
         'name_en': 'Country #1',
         'zone': 1000},
        {'name': 'Държава №2',
         'name_en': 'Country #2',
         'zone': 2000}]
class OfficeTransformTest(TestCase, TransformTestMixin):
    """Checks transform.offices: nested address_details are flattened to
    top-level fields, numeric strings become ints, and time/datetime
    strings are parsed."""
    _func = staticmethod(transform.offices)
    _input = [
        {
            'address': 'Ямбол Ямбол ул. Дружба №1',
            'address_details': {'ap': '',
                                'bl': '',
                                'et': '',
                                'num': '1',
                                'other': '',
                                'quarter_name': 'Ямбол',
                                'street_name': 'ул. Дружба',
                                'vh': ''},
            'address_en': 'Yambol Qmbol ul. Druzhba #1',
            'city_name': 'Ямбол',
            'city_name_en': 'Yambol',
            'id': '648',
            'latitude': '42.4821587',
            'longitude': '26.4996131',
            'name': 'Ямбол Трите вятъра',
            'name_en': 'Yambol Trite vjatara',
            'office_code': '8603',
            'phone': '+359 466 29962,+359 87 9922914',
            'time_priority': '09:30:00',
            'updated_time': '2013-03-24 01:00:13',
            'work_begin': '09:00:00',
            'work_begin_saturday': '09:00:00',
            'work_end': '18:00:00',
            'work_end_saturday': '13:00:00'},
        {'address': 'Тутракан Тутракан ул. Силистра №51',
         'address_details': {'ap': '123',
                             'bl': '234',
                             'et': '345',
                             'num': '51',
                             'other': 'Пепеляшка е красива',
                             'quarter_name': 'Тутракан',
                             'street_name': 'ул. Силистра',
                             'vh': '456'},
         'address_en': 'Tutrakan Tutrakan ul. Silistra #1',
         'city_name': 'Тутракан',
         'city_name_en': 'Tutrakan',
         'id': '177',
         'latitude': '44.04661273118875',
         'longitude': '26.624570750656133',
         'name': 'Тутракан',
         'name_en': 'Tutrakan',
         'office_code': '7600',
         'phone': '+359 866 61464,+359 87 9922602',
         'time_priority': '23:34:45',
         'updated_time': '2013-03-10 13:26:00',
         'work_begin': '12:23:34',
         'work_begin_saturday': '23:23:23',
         'work_end': '12:34:56',
         'work_end_saturday': '20:01:02'}]
    def setUp(self):
        self._expected = [
            {'address': 'Ямбол Ямбол ул. Дружба №1',
             'address_en': 'Yambol Qmbol ul. Druzhba #1',
             'apartment': '',
             'apartment_building': '',
             'city_name': 'Ямбол',
             'city_name_en': 'Yambol',
             'eid': 648,
             'entrance': '',
             'floor': '',
             'latitude': '42.4821587',
             'longitude': '26.4996131',
             'name': 'Ямбол Трите вятъра',
             'name_en': 'Yambol Trite vjatara',
             'number': '1',
             'office_code': 8603,
             'other': '',
             'phone': '+359 466 29962,+359 87 9922914',
             'quarter_name': 'Ямбол',
             'street_name': 'ул. Дружба',
             'time_priority': self._time(9, 30),
             'updated_time': _dt(2013, 3, 24, 1, 0, 13, tz='Europe/Sofia'),
             'work_begin': self._time(9),
             'work_begin_saturday': self._time(9),
             'work_end': self._time(18),
             'work_end_saturday': self._time(13)},
            {'address': 'Тутракан Тутракан ул. Силистра №51',
             'address_en': 'Tutrakan Tutrakan ul. Silistra #1',
             'apartment': '123',
             'apartment_building': '234',
             'city_name': 'Тутракан',
             'city_name_en': 'Tutrakan',
             'eid': 177,
             'entrance': '456',
             'floor': '345',
             'latitude': '44.04661273118875',
             'longitude': '26.624570750656133',
             'name': 'Тутракан',
             'name_en': 'Tutrakan',
             'number': '51',
             'office_code': 7600,
             'other': 'Пепеляшка е красива',
             'phone': '+359 866 61464,+359 87 9922602',
             'quarter_name': 'Тутракан',
             'street_name': 'ул. Силистра',
             'time_priority': self._time(23, 34, 45),
             # Fixed: `03`, `00` leading-zero literals (Python 2 octal
             # syntax) are SyntaxErrors in Python 3; values unchanged.
             'updated_time': _dt(2013, 3, 10, 13, 26, 0, tz='Europe/Sofia'),
             'work_begin': self._time(12, 23, 34),
             'work_begin_saturday': self._time(23, 23, 23),
             'work_end': self._time(12, 34, 56),
             # Fixed: `01`, `02` leading-zero literals, same reason.
             'work_end_saturday': self._time(20, 1, 2)}]
class RegionTransformTest(TestCase, TransformTestMixin):
    """Checks transform.regions: id/code/id_city strings become ints and
    the timestamp string is parsed (expected built in setUp because _dt
    depends on runtime settings)."""
    _func = staticmethod(transform.regions)
    _input = [
        {'id': '1000',
         'name': 'Име на регион №1',
         'code': '1001',
         'id_city': '1002',
         'updated_time': '2010-9-15 7:15:30'},
        {'id': '2000',
         'name': 'Име на регион №2',
         'code': '2001',
         'id_city': '2002',
         'updated_time': '2010-12-8 22:33:44'}]
    def setUp(self):
        self._expected = [
            {'eid': 1000,
             'name': 'Име на регион №1',
             'code': 1001,
             'city': 1002,
             'updated_time': _dt(2010, 9, 15, 7, 15, 30,
                                 tz='Europe/Sofia')},
            {'eid': 2000,
             'name': 'Име на регион №2',
             'code': 2001,
             'city': 2002,
             'updated_time': _dt(2010, 12, 8, 22, 33, 44,
                                 tz='Europe/Sofia')}]
class StreetTransformTest(TestCase, TransformTestMixin):
    """Checks transform.streets: id/id_city become ints, city_post_code
    is dropped, and the timestamp string is parsed."""
    _func = staticmethod(transform.streets)
    _input = [
        {'id': '1001',
         'name': 'ул. Някоя си',
         'name_en': 'Something Str.',
         'city_post_code': '1002',
         'id_city': '1003',
         'updated_time': '2000-10-20 10:20:30'},
        {'id': '2001',
         'name': 'ул. Другая си',
         'name_en': 'Something Else Str.',
         'city_post_code': '2002',
         'id_city': '2003',
         'updated_time': '2001-11-21 11:21:31'}]
    def setUp(self):
        self._expected = [
            {'eid': 1001,
             'name': 'ул. Някоя си',
             'name_en': 'Something Str.',
             # 'city_post_code': 1002,
             'city': 1003,
             'updated_time': _dt(2000, 10, 20, 10, 20, 30,
                                 tz='Europe/Sofia')},
            {'eid': 2001,
             'name': 'ул. Другая си',
             'name_en': 'Something Else Str.',
             # 'city_post_code': 2002,
             'city': 2003,
             'updated_time': _dt(2001, 11, 21, 11, 21, 31,
                                 tz='Europe/Sofia')}]
class ZoneTransformTest(TestCase, TransformTestMixin):
    """Checks transform.zones: '1'/'0' flags become booleans, ids become
    ints, timestamps are parsed; also checks a non-numeric id raises."""
    _func = staticmethod(transform.zones)
    _input = [
        {'id': '1000',
         'is_ee': '1',
         'name': 'Име1',
         'name_en': 'Name1',
         'national': '1',
         'updated_time': '2013-02-18 12:13:14'},
        {'id': '2000',
         'is_ee': '0',
         'name': 'Име2',
         'name_en': 'Name2',
         'national': '0',
         'updated_time': '2000-02-29 23:24:25'}]
    def setUp(self):
        tz = 'Europe/Sofia'
        self._expected = [
            {'eid': 1000,
             'is_ee': True,
             'name': 'Име1',
             'name_en': 'Name1',
             'national': True,
             'updated_time':
             _dt(2013, 2, 18, 12, 13, 14, tz=tz)},
            {'eid': 2000,
             'is_ee': False,
             'name': 'Име2',
             'name_en': 'Name2',
             'national': False,
             'updated_time':
             _dt(2000, 2, 29, 23, 24, 25, tz=tz)}]
    # TODO: test isn't completed
    def test_invalid(self):
        # A non-numeric id must make the transform raise ValueError.
        inp = self.input[0]
        inp['id'] = 'bad'
        self.assertRaises(ValueError, transform.zones, inp)
######################################
# Test populating of related objects #
######################################
class PopulateRelationsTest(TestCase):
    """Checks inserter.populate_relations: eids (or names, for countries)
    in a data dict are replaced by the corresponding model instances.
    The _create_* helpers build minimal fixture rows."""
    def _create_city(self, zone_eid, eid=1000):
        # Minimal City row; all shipment office-code lists left empty.
        return models.City.objects.create(
            eid=eid,
            zone=models.Zone.objects.get(eid=zone_eid),
            name='Градец', name_en='Gradets',
            post_code=1000,
            service_days=[True] * 7,
            is_village=True,
            updated_time=_dt(2012, 10, 10, tz='Europe/Sofia'),
            ces_from_door=[], ces_to_door=[],
            ces_from_office=[], ces_to_office=[],
            cps_from_door=[], cps_to_door=[],
            cps_from_office=[], cps_to_office=[],
            cs_from_door=[], cs_to_door=[],
            cs_from_office=[], cs_to_office=[],
            ps_from_door=[], ps_to_door=[],
            ps_from_office=[], ps_to_office=[])
    def _create_country(self, zone_eid, name):
        return models.Country.objects.create(
            name=name,
            name_en='Name in latin',
            zone=models.Zone.objects.get(eid=zone_eid))
    def _create_office(self, eid=1001):
        return models.Office.objects.create(
            eid=eid,
            name='Офисно име', name_en='Office name',
            office_code=1337,
            phone='',
            time_priority='11:30',
            latitude='',
            longitude='',
            work_begin='9:00',
            work_end='17:00',
            work_begin_saturday='10:00',
            work_end_saturday='14:00',
            updated_time=_dt(1988, 10, 4, tz='Europe/Sofia'),
            # Адрес
            address='ул. Клокотница №7',
            address_en='ul. Klokotnitsa No7',
            city_name='София',
            city_name_en='Sofia',
            quarter_name='кв. Овча купер',
            street_name='ул. Клокотница',
            number='7',
            apartment_building='',
            entrance='',
            floor='',
            apartment='',
            other='')
    def _create_quarter(self, city_eid, eid=1002):
        return models.Quarter.objects.create(
            eid=eid,
            name='Някой си', name_en='Nyakoi si',
            city=models.City.objects.get(eid=city_eid),
            updated_time=_dt(1990, 4, 10, tz='Europe/Sofia'))
    def _create_street(self, city_eid, eid=1003):
        return self._create_quarter(city_eid, eid)
    def _create_zone(self, eid=1000):
        return models.Zone.objects.create(
            eid=eid,
            is_ee=True,
            name='Някаква зона',
            name_en='A zone',
            national=True,
            # Fixed: `04` leading-zero literal (Python 2 octal syntax) is
            # a SyntaxError in Python 3; value unchanged.
            updated_time=_dt(2012, 10, 4, tz='Europe/Sofia'))
    def test_city(self):
        expected_city = self._create_city(self._create_zone().eid)
        data = {'city': expected_city.eid}
        inserter.populate_relations(data)
        self.assertEqual(expected_city, data['city'])
    def test_country(self):
        # Countries are looked up by name, not eid.
        expected_country = self._create_country(self._create_zone().eid,
                                                'България')
        data = {'country': 'България'}
        inserter.populate_relations(data)
        self.assertEqual(expected_country, data['country'])
    def test_office(self):
        expected = self._create_office()
        data = {'office': expected.eid}
        inserter.populate_relations(data)
        self.assertEqual(expected, data['office'])
    # TODO
    def test_quarter(self):
        pass
    def test_street(self):
        pass
    def test_zone(self):
        expected_zone = self._create_zone()
        data = {'zone': expected_zone.eid}
        inserter.populate_relations(data)
        self.assertEqual(expected_zone, data['zone'])
class InserterTest(TestCase):
    """Checks inserter.insert: a transformed dict is persisted as the
    named model, keyed by 'eid', with foreign keys resolved (City.zone
    ends up as the Zone instance, not the raw eid)."""
    _zone = {
        'eid': 1000,
        'is_ee': True,
        'name': 'Име1',
        'name_en': 'Name1',
        'national': True,
        'updated_time': _dt(2013, 2, 18, 12, 13, 14, tz='Europe/Sofia')}
    _city = {
        'eid': 2000,
        'zone': 1000,
        'name': 'Град №1',
        'name_en': 'City #1',
        'post_code': 2005,
        'service_days': [True, True, True, True, False, False, False],
        'is_village': False,
        'updated_time': _dt(2012, 10, 3, 9, 7, 9, tz='Europe/Sofia'),
        'ces_from_door': [1000, 1001], 'ces_from_office': [1002, 1003],
        'ces_to_door': [1004, 1005], 'ces_to_office': [1006, 1007],
        'cps_from_door': [1008, 1009], 'cps_from_office': [1010, 1011],
        'cps_to_door': [1012, 1013], 'cps_to_office': [1014, 1015],
        'cs_from_door': [1016, 1017], 'cs_from_office': [1018, 1019],
        'cs_to_door': [1020, 1021], 'cs_to_office': [1022, 1023],
        'ps_from_door': [1024, 1025], 'ps_from_office': [1026, 1027],
        'ps_to_door': [1028, 1029], 'ps_to_office': [1030, 1031]}
    def _insert_zone(self):
        # Deep copy: insert() may mutate the dict it is given.
        data = copy.deepcopy(self._zone)
        inserter.insert(data, 'Zone', 'eid')
        obj = models.Zone.objects.get(eid=self._zone['eid'])
        return obj
    def _insert_city(self):
        data = copy.deepcopy(self._city)
        inserter.insert(data, 'City', 'eid')
        obj = models.City.objects.get(eid=self._city['eid'])
        return obj
    def test_inserter_zone(self):
        zone_obj = self._insert_zone()
        for k, v in six.iteritems(self._zone):
            self.assertEqual(v, getattr(zone_obj, k))
    def test_inserter_city(self):
        zone_obj = self._insert_zone()
        city_obj = self._insert_city()
        data = copy.deepcopy(self._city)
        # The stored city's zone attribute must be the Zone instance.
        data['zone'] = zone_obj
        for k, v in six.iteritems(data):
            self.assertEqual(v, getattr(city_obj, k))
class OfficeDefaultsTest(TestCase):
    """Checks transform.offices defaults: None time fields must come out
    as midnight (datetime.time(0, 0)) rather than None."""
    _input = [{
        'address': 'Ямбол Ямбол ул. Дружба №1',
        'address_details': {'ap': '',
                            'bl': '',
                            'et': '',
                            'num': '1',
                            'other': '',
                            'quarter_name': 'Ямбол',
                            'street_name': 'ул. Дружба',
                            'vh': ''},
        'address_en': 'Yambol Qmbol ul. Druzhba #1',
        'city_name': 'Ямбол',
        'city_name_en': 'Yambol',
        'id': '648',
        'latitude': '42.4821587',
        'longitude': '26.4996131',
        'name': 'Ямбол Трите вятъра',
        'name_en': 'Yambol Trite vjatara',
        'office_code': '8603',
        'phone': '+359 466 29962,+359 87 9922914',
        'updated_time': '2013-03-24 01:00:13',
        'time_priority': None,
        'work_begin': None,
        'work_begin_saturday': None,
        'work_end': None,
        'work_end_saturday': None}]
    _expected = [{
        'address': 'Ямбол Ямбол ул. Дружба №1',
        'address_en': 'Yambol Qmbol ul. Druzhba #1',
        'apartment': '',
        'apartment_building': '',
        'city_name': 'Ямбол',
        'city_name_en': 'Yambol',
        'eid': 648,
        'entrance': '',
        'floor': '',
        'latitude': '42.4821587',
        'longitude': '26.4996131',
        'name': 'Ямбол Трите вятъра',
        'name_en': 'Yambol Trite vjatara',
        'number': '1',
        'office_code': 8603,
        'other': '',
        'phone': '+359 466 29962,+359 87 9922914',
        'quarter_name': 'Ямбол',
        'street_name': 'ул. Дружба',
        'updated_time': _dt(2013, 3, 24, 1, 0, 13, tz='Europe/Sofia'),
        'time_priority': datetime.time(0, 0),
        'work_begin': datetime.time(0, 0),
        'work_begin_saturday': datetime.time(0, 0),
        'work_end': datetime.time(0, 0),
        'work_end_saturday': datetime.time(0, 0)}]
    def test_default_values(self):
        data = transform.offices(copy.deepcopy(self._input))
        self.assertEqual(self._expected, data)
|
lgpl-3.0
|
bjolivot/ansible
|
lib/ansible/modules/cloud/openstack/os_user.py
|
26
|
9043
|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Metadata consumed by Ansible tooling (ansible-doc, CI).
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

# Module documentation (YAML-in-string, rendered by ansible-doc).
DOCUMENTATION = '''
---
module: os_user
short_description: Manage OpenStack Identity Users
extends_documentation_fragment: openstack
author: David Shrewsbury
version_added: "2.0"
description:
    - Manage OpenStack Identity users. Users can be created,
      updated or deleted using this module. A user will be updated
      if I(name) matches an existing user and I(state) is present.
      The value for I(name) cannot be updated without deleting and
      re-creating the user.
options:
   name:
     description:
        - Username for the user
     required: true
   password:
     description:
        - Password for the user
     required: false
     default: None
   update_password:
     required: false
     default: always
     choices: ['always', 'on_create']
     version_added: "2.3"
     description:
        - C(always) will attempt to update password.  C(on_create) will only
          set the password for newly created users.
   email:
     description:
        - Email address for the user
     required: false
     default: None
   default_project:
     description:
        - Project name or ID that the user should be associated with by default
     required: false
     default: None
   domain:
     description:
        - Domain to create the user in if the cloud supports domains
     required: false
     default: None
   enabled:
     description:
        - Is the user enabled
     required: false
     default: True
   state:
     description:
       - Should the resource be present or absent.
     choices: [present, absent]
     default: present
   availability_zone:
     description:
       - Ignored. Present for backwards compatibility
     required: false
requirements:
    - "python >= 2.6"
    - "shade"
'''

EXAMPLES = '''
# Create a user
- os_user:
    cloud: mycloud
    state: present
    name: demouser
    password: secret
    email: demo@example.com
    domain: default
    default_project: demo

# Delete a user
- os_user:
    cloud: mycloud
    state: absent
    name: demouser

# Create a user but don't update password if user exists
- os_user:
    cloud: mycloud
    state: present
    name: demouser
    password: secret
    update_password: on_create
    email: demo@example.com
    domain: default
    default_project: demo
'''

# Description of the module's return payload.
RETURN = '''
user:
    description: Dictionary describing the user.
    returned: On success when I(state) is 'present'
    type: dictionary
    contains:
        default_project_id:
            description: User default project ID. Only present with Keystone >= v3.
            type: string
            sample: "4427115787be45f08f0ec22a03bfc735"
        domain_id:
            description: User domain ID. Only present with Keystone >= v3.
            type: string
            sample: "default"
        email:
            description: User email address
            type: string
            sample: "demo@example.com"
        id:
            description: User ID
            type: string
            sample: "f59382db809c43139982ca4189404650"
        name:
            description: User name
            type: string
            sample: "demouser"
'''
# shade is an optional third-party dependency; record its availability so
# main() can emit a clean fail_json message instead of an ImportError.
try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False
def _needs_update(params_dict, user):
for k, v in params_dict.items():
if k not in ('password', 'update_password') and user[k] != v:
return True
# We don't get password back in the user object, so assume any supplied
# password is a change.
if (params_dict['password'] is not None and
params_dict['update_password'] == 'always'):
return True
return False
def _get_domain_id(cloud, domain):
try:
# We assume admin is passing domain id
domain_id = cloud.get_domain(domain)['id']
except:
# If we fail, maybe admin is passing a domain name.
# Note that domains have unique names, just like id.
try:
domain_id = cloud.search_domains(filters={'name': domain})[0]['id']
except:
# Ok, let's hope the user is non-admin and passing a sane id
domain_id = domain
return domain_id
def _get_default_project_id(cloud, default_project):
project = cloud.get_project(default_project)
if not project:
module.fail_json(msg='Default project %s is not valid' % default_project)
return project['id']
def main():
    """Entry point: create, update, or delete an OpenStack Identity user
    according to the module parameters, then exit via exit_json/fail_json."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        password=dict(required=False, default=None, no_log=True),
        email=dict(required=False, default=None),
        default_project=dict(required=False, default=None),
        domain=dict(required=False, default=None),
        enabled=dict(default=True, type='bool'),
        state=dict(default='present', choices=['absent', 'present']),
        update_password=dict(default='always', choices=['always',
                                                        'on_create']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(
        argument_spec,
        **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    name = module.params['name']
    # pop() so the password is not passed on to shade.openstack_cloud below.
    password = module.params.pop('password')
    email = module.params['email']
    default_project = module.params['default_project']
    domain = module.params['domain']
    enabled = module.params['enabled']
    state = module.params['state']
    update_password = module.params['update_password']

    try:
        cloud = shade.openstack_cloud(**module.params)
        user = cloud.get_user(name)

        domain_id = None
        if domain:
            # Resolving a domain needs operator (admin) credentials.
            opcloud = shade.operator_cloud(**module.params)
            domain_id = _get_domain_id(opcloud, domain)

        if state == 'present':
            if update_password in ('always', 'on_create'):
                if not password:
                    msg = ("update_password is %s but a password value is "
                           "missing") % update_password
                    module.fail_json(msg=msg)
            default_project_id = None
            if default_project:
                default_project_id = _get_default_project_id(cloud, default_project)

            if user is None:
                user = cloud.create_user(
                    name=name, password=password, email=email,
                    default_project=default_project_id, domain_id=domain_id,
                    enabled=enabled)
                changed = True
            else:
                params_dict = {'email': email, 'enabled': enabled,
                               'password': password,
                               'update_password': update_password}
                if domain_id is not None:
                    params_dict['domain_id'] = domain_id
                if default_project_id is not None:
                    params_dict['default_project_id'] = default_project_id

                if _needs_update(params_dict, user):
                    if update_password == 'always':
                        user = cloud.update_user(
                            user['id'], password=password, email=email,
                            default_project=default_project_id,
                            domain_id=domain_id, enabled=enabled)
                    else:
                        # on_create: never touch the password of an
                        # existing user.
                        user = cloud.update_user(
                            user['id'], email=email,
                            default_project=default_project_id,
                            domain_id=domain_id, enabled=enabled)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, user=user)

        elif state == 'absent':
            if user is None:
                changed=False
            else:
                cloud.delete_user(user['id'])
                changed=True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
# Ansible convention: module_utils star-imports live at the bottom of the
# module, after all definitions.
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()
|
gpl-3.0
|
rtucker/sycamore
|
Sycamore/support/pytz/zoneinfo/Australia/Melbourne.py
|
9
|
6234
|
'''tzinfo timezone information for Australia/Melbourne.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Melbourne(DstTzInfo):
    '''Australia/Melbourne timezone definition. See datetime.tzinfo for details'''
    # Canonical IANA zone name this class represents.
    zone = 'Australia/Melbourne'
    # Generated transition table: the UTC instants at which this zone's
    # offset changes.  Each entry pairs with the _transition_info entry at
    # the same index; do not edit by hand.
    _utc_transition_times = [
d(1,1,1,0,0,0),
d(1916,12,31,14,1,0),
d(1917,3,24,15,0,0),
d(1941,12,31,16,0,0),
d(1942,3,28,15,0,0),
d(1942,9,26,16,0,0),
d(1943,3,27,15,0,0),
d(1943,10,2,16,0,0),
d(1944,3,25,15,0,0),
d(1971,10,30,16,0,0),
d(1972,2,26,16,0,0),
d(1972,10,28,16,0,0),
d(1973,3,3,16,0,0),
d(1973,10,27,16,0,0),
d(1974,3,2,16,0,0),
d(1974,10,26,16,0,0),
d(1975,3,1,16,0,0),
d(1975,10,25,16,0,0),
d(1976,3,6,16,0,0),
d(1976,10,30,16,0,0),
d(1977,3,5,16,0,0),
d(1977,10,29,16,0,0),
d(1978,3,4,16,0,0),
d(1978,10,28,16,0,0),
d(1979,3,3,16,0,0),
d(1979,10,27,16,0,0),
d(1980,3,1,16,0,0),
d(1980,10,25,16,0,0),
d(1981,2,28,16,0,0),
d(1981,10,24,16,0,0),
d(1982,3,6,16,0,0),
d(1982,10,30,16,0,0),
d(1983,3,5,16,0,0),
d(1983,10,29,16,0,0),
d(1984,3,3,16,0,0),
d(1984,10,27,16,0,0),
d(1985,3,2,16,0,0),
d(1985,10,26,16,0,0),
d(1986,3,15,16,0,0),
d(1986,10,18,16,0,0),
d(1987,3,14,16,0,0),
d(1987,10,17,16,0,0),
d(1988,3,19,16,0,0),
d(1988,10,29,16,0,0),
d(1989,3,18,16,0,0),
d(1989,10,28,16,0,0),
d(1990,3,17,16,0,0),
d(1990,10,27,16,0,0),
d(1991,3,2,16,0,0),
d(1991,10,26,16,0,0),
d(1992,2,29,16,0,0),
d(1992,10,24,16,0,0),
d(1993,3,6,16,0,0),
d(1993,10,30,16,0,0),
d(1994,3,5,16,0,0),
d(1994,10,29,16,0,0),
d(1995,3,25,16,0,0),
d(1995,10,28,16,0,0),
d(1996,3,30,16,0,0),
d(1996,10,26,16,0,0),
d(1997,3,29,16,0,0),
d(1997,10,25,16,0,0),
d(1998,3,28,16,0,0),
d(1998,10,24,16,0,0),
d(1999,3,27,16,0,0),
d(1999,10,30,16,0,0),
d(2000,3,25,16,0,0),
d(2000,8,26,16,0,0),
d(2001,3,24,16,0,0),
d(2001,10,27,16,0,0),
d(2002,3,30,16,0,0),
d(2002,10,26,16,0,0),
d(2003,3,29,16,0,0),
d(2003,10,25,16,0,0),
d(2004,3,27,16,0,0),
d(2004,10,30,16,0,0),
d(2005,3,26,16,0,0),
d(2005,10,29,16,0,0),
d(2006,4,1,16,0,0),
d(2006,10,28,16,0,0),
d(2007,3,24,16,0,0),
d(2007,10,27,16,0,0),
d(2008,3,29,16,0,0),
d(2008,10,25,16,0,0),
d(2009,3,28,16,0,0),
d(2009,10,24,16,0,0),
d(2010,3,27,16,0,0),
d(2010,10,30,16,0,0),
d(2011,3,26,16,0,0),
d(2011,10,29,16,0,0),
d(2012,3,24,16,0,0),
d(2012,10,27,16,0,0),
d(2013,3,30,16,0,0),
d(2013,10,26,16,0,0),
d(2014,3,29,16,0,0),
d(2014,10,25,16,0,0),
d(2015,3,28,16,0,0),
d(2015,10,24,16,0,0),
d(2016,3,26,16,0,0),
d(2016,10,29,16,0,0),
d(2017,3,25,16,0,0),
d(2017,10,28,16,0,0),
d(2018,3,24,16,0,0),
d(2018,10,27,16,0,0),
d(2019,3,30,16,0,0),
d(2019,10,26,16,0,0),
d(2020,3,28,16,0,0),
d(2020,10,24,16,0,0),
d(2021,3,27,16,0,0),
d(2021,10,30,16,0,0),
d(2022,3,26,16,0,0),
d(2022,10,29,16,0,0),
d(2023,3,25,16,0,0),
d(2023,10,28,16,0,0),
d(2024,3,30,16,0,0),
d(2024,10,26,16,0,0),
d(2025,3,29,16,0,0),
d(2025,10,25,16,0,0),
d(2026,3,28,16,0,0),
d(2026,10,24,16,0,0),
d(2027,3,27,16,0,0),
d(2027,10,30,16,0,0),
d(2028,3,25,16,0,0),
d(2028,10,28,16,0,0),
d(2029,3,24,16,0,0),
d(2029,10,27,16,0,0),
d(2030,3,30,16,0,0),
d(2030,10,26,16,0,0),
d(2031,3,29,16,0,0),
d(2031,10,25,16,0,0),
d(2032,3,27,16,0,0),
d(2032,10,30,16,0,0),
d(2033,3,26,16,0,0),
d(2033,10,29,16,0,0),
d(2034,3,25,16,0,0),
d(2034,10,28,16,0,0),
d(2035,3,24,16,0,0),
d(2035,10,27,16,0,0),
d(2036,3,29,16,0,0),
d(2036,10,25,16,0,0),
d(2037,3,28,16,0,0),
d(2037,10,24,16,0,0),
        ]
    # (utcoffset seconds, dst seconds, tzname) in force from the matching
    # _utc_transition_times entry onwards.  36000 s = UTC+10 standard time,
    # 39600 s = UTC+11 daylight time; both carry the historical name 'EST'.
    _transition_info = [
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
        ]
# Rebind the name to a singleton instance, the object pytz hands out.
Melbourne = Melbourne()
|
gpl-2.0
|
taoyunxing/trafficserver
|
tests/gold_tests/logging/ccid_ctid_observer.py
|
8
|
1528
|
'''
Examines log generated by ccid_ctid.test.py, returns 0 if valid, 1 if not.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import csv
ccid = []
ctid = []
# Read in ccid and ctid fields from each line of the generated report.
# Any malformed line (wrong field count or a negative value) fails the
# check immediately with exit status 1.
for record in csv.reader(sys.stdin, delimiter=' '):
    if len(record) != 2:
        exit(code=1)
    ccid_value = int(record[0])
    if ccid_value < 0:
        exit(code=1)
    ccid.append(ccid_value)
    ctid_value = int(record[1])
    if ctid_value < 0:
        exit(code=1)
    ctid.append(ctid_value)
# Validate contents of report: lines 3/4 and 5/6 must share a connection
# (same ccid but different ctid) while the first three connection ids all
# differ from their neighbours.
report_is_valid = (
    ccid[0] != ccid[1] and
    ccid[1] != ccid[2] and
    ccid[2] == ccid[3] and
    ctid[2] != ctid[3] and
    ccid[3] != ccid[4] and
    ccid[4] == ccid[5] and
    ctid[4] != ctid[5])
# Exit 0 only when the report matched the expected pattern.
exit(code=0 if report_is_valid else 1)
|
apache-2.0
|
bregman-arie/ansible
|
lib/ansible/modules/network/vyos/vyos_system.py
|
68
|
6307
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: "vyos_system"
version_added: "2.3"
author: "Nathaniel Case (@qalthos)"
short_description: Run `set system` commands on VyOS devices
description:
- Runs one or more commands on remote devices running VyOS.
This module can also be introspected to validate key parameters before
returning successfully.
extends_documentation_fragment: vyos
notes:
- Tested against VYOS 1.1.7
options:
host_name:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- The new domain name to apply to the device.
name_servers:
description:
- A list of name servers to use with the device. Mutually exclusive with
I(domain_search)
aliases: ['name_server']
domain_search:
description:
- A list of domain names to search. Mutually exclusive with
I(name_server)
state:
description:
- Whether to apply (C(present)) or remove (C(absent)) the settings.
default: present
choices: ['present', 'absent']
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set system hostname vyos01
- set system domain-name foo.example.com
"""
# Usage examples shown by `ansible-doc`.  Fix: the `name_servers` entry was
# missing its trailing colon, making the example invalid YAML.
EXAMPLES = """
- name: configure hostname and domain-name
  vyos_system:
    host_name: vyos01
    domain_name: test.example.com

- name: remove all configuration
  vyos_system:
    state: absent

- name: configure name servers
  vyos_system:
    name_servers:
      - 8.8.8.8
      - 8.8.4.4

- name: configure domain search suffixes
  vyos_system:
    domain_search:
      - sub1.example.com
      - sub2.example.com
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.vyos.vyos import get_config, load_config
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def spec_key_to_device_key(key):
    """Translate a module-spec option name into its VyOS config node name.

    VyOS nodes use dashes where the option names use underscores, and the
    domain-search values live one level deeper ("domain-search domain").
    """
    translated = key.replace('_', '-')
    return translated + ' domain' if translated == 'domain-search' else translated
def config_to_dict(module):
    """Parse the device's `set system ...` configuration into a dict.

    List-valued keys (domain_search, name_server) always default to empty
    lists; scalar keys only appear when present on the device.
    """
    raw = get_config(module)
    parsed = {'domain_search': [], 'name_server': []}
    for entry in raw.split('\n'):
        # Values are quoted in the config dump, so the slices start just
        # past "set system <node> '" and stop at -1 to drop the closing
        # quote.
        if entry.startswith('set system host-name'):
            parsed['host_name'] = entry[22:-1]
        elif entry.startswith('set system domain-name'):
            parsed['domain_name'] = entry[24:-1]
        elif entry.startswith('set system domain-search domain'):
            parsed['domain_search'].append(entry[33:-1])
        elif entry.startswith('set system name-server'):
            parsed['name_server'].append(entry[24:-1])
    return parsed
def spec_to_commands(want, have):
    """Diff desired (`want`) against device (`have`) system settings.

    Returns the `set system` / `delete system` commands needed to reach
    the desired state.  Note: pops the 'state' key out of `want`.
    """
    commands = []
    state = want.pop('state')
    # state='absent' with no other options means "clear everything".
    if state == 'absent' and all(value is None for value in want.values()):
        for key in have:
            commands.append('delete system %s' % spec_key_to_device_key(key))
    list_keys = ('domain_search', 'name_server')
    for key, proposed in want.items():
        if proposed is None:
            continue
        current = have.get(key)
        device_key = spec_key_to_device_key(key)
        if key in list_keys:
            # List keys are reconciled entry by entry against the device.
            if not proposed:
                # An explicitly empty list deletes the whole node.
                commands.append("delete system %s" % device_key)
            for entry in proposed:
                if state == 'absent' and entry in current:
                    commands.append("delete system %s '%s'" % (device_key, entry))
                elif state == 'present' and entry not in current:
                    commands.append("set system %s '%s'" % (device_key, entry))
        elif state == 'absent' and current and proposed:
            commands.append('delete system %s' % device_key)
        elif state == 'present' and proposed and proposed != current:
            commands.append("set system %s '%s'" % (device_key, proposed))
    return commands
def map_param_to_obj(module):
    """Collect the `set system` related module parameters into a dict."""
    wanted = ('host_name', 'domain_name', 'domain_search', 'name_server',
              'state')
    return dict((key, module.params[key]) for key in wanted)
def main():
    """Module entry point: apply or remove VyOS `set system` settings.

    Builds the argument spec, diffs the desired state against the device
    configuration and commits the resulting commands (unless running in
    check mode), then exits via module.exit_json.
    """
    argument_spec = dict(
        host_name=dict(type='str'),
        domain_name=dict(type='str'),
        domain_search=dict(type='list'),
        name_server=dict(type='list', aliases=['name_servers']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )
    argument_spec.update(vyos_argument_spec)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[('domain_name', 'domain_search')],
    )
    warnings = list()
    result = {'changed': False, 'warnings': warnings}
    want = map_param_to_obj(module)
    have = config_to_dict(module)
    commands = spec_to_commands(want, have)
    result['commands'] = commands
    if commands:
        # Only commit the candidate configuration outside of check mode.
        # load_config's return value is not used, so don't bind it to an
        # unused local (the previous `response = ...` was dead).
        commit = not module.check_mode
        load_config(module, commands, commit=commit)
        result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
    main()
|
gpl-3.0
|
awni/tensorflow
|
tensorflow/python/ops/tensor_array_ops.py
|
4
|
9808
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data Flow Operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
# pylint: disable=protected-access
class TensorArray(object):
  """Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.
  This class is meant to be used with dynamic iteration primitives such as
  `While` loops, and supports gradient back-propagation via special "flow"
  control flow dependencies.
  @@handle
  @@flow
  @@read
  @@unpack
  @@split
  @@write
  @@pack
  @@concat
  @@grad
  """
  def __init__(self, dtype, size=None, dynamic_size=None,
               tensor_array_name=None,
               handle=None, flow=None, name=None):
    """Construct a new TensorArray or wrap an existing TensorArray handle.
    Args:
      dtype: (required) data type of the TensorArray.
      size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
        Required if handle is not provided.
      dynamic_size: (optional) Python bool: If true, writes to the TensorArray
        can grow the TensorArray past its initial size. Default: False.
      tensor_array_name: (optional) Python string: the name of the TensorArray.
        This is used when creating the TensorArray handle. If this value is
        set, handle should be None.
      handle: (optional) A `Tensor` handle to an existing TensorArray. If this
        is set, tensor_array_name should be None.
      flow: (optional) A float `Tensor` scalar coming from an existing
        TensorArray.flow.
      name: A name for the operation (optional).
    Raises:
      ValueError: if both handle and tensor_array_name are provided.
      TypeError: if handle is provided but is not a Tensor.
    """
    # There are two mutually exclusive construction modes: wrap an
    # existing handle, or create a fresh array from size/dynamic_size.
    if handle is not None and tensor_array_name:
      raise ValueError(
          "Cannot construct with both handle and tensor_array_name")
    if handle is not None and not isinstance(handle, ops.Tensor):
      raise TypeError("Handle must be a Tensor")
    if handle is None and size is None:
      raise ValueError("Size must be provided if handle is not provided")
    if handle is not None and size is not None:
      raise ValueError("Cannot provide both a handle and size "
                       "at the same time")
    if handle is not None and dynamic_size is not None:
      raise ValueError("Cannot provide both a handle and dynamic_size "
                       "at the same time")
    dynamic_size = dynamic_size or False
    self._dtype = dtype
    with ops.op_scope([handle, size, flow], name, "TensorArray") as scope:
      if handle is not None:
        self._handle = handle
      else:
        self._handle = gen_data_flow_ops._tensor_array(
            dtype=dtype, size=size, dynamic_size=dynamic_size,
            tensor_array_name=tensor_array_name, name=scope)
      if flow is not None:
        self._flow = flow
      else:
        # Fresh arrays start with a zero-valued float32 scalar; later ops
        # thread new flow values through to order their execution.
        self._flow = constant_op.constant(0, dtype=_dtypes.float32)
  @property
  def flow(self):
    """The flow `Tensor` forcing ops leading to this TensorArray state."""
    return self._flow
  @property
  def dtype(self):
    """The data type of this TensorArray."""
    return self._dtype
  @property
  def handle(self):
    """The reference to the TensorArray."""
    return self._handle
  def grad(self, source, flow=None):
    """Return a `TensorArray` wrapping this array's gradient handle.
    Args:
      source: gradient source identifier forwarded to the TensorArrayGrad op.
      flow: (optional) flow `Tensor` gating creation of the gradient array;
        defaults to this array's current flow.
    Returns:
      A `TensorArray` of the same dtype wrapping the gradient handle.
    """
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized. This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    if flow is None:
      flow = self.flow
    g_handle = gen_data_flow_ops._tensor_array_grad(
        handle=self._handle, source=source, flow_in=flow)
    g = TensorArray(dtype=self._dtype, handle=g_handle, flow=flow)
    return g
  def read(self, index, name=None):
    """Read the value at location `index` in the TensorArray."""
    value = gen_data_flow_ops._tensor_array_read(
        handle=self._handle, index=index, flow_in=self._flow, dtype=self._dtype,
        name=name)
    return value
  def write(self, index, value, name=None):
    """Write `value` into index `index` of the TensorArray.
    Returns a new `TensorArray` object carrying the updated flow.
    """
    flow_out = gen_data_flow_ops._tensor_array_write(
        handle=self._handle, index=index, value=value, flow_in=self._flow,
        name=name)
    # Size below is ignored
    ta = TensorArray(dtype=self._dtype, handle=self._handle)
    ta._flow = flow_out
    return ta
  def pack(self, name=None):
    """Return the values in the TensorArray as a packed `Tensor`."""
    value = gen_data_flow_ops._tensor_array_pack(
        handle=self._handle, flow_in=self._flow, dtype=self._dtype,
        name=name)
    return value
  def concat(self, name=None):
    """Return the values in the TensorArray as a concatenated `Tensor`."""
    # The op also returns per-element lengths, which are discarded here.
    value, _ = gen_data_flow_ops._tensor_array_concat(
        handle=self._handle, flow_in=self._flow, dtype=self._dtype,
        name=name)
    return value
  def unpack(self, value, name=None):
    """Unpack the values of a `Tensor` into the TensorArray.
    Returns a new `TensorArray` object carrying the updated flow.
    """
    flow_out = gen_data_flow_ops._tensor_array_unpack(
        handle=self._handle, value=value, flow_in=self._flow,
        name=name)
    ta = TensorArray(dtype=self._dtype, handle=self._handle)
    ta._flow = flow_out
    return ta
  def split(self, value, lengths, name=None):
    """Split the values of a `Tensor` into the TensorArray.
    Returns a new `TensorArray` object carrying the updated flow.
    """
    with ops.op_scope(
        [self._handle, value, lengths], name, "TensorArraySplit"):
      # The split op expects int64 lengths; accept anything convertible.
      lengths = math_ops.to_int64(lengths)
      flow_out = gen_data_flow_ops._tensor_array_split(
          handle=self._handle, value=value, lengths=lengths, flow_in=self._flow,
          name=name)
    ta = TensorArray(dtype=self._dtype, handle=self._handle)
    ta._flow = flow_out
    return ta
  def size(self, name=None):
    """Return the size of the TensorArray."""
    return gen_data_flow_ops._tensor_array_size(
        handle=self._handle, flow_in=self.flow, name=name)
  def close(self, name=None):
    """Close the current TensorArray."""
    return gen_data_flow_ops._tensor_array_close(
        handle=self._handle, name=name)
@ops.RegisterShape("TensorArray")
def _TensorArrayShape(op):
  """Shape function for TensorArray: scalar size in, 2-vector handle out."""
  # size is a scalar
  op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
  return [tensor_shape.vector(2)]
@ops.RegisterShape("TensorArrayRead")
def _TensorArrayReadShape(op):
  """Shape function for TensorArrayRead: the read value's shape is unknown."""
  # handle, index, flow_in
  op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
  op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
  op.inputs[2].get_shape().merge_with(tensor_shape.scalar())
  # value
  return [tensor_shape.unknown_shape()]
@ops.RegisterShape("TensorArrayWrite")
def _TensorArrayWriteShape(op):
  """Shape function for TensorArrayWrite: emits a scalar flow output."""
  # handle, index, value, flow_in
  op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
  op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
  op.inputs[3].get_shape().merge_with(tensor_shape.scalar())
  # flow_out
  return [tensor_shape.scalar()]
@ops.RegisterShape("TensorArraySize")
def _TensorArraySizeShape(op):
  """Shape function for TensorArraySize: handle in, scalar size out."""
  op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
  return [tensor_shape.scalar()]
@ops.RegisterShape("TensorArrayClose")
def _TensorArrayCloseShape(op):
  """Shape function for TensorArrayClose: consumes the handle, no outputs."""
  op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
  return []
@ops.RegisterShape("TensorArrayGrad")
def _TensorArrayGradShape(op):
  """Shape function for TensorArrayGrad: handle in, gradient handle out.
  (The previous docstring claimed "no outputs"; it was copied from
  TensorArrayClose — this op does return a 2-vector handle.)
  """
  op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
  return [tensor_shape.vector(2)]
@ops.RegisterShape("TensorArrayPack")
def _TensorArrayPackShape(op):
  """Shape function for TensorArrayPack: the packed value's shape is unknown."""
  # handle, flow_in
  op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
  op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
  # value
  return [tensor_shape.unknown_shape()]
@ops.RegisterShape("TensorArrayConcat")
def _TensorArrayConcatShape(op):
  """Shape function for TensorArrayConcat: unknown value plus a lengths vector."""
  # handle, flow_in
  op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
  op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
  # value, lengths
  return [tensor_shape.unknown_shape(), tensor_shape.vector(None)]
@ops.RegisterShape("TensorArraySplit")
def _TensorArraySplitShape(op):
  """Shape function for TensorArraySplit: emits a scalar flow output."""
  # handle, value, lengths, flow_in
  op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
  op.inputs[2].get_shape().merge_with(tensor_shape.vector(None))
  op.inputs[3].get_shape().merge_with(tensor_shape.scalar())
  # flow_out
  return [tensor_shape.scalar()]
@ops.RegisterShape("TensorArrayUnpack")
def _TensorArrayUnpackShape(op):
  """Shape function for TensorArrayUnpack: emits a scalar flow output."""
  # handle, value, flow_in
  op.inputs[0].get_shape().merge_with(tensor_shape.vector(2))
  op.inputs[2].get_shape().merge_with(tensor_shape.scalar())
  # flow_out
  return [tensor_shape.scalar()]
# pylint: enable=protected-access
|
apache-2.0
|
dynaryu/inasafe
|
safe/utilities/analysis_handler.py
|
1
|
26118
|
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid **GUI InaSAFE Wizard Dialog.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'qgis@borysjurgiel.pl'
__revision__ = '$Format:%H$'
__date__ = '21/02/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import os
import logging
# noinspection PyPackageRequirements
from qgis.core import (
QgsCoordinateTransform,
QgsRectangle,
QgsMapLayerRegistry,
QgsCoordinateReferenceSystem)
# noinspection PyPackageRequirements
from PyQt4 import QtGui, QtCore
# noinspection PyPackageRequirements
from PyQt4.QtCore import QObject, QSettings, pyqtSignal
from safe.utilities.keyword_io import KeywordIO
from safe.utilities.utilities import (
get_error_message,
impact_attribution)
from safe.utilities.gis import (
extent_string_to_array,
read_impact_layer,
viewport_geo_array)
from safe.utilities.resources import (
resources_path,
resource_url)
from safe.defaults import (
supporters_logo_path)
from safe.utilities.styling import (
setRasterStyle,
set_vector_graduated_style,
set_vector_categorized_style)
from safe.common.utilities import temp_dir
from safe.common.exceptions import ReadLayerError
from safe.common.signals import (
send_static_message,
send_error_message,
DYNAMIC_MESSAGE_SIGNAL,
STATIC_MESSAGE_SIGNAL,
ERROR_MESSAGE_SIGNAL,
BUSY_SIGNAL,
NOT_BUSY_SIGNAL,
ANALYSIS_DONE_SIGNAL)
from safe import messaging as m
from safe.messaging import styles
from safe.common.exceptions import (
InsufficientOverlapError, TemplateLoadingError)
from safe.report.impact_report import ImpactReport
from safe.gui.tools.impact_report_dialog import ImpactReportDialog
from safe_extras.pydispatch import dispatcher
from safe.utilities.analysis import Analysis
from safe.utilities.extent import Extent
from safe.impact_functions.impact_function_manager import ImpactFunctionManager
# Message styles used when rendering analysis feedback in the wizard.
PROGRESS_UPDATE_STYLE = styles.PROGRESS_UPDATE_STYLE
INFO_STYLE = styles.INFO_STYLE
WARNING_STYLE = styles.WARNING_STYLE
# InaSAFE logo element placed at the top of generated report messages.
LOGO_ELEMENT = m.Image(
    resource_url(
        resources_path('img', 'logos', 'inasafe-logo.png')),
    'InaSAFE Logo')
LOGGER = logging.getLogger('InaSAFE')
class AnalysisHandler(QObject):
"""Analysis handler for the dock and the wizard."""
analysisDone = pyqtSignal(bool)
# noinspection PyUnresolvedReferences
    def __init__(self, parent):
        """Constructor for the class.
        :param parent: Parent widget i.e. the wizard dialog.
        :type parent: QWidget
        """
        QtCore.QObject.__init__(self)
        self.parent = parent
        # Do not delete this
        self.iface = parent.iface
        self.keyword_io = KeywordIO()
        self.impact_function_manager = ImpactFunctionManager()
        # Helper that tracks/draws the user's analysis extent on the canvas.
        self.extent = Extent(self.iface)
        # Populated by init_analysis() / setup_and_run_analysis().
        self.analysis = None
        self.composer = None
        # Values for settings these get set in read_settings.
        self.run_in_thread_flag = None
        self.zoom_to_impact_flag = None
        self.hide_exposure_flag = None
        self.clip_hard = None
        self.show_intermediate_layers = None
        self.show_rubber_bands = False
        self.last_analysis_rubberband = None
        # This is a rubber band to show what the AOI of the
        # next analysis will be. Also added in 2.1.0
        self.next_analysis_rubberband = None
        # Load persisted user preferences into the attributes above.
        self.read_settings()
def enable_signal_receiver(self):
"""Setup dispatcher for all available signal from Analysis.
.. note:: Adapted from the dock
"""
dispatcher.connect(
self.show_busy,
signal=BUSY_SIGNAL)
dispatcher.connect(
self.hide_busy,
signal=NOT_BUSY_SIGNAL)
dispatcher.connect(
self.completed,
signal=ANALYSIS_DONE_SIGNAL)
# noinspection PyArgumentEqualDefault
dispatcher.connect(
self.show_dynamic_message,
signal=DYNAMIC_MESSAGE_SIGNAL)
# noinspection PyArgumentEqualDefault,PyUnresolvedReferences
dispatcher.connect(
self.parent.wvResults.static_message_event,
signal=STATIC_MESSAGE_SIGNAL,
sender=dispatcher.Any)
# noinspection PyArgumentEqualDefault,PyUnresolvedReferences
dispatcher.connect(
self.parent.wvResults.error_message_event,
signal=ERROR_MESSAGE_SIGNAL,
sender=dispatcher.Any)
def disable_signal_receiver(self):
"""Remove dispatcher for all available signal from Analysis.
.. note:: Adapted from the dock
"""
dispatcher.disconnect(
self.show_busy,
signal=BUSY_SIGNAL)
dispatcher.disconnect(
self.hide_busy,
signal=NOT_BUSY_SIGNAL)
dispatcher.disconnect(
self.completed,
signal=ANALYSIS_DONE_SIGNAL)
dispatcher.disconnect(
self.show_dynamic_message,
signal=DYNAMIC_MESSAGE_SIGNAL)
    def show_dynamic_message(self, sender, message):
        """Send a dynamic message to the message viewer.
        Dynamic messages are appended to any existing content in the
        MessageViewer.
        .. note:: Modified from the dock
        :param sender: The object that sent the message.
        :type sender: Object, None
        :param message: An instance of our rich message class.
        :type message: Message
        """
        # TODO Hardcoded step - may overflow, if number of messages increase
        # Every incoming message nudges the progress bar by a fixed 15 units.
        # noinspection PyUnresolvedReferences
        self.parent.pbProgress.setValue(self.parent.pbProgress.value() + 15)
        # noinspection PyUnresolvedReferences
        self.parent.wvResults.dynamic_message_event(sender, message)
    def read_settings(self):
        """Restore settings from QSettings.
        Do this on init and after changing options in the options dialog.
        """
        settings = QSettings()
        flag = bool(settings.value(
            'inasafe/showRubberBands', False, type=bool))
        self.extent.show_rubber_bands = flag
        # Restore the previously saved user-defined analysis extent, if any.
        try:
            extent = settings.value('inasafe/analysis_extent', '', type=str)
            crs = settings.value('inasafe/analysis_extent_crs', '', type=str)
        except TypeError:
            # Any bogus stuff in settings and we just clear them
            extent = ''
            crs = ''
        if extent != '' and crs != '':
            extent = extent_string_to_array(extent)
            try:
                self.extent.user_extent = QgsRectangle(*extent)
                self.extent.user_extent_crs = QgsCoordinateReferenceSystem(crs)
                self.extent.show_user_analysis_extent()
            except TypeError:
                # Malformed extent array or CRS string: discard both.
                self.extent.user_extent = None
                self.extent.user_extent_crs = None
        flag = settings.value(
            'inasafe/useThreadingFlag', False, type=bool)
        self.run_in_thread_flag = flag
        flag = settings.value(
            'inasafe/setZoomToImpactFlag', True, type=bool)
        self.zoom_to_impact_flag = flag
        # whether exposure layer should be hidden after model completes
        flag = settings.value(
            'inasafe/setHideExposureFlag', False, type=bool)
        self.hide_exposure_flag = flag
        # whether to 'hard clip' layers (e.g. cut buildings in half if they
        # lie partially in the AOI
        self.clip_hard = settings.value('inasafe/clip_hard', False, type=bool)
        # whether to show or not postprocessing generated layers
        self.show_intermediate_layers = settings.value(
            'inasafe/show_intermediate_layers', False, type=bool)
        # whether to show or not dev only options
        # noinspection PyAttributeOutsideInit
        self.developer_mode = settings.value(
            'inasafe/developer_mode', False, type=bool)
        # whether to show or not a custom Logo
        # noinspection PyAttributeOutsideInit
        self.organisation_logo_path = settings.value(
            'inasafe/organisation_logo_path',
            supporters_logo_path(),
            type=str)
# noinspection PyUnresolvedReferences
def show_busy(self):
"""Lock buttons and enable the busy cursor."""
self.parent.pbnNext.setEnabled(False)
self.parent.pbnBack.setEnabled(False)
self.parent.pbnCancel.setEnabled(False)
QtGui.qApp.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
self.parent.repaint()
QtGui.qApp.processEvents()
# noinspection PyUnresolvedReferences
def hide_busy(self):
"""Unlock buttons A helper function to indicate processing is done."""
self.parent.pbnNext.setEnabled(True)
self.parent.pbnBack.setEnabled(True)
self.parent.pbnCancel.setEnabled(True)
self.parent.repaint()
QtGui.qApp.restoreOverrideCursor()
    def analysis_error(self, exception, message):
        """A helper to spawn an error and halt processing.
        An exception will be logged, busy status removed and a message
        displayed.
        .. note:: Copied from the dock
        :param message: an ErrorMessage to display
        :type message: ErrorMessage, Message
        :param exception: An exception that was raised
        :type exception: Exception
        """
        self.hide_busy()
        LOGGER.exception(message)
        # Wrap the raw exception with its display context before showing it.
        message = get_error_message(exception, context=message)
        send_error_message(self, message)
        # Tell listeners the analysis finished unsuccessfully.
        self.analysisDone.emit(False)
def setup_and_run_analysis(self):
"""Setup and execute the analysis"""
self.enable_signal_receiver()
self.show_busy()
self.init_analysis()
try:
self.analysis.setup_analysis()
except InsufficientOverlapError as e:
raise e
clip_parameters = self.analysis.impact_function.clip_parameters
self.extent.show_last_analysis_extent(
clip_parameters['adjusted_geo_extent'])
# Start the analysis
self.analysis.run_analysis()
self.disable_signal_receiver()
# noinspection PyUnresolvedReferences
def init_analysis(self):
"""Setup analysis to make it ready to work.
.. note:: Copied or adapted from the dock
"""
self.analysis = Analysis()
# Impact Function
impact_function = self.impact_function_manager.get(
self.parent.selected_function()['id'])
impact_function.parameters = self.parent.if_params
self.analysis.impact_function = impact_function
# Layers
self.analysis.hazard = self.parent.hazard_layer
self.analysis.exposure = self.parent.exposure_layer
self.analysis.aggregation = self.parent.aggregation_layer
# TODO test if the implement aggregation layer works!
# Variables
self.analysis.clip_hard = self.clip_hard
self.analysis.show_intermediate_layers = self.show_intermediate_layers
viewport = viewport_geo_array(self.iface.mapCanvas())
self.analysis.viewport_extent = viewport
# Extent
if self.analysis.user_extent:
self.analysis.user_extent = self.extent.user_extent
self.analysis.user_extent_crs = self.extent.user_extent_crs
    # noinspection PyUnresolvedReferences
    def completed(self):
        """Slot activated when the process is done.

        Loads the engine impact layer into QGIS, builds and displays the
        report, then restores the UI (progress bar, report buttons, busy
        state) and emits ``analysisDone``.

        .. note:: Adapted from the dock
        """
        # Try to run completion code
        try:
            from datetime import datetime
            LOGGER.debug(datetime.now())
            LOGGER.debug('get engine impact layer')
            LOGGER.debug(self.analysis is None)
            engine_impact_layer = self.analysis.impact_layer
            # Load impact layer into QGIS
            qgis_impact_layer = read_impact_layer(engine_impact_layer)
            report = self.show_results(
                qgis_impact_layer, engine_impact_layer)
        except Exception, e:  # pylint: disable=W0703
            # FIXME (Ole): This branch is not covered by the tests
            # NOTE(review): analysis_error is documented above with the
            # message first and the exception second, but is called here as
            # (exception, message) -- verify the actual signature.
            self.analysis_error(e, self.tr('Error loading impact layer.'))
        else:
            # On success, display generated report
            impact_path = qgis_impact_layer.source()
            message = m.Message(report)
            # message.add(m.Heading(self.tr('View processing log as HTML'),
            # **INFO_STYLE))
            # message.add(m.Link('file://%s' % self.parent.wvResults.log_path))
            # noinspection PyTypeChecker
            send_static_message(self, message)
            self.parent.wvResults.impact_path = impact_path
        # UI cleanup runs whether the analysis succeeded or failed.
        self.parent.pbProgress.hide()
        self.parent.lblAnalysisStatus.setText('Analysis done.')
        self.parent.pbnReportWeb.show()
        self.parent.pbnReportPDF.show()
        self.parent.pbnReportComposer.show()
        self.hide_busy()
        self.analysisDone.emit(True)
    def show_results(self, qgis_impact_layer, engine_impact_layer):
        """Helper function for slot activated when the process is done.

        Writes the postprocessing report into the layer keywords, styles the
        impact layer (vector or raster), adds it to the QGIS registry and
        assembles the report message.

        .. note:: Adapted from the dock

        :param qgis_impact_layer: A QGIS layer representing the impact.
        :type qgis_impact_layer: QgsMapLayer, QgsVectorLayer, QgsRasterLayer

        :param engine_impact_layer: A safe_layer representing the impact.
        :type engine_impact_layer: ReadLayer

        :returns: Provides a report for writing to the dock.
        :rtype: Message

        :raises ReadLayerError: if the impact layer is neither vector nor
            raster.
        """
        keywords = self.keyword_io.read_keywords(qgis_impact_layer)
        # write postprocessing report to keyword
        output = self.analysis.postprocessor_manager.get_output(
            self.analysis.aggregator.aoi_mode)
        keywords['postprocessing_report'] = output.to_html(
            suppress_newlines=True)
        self.keyword_io.write_keywords(qgis_impact_layer, keywords)
        # Get tabular information from impact layer
        report = m.Message()
        report.add(LOGO_ELEMENT)
        report.add(m.Heading(self.tr(
            'Analysis Results'), **INFO_STYLE))
        report.add(self.keyword_io.read_keywords(
            qgis_impact_layer, 'impact_summary'))
        # Get requested style for impact layer of either kind
        style = engine_impact_layer.get_style_info()
        style_type = engine_impact_layer.get_style_type()
        # Determine styling for QGIS layer
        if engine_impact_layer.is_vector:
            LOGGER.debug('myEngineImpactLayer.is_vector')
            if not style:
                # Set default style if possible
                pass
            elif style_type == 'categorizedSymbol':
                LOGGER.debug('use categorized')
                set_vector_categorized_style(qgis_impact_layer, style)
            elif style_type == 'graduatedSymbol':
                LOGGER.debug('use graduated')
                set_vector_graduated_style(qgis_impact_layer, style)
        elif engine_impact_layer.is_raster:
            LOGGER.debug('myEngineImpactLayer.is_raster')
            if not style:
                qgis_impact_layer.setDrawingStyle("SingleBandPseudoColor")
                # qgis_impact_layer.setColorShadingAlgorithm(
                #     QgsRasterLayer.PseudoColorShader)
            else:
                setRasterStyle(qgis_impact_layer, style)
        else:
            message = self.tr(
                'Impact layer %s was neither a raster or a vector layer') % (
                    qgis_impact_layer.source())
            # noinspection PyExceptionInherit
            raise ReadLayerError(message)
        # Add layers to QGIS
        layers_to_add = []
        if self.show_intermediate_layers:
            layers_to_add.append(self.analysis.aggregator.layer)
        layers_to_add.append(qgis_impact_layer)
        # noinspection PyArgumentList
        QgsMapLayerRegistry.instance().addMapLayers(layers_to_add)
        # make sure it is active in the legend - needed since QGIS 2.4
        self.iface.setActiveLayer(qgis_impact_layer)
        # then zoom to it
        if self.zoom_to_impact_flag:
            self.iface.zoomToActiveLayer()
        if self.hide_exposure_flag:
            exposure_layer = self.analysis.exposure_layer
            legend = self.iface.legendInterface()
            legend.setLayerVisible(exposure_layer, False)
        # append postprocessing report
        report.add(output.to_html())
        # Layer attribution comes last
        report.add(impact_attribution(keywords).to_html(True))
        # Return text to display in report panel
        return report
    def print_map(self, mode='pdf'):
        """Open impact report dialog that used define report options.

        Validates the active layer, collects report options from
        :class:`ImpactReportDialog`, builds an :class:`ImpactReport` and
        either prints it to PDF or opens it in the QGIS composer.

        :param mode: Mode for report - defaults to PDF. Any other value
            opens the map in the composer instead.
        :type mode: str
        """
        # Check if selected layer is valid
        impact_layer = self.iface.activeLayer()
        if impact_layer is None:
            # noinspection PyCallByClass,PyTypeChecker,PyArgumentList
            QtGui.QMessageBox.warning(
                self.parent,
                self.tr('InaSAFE'),
                self.tr(
                    'Please select a valid impact layer before trying to '
                    'print.'))
            return
        # Open Impact Report Dialog
        print_dialog = ImpactReportDialog(self.iface)
        print_dialog.button_ok = QtGui.QPushButton(self.tr('OK'))
        print_dialog.button_box.addButton(
            print_dialog.button_ok,
            QtGui.QDialogButtonBox.ActionRole)
        # noinspection PyUnresolvedReferences
        print_dialog.button_ok.clicked.connect(print_dialog.accept)
        # Hide per-action buttons; this dialog only collects options here.
        print_dialog.button_save_pdf.hide()
        print_dialog.button_open_composer.hide()
        if not print_dialog.exec_() == QtGui.QDialog.Accepted:
            # noinspection PyTypeChecker
            self.show_dynamic_message(
                self,
                m.Message(
                    m.Heading(self.tr('Map Creator'), **WARNING_STYLE),
                    m.Text(self.tr('Report generation cancelled!'))))
            return
        # Get the extent of the map for report
        use_full_extent = print_dialog.analysis_extent_radio.isChecked()
        if use_full_extent:
            map_crs = self.iface.mapCanvas().mapRenderer().destinationCrs()
            layer_crs = self.iface.activeLayer().crs()
            layer_extent = self.iface.activeLayer().extent()
            if map_crs != layer_crs:
                # Reproject the layer extent into the canvas CRS.
                # noinspection PyCallingNonCallable
                transform = QgsCoordinateTransform(layer_crs, map_crs)
                layer_extent = transform.transformBoundingBox(layer_extent)
            area_extent = layer_extent
        else:
            area_extent = self.iface.mapCanvas().extent()
        # Get selected template path to use
        if print_dialog.default_template_radio.isChecked():
            template_path = print_dialog.template_combo.itemData(
                print_dialog.template_combo.currentIndex())
        else:
            template_path = print_dialog.template_path.text()
            if not os.path.exists(template_path):
                # noinspection PyCallByClass,PyTypeChecker,PyArgumentList
                QtGui.QMessageBox.warning(
                    self.parent,
                    self.tr('InaSAFE'),
                    self.tr('Please select a valid template before printing. '
                            'The template you choose does not exist.'))
                return
        # Instantiate and prepare Report
        # noinspection PyTypeChecker
        self.show_dynamic_message(
            self,
            m.Message(
                m.Heading(self.tr('Map Creator'), **PROGRESS_UPDATE_STYLE),
                m.Text(self.tr('Preparing map and report'))))
        impact_report = ImpactReport(self.iface, template_path, impact_layer)
        impact_report.extent = area_extent
        # Get other setting
        settings = QSettings()
        logo_path = settings.value(
            'inasafe/organisation_logo_path', '', type=str)
        impact_report.organisation_logo = logo_path
        disclaimer_text = settings.value(
            'inasafe/reportDisclaimer', '', type=str)
        impact_report.disclaimer = disclaimer_text
        north_arrow_path = settings.value(
            'inasafe/north_arrow_path', '', type=str)
        impact_report.north_arrow = north_arrow_path
        template_warning_verbose = bool(settings.value(
            'inasafe/template_warning_verbose', True, type=bool))
        # Check if there's missing elements needed in the template
        component_ids = ['safe-logo', 'north-arrow', 'organisation-logo',
                         'impact-map', 'impact-legend']
        impact_report.component_ids = component_ids
        if template_warning_verbose and \
                len(impact_report.missing_elements) != 0:
            title = self.tr('Template is missing some elements')
            question = self.tr(
                'The composer template you are printing to is missing '
                'these elements: %s. Do you still want to continue') % (
                    ', '.join(impact_report.missing_elements))
            # noinspection PyCallByClass,PyTypeChecker
            answer = QtGui.QMessageBox.question(
                self.parent,
                title,
                question,
                QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
            if answer == QtGui.QMessageBox.No:
                return
        create_pdf_flag = bool(mode == 'pdf')
        self.show_busy()
        if create_pdf_flag:
            self.print_map_to_pdf(impact_report)
        else:
            self.open_map_in_composer(impact_report)
        self.hide_busy()
    def print_map_to_pdf(self, impact_report):
        """Print map to PDF given MapReport instance.

        Asks the user for an output path (defaulting to the map title),
        renders the map and table PDFs, opens both in the system PDF viewer
        and reports the saved paths.

        :param impact_report: Impact Report instance that is ready to print
        :type impact_report: ImpactReport
        """
        impact_report.setup_composition()
        # Get Filename
        map_title = impact_report.map_title
        if map_title is not None:
            default_file_name = map_title + '.pdf'
            default_file_name = default_file_name.replace(' ', '_')
        else:
            send_error_message(
                self, self.tr('Keyword "map_title" not found.'))
            return
        # Get output path
        # noinspection PyCallByClass,PyTypeChecker
        output_path = QtGui.QFileDialog.getSaveFileName(
            self.parent,
            self.tr('Write to PDF'),
            os.path.join(temp_dir(), default_file_name),
            self.tr('Pdf File (*.pdf)'))
        output_path = str(output_path)
        if output_path is None or output_path == '':
            # User cancelled the save dialog.
            # noinspection PyTypeChecker
            self.show_dynamic_message(
                self,
                m.Message(
                    m.Heading(self.tr('Map Creator'), **WARNING_STYLE),
                    m.Text(self.tr('Printing cancelled!'))))
            return
        try:
            map_pdf_path, table_pdf_path = impact_report.print_to_pdf(
                output_path)
            # Make sure the file paths can wrap nicely:
            wrapped_map_path = map_pdf_path.replace(os.sep, '<wbr>' + os.sep)
            wrapped_table_path = table_pdf_path.replace(
                os.sep, '<wbr>' + os.sep)
            status = m.Message(
                m.Heading(self.tr('Map Creator'), **INFO_STYLE),
                m.Paragraph(self.tr('Your PDF was created....')),
                m.Paragraph(self.tr(
                    'Opening using the default PDF viewer on your system. '
                    'The generated pdfs were saved as:')),
                m.Paragraph(wrapped_map_path),
                m.Paragraph(self.tr('and')),
                m.Paragraph(wrapped_table_path))
            # noinspection PyCallByClass,PyTypeChecker,PyArgumentList
            QtGui.QDesktopServices.openUrl(
                QtCore.QUrl.fromLocalFile(table_pdf_path))
            # noinspection PyCallByClass,PyTypeChecker,PyArgumentList
            QtGui.QDesktopServices.openUrl(
                QtCore.QUrl.fromLocalFile(map_pdf_path))
            # noinspection PyTypeChecker
            self.show_dynamic_message(self, status)
        # NOTE(review): both handlers below are identical; the separate
        # TemplateLoadingError clause adds nothing -- consider merging.
        except TemplateLoadingError, e:
            send_error_message(self, get_error_message(e))
        except Exception, e:  # pylint: disable=broad-except
            send_error_message(self, get_error_message(e))
    def open_map_in_composer(self, impact_report):
        """Open map in composer given MapReport instance.

        ..note:: (AG) See https://github.com/AIFDR/inasafe/issues/911. We
            need to set the composition to the composer before loading the
            template.

        :param impact_report: Impact Report to be opened in composer.
        :type impact_report: ImpactReport
        """
        impact_report.setup_composition()
        # Order matters: composition must be attached before the template
        # is loaded (see the issue referenced above).
        self.composer = self.iface.createNewComposer()
        self.composer.setComposition(impact_report.composition)
        impact_report.load_template()
        impact_report.draw_composition()
        # Fit In View
        number_pages = impact_report.composition.numPages()
        paper_height = impact_report.composition.paperHeight()
        paper_width = impact_report.composition.paperWidth()
        space_between_pages = impact_report.composition.spaceBetweenPages()
        if number_pages > 0:
            # Total height of all pages plus the gaps between them.
            height = (paper_height * number_pages) + (
                space_between_pages * (number_pages - 1))
            self.composer.fitInView(
                0, 0, paper_width + 1, height + 1, QtCore.Qt.KeepAspectRatio)
|
gpl-3.0
|
vwvww/servo
|
tests/wpt/web-platform-tests/webdriver/tests/sessions/new_session/invalid_capabilities.py
|
12
|
4145
|
#META: timeout=long
import pytest
from webdriver import error
from conftest import product, flatten
@pytest.mark.parametrize("value", [None, 1, "{}", []])
def test_invalid_capabilites(new_session, value):
with pytest.raises(error.InvalidArgumentException):
new_session({"capabilities": value})
@pytest.mark.parametrize("value", [None, 1, "{}", []])
def test_invalid_always_match(new_session, add_browser_capabilites, value):
with pytest.raises(error.InvalidArgumentException):
new_session({"capabilities": {"alwaysMatch": value, "firstMatch": [add_browser_capabilites({})]}})
@pytest.mark.parametrize("value", [None, 1, "[]", {}])
def test_invalid_first_match(new_session, add_browser_capabilites, value):
with pytest.raises(error.InvalidArgumentException):
new_session({"capabilities": {"alwaysMatch": add_browser_capabilites({}), "firstMatch": value}})
# (capability name, list of invalid values) pairs; expanded into individual
# test cases by the parametrize below.  The duplicate {"page load": 10}
# entry in "timeouts" has been removed.
invalid_data = [
    ("acceptInsecureCerts", [1, [], {}, "false"]),
    ("browserName", [1, [], {}, False]),
    ("browserVersion", [1, [], {}, False]),
    ("platformName", [1, [], {}, False]),
    ("pageLoadStrategy", [1, [], {}, False, "invalid", "NONE", "Eager", "eagerblah", "interactive",
                          " eager", "eager "]),
    ("proxy", [1, [], "{}", {"proxyType": "SYSTEM"}, {"proxyType": "systemSomething"},
               {"proxy type": "pac"}, {"proxy-Type": "system"}, {"proxy_type": "system"},
               {"proxytype": "system"}, {"PROXYTYPE": "system"}, {"proxyType": None},
               {"proxyType": 1}, {"proxyType": []}, {"proxyType": {"value": "system"}},
               {" proxyType": "system"}, {"proxyType ": "system"}, {"proxyType ": " system"},
               {"proxyType": "system "}]),
    ("timeouts", [1, [], "{}", False, {"pageLOAD": 10}, {"page load": 10},
                  {"pageLoad": "10"}, {"pageLoad": {"value": 10}},
                  {"invalid": 10}, {"pageLoad": -1}, {"pageLoad": 2**64},
                  {"pageLoad": None}, {"pageLoad": 1.1}, {"pageLoad": 10, "invalid": 10},
                  {" pageLoad": 10}, {"pageLoad ": 10}]),
    ("unhandledPromptBehavior", [1, [], {}, False, "DISMISS", "dismissABC", "Accept",
                                 " dismiss", "dismiss "])
]
@pytest.mark.parametrize("body", [lambda key, value: {"alwaysMatch": {key: value}},
lambda key, value: {"firstMatch": [{key: value}]}])
@pytest.mark.parametrize("key,value", flatten(product(*item) for item in invalid_data))
def test_invalid_values(new_session, add_browser_capabilites, body, key, value):
capabilities = body(key, value)
if "alwaysMatch" in capabilities:
capabilities["alwaysMatch"] = add_browser_capabilites(capabilities["alwaysMatch"])
else:
capabilities["firstMatch"][0] = add_browser_capabilites(capabilities["firstMatch"][0])
with pytest.raises(error.InvalidArgumentException):
resp = new_session({"capabilities": capabilities})
# Legacy / vendor-specific capability names that the spec-conformant
# endpoint must reject as invalid extension capabilities.
invalid_extensions = [
    "firefox",
    "firefox_binary",
    "firefoxOptions",
    "chromeOptions",
    "automaticInspection",
    "automaticProfiling",
    "platform",
    "version",
    "browser",
    "platformVersion",
    "javascriptEnabled",
    "nativeEvents",
    "seleniumProtocol",
    "profile",
    "trustAllSSLCertificates",
    "initialBrowserUrl",
    "requireWindowFocus",
    "logFile",
    "logLevel",
    "safari.options",
    "ensureCleanSession",
]
@pytest.mark.parametrize("body", [lambda key, value: {"alwaysMatch": {key: value}},
lambda key, value: {"firstMatch": [{key: value}]}])
@pytest.mark.parametrize("key", invalid_extensions)
def test_invalid_extensions(new_session, add_browser_capabilites, body, key):
capabilities = body(key, {})
if "alwaysMatch" in capabilities:
capabilities["alwaysMatch"] = add_browser_capabilites(capabilities["alwaysMatch"])
else:
capabilities["firstMatch"][0] = add_browser_capabilites(capabilities["firstMatch"][0])
with pytest.raises(error.InvalidArgumentException):
resp = new_session({"capabilities": capabilities})
|
mpl-2.0
|
vishalbedi/CollegeScorecard
|
backend/flask_app/models.py
|
1
|
7158
|
"""All the database related entities are in this module."""
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import text
import numpy as np
import math
from scipy.stats import lognorm
from sqlalchemy.ext.hybrid import hybrid_property
# Shared SQLAlchemy handle used as the declarative base by all models below.
db = SQLAlchemy()
class Ethnicity(db.Model):
    """Ethnicity Model.

    The Bayes factors (``BF_*`` hybrid properties) from this model are
    used to compute the utility function.  Each ``UGDS_*`` column holds
    the share of the undergraduate population in one ethnic group.
    """
    __tablename__ = 'ethnicity'
    unitID = db.Column(db.Integer, primary_key=True)
    College = db.Column(db.String(80), nullable=False)
    UGDS = db.Column(db.Integer)
    UGDS_WHITE = db.Column(db.Float)
    UGDS_BLACK = db.Column(db.Float)
    UGDS_HISP = db.Column(db.Float)
    UGDS_ASIAN = db.Column(db.Float)
    UGDS_AIAN = db.Column(db.Float)
    UGDS_NHPI = db.Column(db.Float)
    UGDS_2MOR = db.Column(db.Float)
    UGDS_NRA = db.Column(db.Float)
    UGDS_UNKN = db.Column(db.Float)
    @hybrid_property
    def totprob(self):
        # Sum of all group shares; used to sanity-check the row.
        return self.UGDS_WHITE + self.UGDS_2MOR + self.UGDS_AIAN + self.UGDS_ASIAN + self.UGDS_BLACK + self.UGDS_HISP + \
               self.UGDS_NHPI + self.UGDS_NRA + self.UGDS_UNKN
    @hybrid_property
    def probSchool(self):
        # NOTE(review): iterating ``self`` (a single model instance) looks
        # wrong at the Python level -- presumably this was meant to range
        # over a query of all rows; confirm before relying on it.
        return self.UGDS / sum(row.UGDS for row in self)
    def __repr__(self):
        """String representation of the class."""
        return '<ethnicity %r>' % self.College
    @hybrid_property
    def BF_WHITE (self):
        return self.make_bf(self.UGDS_WHITE)
    @hybrid_property
    def BF_BLACK(self):
        return self.make_bf(self.UGDS_BLACK)
    @hybrid_property
    def BF_2MOR(self):
        return self.make_bf(self.UGDS_2MOR)
    @hybrid_property
    def BF_AIAN(self):
        return self.make_bf(self.UGDS_AIAN)
    @hybrid_property
    def BF_ASIAN(self):
        return self.make_bf(self.UGDS_ASIAN)
    @hybrid_property
    def BF_HISP(self):
        # Fixed: previously computed from UGDS_WHITE (copy/paste error),
        # which made the Hispanic Bayes factor identical to BF_WHITE.
        return self.make_bf(self.UGDS_HISP)
    @hybrid_property
    def BF_NHPI(self):
        return self.make_bf(self.UGDS_NHPI)
    @hybrid_property
    def BF_NRA(self):
        return self.make_bf(self.UGDS_NRA)
    @hybrid_property
    def BF_UNKN(self):
        return self.make_bf(self.UGDS_UNKN)
    def make_bf(self, attr):
        """Utility function to create bayes factors.

        A tiny epsilon is added so a zero share never produces log10(0).
        """
        attr = 1.0E-9 + attr
        return math.log10(attr/sum(attr * row.probSchool for row in self))
class Academic(db.Model):
    """Academic Model.

    SAT 25th/75th-percentile subscores per college plus pooled completion
    rate; the hybrid properties expose combined SAT scores and their
    shares across rows.
    """
    __tablename__ = 'academic'
    unitID = db.Column(db.Integer, primary_key=True)
    College = db.Column(db.String(80), nullable=False)
    UGDS = db.Column(db.Integer)
    SATVR25 = db.Column(db.Integer)
    SATMT25 = db.Column(db.Integer)
    SATVR75 = db.Column(db.Integer)
    SATMT75 = db.Column(db.Integer)
    C150_4_POOLED_SUPP = db.Column(db.Integer)
    SAT_AVG = db.Column(db.Integer)
    @hybrid_property
    def SAT_25(self):
        # Combined (verbal + math) 25th-percentile SAT score.
        return self.SATVR25 + self.SATMT25
    @hybrid_property
    def SAT_75(self):
        # Combined (verbal + math) 75th-percentile SAT score.
        return self.SATVR75 + self.SATMT75
    @hybrid_property
    def pSAT_25(self):
        # NOTE(review): ``for row in self`` iterates a single instance and
        # looks wrong in plain Python -- presumably meant to span all rows.
        return self.SAT_25 / sum(row.SAT_25 for row in self)
    @hybrid_property
    def pSAT_75(self):
        return self.SAT_75 / sum(row.SAT_75 for row in self)
    @hybrid_property
    def pSAT_AVG(self):
        return self.SAT_AVG / sum(row.SAT_AVG for row in self)
    @hybrid_property
    def probSchool(self):
        # Share of total undergraduate enrollment represented by this row.
        return self.UGDS / sum(row.UGDS for row in self)
class StudentAid(db.Model):
    """Student AID model.

    Pell-grant history and family-income quintile shares (``fsend_*``)
    per college; Bayes factors are derived from each share.
    """
    __tablename__ = 'student_aid'
    unitID = db.Column(db.Integer, primary_key=True)
    College = db.Column(db.String(80), nullable=False)
    UGDS = db.Column(db.Integer)
    # Fixed: this column was declared as ``db.Column(db.Model)`` --
    # ``db.Model`` is the declarative base, not a column type.  It now
    # uses db.REAL, consistent with the sibling fsend_* columns.
    pell_ever_2005 = db.Column(db.REAL)
    fsend_1_2005 = db.Column(db.REAL)
    fsend_2_2005 = db.Column(db.REAL)
    fsend_3_2005 = db.Column(db.REAL)
    fsend_4_2005 = db.Column(db.REAL)
    fsend_5_2005 = db.Column(db.REAL)
    @hybrid_property
    def probSchool(self):
        # Share of total undergraduate enrollment represented by this row.
        return self.UGDS / sum(row.UGDS for row in self)
    @hybrid_property
    def totprob(self):
        # Sum of the five income-quintile shares (sanity check).
        return self.fsend_1_2005 + self.fsend_2_2005 + self.fsend_3_2005 + self.fsend_4_2005 + self.fsend_5_2005
    @hybrid_property
    def BF_fsend_1_2005(self):
        return self.make_bf(self.fsend_1_2005)
    @hybrid_property
    def BF_fsend_2_2005(self):
        return self.make_bf(self.fsend_2_2005)
    @hybrid_property
    def BF_fsend_3_2005(self):
        return self.make_bf(self.fsend_3_2005)
    @hybrid_property
    def BF_fsend_4_2005(self):
        return self.make_bf(self.fsend_4_2005)
    @hybrid_property
    def BF_fsend_5_2005(self):
        return self.make_bf(self.fsend_5_2005)
    @hybrid_property
    def BF_pell_ever_2005(self):
        return self.make_bf(self.pell_ever_2005)
    def make_bf(self, attr):
        """Create a Bayes factor for ``attr`` (epsilon guards log10(0))."""
        attr = 1.0E-9 + attr
        return math.log10(attr/sum(attr * row.probSchool for row in self))
class Earnings(db.Model):
    """Earnings after graduation model.

    Fits a log-normal distribution to mean/sd earnings six years after
    entry and exposes the probability mass of five earnings bands.
    """
    __tablename__ = 'earnings'
    unitID = db.Column(db.Integer, primary_key=True)
    College = db.Column(db.String(80), nullable=False)
    UGDS = db.Column(db.Integer)
    CDR3 = db.Column(db.REAL)
    RPY_3YR_RT = db.Column(db.REAL)
    RPY_5YR_RT = db.Column(db.REAL)
    RPY_7YR_RT = db.Column(db.REAL)
    mn_earn_wne_p6_2005 = db.Column(db.REAL)
    md_earn_wne_p6_2005 = db.Column(db.REAL)
    pct10_earn_wne_p6_2005 = db.Column(db.REAL)
    pct25_earn_wne_p6_2005 = db.Column(db.REAL)
    pct75_earn_wne_p6_2005 = db.Column(db.REAL)
    pct90_earn_wne_p6_2005= db.Column(db.REAL)
    sd_earn_wne_p6_2005= db.Column(db.REAL)
    @hybrid_property
    def probSchool(self):
        # Share of total undergraduate enrollment represented by this row.
        return self.UGDS / sum(row.UGDS for row in self)
    @hybrid_property
    def sdlog(self):
        # Log-normal sigma implied by the sample mean and sd.
        return math.sqrt(math.log((self.sd_earn_wne_p6_2005 / self.mn_earn_wne_p6_2005) ** 2 + 1))
    @hybrid_property
    def meanlog(self):
        # Log-normal mu implied by the sample mean and sd.
        return math.log(self.mn_earn_wne_p6_2005 ** 2 / math.sqrt(self.mn_earn_wne_p6_2005 ** 2 + self.sd_earn_wne_p6_2005 ** 2))
    @hybrid_property
    def p_le30K (self):
        # P(earnings <= 30K).
        return lognorm.cdf(30.0E3, self.sdlog, 0, math.exp(self.meanlog))
    @hybrid_property
    def p_gt30Kle48K(self):
        return lognorm.cdf(48.0E3, self.sdlog, 0, math.exp(self.meanlog)) - self.p_le30K
    @hybrid_property
    def p_gt48Kle75K(self):
        return lognorm.cdf(75.0E3, self.sdlog, 0, math.exp(self.meanlog)) - lognorm.cdf(48.0E3, self.sdlog, 0, math.exp(self.meanlog))
    @hybrid_property
    def p_gt75Kle110K(self):
        return lognorm.cdf(110.0E3, self.sdlog, 0, math.exp(self.meanlog)) - lognorm.cdf(75.0E3, self.sdlog, 0, math.exp(self.meanlog))
    @hybrid_property
    def p_gt110K(self):
        # Fixed: this previously returned cdf(110K), i.e. P(earnings <=
        # 110K), double-counting the lower bands so totprob could not sum
        # to 1.  The upper tail is 1 - cdf (equivalently lognorm.sf).
        return 1.0 - lognorm.cdf(110.0E3, self.sdlog, 0, math.exp(self.meanlog))
    @hybrid_property
    def totprob(self):
        # Sanity check: the five bands now partition the distribution, so
        # this sums to 1 (up to floating-point error).
        return self.p_le30K + self.p_gt30Kle48K + self.p_gt48Kle75K + self.p_gt75Kle110K + self.p_gt110K
# def getCollegesPerYear():
# query = "SELECT INSTNM college FROM Scorecard Limit 5 "
#
# sql = text(query)
# result = db.engine.execute(sql)
# names = [["Name"]]
# for row in result:
# names.append([row[0]])
# return names
|
mit
|
av8ramit/tensorflow
|
tensorflow/python/saved_model/saved_model_test.py
|
4
|
44336
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import main_op
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver_test_utils
from tensorflow.python.util import compat
# Path (relative to the TF source tree) of a known-good SavedModel fixture.
SAVED_MODEL_PATH = ("cc/saved_model/testdata/half_plus_two/00000123")
def tearDownModule():
  """Delete everything the tests wrote under the temp directory."""
  file_io.delete_recursively(test.get_temp_dir())
@test_util.with_c_api
class SavedModelTest(test.TestCase):
def _get_export_dir(self, label):
if ops._USE_C_API:
label += "_c_api"
return os.path.join(test.get_temp_dir(), label)
def _init_and_validate_variable(self, sess, variable_name, variable_value):
v = variables.Variable(variable_value, name=variable_name)
sess.run(variables.global_variables_initializer())
self.assertEqual(variable_value, v.eval())
  def _build_asset_collection(self, asset_file_name, asset_file_contents,
                              asset_file_tensor_name):
    """Write an asset file and register its path tensor as an asset.

    Returns the ASSET_FILEPATHS graph collection after the addition.
    """
    asset_filepath = os.path.join(
        compat.as_bytes(test.get_temp_dir()), compat.as_bytes(asset_file_name))
    file_io.write_string_to_file(asset_filepath, asset_file_contents)
    asset_file_tensor = constant_op.constant(
        asset_filepath, name=asset_file_tensor_name)
    ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset_file_tensor)
    asset_collection = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
    return asset_collection
  def _validate_asset_collection(self, export_dir, graph_collection_def,
                                 expected_asset_file_name,
                                 expected_asset_file_contents,
                                 expected_asset_tensor_name):
    """Assert a saved asset's contents, filename and tensor name match."""
    assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value
    asset = meta_graph_pb2.AssetFileDef()
    # Only the first asset entry is checked.
    assets_any[0].Unpack(asset)
    assets_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes(expected_asset_file_name))
    actual_asset_contents = file_io.read_file_to_string(assets_path)
    self.assertEqual(expected_asset_file_contents,
                     compat.as_text(actual_asset_contents))
    self.assertEqual(expected_asset_file_name, asset.filename)
    self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name)
  def _validate_inputs_tensor_info(self, builder, tensor_info):
    """Assert that a signature with `tensor_info` as an input is rejected."""
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      foo_signature = signature_def_utils.build_signature_def({
          "foo_inputs": tensor_info
      }, dict(), "foo")
      self.assertRaises(
          AssertionError,
          builder.add_meta_graph_and_variables,
          sess, ["foo"],
          signature_def_map={"foo_key": foo_signature})
  def _validate_outputs_tensor_info(self, builder, tensor_info):
    """Assert that a signature with `tensor_info` as an output is rejected."""
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      foo_signature = signature_def_utils.build_signature_def(
          dict(), {"foo_outputs": tensor_info}, "foo")
      self.assertRaises(
          AssertionError,
          builder.add_meta_graph_and_variables,
          sess, ["foo"],
          signature_def_map={"foo_key": foo_signature})
  def testMaybeSavedModelDir(self):
    """maybe_saved_model_directory is True only for a real SavedModel dir."""
    base_path = test.test_src_dir_path("/python/saved_model")
    self.assertFalse(loader.maybe_saved_model_directory(base_path))
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.assertTrue(loader.maybe_saved_model_directory(base_path))
    # A path that does not exist at all.
    base_path = "complete_garbage"
    self.assertFalse(loader.maybe_saved_model_directory(base_path))
  def testBadSavedModelFileFormat(self):
    """Loading a missing or corrupt saved_model.pb(txt) raises IOError."""
    export_dir = self._get_export_dir("test_bad_saved_model_file_format")
    # Attempt to load a SavedModel from an export directory that does not exist.
    with self.test_session(graph=ops.Graph()) as sess:
      with self.assertRaisesRegexp(IOError,
                                   "SavedModel file does not exist at: %s" %
                                   export_dir):
        loader.load(sess, ["foo"], export_dir)
    os.makedirs(export_dir)
    # Write an invalid binary proto to saved_model.pb.
    path_to_pb = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
    with open(path_to_pb, "w") as f:
      f.write("invalid content")
    with self.test_session(graph=ops.Graph()) as sess:
      with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
                                   constants.SAVED_MODEL_FILENAME_PB):
        loader.load(sess, ["foo"], export_dir)
    # Cleanup the directory and start again.
    file_io.delete_recursively(export_dir)
    os.makedirs(export_dir)
    # Write an invalid text proto to saved_model.pbtxt
    path_to_pbtxt = os.path.join(export_dir,
                                 constants.SAVED_MODEL_FILENAME_PBTXT)
    with open(path_to_pbtxt, "w") as f:
      f.write("invalid content")
    with self.test_session(graph=ops.Graph()) as sess:
      with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
                                   constants.SAVED_MODEL_FILENAME_PBTXT):
        loader.load(sess, ["foo"], export_dir)
  def testVerifySessionGraphUsage(self):
    """Loading into a supplied session restores into that session's graph."""
    export_dir = self._get_export_dir("test_verify_session_graph_usage")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
    # Save the SavedModel to disk.
    builder.save()
    # Build a session and supply it to the load operation.
    sess = session.Session(graph=ops.Graph())
    loader.load(sess, [tag_constants.TRAINING], export_dir)
    # Check the variable within the scope of the session and its graph.
    with sess:
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  def testSequence(self):
    """Builder call-order contract: variables first, and exactly once."""
    export_dir = self._get_export_dir("test_sequence")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Expect an assertion error since add_meta_graph_and_variables() should be
    # invoked before any add_meta_graph() calls.
    with self.test_session(graph=ops.Graph()) as sess:
      self.assertRaises(AssertionError, builder.add_meta_graph, ["foo"])
    # Expect an assertion error for multiple calls of
    # add_meta_graph_and_variables() since weights should be saved exactly once.
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(sess, ["bar"])
      self.assertRaises(AssertionError, builder.add_meta_graph_and_variables,
                        sess, ["baz"])
  def testTags(self):
    """Meta graphs are selected by exact ("all") tag-set matching on load."""
    export_dir = self._get_export_dir("test_tags")
    builder = saved_model_builder.SavedModelBuilder(export_dir)
    # Graph with a single variable. SavedModel invoked to:
    # - add with weights.
    # - a single tag (from predefined constants).
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
    # Graph that updates the single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    # - a single tag (from predefined constants).
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 43)
      builder.add_meta_graph([tag_constants.SERVING])
    # Graph that updates the single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    # - multiple tags (from predefined constants).
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 45)
      builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU])
    # Graph that updates the single variable. SavedModel invoked to:
    # - simply add the model (weights are not updated).
    # - multiple tags (from predefined constants for serving on TPU).
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 45)
      builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU])
    # Graph that updates the single variable. SavedModel is invoked:
    # - to add the model (weights are not updated).
    # - multiple custom tags.
    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 44)
      builder.add_meta_graph(["foo", "bar"])
    # Save the SavedModel to disk.
    builder.save()
    # Restore the graph with a single predefined tag whose variables were saved.
    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, [tag_constants.TRAINING], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with a single predefined tag whose variables were not
    # saved.
    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, [tag_constants.SERVING], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with multiple predefined tags whose variables were not
    # saved.
    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, [tag_constants.SERVING, tag_constants.GPU], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with multiple predefined tags (for serving on TPU)
    # whose variables were not saved.
    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, [tag_constants.SERVING, tag_constants.TPU], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Restore the graph with multiple tags. Provide duplicate tags to test set
    # semantics.
    with self.test_session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo", "bar", "foo"], export_dir)
      self.assertEqual(
          42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    # Try restoring a graph with a non-existent tag. This should yield a runtime
    # error.
    with self.test_session(graph=ops.Graph()) as sess:
      self.assertRaises(RuntimeError, loader.load, sess, ["INVALID"],
                        export_dir)
    # Try restoring a graph where a subset of the tags match. Since tag matching
    # for meta graph defs follows "all" semantics, this should yield a runtime
    # error.
    with self.test_session(graph=ops.Graph()) as sess:
      self.assertRaises(RuntimeError, loader.load, sess, ["foo", "baz"],
                        export_dir)
def testVariables(self):
  """Saves/restores graphs whose variable sets share, subset, or disjoint.

  Weights are checkpointed only by add_meta_graph_and_variables; later
  add_meta_graph calls reuse the already-saved checkpoint.
  """
  export_dir = self._get_export_dir("test_variables")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Graph with two variables. SavedModel invoked to:
  # - add with weights.
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v1", 1)
    self._init_and_validate_variable(sess, "v2", 2)
    builder.add_meta_graph_and_variables(sess, ["foo"])

  # Graph with a single variable (subset of the variables from the previous
  # graph whose weights were saved). SavedModel invoked to:
  # - simply add the model (weights are not updated).
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v2", 3)
    builder.add_meta_graph(["bar"])

  # Graph with a single variable (disjoint set of variables from the previous
  # graph whose weights were saved). SavedModel invoked to:
  # - simply add the model (weights are not updated).
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v3", 4)
    builder.add_meta_graph(["baz"])

  # Save the SavedModel to disk.
  builder.save()

  # Restore the graph with tag "foo", whose variables were saved.
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertEqual(len(collection_vars), 2)
    self.assertEqual(1, collection_vars[0].eval())
    self.assertEqual(2, collection_vars[1].eval())

  # Restore the graph with tag "bar", whose variables were not saved. Only the
  # subset of the variables added to the graph will be restored with the
  # checkpointed value.
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, ["bar"], export_dir)
    collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertEqual(len(collection_vars), 1)
    self.assertEqual(2, collection_vars[0].eval())

  # Try restoring the graph with tag "baz", whose variables were not saved.
  # Since this graph has a disjoint set of variables from the set that was
  # saved, this should raise an error.
  with self.test_session(graph=ops.Graph()) as sess:
    self.assertRaises(errors.NotFoundError, loader.load, sess, ["baz"],
                      export_dir)
def testGraphWithoutVariables(self):
  """A SavedModel with variable-free meta graphs still saves and loads."""
  export_dir = self._get_export_dir("test_graph_has_variables")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Graph with no variables.
  with self.test_session(graph=ops.Graph()) as sess:
    constant_5_name = constant_op.constant(5.0).name
    builder.add_meta_graph_and_variables(sess, ["foo"])

  # Second graph with no variables
  with self.test_session(graph=ops.Graph()) as sess:
    constant_6_name = constant_op.constant(6.0).name
    builder.add_meta_graph(["bar"])

  # Save the SavedModel to disk.
  builder.save()

  # Restore the graph with tag "foo".
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    # Read the constant a from the graph.
    a = ops.get_default_graph().get_tensor_by_name(constant_5_name)
    b = constant_op.constant(6.0)
    c = a * b
    self.assertEqual(30.0, sess.run(c))

  # Restore the graph with tag "bar".
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, ["bar"], export_dir)
    # Read the constant a from the graph.
    a = ops.get_default_graph().get_tensor_by_name(constant_6_name)
    b = constant_op.constant(5.0)
    c = a * b
    self.assertEqual(30.0, sess.run(c))
def testNoOverwrite(self):
  """Creating a second builder over an existing export directory must fail."""
  export_dir = self._get_export_dir("test_no_overwrite")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Graph with a single variable. SavedModel invoked to:
  # - add with weights.
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    builder.add_meta_graph_and_variables(sess, ["foo"])

  # Save the SavedModel to disk in text format.
  builder.save(as_text=True)

  # Restore the graph with tag "foo", whose variables were saved.
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())

  # An attempt to create another builder with the same export directory should
  # result in an assertion error.
  self.assertRaises(AssertionError, saved_model_builder.SavedModelBuilder,
                    export_dir)
def testSaveAsText(self):
  """Round-trips a SavedModel written in text (as_text=True) format."""
  export_dir = self._get_export_dir("test_astext")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Graph with a single variable. SavedModel invoked to:
  # - add with weights.
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    builder.add_meta_graph_and_variables(sess, ["foo"])

  # Graph with the same single variable. SavedModel invoked to:
  # - simply add the model (weights are not updated).
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 43)
    builder.add_meta_graph(["bar"])

  # Save the SavedModel to disk in text format.
  builder.save(as_text=True)

  # Restore the graph with tag "foo", whose variables were saved.
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())

  # Restore the graph with tag "bar", whose variables were not saved.
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, ["bar"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
def testCollections(self):
  """Collection-defs are stored per meta graph, values come from checkpoint."""
  export_dir = self._get_export_dir("test_collections")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Graph with a single variable added to a collection. SavedModel invoked to:
  # - add with weights.
  with self.test_session(graph=ops.Graph()) as sess:
    v = variables.Variable(42, name="v")
    ops.add_to_collection("foo_vars", v)
    sess.run(variables.global_variables_initializer())
    self.assertEqual(42, v.eval())
    builder.add_meta_graph_and_variables(sess, ["foo"])

  # Graph with the same single variable added to a different collection.
  # SavedModel invoked to:
  # - simply add the model (weights are not updated).
  with self.test_session(graph=ops.Graph()) as sess:
    v = variables.Variable(43, name="v")
    ops.add_to_collection("bar_vars", v)
    sess.run(variables.global_variables_initializer())
    self.assertEqual(43, v.eval())
    builder.add_meta_graph(["bar"])

  # Save the SavedModel to disk.
  builder.save()

  # Restore the graph with tag "foo", whose variables were saved. The
  # collection 'foo_vars' should contain a single element. The collection
  # 'bar_vars' should not be found.
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    collection_foo_vars = ops.get_collection("foo_vars")
    self.assertEqual(len(collection_foo_vars), 1)
    self.assertEqual(42, collection_foo_vars[0].eval())
    self.assertEqual(len(ops.get_collection("bar_vars")), 0)

  # Restore the graph with tag "bar", whose variables were not saved. The
  # collection-def exported as part of the meta graph def is updated to
  # reflect the new collection. The value of the variable in the
  # collection-def corresponds to the saved value (from the previous graph
  # with tag "foo").
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, ["bar"], export_dir)
    collection_bar_vars = ops.get_collection("bar_vars")
    self.assertEqual(len(collection_bar_vars), 1)
    self.assertEqual(42, collection_bar_vars[0].eval())
    self.assertEqual(len(ops.get_collection("foo_vars")), 0)
def testSignatureDefs(self):
  """SignatureDef maps are stored per meta graph and keyed independently."""
  export_dir = self._get_export_dir("test_signature_defs")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Graph with a single variable and a single entry in the signature def map.
  # SavedModel is invoked to add with weights.
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    # Build and populate an empty SignatureDef for testing.
    foo_signature = signature_def_utils.build_signature_def(dict(),
                                                            dict(), "foo")
    builder.add_meta_graph_and_variables(
        sess, ["foo"], signature_def_map={"foo_key": foo_signature})

  # Graph with the same single variable and multiple entries in the signature
  # def map. No weights are saved by SavedModel.
  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 43)
    # Build and populate a different SignatureDef for testing.
    bar_signature = signature_def_utils.build_signature_def(dict(),
                                                            dict(), "bar")
    # Also, build a different SignatureDef corresponding to "foo_key" defined
    # in the previous graph.
    foo_new_signature = signature_def_utils.build_signature_def(dict(),
                                                                dict(),
                                                                "foo_new")
    builder.add_meta_graph(
        ["bar"],
        signature_def_map={
            "bar_key": bar_signature,
            "foo_key": foo_new_signature
        })

  # Save the SavedModel to disk.
  builder.save()

  # Restore the graph with tag "foo". The single entry in the SignatureDef map
  # corresponding to "foo_key" should exist.
  with self.test_session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    foo_signature = foo_graph.signature_def
    self.assertEqual(len(foo_signature), 1)
    self.assertEqual("foo", foo_signature["foo_key"].method_name)

  # Restore the graph with tag "bar". The SignatureDef map should have two
  # entries. One corresponding to "bar_key" and another corresponding to the
  # new value of "foo_key".
  with self.test_session(graph=ops.Graph()) as sess:
    bar_graph = loader.load(sess, ["bar"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    bar_signature = bar_graph.signature_def
    self.assertEqual(len(bar_signature), 2)
    self.assertEqual("bar", bar_signature["bar_key"].method_name)
    self.assertEqual("foo_new", bar_signature["foo_key"].method_name)
def testSignatureDefValidation(self):
  """Exercises TensorInfo validation for malformed signature entries.

  NOTE(review): the _validate_*_tensor_info helpers presumably assert that
  the builder rejects these TensorInfos — confirm against the helper
  definitions, which are outside this chunk.
  """
  export_dir = self._get_export_dir("test_signature_def_validation")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # TensorInfo carrying a dtype but no tensor name.
  tensor_without_name = meta_graph_pb2.TensorInfo()
  tensor_without_name.dtype = types_pb2.DT_FLOAT
  self._validate_inputs_tensor_info(builder, tensor_without_name)
  self._validate_outputs_tensor_info(builder, tensor_without_name)

  # TensorInfo carrying a tensor name but no dtype.
  tensor_without_dtype = meta_graph_pb2.TensorInfo()
  tensor_without_dtype.name = "x"
  self._validate_inputs_tensor_info(builder, tensor_without_dtype)
  self._validate_outputs_tensor_info(builder, tensor_without_dtype)

  # Completely empty TensorInfo.
  tensor_empty = meta_graph_pb2.TensorInfo()
  self._validate_inputs_tensor_info(builder, tensor_empty)
  self._validate_outputs_tensor_info(builder, tensor_empty)
def testAssets(self):
  """Assets in the collection are exported; unrelated files are not copied."""
  export_dir = self._get_export_dir("test_assets")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)

    # Build an asset collection.
    ignored_filepath = os.path.join(
        compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
    file_io.write_string_to_file(ignored_filepath, "will be ignored")

    asset_collection = self._build_asset_collection("hello42.txt",
                                                    "foo bar baz",
                                                    "asset_file_tensor")

    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_collection=asset_collection)

  # Save the SavedModel to disk.
  builder.save()

  with self.test_session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                    "hello42.txt", "foo bar baz",
                                    "asset_file_tensor:0")
    # "ignored.txt" was never part of the assets collection, so it must not
    # appear in the export's assets directory.
    ignored_asset_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes("ignored.txt"))
    self.assertFalse(file_io.file_exists(ignored_asset_path))
def testCustomMainOp(self):
  """A custom main_op runs on load and can recompute derived variables."""
  export_dir = self._get_export_dir("test_main_op")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  with self.test_session(graph=ops.Graph()) as sess:
    # Add `v1` and `v2` variables to the graph.
    v1 = variables.Variable(1, name="v1")
    ops.add_to_collection("v", v1)
    v2 = variables.Variable(2, name="v2")
    ops.add_to_collection("v", v2)

    # Initialize another variable `v3` to 42.
    v3 = variables.Variable(42, name="v3")
    ops.add_to_collection("v", v3)

    # Set up an assignment op to be run as part of the main_op.
    with ops.control_dependencies([main_op.main_op()]):
      add_v1_v2 = math_ops.add(v1._ref(), v2._ref())
      custom_main_op = control_flow_ops.group(state_ops.assign(v3, add_v1_v2))

    sess.run(custom_main_op)
    builder.add_meta_graph_and_variables(
        sess, ["foo"], main_op=custom_main_op)

  # Save the SavedModel to disk.
  builder.save()

  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    self.assertEqual(1, ops.get_collection("v")[0].eval())
    self.assertEqual(2, ops.get_collection("v")[1].eval())
    # Evaluates to the sum of the first two variables and assigned as part of
    # the main_op, following a restore.
    self.assertEqual(3, ops.get_collection("v")[2].eval())
def testLegacyInitOp(self):
  """A legacy_init_op runs on load to initialize an unsaved variable."""
  export_dir = self._get_export_dir("test_legacy_init_op")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  with self.test_session(graph=ops.Graph()) as sess:
    # Add `v1` and `v2` variables to the graph.
    v1 = variables.Variable(1, name="v1")
    ops.add_to_collection("v", v1)
    v2 = variables.Variable(2, name="v2")
    ops.add_to_collection("v", v2)

    # Initialize another variable `v3` to 42.
    # `collections=[]` keeps v3 out of GLOBAL_VARIABLES, so it is not saved
    # with the checkpoint and must be produced by the legacy_init_op.
    v3 = variables.Variable(42, name="v3", trainable=False, collections=[])
    ops.add_to_collection("v", v3)

    # Set up an assignment op to be run as part of the legacy_init_op.
    assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
    legacy_init_op = control_flow_ops.group(assign_v3, name="legacy_init_op")

    sess.run(variables.global_variables_initializer())
    builder.add_meta_graph_and_variables(
        sess, ["foo"], legacy_init_op=legacy_init_op)

  # Save the SavedModel to disk.
  builder.save()

  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    self.assertEqual(1, ops.get_collection("v")[0].eval())
    self.assertEqual(2, ops.get_collection("v")[1].eval())
    # Evaluates to the sum of the first two variables and assigned as part of
    # the legacy_init_op, following a restore.
    self.assertEqual(3, ops.get_collection("v")[2].eval())
def testLegacyInitOpWithNonEmptyCollection(self):
  """Builder refuses a legacy_init_op when one is already in the collection."""
  export_dir = self._get_export_dir(
      "test_legacy_init_op_with_non_empty_collection")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  with self.test_session(graph=ops.Graph()) as sess:
    # Initialize variable `v1` to 1.
    v1 = variables.Variable(1, name="v1")
    ops.add_to_collection("v", v1)

    # Initialize another variable `v2` to 42.
    v2 = variables.Variable(42, name="v2", trainable=False, collections=[])
    ops.add_to_collection("v", v2)

    # Set up an assignment op to be run as part of the legacy_init_op.
    assign_v2 = state_ops.assign(v2, v1)
    legacy_init_op = control_flow_ops.group(assign_v2, name="legacy_init_op")

    sess.run(variables.global_variables_initializer())

    # Pre-populate the legacy-init-op collection to trigger the conflict.
    ops.add_to_collection(constants.LEGACY_INIT_OP_KEY,
                          control_flow_ops.no_op())

    # AssertionError should be raised since the LEGACY_INIT_OP_KEY collection
    # is not empty and we don't support multiple init ops.
    with self.assertRaises(AssertionError):
      builder.add_meta_graph_and_variables(
          sess, ["foo"], legacy_init_op=legacy_init_op)
def testMultipleAssets(self):
  """Each meta graph carries its own distinct asset files after export."""
  export_dir = self._get_export_dir("test_multiple_assets")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)

    # Build an asset collection specific to `foo` graph.
    asset_collection = self._build_asset_collection("foo.txt", "content_foo",
                                                    "asset_file_tensor")

    # Add the asset collection as part of the graph with tag "foo".
    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_collection=asset_collection)

  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)

    # Build an asset collection specific to `bar` graph.
    asset_collection = self._build_asset_collection("bar.txt", "content_bar",
                                                    "asset_file_tensor")

    # Add the asset collection as part of the graph with tag "bar".
    builder.add_meta_graph(["bar"], assets_collection=asset_collection)

  # Save the SavedModel to disk.
  builder.save()

  # Check assets restored for graph with tag "foo".
  with self.test_session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                    "foo.txt", "content_foo",
                                    "asset_file_tensor:0")

  # Check assets restored for graph with tag "bar".
  with self.test_session(graph=ops.Graph()) as sess:
    bar_graph = loader.load(sess, ["bar"], export_dir)
    self._validate_asset_collection(export_dir, bar_graph.collection_def,
                                    "bar.txt", "content_bar",
                                    "asset_file_tensor:0")
def testDuplicateAssets(self):
  """An asset filename shared across meta graphs is stored only once."""
  export_dir = self._get_export_dir("test_duplicate_assets")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)

    # Build an asset collection with `foo.txt` that has `foo` specific
    # content.
    asset_collection = self._build_asset_collection("foo.txt", "content_foo",
                                                    "asset_file_tensor")

    # Add the asset collection as part of the graph with tag "foo".
    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_collection=asset_collection)

  with self.test_session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)

    # Build an asset collection with `foo.txt` that has `bar` specific
    # content.
    asset_collection = self._build_asset_collection("foo.txt", "content_bar",
                                                    "asset_file_tensor")

    # Add the asset collection as part of the graph with tag "bar".
    builder.add_meta_graph(["bar"], assets_collection=asset_collection)

  # Save the SavedModel to disk.
  builder.save()

  # Check assets restored for graph with tag "foo".
  with self.test_session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                    "foo.txt", "content_foo",
                                    "asset_file_tensor:0")

  # Check assets restored for graph with tag "bar".
  with self.test_session(graph=ops.Graph()) as sess:
    bar_graph = loader.load(sess, ["bar"], export_dir)

    # Validate the assets for `bar` graph. `foo.txt` should contain the
    # original contents corresponding to `foo` graph since an asset with the
    # same name across multiple graphs is only stored the first time
    self._validate_asset_collection(export_dir, bar_graph.collection_def,
                                    "foo.txt", "content_foo",
                                    "asset_file_tensor:0")
def testOp(self):
  """Ops placed on explicit devices restore and run after loading."""
  export_dir = self._get_export_dir("test_op")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    with sess.graph.device("/cpu:0"):
      v1 = variables.Variable(1, name="v1")
    with sess.graph.device("/cpu:1"):
      v2 = variables.Variable(2, name="v2")

    # v3 is an unsaved variable derived from v1 and v2. It is used to
    # exercise the ability to run an init op when restoring a graph.
    v3 = variables.Variable(1, name="v3", trainable=False, collections=[])
    assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
    init_op = control_flow_ops.group(assign_v3, name="init_op")

    ops.add_to_collection("v", v1)
    ops.add_to_collection("v", v2)
    ops.add_to_collection("v", v3)
    ops.add_to_collection("init_op", init_op)

    sess.run(variables.global_variables_initializer())
    self.assertEqual(1, ops.get_collection("v")[0].eval())
    self.assertEqual(2, ops.get_collection("v")[1].eval())

    builder.add_meta_graph_and_variables(sess, ["foo"])

  # Save the SavedModel to disk.
  builder.save()

  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    loader.load(sess, ["foo"], export_dir)

    # Validate variables, run the init op and verify result.
    self.assertEqual(1, ops.get_collection("v")[0].eval())
    self.assertEqual(2, ops.get_collection("v")[1].eval())
    ops.get_collection("init_op")[0].run()
    self.assertEqual(3, ops.get_collection("v")[2].eval())
def testCustomSaveable(self):
  """Custom SaveableObjects (a checkpointed table) survive a round trip."""
  export_dir = self._get_export_dir("custom_saveable")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    # CheckpointedOp is a key-value table that can be saved across sessions.
    # The table register itself in SAVEABLE_OBJECTS collection.
    v1 = saver_test_utils.CheckpointedOp(name="v1")
    variables.global_variables_initializer().run()
    v1.insert("k1", 3.0).run()
    # Once the table is restored, we can access it through this reference.
    ops.add_to_collection("table_ref", v1.table_ref)
    builder.add_meta_graph_and_variables(sess, ["foo"])

  # Save the SavedModel to disk.
  builder.save()

  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    loader.load(sess, ["foo"], export_dir)

    # Instantiate a wrapper object from the checkpointed reference.
    v1 = saver_test_utils.CheckpointedOp(
        name="v1", table_ref=ops.get_collection("table_ref")[0])
    self.assertEqual(b"k1", v1.keys().eval())
    self.assertEqual(3.0, v1.values().eval())
def testClearDevices(self):
  """clear_devices=True strips placement so loading needs no such device."""
  export_dir = self._get_export_dir("test_clear_devices")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Specify a device and save a variable.
  ops.reset_default_graph()
  with session.Session(
      target="",
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    with sess.graph.device("/cpu:0"):
      self._init_and_validate_variable(sess, "v", 42)
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.TRAINING], clear_devices=True)

  # Save the SavedModel to disk.
  builder.save()

  # Restore the graph with a single predefined tag whose variables were saved
  # without any device information.
  with self.test_session(graph=ops.Graph()) as sess:
    loader.load(sess, [tag_constants.TRAINING], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
def testStripDefaultAttrs(self):
  """Default-valued node attrs are stripped iff strip_default_attrs=True,
  and the loader re-populates them from the OpDef registry on load."""
  if ops._USE_C_API: return  # TODO(skyewm): get this working

  export_dir = self._get_export_dir("test_strip_default_attrs")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Add a graph with two float32 variables and a Complex Op composing them
  # with strip_default_attrs enabled.
  with session.Session(graph=ops.Graph()) as sess:
    real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
    imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
    math_ops.complex(real_num, imag_num, name="complex")
    sess.run(variables.global_variables_initializer())
    builder.add_meta_graph_and_variables(
        sess, ["foo"], strip_default_attrs=True)

  # Add a graph with the same float32 variables and a Complex Op composing
  # them with strip_default_attrs disabled.
  with session.Session(graph=ops.Graph()) as sess:
    real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
    imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
    math_ops.complex(real_num, imag_num, name="complex")
    sess.run(variables.global_variables_initializer())
    builder.add_meta_graph(["bar"], strip_default_attrs=False)

  # Save the SavedModel to disk in text format.
  builder.save(as_text=True)

  # Loading graph "foo" via the loader must restore the defaults for the
  # "Complex" node based on the "Complex" OpDef in the Op registry.
  sess = session.Session(graph=ops.Graph())
  meta_graph_def = loader.load(sess, ["foo"], export_dir)
  complex_node = test_util.get_node_def_from_graph("complex",
                                                   meta_graph_def.graph_def)
  self.assertIn("T", complex_node.attr)
  self.assertIn("Tout", complex_node.attr)

  # Load graph "foo" from disk as-is to verify default attrs are stripped.
  # pylint: disable=protected-access
  saved_model_pb = loader_impl._parse_saved_model(export_dir)
  self.assertIsNotNone(saved_model_pb)
  # pylint: enable=protected-access

  # Locate the meta graph defs for tags "foo" and "bar" in the raw proto.
  meta_graph_foo_def = None
  meta_graph_bar_def = None
  for meta_graph_def in saved_model_pb.meta_graphs:
    if set(meta_graph_def.meta_info_def.tags) == set(["foo"]):
      meta_graph_foo_def = meta_graph_def
    elif set(meta_graph_def.meta_info_def.tags) == set(["bar"]):
      meta_graph_bar_def = meta_graph_def

  self.assertIsNotNone(meta_graph_foo_def)
  self.assertIsNotNone(meta_graph_bar_def)

  # "Complex" Op has 2 attributes with defaults:
  # o "T" : float32. (input type)
  # o "Tout" : complex64. (output type)

  # "Complex" Op in graph "foo" shouldn't have attributes "T" and "Tout".
  # Graph "foo" was saved with strip_default_attrs set to True.
  node_def = test_util.get_node_def_from_graph("complex",
                                               meta_graph_foo_def.graph_def)
  self.assertNotIn("T", node_def.attr)
  self.assertNotIn("Tout", node_def.attr)

  # "Complex" Op in graph "bar" must have attributes "T" and "Tout".
  # Graph "bar" was saved with strip_default_attrs set to False.
  node_def = test_util.get_node_def_from_graph("complex",
                                               meta_graph_bar_def.graph_def)
  self.assertIn("T", node_def.attr)
  self.assertIn("Tout", node_def.attr)
def testStripDefaultAttrsInconsistentConsumerDefaults(self):
  """Loading a stripped model fails when the consumer's OpDef defaults have
  been removed or changed incompatibly since export."""
  if ops._USE_C_API: return  # TODO(skyewm): get this working

  export_dir = self._get_export_dir(
      "test_strip_default_attrs_no_consumer_defaults")
  builder = saved_model_builder.SavedModelBuilder(export_dir)

  # Add a graph with two float32 variables and a Complex Op composing them
  # with strip_default_attrs enabled. This must remove the following
  # defaults for the "Complex" Op:
  # o "T" : float32. (input type)
  # o "Tout" : complex64. (output type)
  with session.Session(graph=ops.Graph()) as sess:
    real_num = variables.Variable(1.0, dtype=dtypes.float32, name="real")
    imag_num = variables.Variable(2.0, dtype=dtypes.float32, name="imag")
    math_ops.complex(real_num, imag_num, name="complex")
    sess.run(variables.global_variables_initializer())
    builder.add_meta_graph_and_variables(
        sess, ["foo"], strip_default_attrs=True)

  # Save the SavedModel to disk in text format.
  builder.save(as_text=True)

  # Update the Op registry to remove defaults for all attrs("T", "Tout") from
  # the "Complex" OpDef.
  # NOTE: this mutates global registry state; the original is kept so it can
  # be restored before the second scenario below.
  complex_op_def = op_def_registry.get_registered_ops()["Complex"]
  original_complex_op_def = op_def_pb2.OpDef()
  original_complex_op_def.CopyFrom(complex_op_def)
  for attr_def in complex_op_def.attr:
    attr_def.ClearField("default_value")

  # Loading the SavedModel via the loader must fail because the SavedModel
  # does not have any attr values for the "Complex" node and the current
  # op registry does not have have any default values for the "Complex" op.
  sess = session.Session(graph=ops.Graph())
  with self.assertRaisesRegexp(
      ValueError,
      "Expected one attr with name .*T(out)?.* in name: \"complex\".*"):
    loader.load(sess, ["foo"], export_dir)

  # Update the Op registry to change the defaults for attr "Tout"
  # (complex64 -> complex128).
  complex_op_def.CopyFrom(original_complex_op_def)
  for attr_def in complex_op_def.attr:
    if attr_def.name == "Tout":
      attr_def.default_value.type = types_pb2.DT_COMPLEX128

  # Loading the SavedModel via the loader must set "Tout" attr_value for the
  # "Complex" node according to the latest defaults (complex128). This is
  # expected to fail the model import as there is no OpKernel registered to
  # handle attrs "T" (float32) and "Tout" (complex128).
  sess = session.Session(graph=ops.Graph())
  with self.assertRaisesRegexp(
      errors.InvalidArgumentError,
      ".*No OpKernel was registered to support Op \'Complex\' with these "
      "attrs..*"):
    loader.load(sess, ["foo"], export_dir)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
|
apache-2.0
|
mtnbikenc/ansible-modules-extras
|
cloud/vmware/vca_vapp.py
|
72
|
8888
|
#!/usr/bin/python
# Copyright (c) 2015 Ansible, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vca_vapp
short_description: Manages vCloud Air vApp instances.
description:
- This module will actively manage vCloud Air vApp instances. Instances
can be created and deleted as well as both deployed and undeployed.
version_added: "2.0"
author: Peter Sprygada (@privateip)
options:
vapp_name:
description:
- The name of the vCloud Air vApp instance
required: yes
template_name:
description:
- The name of the vApp template to use to create the vApp instance. If
the I(state) is not `absent` then the I(template_name) value must be
provided. The I(template_name) must be previously uploaded to the
catalog specified by I(catalog_name)
required: no
default: None
network_name:
description:
- The name of the network that should be attached to the virtual machine
in the vApp. The virtual network specified must already be created in
the vCloud Air VDC. If the I(state) is not 'absent' then the
I(network_name) argument must be provided.
required: no
default: None
network_mode:
description:
- Configures the mode of the network connection.
required: no
default: pool
choices: ['pool', 'dhcp', 'static']
vm_name:
description:
- The name of the virtual machine instance in the vApp to manage.
required: no
default: None
vm_cpus:
description:
- The number of vCPUs to configure for the VM in the vApp. If the
I(vm_name) argument is provided, then this becomes a per VM setting
otherwise it is applied to all VMs in the vApp.
required: no
default: None
vm_memory:
description:
- The amount of memory in MB to allocate to VMs in the vApp. If the
I(vm_name) argument is provided, then this becomes a per VM setting
otherwise it is applied to all VMs in the vApp.
required: no
default: None
operation:
description:
- Specifies an operation to be performed on the vApp.
required: no
default: noop
choices: ['noop', 'poweron', 'poweroff', 'suspend', 'shutdown', 'reboot', 'reset']
state:
description:
- Configures the state of the vApp.
required: no
default: present
choices: ['present', 'absent', 'deployed', 'undeployed']
username:
description:
- The vCloud Air username to use during authentication
required: false
default: None
password:
description:
- The vCloud Air password to use during authentication
required: false
default: None
org:
description:
- The org to login to for creating vapp, mostly set when the service_type is vdc.
required: false
default: None
instance_id:
description:
- The instance id in a vchs environment to be used for creating the vapp
required: false
default: None
host:
description:
- The authentication host to be used when service type is vcd.
required: false
default: None
api_version:
description:
- The api version to be used with the vca
required: false
default: "5.7"
service_type:
description:
- The type of service we are authenticating against
required: false
default: vca
choices: [ "vca", "vchs", "vcd" ]
vdc_name:
description:
- The name of the virtual data center (VDC) where the vm should be created or contains the vAPP.
required: false
default: None
'''
EXAMPLES = '''
- name: Creates a new vApp in a VCA instance
vca_vapp:
    vapp_name: tower
    state: present
    template_name: 'Ubuntu Server 12.04 LTS (amd64 20150127)'
    vdc_name: VDC1
    instance_id: <your instance id here>
    username: <your username here>
    password: <your password here>
'''
# Operation applied when the playbook does not request one explicitly.
DEFAULT_VAPP_OPERATION = 'noop'

# Maps status strings reported by the vCloud Air API to this module's
# operation vocabulary (used by get_instance()).
VAPP_STATUS = {
    'Powered off': 'poweroff',
    'Powered on': 'poweron',
    'Suspended': 'suspend'
}

# Legal values for the module's 'state' and 'operation' parameters.
VAPP_STATES = ['present', 'absent', 'deployed', 'undeployed']
VAPP_OPERATIONS = ['poweron', 'poweroff', 'suspend', 'shutdown',
                   'reboot', 'reset', 'noop']
def get_instance(module):
    """Describe the current vApp as a dict with vapp_name, state and status.

    If the vApp cannot be found (or the lookup raises VcaError), the
    returned dict reports state 'absent' and carries no status key.
    """
    name = module.params['vapp_name']
    # Start from the "not there" answer and refine it if lookup succeeds.
    instance = dict(vapp_name=name, state='absent')
    try:
        vapp = module.get_vapp(name)
        if vapp:
            raw_status = module.vca.get_status(vapp.me.get_status())
            instance['status'] = VAPP_STATUS.get(raw_status, 'unknown')
            instance['state'] = ('deployed' if vapp.me.deployed
                                 else 'undeployed')
    except VcaError:
        # Lookup failures are reported as an absent vApp.
        pass
    return instance
def create(module):
    """Create the vApp described by the module parameters.

    Instantiates the vApp from the named catalog template and blocks
    until the creation task reported by the VCA API completes.
    """
    vdc_name = module.params['vdc_name']
    vapp_name = module.params['vapp_name']
    template_name = module.params['template_name']
    catalog_name = module.params['catalog_name']
    network_name = module.params['network_name']
    network_mode = module.params['network_mode']
    vm_name = module.params['vm_name']
    vm_cpus = module.params['vm_cpus']
    vm_memory = module.params['vm_memory']
    # BUG FIX: 'deploy' is not a member of VAPP_STATES, so the original
    # comparison (state == 'deploy') was always False and newly created
    # vApps were never deployed.  The valid state name is 'deployed'.
    deploy = module.params['state'] == 'deployed'
    poweron = module.params['operation'] == 'poweron'
    task = module.vca.create_vapp(vdc_name, vapp_name, template_name,
                                  catalog_name, network_name, network_mode,
                                  vm_name, vm_cpus, vm_memory, deploy, poweron)
    module.vca.block_until_completed(task)
def delete(module):
    """Remove the named vApp from its virtual data center."""
    module.vca.delete_vapp(module.params['vdc_name'],
                           module.params['vapp_name'])
def do_operation(module):
    """Run a power operation (poweron/poweroff/suspend/...) on the vApp.

    When ``vm_name`` is supplied the operation targets that single VM
    inside the vApp; otherwise it applies to the whole vApp.
    """
    vapp_name = module.params['vapp_name']
    operation = module.params['operation']
    vm_name = module.params.get('vm_name')
    target_vm = module.get_vm(vapp_name, vm_name) if vm_name else None
    # The REST endpoint expects camelCase names for these two actions.
    api_names = {'poweron': 'powerOn', 'poweroff': 'powerOff'}
    operation = api_names.get(operation, operation)
    cmd = 'power:%s' % operation
    module.get_vapp(vapp_name).execute(cmd, 'post', targetVM=target_vm)
def set_state(module):
    """Deploy or undeploy the vApp to match the requested state."""
    state = module.params['state']
    vapp = module.get_vapp(module.params['vapp_name'])
    if state == 'deployed':
        # Power the vApp on as part of deployment when requested.
        power_on = module.params['operation'] == 'poweron'
        if not vapp.deploy(power_on):
            module.fail('unable to deploy vapp')
    elif state == 'undeployed':
        # The API accepts 'powerOff' or 'suspend' as undeploy actions;
        # anything else falls back to the default behaviour (None).
        action = module.params['operation']
        if action == 'poweroff':
            action = 'powerOff'
        elif action != 'suspend':
            action = None
        if not vapp.undeploy(action):
            module.fail('unable to undeploy vapp')
def main():
    # Module entry point: declare accepted parameters, inspect the vApp's
    # current state, and converge it toward the requested state/operation.
    argument_spec = dict(
        vapp_name=dict(required=True),
        vdc_name=dict(required=True),
        template_name=dict(),
        catalog_name=dict(default='Public Catalog'),
        network_name=dict(),
        network_mode=dict(default='pool', choices=['dhcp', 'static', 'pool']),
        vm_name=dict(),
        vm_cpus=dict(),
        vm_memory=dict(),
        operation=dict(default=DEFAULT_VAPP_OPERATION, choices=VAPP_OPERATIONS),
        state=dict(default='present', choices=VAPP_STATES)
    )
    module = VcaAnsibleModule(argument_spec=argument_spec,
                              supports_check_mode=True)
    state = module.params['state']
    operation = module.params['operation']
    # Snapshot of the vApp as it exists now ('absent' when missing).
    instance = get_instance(module)
    result = dict(changed=False)
    if instance and state == 'absent':
        if not module.check_mode:
            delete(module)
        result['changed'] = True
    elif state != 'absent':
        if instance['state'] == 'absent':
            if not module.check_mode:
                create(module)
            result['changed'] = True
        elif instance['state'] != state and state != 'present':
            # Only deploy/undeploy transitions reach here; 'present' means
            # "exists, leave deployment state alone".
            if not module.check_mode:
                set_state(module)
            result['changed'] = True
        # Apply the power operation when it differs from the current
        # status; 'noop' explicitly means "leave power state alone".
        if operation != instance.get('status') and operation != 'noop':
            if not module.check_mode:
                do_operation(module)
            result['changed'] = True
    return module.exit(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.vca import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
arhik/nupic
|
examples/opf/clients/hotgym/anomaly/one_gym/remove_tuesdays.py
|
15
|
1039
|
import csv
import shutil
import datetime
# Input data file (rewritten in place) and its backup copy.
ORIGINAL = "rec-center-hourly.csv"
BACKUP = "rec-center-hourly-backup.csv"
# Timestamp format used in the CSV's first column.
DATE_FORMAT = "%m/%d/%y %H:%M"
def isTuesday(date):
    """Return True when *date* falls on a Tuesday.

    datetime.weekday() numbers Monday as 0, so Tuesday is 1.  The original
    compared with ``is 1``, which relies on CPython's small-integer cache
    and is not a correct integer comparison; ``== 1`` is the proper test.
    """
    return date.weekday() == 1
def withinOctober(date):
    """Return True when *date* lies within October 2010 (inclusive start,
    exclusive end)."""
    start = datetime.datetime(2010, 10, 1)
    end = datetime.datetime(2010, 11, 1)
    return start <= date < end
def run():
    """Rewrite ORIGINAL, flattening consumption to 5.0 on October Tuesdays.

    The untouched input is copied to BACKUP first.  The three CSV header
    rows (names, types, flags) are passed through unchanged.
    """
    shutil.copyfile(ORIGINAL, BACKUP)
    outputCache = ""
    with open(ORIGINAL, 'rb') as inputFile:
        reader = csv.reader(inputFile)
        # This CSV format carries three header rows; copy them verbatim.
        # (Python 2 csv reader: .next() fetches one row.)
        for headerRow in [reader.next(), reader.next(), reader.next()]:
            outputCache += ",".join(headerRow) + "\n"
        for row in reader:
            dateString = row[0]
            timestamp = datetime.datetime.strptime(dateString, DATE_FORMAT)
            consumption = float(row[1])
            if isTuesday(timestamp) and withinOctober(timestamp):
                consumption = 5.0
            outputCache += "%s,%f\n" % (dateString, consumption)
    with open(ORIGINAL, 'wb') as outputFile:
        outputFile.write(outputCache)
if __name__ == "__main__":
run()
|
agpl-3.0
|
abs0/tapiriik
|
tapiriik/services/Pulsstory/pulsstory.py
|
3
|
18516
|
from tapiriik.settings import PULSSTORY_CLIENT_ID, PULSSTORY_CLIENT_SECRET
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.service_record import ServiceRecord
from tapiriik.services.stream_sampling import StreamSampler
from tapiriik.services.auto_pause import AutoPauseCalculator
from tapiriik.services.api import APIException, UserException, UserExceptionType, APIExcludeActivity
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit, WaypointType, Waypoint, Location, Lap
from tapiriik.database import cachedb
from datetime import datetime, timedelta
import requests
import urllib.parse
import json
import logging
import collections
import zipfile
import io
logger = logging.getLogger(__name__)
class PulsstoryService(ServiceBase):
    """tapiriik service connector for pulsstory.com (token-based API)."""
    # Service registry identifiers.
    ID = "pulsstory"
    DisplayName = "pulsstory"
    DisplayAbbreviation = "PLS"
    URLBase = 'https://www.pulsstory.com'
    AuthenticationType = ServiceAuthenticationType.OAuth
    UserProfileURL = URLBase + "/user/{0}/profile"
    AuthenticationNoFrame = True  # Chrome update broke this
    # pulsstory activity-type strings -> tapiriik ActivityType values.
    _activityMappings = {"Running": ActivityType.Running,
                         "Cycling": ActivityType.Cycling,
                         "Mountain Biking": ActivityType.MountainBiking,
                         "Walking": ActivityType.Walking,
                         "Hiking": ActivityType.Hiking,
                         "Downhill Skiing": ActivityType.DownhillSkiing,
                         "Cross-Country Skiing": ActivityType.CrossCountrySkiing,
                         "Snowboarding": ActivityType.Snowboarding,
                         "Skating": ActivityType.Skating,
                         "Swimming": ActivityType.Swimming,
                         "Wheelchair": ActivityType.Wheelchair,
                         "Rowing": ActivityType.Rowing,
                         "Elliptical": ActivityType.Elliptical,
                         "Other": ActivityType.Other}
    SupportedActivities = list(_activityMappings.values())
    # Stream/stat capabilities advertised to the sync engine.
    SupportsHR = True
    SupportsCalories = True
    SupportsCadence = True
    SupportsPower = True
    # API waypoint-type strings -> tapiriik WaypointType.
    # NOTE(review): not referenced anywhere in this file's visible code —
    # possibly vestigial; verify before removing.
    _wayptTypeMappings = {"start": WaypointType.Start, "end": WaypointType.End, "pause": WaypointType.Pause, "resume": WaypointType.Resume}
    def WebInit(self):
        # Send the user to pulsstory's login page; after login, the site
        # redirects to the code-generation endpoint consumed by
        # RetrieveAuthorizationToken.
        self.UserAuthorizationURL = self.URLBase + "/Account/LogOn?&ReturnUrl=/ExternalSyncAPI/GenerateCode"
    def RetrieveAuthorizationToken(self, req, level):
        """Exchange the code from the auth redirect for an access token.

        Returns ``(external_user_id, {"Token": token})`` for storage on
        the service record; raises APIException when the code is rejected.
        """
        # might consider a real OAuth client
        code = req.GET.get("code")
        params = {"code": code, "client_id": PULSSTORY_CLIENT_ID, "client_secret": PULSSTORY_CLIENT_SECRET}
        response = requests.post(self.URLBase + "/ExternalSyncAPI/GenerateToken", data=urllib.parse.urlencode(params), headers={"Content-Type": "application/x-www-form-urlencoded"})
        if response.status_code != 200:
            raise APIException("Invalid code")
        token = response.json()["access_token"]
        # This used to check with GetServiceRecordWithAuthDetails but that's hideously slow on an unindexed field.
        uid = self._getUserId(ServiceRecord({"Authorization": {"Token": token}})) # meh
        return (uid, {"Token": token})
def RevokeAuthorization(self, serviceRecord):
resp = requests.post(self.URLBase + "/ExternalSyncAPI/Deauthorize", data=self._apiData(serviceRecord))
if resp.status_code != 204 and resp.status_code != 200:
raise APIException("Unable to deauthorize pulsstory auth token, status " + str(resp.status_code) + " resp " + resp.text)
pass
    def _apiData(self, serviceRecord):
        # Every pulsstory endpoint authenticates via this POST body.
        return {"access_token": serviceRecord.Authorization["Token"]}
    def _getAPIUris(self, serviceRecord):
        """Fetch the per-user API endpoint URLs, memoized on ``self``."""
        if hasattr(self, "_uris"): # cache these for the life of the batch job at least? hope so
            return self._uris
        else:
            response = requests.post(self.URLBase + "/ExternalSyncAPI/Uris", data=self._apiData(serviceRecord))
            if response.status_code != 200:
                if response.status_code == 401 or response.status_code == 403:
                    raise APIException("No authorization to retrieve user URLs", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
                raise APIException("Unable to retrieve user URLs" + str(response))
            uris = response.json()
            # The API may return relative paths; absolutize string values.
            for k in uris.keys():
                if type(uris[k]) == str:
                    uris[k] = self.URLBase + uris[k]
            self._uris = uris
            return uris
def _getUserId(self, serviceRecord):
resp = requests.post(self.URLBase + "/ExternalSyncAPI/GetUserId", data=self._apiData(serviceRecord))
if resp.status_code != 200:
raise APIException("Unable to retrieve user id" + str(resp));
data = resp.json()
return data["userID"]
    def DownloadActivityList(self, serviceRecord, exhaustive=False):
        """Return ``(activities, exclusions)`` summaries for this user.

        Walks the paginated fitness_activities feed; when *exhaustive* is
        False only the first page is fetched.  Records missing required
        keys become APIExcludeActivity entries rather than failures.
        """
        uris = self._getAPIUris(serviceRecord)
        allItems = []
        pageUri = uris["fitness_activities"]
        while True:
            response = requests.post(pageUri, data=self._apiData(serviceRecord))
            if response.status_code != 200:
                if response.status_code == 401 or response.status_code == 403:
                    raise APIException("No authorization to retrieve activity list", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
                raise APIException("Unable to retrieve activity list " + str(response) + " " + response.text)
            data = response.json()
            allItems += data["Data"]["items"]
            # "next" is empty or absent on the last page.
            if not exhaustive or "next" not in data["Data"] or data["Data"]["next"] == "":
                break
            pageUri = self.URLBase + data["Data"]["next"]
        activities = []
        exclusions = []
        for act in allItems:
            try:
                activity = self._populateActivity(act)
            except KeyError as e:
                exclusions.append(APIExcludeActivity("Missing key in activity data " + str(e), activity_id=act["URI"], user_exception=UserException(UserExceptionType.Corrupt)))
                continue
            logger.debug("\tActivity s/t " + str(activity.StartTime))
            # The activity's URI doubles as its id for later download.
            activity.ServiceData = {"ActivityID": act["URI"]}
            activities.append(activity)
        return activities, exclusions
    def _populateActivity(self, rawRecord):
        ''' Populate the 1st level of the activity object with all details required for UID from pulsstory API data '''
        activity = UploadedActivity()
        # can stay local + naive here, recipient services can calculate TZ as required
        activity.Name = rawRecord["Name"] if "Name" in rawRecord else None
        activity.StartTime = datetime.strptime(rawRecord["StartTime"], "%Y-%m-%d %H:%M:%S")
        activity.Stats.MovingTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=float(rawRecord["Duration"]))
        activity.EndTime = activity.StartTime + timedelta(seconds=float(rawRecord["Duration"]))
        activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=rawRecord["Distance"])
        # Only derive an average speed over a positive time span.
        if (activity.EndTime - activity.StartTime).total_seconds() > 0:
            activity.Stats.Speed = ActivityStatistic(ActivityStatisticUnit.KilometersPerHour, avg=activity.Stats.Distance.asUnits(ActivityStatisticUnit.Kilometers).Value / ((activity.EndTime - activity.StartTime).total_seconds() / 60 / 60))
        activity.Stats.Energy = ActivityStatistic(ActivityStatisticUnit.Kilocalories, value=rawRecord["Energy"] if "Energy" in rawRecord else None)
        # Unknown type strings keep UploadedActivity's default type.
        if rawRecord["Type"] in self._activityMappings:
            activity.Type = self._activityMappings[rawRecord["Type"]]
        activity.GPS = rawRecord["HasPath"] if "HasPath" in rawRecord else False
        # NOTE(review): Stationary mirrors "HasPoints" directly — having
        # points usually implies NOT stationary; confirm API semantics.
        activity.Stationary = rawRecord["HasPoints"] if "HasPoints" in rawRecord else True
        activity.Notes = rawRecord["Notes"] if "Notes" in rawRecord else None
        activity.Private = rawRecord["Private"] if "Private" in rawRecord else True
        activity.CalculateUID()
        return activity
    def DownloadActivity(self, serviceRecord, activity):
        """Fetch the full ride data and populate waypoints/extra stats."""
        activityID = activity.ServiceData["ActivityID"]
        response = requests.post(self.URLBase + activityID, data=self._apiData(serviceRecord))
        if response.status_code != 200:
            if response.status_code == 401 or response.status_code == 403:
                raise APIException("No authorization to download activity" + activityID, block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
            raise APIException("Unable to download activity " + activityID + " response " + str(response) + " " + response.text)
        ridedata = response.json()
        ridedata["Owner"] = serviceRecord.ExternalID
        # Guard against the API handing back somebody else's activity.
        if "UserID" in ridedata and int(ridedata["UserID"]) != int(serviceRecord.ExternalID):
            raise APIExcludeActivity("Not the user's own activity", activity_id=activityID, user_exception=UserException(UserExceptionType.Other))
        self._populateActivityWaypoints(ridedata, activity)
        if "Climb" in ridedata:
            activity.Stats.Elevation = ActivityStatistic(ActivityStatisticUnit.Meters, gain=float(ridedata["Climb"]))
        if "AvgHr" in ridedata:
            activity.Stats.HR = ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=float(ridedata["AvgHr"]))
        # An activity with at most one waypoint is treated as stationary.
        activity.Stationary = activity.CountTotalWaypoints() <= 1
        return activity
def _convertList(self, streamData, streamDataKey, rawData, listName):
timeListName = listName + "Time"
valueListName = listName + "Value"
check = timeListName is not None and timeListName in rawData
check = check and valueListName is not None and valueListName in rawData
if check:
timeList = rawData[timeListName]
valueList = rawData[valueListName]
if timeList is not None and valueList is not None:
if len(timeList) > 0:
result = list(zip(timeList, valueList))
streamData[streamDataKey] = result
    def _convertPathList(self, streamData, streamDataKey, rawData):
        """Assemble (time, {longitude, latitude[, altitude]}) pairs from the
        parallel Path* arrays into streamData[streamDataKey].

        Altitude is optional; time/longitude/latitude are all required and
        nothing is stored when any of them is missing, None, or empty.
        """
        result = []
        timeListName = "PathTime"
        longitudeListName = "LongitudePathValue"
        latitudeListName = "LatitudePathValue"
        altitudeListName = "AltitudePathValue"
        check = timeListName is not None and timeListName in rawData
        check = check and longitudeListName in rawData
        check = check and latitudeListName in rawData
        if check:
            timeList = rawData[timeListName]
            longitudeList = rawData[longitudeListName]
            latitudeList = rawData[latitudeListName]
            if altitudeListName in rawData:
                altitudeList = rawData[altitudeListName]
            else:
                altitudeList = None
            if timeList is not None and longitudeList is not None and latitudeList is not None:
                Nt = len(timeList)
                if Nt > 0:
                    # Build one point dict per sample, keyed by timestamp.
                    for n in range(Nt):
                        point = { "longitude" : longitudeList[n], "latitude": latitudeList[n] }
                        if altitudeList is not None:
                            point["altitude"] = altitudeList[n]
                        result.append((timeList[n], point))
                    streamData[streamDataKey] = result
    def _populateActivityWaypoints(self, rawData, activity):
        ''' populate the Waypoints collection from pulsstory API data '''
        # A single lap spanning the whole activity holds all waypoints.
        lap = Lap(stats=activity.Stats, startTime=activity.StartTime, endTime=activity.EndTime)
        activity.Laps = [lap]
        streamData = {}
        self._convertList(streamData, "heart_rate", rawData, "HeartRate")
        self._convertList(streamData, "distance", rawData, "Distance")
        self._convertList(streamData, "speed", rawData, "Speed")
        self._convertList(streamData, "power", rawData, "Power")
        self._convertList(streamData, "cadence", rawData, "Cadence")
        self._convertPathList(streamData, "path", rawData)
        def _addWaypoint(timestamp, path=None, heart_rate=None, power=None, distance=None, speed=None, cadence=None):
            # Invoked by StreamSampler once per sampled offset; timestamp is
            # seconds from the activity start.
            waypoint = Waypoint(activity.StartTime + timedelta(seconds=timestamp))
            if path:
                # (0, 0) coordinates are treated as "no GPS fix"; a zero
                # altitude is treated as unset.
                if path["latitude"] != 0 and path["longitude"] != 0:
                    waypoint.Location = Location(path["latitude"], path["longitude"], path["altitude"] if "altitude" in path and float(path["altitude"]) != 0 else None) # if you're running near sea level, well...
                waypoint.Type = WaypointType.Regular
            waypoint.HR = heart_rate
            waypoint.Distance = distance
            waypoint.Speed = speed
            waypoint.Cadence = cadence
            waypoint.Power = power
            lap.Waypoints.append(waypoint)
        StreamSampler.SampleWithCallback(_addWaypoint, streamData)
        # Classify the activity from what was actually produced.
        activity.Stationary = len(lap.Waypoints) == 0
        activity.GPS = any(wp.Location and wp.Location.Longitude is not None and wp.Location.Latitude is not None for wp in lap.Waypoints)
        if not activity.Stationary:
            lap.Waypoints[0].Type = WaypointType.Start
            lap.Waypoints[-1].Type = WaypointType.End
    def UploadActivity(self, serviceRecord, activity):
        """Upload *activity* as zipped JSON; returns the remote id."""
        # assembly dict to post to pulsstory
        uploadData = self._createUploadData(activity, False)
        uris = self._getAPIUris(serviceRecord)
        data = self._apiData(serviceRecord)
        headers={}
        jsonData = json.dumps(uploadData)
        # Ship the JSON compressed inside a zip archive to keep uploads small.
        buffer = io.BytesIO()
        with zipfile.ZipFile(buffer, 'w') as myzip:
            myzip.writestr('activity.txt', jsonData, compress_type=zipfile.ZIP_DEFLATED)
        files = {"data": buffer.getvalue()}
        response = requests.post(uris["upload_activity_zip"], data=data, files=files, headers=headers)
        if response.status_code != 200:
            if response.status_code == 401 or response.status_code == 403:
                raise APIException("No authorization to upload activity " + activity.UID, block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
            raise APIException("Unable to upload activity " + activity.UID + " response " + str(response) + " " + response.text)
        return response.json()["Id"]
def _getDuration(self, activity):
if activity.Stats.MovingTime.Value is not None:
return activity.Stats.MovingTime.asUnits(ActivityStatisticUnit.Seconds).Value
elif activity.Stats.TimerTime.Value is not None:
return activity.Stats.TimerTime.asUnits(ActivityStatisticUnit.Seconds).Value
else:
return (activity.EndTime - activity.StartTime).total_seconds()
    def _createUploadData(self, activity, auto_pause=False):
        ''' create data dict for posting to pulsstory API '''
        # NOTE(review): auto_pause is accepted but never used in this
        # version — presumably reserved for AutoPauseCalculator; verify.
        record = {}
        record["Basic"] = {
            "Name" : activity.Name,
            "Duration" : self._getDuration(activity),
            "Distance" : activity.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value,
            "StartTime": activity.StartTime.strftime("%Y-%m-%d %H:%M:%S"),
            "Type": activity.Type,
            "Energy": activity.Stats.Energy.asUnits(ActivityStatisticUnit.Kilocalories).Value,
            "Notes" : activity.Notes,
            "Private" : activity.Private,
        }
        # Parallel time/value arrays, mirroring the download format.
        waypoints = {
            "AvgHR" : activity.Stats.HR.Average,
            "HeartRateValue" : [],
            "HeartRateTime" : [],
            "CadenceValue" : [],
            "CadenceTime" : [],
            "LongitudePathValue" : [],
            "LatitudePathValue" : [],
            "AltitudePathValue" : [],
            "PathTime" : [],
            "SpeedValue" : [],
            "SpeedTime" : [],
            "PowerValue" : [],
            "PowerTime" : [],
        }
        record["Waypoints"] = waypoints;
        if activity.CountTotalWaypoints() > 1:
            flat_wps = activity.GetFlatWaypoints()
            # Timestamps are seconds relative to the first waypoint.
            anchor_ts = flat_wps[0].Timestamp
            # By default, use the provided waypoint types
            wp_type_iter = (wp.Type for wp in flat_wps)
            inPause = False
            for waypoint, waypoint_type in zip(flat_wps, wp_type_iter):
                timestamp = (waypoint.Timestamp - anchor_ts).total_seconds()
                # Collapse a run of Pause waypoints: keep the first, skip
                # the rest until a non-Pause waypoint resumes the stream.
                if not inPause and waypoint_type == WaypointType.Pause:
                    inPause = True
                elif inPause and waypoint_type == WaypointType.Pause:
                    continue
                elif inPause and waypoint_type != WaypointType.Pause:
                    inPause = False
                if waypoint.HR is not None:
                    waypoints["HeartRateTime"].append(timestamp)
                    waypoints["HeartRateValue"].append(round(waypoint.HR))
                if waypoint.Power is not None:
                    waypoints["PowerTime"].append(timestamp)
                    waypoints["PowerValue"].append(waypoint.Power)
                if waypoint.Speed is not None:
                    waypoints["SpeedTime"].append(timestamp)
                    waypoints["SpeedValue"].append(waypoint.Speed)
                if waypoint.Cadence is not None:
                    waypoints["CadenceTime"].append(timestamp)
                    waypoints["CadenceValue"].append(waypoint.Cadence)
                if waypoint.Location is not None:
                    # Path arrays stay aligned: coordinates may be None when
                    # a waypoint has a Location but no lat/long fix.
                    waypoints["PathTime"].append(timestamp)
                    if waypoint.Location.Longitude is not None and waypoint.Location.Latitude is not None:
                        waypoints["LongitudePathValue"].append(waypoint.Location.Longitude)
                        waypoints["LatitudePathValue"].append(waypoint.Location.Latitude)
                    else:
                        waypoints["LongitudePathValue"].append(None)
                        waypoints["LatitudePathValue"].append(None)
                    waypoints["AltitudePathValue"].append(waypoint.Location.Altitude)
        return record
    def DeleteCachedData(self, serviceRecord):
        # This service stores nothing in cachedb, so there is nothing to
        # purge when the user disconnects.
        # nothing cached...
        pass
|
apache-2.0
|
kaiyuanheshang/zulip
|
zerver/management/commands/query_ldap.py
|
113
|
1068
|
from __future__ import absolute_import
import sys
from django.contrib.auth import authenticate, login, get_backends
from django.core.management.base import BaseCommand
from django.conf import settings
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
# Run this on a cronjob to pick up on name changes.
def query_ldap(**options):
    # Look up the given email against every configured LDAP auth backend
    # and print the attributes Django would sync for that user.
    # (Python 2 module: print statements, not print().)
    email = options['email']
    for backend in get_backends():
        if isinstance(backend, LDAPBackend):
            ldap_attrs = _LDAPUser(backend, backend.django_to_ldap_username(email)).attrs
            if ldap_attrs is None:
                print "No such user found"
            else:
                for django_field, ldap_field in settings.AUTH_LDAP_USER_ATTR_MAP.items():
                    print "%s: %s" % (django_field, ldap_attrs[ldap_field])
class Command(BaseCommand):
    # Management command wrapper: ``manage.py query_ldap <email>``.
    def add_arguments(self, parser):
        parser.add_argument('email', metavar='<email>', type=str,
                            help="email of user to query")
    def handle(self, *args, **options):
        # Delegate straight to the module-level helper.
        query_ldap(**options)
|
apache-2.0
|
mapennell/ansible
|
v1/ansible/runner/lookup_plugins/together.py
|
174
|
2135
|
# (c) 2013, Bradley Young <young.bradley@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ansible.utils as utils
from ansible.utils import safe_eval
import ansible.errors as errors
from itertools import izip_longest
def flatten(terms):
    """Flatten one level: splice the contents of any list or tuple in
    *terms* into the output, leaving all other items untouched."""
    flattened = []
    for item in terms:
        if isinstance(item, (list, tuple)):
            flattened.extend(item)
        else:
            flattened.append(item)
    return flattened
class LookupModule(object):
    """
    Transpose a list of arrays:
    [1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
    Replace any empty spots in 2nd array with None:
    [1, 2], [3] -> [1, 3], [2, None]
    """
    def __init__(self, basedir=None, **kwargs):
        # basedir is supplied by the lookup runner; needed to resolve
        # file-relative lookup terms.
        self.basedir = basedir
    def __lookup_injects(self, terms, inject):
        # Expand each term (which may itself be a template/lookup) into a
        # concrete list using the inject variables.
        results = []
        for x in terms:
            intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
            results.append(intermediate)
        return results
    def run(self, terms, inject=None, **kwargs):
        # this code is common with 'items.py' consider moving to utils if we need it again
        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
        terms = self.__lookup_injects(terms, inject)
        my_list = terms[:]
        if len(my_list) == 0:
            raise errors.AnsibleError("with_together requires at least one element in each list")
        # izip_longest (Python 2) pads shorter lists with None; flatten
        # merges each transposed tuple back into a flat row.
        return [flatten(x) for x in izip_longest(*my_list, fillvalue=None)]
|
gpl-3.0
|
molotof/infernal-twin
|
build/reportlab/build/lib.linux-i686-2.7/reportlab/platypus/figures.py
|
29
|
18747
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/figures.py
"""This includes some demos of platypus for use in the API proposal"""
__version__=''' $Id$ '''
import os
from reportlab.lib import colors
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.utils import recursiveImport, strTypes
from reportlab.platypus import Frame
from reportlab.platypus import Flowable
from reportlab.platypus import Paragraph
from reportlab.lib.units import inch
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER
from reportlab.lib.validators import isColor
from reportlab.lib.colors import toColor
from reportlab.lib.styles import _baseFontName, _baseFontNameI
# Default caption style, used by PageFigure below.
captionStyle = ParagraphStyle('Caption', fontName=_baseFontNameI, fontSize=10, alignment=TA_CENTER)

class Figure(Flowable):
    """A fixed-size figure area plus an optional caption above or below it.

    Subclasses override drawFigure() to render the figure content.
    """
    def __init__(self, width, height, caption="",
                 captionFont=_baseFontNameI, captionSize=12,
                 background=None,
                 captionTextColor=toColor('black'),
                 captionBackColor=None,
                 border=None,
                 spaceBefore=12,
                 spaceAfter=12,
                 captionGap=None,
                 captionAlign='centre',
                 captionPosition='bottom',
                 hAlign='CENTER',
                 ):
        Flowable.__init__(self)
        self.width = width
        self.figureHeight = height
        self.caption = caption
        self.captionFont = captionFont
        self.captionSize = captionSize
        self.captionTextColor = captionTextColor
        self.captionBackColor = captionBackColor
        self.captionGap = captionGap or 0.5*captionSize
        self.captionAlign = captionAlign
        self.captionPosition = captionPosition
        self._captionData = None
        self.captionHeight = 0  # work out later
        self.background = background
        self.border = border
        self.spaceBefore = spaceBefore
        self.spaceAfter = spaceAfter
        self.hAlign = hAlign
        self._getCaptionPara()  # Larry Meyn's fix - otherwise they all get the number of the last chapter.

    def _getCaptionPara(self):
        """(Re)build self.captionPara whenever a caption setting changed.

        Built eagerly so Paragraph sequencing (e.g. numbering) stays in
        sync with the rest of the story.
        """
        caption = self.caption
        captionFont = self.captionFont
        captionSize = self.captionSize
        captionTextColor = self.captionTextColor
        captionBackColor = self.captionBackColor
        captionAlign = self.captionAlign
        captionPosition = self.captionPosition
        new_data = (caption, captionFont, captionSize, captionTextColor,
                    captionBackColor, captionAlign, captionPosition)
        if self._captionData != new_data:
            self._captionData = new_data
            if isinstance(caption, Paragraph):
                self.captionPara = caption
            elif isinstance(caption, strTypes):
                self.captionStyle = ParagraphStyle(
                    'Caption',
                    fontName=captionFont,
                    fontSize=captionSize,
                    leading=1.2*captionSize,
                    textColor=captionTextColor,
                    backColor=captionBackColor,
                    spaceBefore=self.captionGap,  # seems to be getting ignored
                    alignment=TA_LEFT if captionAlign == 'left' else TA_RIGHT if captionAlign == 'right' else TA_CENTER,
                    )
                # must build paragraph now to get sequencing in synch with rest of story
                self.captionPara = Paragraph(self.caption, self.captionStyle)
            else:
                raise ValueError('Figure caption of type %r is not a string or Paragraph' % type(caption))

    def wrap(self, availWidth, availHeight):
        """Compute (width, height) including the caption and the x-shift
        (self.dx) implied by hAlign."""
        if self.caption:
            self._getCaptionPara()
            w, h = self.captionPara.wrap(self.width, availHeight - self.figureHeight)
            self.captionHeight = h + self.captionGap
            self.height = self.captionHeight + self.figureHeight
            if w > self.width:
                self.width = w
        else:
            self.height = self.figureHeight
        if self.hAlign in ('CENTER', 'CENTRE', TA_CENTER):
            self.dx = 0.5 * (availWidth - self.width)
        elif self.hAlign in ('RIGHT', TA_RIGHT):
            self.dx = availWidth - self.width
        else:
            self.dx = 0
        return (self.width, self.height)

    def draw(self):
        # Shift per hAlign, draw background/border/figure, then the caption
        # either below (translate already moved us up) or above the figure.
        self.canv.translate(self.dx, 0)
        if self.caption and self.captionPosition == 'bottom':
            self.canv.translate(0, self.captionHeight)
        if self.background:
            self.drawBackground()
        if self.border:
            self.drawBorder()
        self.canv.saveState()
        self.drawFigure()
        self.canv.restoreState()
        if self.caption:
            if self.captionPosition == 'bottom':
                self.canv.translate(0, -self.captionHeight)
            else:
                self.canv.translate(0, self.figureHeight + self.captionGap)
            self._getCaptionPara()
            self.drawCaption()

    def drawBorder(self):
        """Stroke a rectangle around the figure area using the border's
        optional color/width/dashArray attributes."""
        canv = self.canv
        border = self.border
        bc = getattr(border, 'color', None)
        bw = getattr(border, 'width', None)
        bd = getattr(border, 'dashArray', None)
        ss = bc or bw or bd
        if ss:
            canv.saveState()
            if bc: canv.setStrokeColor(bc)
            if bw: canv.setLineWidth(bw)
            if bd: canv.setDash(bd)
        canv.rect(0, 0, self.width, self.figureHeight, fill=0, stroke=1)
        if ss:
            canv.restoreState()

    def _doBackground(self, color):
        # BUG FIX: previously this filled with self.background regardless
        # of the *color* argument, defeating the toColor() conversion done
        # in drawBackground.  Use the validated/converted color passed in.
        self.canv.saveState()
        self.canv.setFillColor(color)
        self.canv.rect(0, 0, self.width, self.figureHeight, fill=1)
        self.canv.restoreState()

    def drawBackground(self):
        """For use when using a figure on a differently coloured background.
        Allows you to specify a colour to be used as a background for the figure."""
        if isColor(self.background):
            self._doBackground(self.background)
        else:
            try:
                c = toColor(self.background)
                self._doBackground(c)
            except:
                # best-effort: an unconvertible background is silently skipped
                pass

    def drawCaption(self):
        self.captionPara.drawOn(self.canv, 0, 0)

    def drawFigure(self):
        # Subclass hook: render the figure content into the figure area.
        pass
def drawPage(canvas, x, y, width, height):
    """Draw a stylised 'sheet of paper' with a grey drop shadow at (x, y).

    (An unused ``pth = canvas.beginPath()`` local was removed — the path
    object was never populated or drawn.)
    """
    corner = 0.05*width
    # shaded backdrop offset a little
    canvas.setFillColorRGB(0.5, 0.5, 0.5)
    canvas.rect(x + corner, y - corner, width, height, stroke=0, fill=1)
    # 'sheet of paper' in light yellow
    canvas.setFillColorRGB(1, 1, 0.9)
    canvas.setLineWidth(0)
    canvas.rect(x, y, width, height, stroke=1, fill=1)
    # reset fill/stroke to black for subsequent drawing
    canvas.setFillColorRGB(0, 0, 0)
    canvas.setStrokeColorRGB(0, 0, 0)
class PageFigure(Figure):
    """Shows a blank page in a frame, and draws on that. Used in
    illustrations of how PLATYPUS works."""
    def __init__(self, background=None):
        Figure.__init__(self, 3*inch, 3*inch)
        self.caption = 'Figure 1 - a blank page'
        self.captionStyle = captionStyle
        self.background = background
    def drawVirtualPage(self):
        # Subclass hook: draw page content in "virtual page" coordinates.
        pass
    def drawFigure(self):
        # Draw the page mock-up, then map an A4-like coordinate space
        # (8.27 x 11.69 in) onto the small page before delegating.
        drawPage(self.canv, 0.625*inch, 0.25*inch, 1.75*inch, 2.5*inch)
        self.canv.translate(0.625*inch, 0.25*inch)
        self.canv.scale(1.75/8.27, 2.5/11.69)
        self.drawVirtualPage()
class PlatPropFigure1(PageFigure):
    """This shows a page with a frame on it"""
    def __init__(self):
        PageFigure.__init__(self)
        self.caption = "Figure 1 - a page with a simple frame"
    def drawVirtualPage(self):
        # NOTE(review): demo1 is neither defined nor imported in this
        # module's visible code — presumably provided elsewhere; verify
        # before relying on this class.
        demo1(self.canv)
class FlexFigure(Figure):
    """Base for a figure class with a caption. Can grow or shrink in proportion"""
    def __init__(self, width, height, caption, background=None,
                 captionFont='Helvetica-Oblique', captionSize=8,
                 captionTextColor=colors.black,
                 shrinkToFit=1,
                 growToFit=1,
                 spaceBefore=12,
                 spaceAfter=12,
                 captionGap=9,
                 captionAlign='centre',
                 captionPosition='top',
                 scaleFactor=None,
                 hAlign='CENTER',
                 border=1,
                 ):
        Figure.__init__(self, width, height, caption,
                        captionFont=captionFont,
                        captionSize=captionSize,
                        background=None,
                        captionTextColor=captionTextColor,
                        spaceBefore=spaceBefore,
                        spaceAfter=spaceAfter,
                        captionGap=captionGap,
                        captionAlign=captionAlign,
                        captionPosition=captionPosition,
                        hAlign=hAlign,
                        border=border,
                        )
        self.shrinkToFit = shrinkToFit  # if set and wrap is too tight, shrinks
        self.growToFit = growToFit  # if set and wrap is too small, grows
        self.scaleFactor = scaleFactor
        # _scaleFactor is the factor actually applied; None until _scale runs.
        self._scaleFactor = None
        self.background = background
    def _scale(self, availWidth, availHeight):
        "Rescale to fit according to the rules, but only once"
        if self._scaleFactor is None or self.width > availWidth or self.height > availHeight:
            w, h = Figure.wrap(self, availWidth, availHeight)
            captionHeight = h - self.figureHeight
            if self.scaleFactor is None:
                # scale factor None means auto: fit both width and height.
                self._scaleFactor = min(availWidth/self.width, (availHeight-captionHeight)/self.figureHeight)
            else:  # they provided a factor
                self._scaleFactor = self.scaleFactor
            # The -0.0001 keeps the scaled width strictly under the limit.
            if self._scaleFactor < 1 and self.shrinkToFit:
                self.width = self.width * self._scaleFactor - 0.0001
                self.figureHeight = self.figureHeight * self._scaleFactor
            elif self._scaleFactor > 1 and self.growToFit:
                self.width = self.width*self._scaleFactor - 0.0001
                self.figureHeight = self.figureHeight * self._scaleFactor
    def wrap(self, availWidth, availHeight):
        # Scale first, then defer to the base class measurement.
        self._scale(availWidth, availHeight)
        return Figure.wrap(self, availWidth, availHeight)
    def split(self, availWidth, availHeight):
        self._scale(availWidth, availHeight)
        return Figure.split(self, availWidth, availHeight)
class ImageFigure(FlexFigure):
    """Image with a caption below it"""
    def __init__(self, filename, caption, background=None, scaleFactor=None, hAlign='CENTER', border=None):
        # Fail fast on a missing file; size the figure from the image itself.
        assert os.path.isfile(filename), 'image file %s not found' % filename
        from reportlab.lib.utils import ImageReader
        w, h = ImageReader(filename).getSize()
        self.filename = filename
        FlexFigure.__init__(self, w, h, caption, background, scaleFactor=scaleFactor, hAlign=hAlign, border=border)
    def drawFigure(self):
        # Stretch the image over the (possibly rescaled) figure area.
        self.canv.drawImage(self.filename,
                            0, 0, self.width, self.figureHeight)
class DrawingFigure(FlexFigure):
    """Drawing with a caption below it. Clunky, scaling fails."""
    def __init__(self, modulename, classname, caption, baseDir=None, background=None):
        # Import the drawing class by name and instantiate it to get its
        # natural size.
        module = recursiveImport(modulename, baseDir)
        klass = getattr(module, classname)
        self.drawing = klass()
        FlexFigure.__init__(self,
                            self.drawing.width,
                            self.drawing.height,
                            caption,
                            background)
        self.growToFit = 1
    def drawFigure(self):
        # Apply FlexFigure's computed scale before rendering the drawing.
        self.canv.scale(self._scaleFactor, self._scaleFactor)
        self.drawing.drawOn(self.canv, 0, 0)
# PageCatcher (part of the commercial rlextra package) is optional; when it
# is absent the PageCatcher-based figure classes below are not defined.
try:
    from rlextra.pageCatcher.pageCatcher import restoreForms, storeForms, storeFormsInMemory, restoreFormsInMemory
    _hasPageCatcher = 1
except ImportError:
    _hasPageCatcher = 0
if _hasPageCatcher:
    ####################################################################
    #
    # PageCatcher plugins
    # These let you use our PageCatcher product to add figures
    # to other documents easily.
    ####################################################################
    class PageCatcherCachingMixIn:
        "Helper functions to cache pages for figures"
        def getFormName(self, pdfFileName, pageNo):
            # naming scheme works within a directory only
            dirname, filename = os.path.split(pdfFileName)
            root, ext = os.path.splitext(filename)
            return '%s_page%d' % (root, pageNo)
        def needsProcessing(self, pdfFileName, pageNo):
            "returns 1 if no forms or form is older"
            formName = self.getFormName(pdfFileName, pageNo)
            if os.path.exists(formName + '.frm'):
                # os.stat(...)[8] is st_mtime: reprocess when the PDF is
                # newer than its cached .frm form file.
                formModTime = os.stat(formName + '.frm')[8]
                pdfModTime = os.stat(pdfFileName)[8]
                return (pdfModTime > formModTime)
            else:
                return 1
        def processPDF(self, pdfFileName, pageNo):
            # Extract the requested page into a cached .frm form file.
            formName = self.getFormName(pdfFileName, pageNo)
            storeForms(pdfFileName, formName + '.frm',
                       prefix= formName + '_',
                       pagenumbers=[pageNo])
            #print 'stored %s.frm' % formName
            return formName + '.frm'
    class cachePageCatcherFigureNonA4(FlexFigure, PageCatcherCachingMixIn):
        """PageCatcher page with a caption below it. Size to be supplied."""
        # This should merge with PageFigure into one class that reuses
        # form information to determine the page orientation...
        def __init__(self, filename, pageNo, caption, width, height, background=None):
            self.dirname, self.filename = os.path.split(filename)
            if self.dirname == '':
                self.dirname = os.curdir
            self.pageNo = pageNo
            # Form name as produced by processPDF: the getFormName stem plus
            # the '_<pageNo>' suffix that storeForms appends via its prefix.
            self.formName = self.getFormName(self.filename, self.pageNo) + '_' + str(pageNo)
            FlexFigure.__init__(self, width, height, caption, background)

        def drawFigure(self):
            self.canv.saveState()
            # Load the cached form into the canvas on first use only.
            if not self.canv.hasForm(self.formName):
                restorePath = self.dirname + os.sep + self.filename
                #does the form file exist? if not, generate it.
                formFileName = self.getFormName(restorePath, self.pageNo) + '.frm'
                if self.needsProcessing(restorePath, self.pageNo):
                    #print 'preprocessing PDF %s page %s' % (restorePath, self.pageNo)
                    self.processPDF(restorePath, self.pageNo)
                names = restoreForms(formFileName, self.canv)
            self.canv.scale(self._scaleFactor, self._scaleFactor)
            self.canv.doForm(self.formName)
            self.canv.restoreState()
    class cachePageCatcherFigure(cachePageCatcherFigureNonA4):
        """PageCatcher page with a caption below it. Presumes A4, Portrait.
        This needs our commercial PageCatcher product, or you'll get a blank."""
        # Identical to the base class with the page size defaulted to A4
        # portrait in points (595 x 842).
        def __init__(self, filename, pageNo, caption, width=595, height=842, background=None):
            cachePageCatcherFigureNonA4.__init__(self, filename, pageNo, caption, width, height, background=background)
    class PageCatcherFigureNonA4(FlexFigure):
        """PageCatcher page with a caption below it. Size to be supplied."""
        # This should merge with PageFigure into one class that reuses
        # form information to determine the page orientation...
        # Class-level cache shared by all instances (filename -> (f, data)),
        # only populated when caching == 'memory'.
        _cache = {}
        def __init__(self, filename, pageNo, caption, width, height, background=None, caching=None):
            fn = self.filename = filename
            self.pageNo = pageNo
            # Flatten path separators and punctuation to underscores so the
            # result is usable as a PDF form-name prefix.
            fn = fn.replace(os.sep,'_').replace('/','_').replace('\\','_').replace('-','_').replace(':','_')
            self.prefix = fn.replace('.','_')+'_'+str(pageNo)+'_'
            self.formName = self.prefix + str(pageNo)
            self.caching = caching
            FlexFigure.__init__(self, width, height, caption, background)

        def drawFigure(self):
            # Lazily load the page into the canvas's form store on first draw.
            if not self.canv.hasForm(self.formName):
                if self.filename in self._cache:
                    f,data = self._cache[self.filename]
                else:
                    f = open(self.filename,'rb')
                    pdf = f.read()
                    f.close()
                    f, data = storeFormsInMemory(pdf, pagenumbers=[self.pageNo], prefix=self.prefix)
                    if self.caching=='memory':
                        self._cache[self.filename] = f, data
                # NOTE(review): `f` is overwritten just below, so the first
                # element of the cached tuple appears unused -- presumably only
                # `data` matters; confirm before simplifying.
                f = restoreFormsInMemory(data, self.canv)
            self.canv.saveState()
            self.canv.scale(self._scaleFactor, self._scaleFactor)
            self.canv.doForm(self.formName)
            self.canv.restoreState()
    class PageCatcherFigure(PageCatcherFigureNonA4):
        """PageCatcher page with a caption below it. Presumes A4, Portrait.
        This needs our commercial PageCatcher product, or you'll get a blank."""
        # Identical to the base class with the page size defaulted to A4
        # portrait in points (595 x 842).
        def __init__(self, filename, pageNo, caption, width=595, height=842, background=None, caching=None):
            PageCatcherFigureNonA4.__init__(self, filename, pageNo, caption, width, height, background=background, caching=caching)
def demo1(canvas):
    """Pack two large paragraphs into a bordered frame on *canvas*."""
    frame = Frame(
        2*inch,          # x
        4*inch,          # y at bottom
        4*inch,          # width
        5*inch,          # height
        showBoundary=1,  # helps us see what's going on
    )
    bodyStyle = ParagraphStyle('Body', fontName=_baseFontName, fontSize=24, leading=28, spaceBefore=6)
    story = [
        Paragraph('Spam spam spam spam. ' * 5, bodyStyle),
        Paragraph('Eggs eggs eggs. ' * 5, bodyStyle),
    ]
    # addFromList consumes flowables from the front of the list as it draws.
    frame.addFromList(story, canvas)
def test1():
    """Render one PlatPropFigure1 with a styled caption into figures.pdf."""
    canv = Canvas('figures.pdf')
    frame = Frame(inch, inch, 6*inch, 9*inch, showBoundary=1)
    fig = PlatPropFigure1()
    fig.captionTextColor = toColor('blue')
    fig.captionBackColor = toColor('lightyellow')
    frame.addFromList([fig], canv)
    canv.save()

if __name__ == '__main__':
    test1()
|
gpl-3.0
|
jankeromnes/depot_tools
|
third_party/boto/manage/propget.py
|
115
|
2498
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def get(prop, choices=None):
prompt = prop.verbose_name
if not prompt:
prompt = prop.name
if choices:
if callable(choices):
choices = choices()
else:
choices = prop.get_choices()
valid = False
while not valid:
if choices:
min = 1
max = len(choices)
for i in range(min, max+1):
value = choices[i-1]
if isinstance(value, tuple):
value = value[0]
print '[%d] %s' % (i, value)
value = raw_input('%s [%d-%d]: ' % (prompt, min, max))
try:
int_value = int(value)
value = choices[int_value-1]
if isinstance(value, tuple):
value = value[1]
valid = True
except ValueError:
print '%s is not a valid choice' % value
except IndexError:
print '%s is not within the range[%d-%d]' % (min, max)
else:
value = raw_input('%s: ' % prompt)
try:
value = prop.validate(value)
if prop.empty(value) and prop.required:
print 'A value is required'
else:
valid = True
except:
print 'Invalid value: %s' % value
return value
|
bsd-3-clause
|
egeor/libxsmm
|
samples/transpose/transpose_opentuner.py
|
1
|
6321
|
#!/usr/bin/env python
###############################################################################
# Copyright (c) 2017-2018, Intel Corporation #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# 3. Neither the name of the copyright holder nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED #
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
###############################################################################
# Hans Pabst (Intel Corp.)
###############################################################################
#
# This script is based on OpenTuner's tutorial:
# "Optimizing Block Matrix Multiplication".
#
import opentuner
from opentuner import ConfigurationManipulator
from opentuner import IntegerParameter
from opentuner import MeasurementInterface
from opentuner import Result
import time
import sys
import re
class TransposeTune(MeasurementInterface):
def manipulator(self):
"""
Define the search space by creating a
ConfigurationManipulator
"""
m_max = min(160, self.args.end)
n_max = min(160, self.args.end)
self.granularity = 1
assert(0 < self.granularity)
m_max = (m_max + self.granularity - 1) / self.granularity
n_max = (n_max + self.granularity - 1) / self.granularity
m_param = IntegerParameter("M", self.granularity, m_max)
n_param = IntegerParameter("N", self.granularity, n_max)
manipulator = ConfigurationManipulator()
manipulator.add_parameter(m_param)
manipulator.add_parameter(n_param)
return manipulator
def seed_configurations(self):
m_seed = [self.args.n, self.args.m][0 != self.args.m]
n_seed = [self.args.m, self.args.n][0 != self.args.n]
if 0 == m_seed or 0 == n_seed:
return []
else:
return [{"M": m_seed, "N": n_seed}]
    def objective(self):
        # Maximize "accuracy" (calls per second) and, as a tie-breaker,
        # minimize "size" (the tile area); run() fills both fields.
        return opentuner.search.objective.MaximizeAccuracyMinimizeSize()
def run(self, desired_result, input, limit):
"""
Compile and run a given configuration then
return performance
"""
cfg = desired_result.configuration.data
nruns = max(self.args.nruns, 1)
begin = max(self.args.begin, 2)
end = max(self.args.end, 2)
run_cmd = (
"CHECK=-1" # repeatable runs
" LIBXSMM_TRANS_M=" + str(self.granularity * cfg["M"]) +
" LIBXSMM_TRANS_N=" + str(self.granularity * cfg["N"]) +
" ./transpose.sh o" + " " + str(end) + " " + str(end) +
" " + str(end) + " " + str(end) + " " + str(nruns) +
" -" + str(begin))
run_result = self.call_program(run_cmd)
if (0 == run_result["returncode"]):
match = re.search(
"\s*duration:\s+([0-9]+(\.[0-9]*)*)",
run_result["stdout"])
assert(match is not None)
mseconds = float(match.group(1)) / nruns
assert(0 < mseconds)
frequency = 1000.0 / mseconds
kernelsize = (self.granularity**2) * cfg["M"] * cfg["N"]
return Result(time=mseconds, accuracy=frequency, size=kernelsize)
else:
sys.tracebacklimit = 0
raise RuntimeError("Execution failed for \"" + run_cmd + "\"!")
def save_final_config(self, configuration):
"""
called at the end of tuning
"""
filename = (
"transpose-" + str(max(self.args.begin, 1)) +
"_" + str(max(self.args.end, 1)) +
"_" + str(max(self.args.nruns, 1)) +
time.strftime("-%Y%m%d-%H%M%S") + ".json")
print("Optimal block size written to " + filename +
": ", configuration.data)
self.manipulator().save_to_file(configuration.data, filename)
if __name__ == "__main__":
    # Extend OpenTuner's standard command line with the transpose problem
    # description: the size range plus an optional initial tile size seed.
    argparser = opentuner.default_argparser()
    argparser.add_argument(
        "begin", type=int,
        help="Begin of the range (min. M and N)")
    argparser.add_argument(
        "end", type=int,
        help="End of the range (max. M and N)")
    argparser.add_argument(
        "nruns", type=int, default=100, nargs='?',
        help="Number of experiments per epoch")
    argparser.add_argument(
        "m", type=int, default=0, nargs='?',
        help="Initial tile size (M)")
    argparser.add_argument(
        "n", type=int, default=0, nargs='?',
        help="Initial tile size (N)")
    TransposeTune.main(argparser.parse_args())
|
bsd-3-clause
|
StefanRijnhart/odoo
|
addons/website_forum/models/forum.py
|
1
|
37021
|
# -*- coding: utf-8 -*-
from datetime import datetime
import uuid
from werkzeug.exceptions import Forbidden
import openerp
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.website.models.website import slug
from openerp.exceptions import Warning
from openerp.osv import osv, fields
from openerp.tools import html2plaintext
from openerp.tools.translate import _
class KarmaError(Forbidden):
    """ Karma-related error, used for forum and posts. """
    # Subclasses werkzeug's Forbidden (HTTP 403), so raising it denies the
    # operation to the caller.
    pass
class Forum(osv.Model):
    """TDE TODO: set karma values for actions dynamic for a given forum

    A forum: configuration container for posts, holding the karma thresholds
    for every user action and the karma rewards generated by votes/acceptance.
    """
    _name = 'forum.forum'
    _description = 'Forums'
    _inherit = ['mail.thread', 'website.seo.metadata']

    def init(self, cr):
        """ Add forum uuid for user email validation. """
        # Create the config parameter only once, on first module install.
        forum_uuids = self.pool['ir.config_parameter'].search(cr, SUPERUSER_ID, [('key', '=', 'website_forum.uuid')])
        if not forum_uuids:
            self.pool['ir.config_parameter'].set_param(cr, SUPERUSER_ID, 'website_forum.uuid', str(uuid.uuid4()), ['base.group_system'])

    _columns = {
        'name': fields.char('Forum Name', required=True, translate=True),
        'faq': fields.html('Guidelines'),
        'description': fields.html('Description'),
        'introduction_message': fields.html('Introduction Message'),
        # Parameters of the relevancy formula (see Post._get_post_relevancy).
        'relevancy_option_first': fields.float('First Relevancy Parameter'),
        'relevancy_option_second': fields.float('Second Relevancy Parameter'),
        'default_order': fields.selection([
            ('create_date desc', 'Newest'),
            ('write_date desc', 'Last Updated'),
            ('vote_count desc', 'Most Voted'),
            ('relevancy desc', 'Relevancy'),
            ('child_count desc', 'Answered'),
        ], 'Default Order', required=True),
        'default_allow': fields.selection([('post_link', 'Link'), ('ask_question', 'Question'), ('post_discussion', 'Discussion')], 'Default Post', required=True),
        'allow_link': fields.boolean('Links', help="When clicking on the post, it redirects to an external link"),
        'allow_question': fields.boolean('Questions', help="Users can answer only once per question. Contributors can edit answers and mark the right ones."),
        'allow_discussion': fields.boolean('Discussions'),
        # karma generation
        'karma_gen_question_new': fields.integer('Asking a question'),
        'karma_gen_question_upvote': fields.integer('Question upvoted'),
        'karma_gen_question_downvote': fields.integer('Question downvoted'),
        'karma_gen_answer_upvote': fields.integer('Answer upvoted'),
        'karma_gen_answer_downvote': fields.integer('Answer downvoted'),
        'karma_gen_answer_accept': fields.integer('Accepting an answer'),
        'karma_gen_answer_accepted': fields.integer('Answer accepted'),
        'karma_gen_answer_flagged': fields.integer('Answer flagged'),
        # karma-based actions
        'karma_ask': fields.integer('Ask a new question'),
        'karma_answer': fields.integer('Answer a question'),
        'karma_edit_own': fields.integer('Edit its own posts'),
        'karma_edit_all': fields.integer('Edit all posts'),
        'karma_close_own': fields.integer('Close its own posts'),
        'karma_close_all': fields.integer('Close all posts'),
        'karma_unlink_own': fields.integer('Delete its own posts'),
        'karma_unlink_all': fields.integer('Delete all posts'),
        'karma_upvote': fields.integer('Upvote'),
        'karma_downvote': fields.integer('Downvote'),
        'karma_answer_accept_own': fields.integer('Accept an answer on its own questions'),
        'karma_answer_accept_all': fields.integer('Accept an answer to all questions'),
        'karma_editor_link_files': fields.integer('Linking files (Editor)'),
        'karma_editor_clickable_link': fields.integer('Add clickable links (Editor)'),
        'karma_comment_own': fields.integer('Comment its own posts'),
        'karma_comment_all': fields.integer('Comment all posts'),
        'karma_comment_convert_own': fields.integer('Convert its own answers to comments and vice versa'),
        'karma_comment_convert_all': fields.integer('Convert all answers to comments and vice versa'),
        'karma_comment_unlink_own': fields.integer('Unlink its own comments'),
        'karma_comment_unlink_all': fields.integer('Unlink all comments'),
        'karma_retag': fields.integer('Change question tags'),
        'karma_flag': fields.integer('Flag a post as offensive'),
    }

    def _get_default_faq(self, cr, uid, context=None):
        """ Return the module's default FAQ page contents. """
        fname = openerp.modules.get_module_resource('website_forum', 'data', 'forum_default_faq.html')
        with open(fname, 'r') as f:
            return f.read()
        # Bug fix: removed an unreachable `return False` that followed the
        # `with` block -- the `return` above always exits first.

    _defaults = {
        'default_order': 'write_date desc',
        'allow_question': True,
        'default_allow': 'ask_question',
        'allow_link': False,
        'allow_discussion': False,
        'description': 'This community is for professionals and enthusiasts of our products and services.',
        'faq': _get_default_faq,
        'karma_gen_question_new': 0,  # set to null for anti spam protection
        'introduction_message': """<h1 class="mt0">Welcome!</h1>
<p> This community is for professionals and enthusiasts of our products and services.
Share and discuss the best content and new marketing ideas,
build your professional profile and become a better marketer together.
</p>""",
        'relevancy_option_first': 0.8,
        'relevancy_option_second': 1.8,
        'karma_gen_question_upvote': 5,
        'karma_gen_question_downvote': -2,
        'karma_gen_answer_upvote': 10,
        'karma_gen_answer_downvote': -2,
        'karma_gen_answer_accept': 2,
        'karma_gen_answer_accepted': 15,
        'karma_gen_answer_flagged': -100,
        'karma_ask': 3,  # set to not null for anti spam protection
        'karma_answer': 3,  # set to not null for anti spam protection
        'karma_edit_own': 1,
        'karma_edit_all': 300,
        'karma_close_own': 100,
        'karma_close_all': 500,
        'karma_unlink_own': 500,
        'karma_unlink_all': 1000,
        'karma_upvote': 5,
        'karma_downvote': 50,
        'karma_answer_accept_own': 20,
        'karma_answer_accept_all': 500,
        'karma_editor_link_files': 20,
        'karma_editor_clickable_link': 20,
        'karma_comment_own': 3,
        'karma_comment_all': 5,
        'karma_comment_convert_own': 50,
        'karma_comment_convert_all': 500,
        'karma_comment_unlink_own': 50,
        'karma_comment_unlink_all': 500,
        'karma_retag': 75,
        'karma_flag': 500,
    }

    def create(self, cr, uid, values, context=None):
        # Suppress the automatic "created" log message in the mail thread.
        if context is None:
            context = {}
        create_context = dict(context, mail_create_nolog=True)
        return super(Forum, self).create(cr, uid, values, context=create_context)
class Post(osv.Model):
_name = 'forum.post'
_description = 'Forum Post'
_inherit = ['mail.thread', 'website.seo.metadata']
_order = "is_correct DESC, vote_count DESC, write_date DESC"
def _get_post_relevancy(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
days = (datetime.today() - datetime.strptime(post.create_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)).days
relavency = abs(post.vote_count - 1) ** post.forum_id.relevancy_option_first / ( days + 2) ** post.forum_id.relevancy_option_second
res[post.id] = relavency if (post.vote_count - 1) >= 0 else -relavency
return res
def _get_user_vote(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
vote_ids = self.pool['forum.post.vote'].search(cr, uid, [('post_id', 'in', ids), ('user_id', '=', uid)], context=context)
for vote in self.pool['forum.post.vote'].browse(cr, uid, vote_ids, context=context):
res[vote.post_id.id] = vote.vote
return res
def _get_vote_count(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
for vote in post.vote_ids:
res[post.id] += int(vote.vote)
return res
def _get_post_from_vote(self, cr, uid, ids, context=None):
result = {}
for vote in self.pool['forum.post.vote'].browse(cr, uid, ids, context=context):
result[vote.post_id.id] = True
return result.keys()
def _get_user_favourite(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
if uid in [f.id for f in post.favourite_ids]:
res[post.id] = True
return res
def _get_favorite_count(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] += len(post.favourite_ids)
return res
def _get_post_from_hierarchy(self, cr, uid, ids, context=None):
post_ids = set(ids)
for post in self.browse(cr, SUPERUSER_ID, ids, context=context):
if post.parent_id:
post_ids.add(post.parent_id.id)
return list(post_ids)
def _get_child_count(self, cr, uid, ids, field_name=False, arg={}, context=None):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
if post.parent_id:
res[post.parent_id.id] = len(post.parent_id.child_ids)
else:
res[post.id] = len(post.child_ids)
return res
def _get_uid_answered(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] = any(answer.create_uid.id == uid for answer in post.child_ids)
return res
    def _get_has_validated_answer(self, cr, uid, ids, field_name, arg, context=None):
        # True for each question that has at least one answer flagged correct.
        res = dict.fromkeys(ids, False)
        ans_ids = self.search(cr, uid, [('parent_id', 'in', ids), ('is_correct', '=', True)], context=context)
        for answer in self.browse(cr, uid, ans_ids, context=context):
            res[answer.parent_id.id] = True
        return res
def _is_self_reply(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] = post.parent_id and post.parent_id.create_uid == post.create_uid or False
return res
    def _get_post_karma_rights(self, cr, uid, ids, field_name, arg, context=None):
        # Multi-field function: for each post, compute (1) the karma threshold
        # required for every action (own vs. all depending on authorship) and
        # (2) whether the current user clears it (superuser always may).
        user = self.pool['res.users'].browse(cr, uid, uid, context=context)
        res = dict.fromkeys(ids, False)
        for post in self.browse(cr, uid, ids, context=context):
            res[post.id] = {
                'karma_ask': post.forum_id.karma_ask,
                'karma_answer': post.forum_id.karma_answer,
                # Accepting uses the *question* author's identity, not the answer's.
                'karma_accept': post.parent_id and post.parent_id.create_uid.id == uid and post.forum_id.karma_answer_accept_own or post.forum_id.karma_answer_accept_all,
                'karma_edit': post.create_uid.id == uid and post.forum_id.karma_edit_own or post.forum_id.karma_edit_all,
                'karma_close': post.create_uid.id == uid and post.forum_id.karma_close_own or post.forum_id.karma_close_all,
                'karma_unlink': post.create_uid.id == uid and post.forum_id.karma_unlink_own or post.forum_id.karma_unlink_all,
                'karma_upvote': post.forum_id.karma_upvote,
                'karma_downvote': post.forum_id.karma_downvote,
                'karma_comment': post.create_uid.id == uid and post.forum_id.karma_comment_own or post.forum_id.karma_comment_all,
                'karma_comment_convert': post.create_uid.id == uid and post.forum_id.karma_comment_convert_own or post.forum_id.karma_comment_convert_all,
            }
            res[post.id].update({
                'can_ask': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_ask'],
                'can_answer': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_answer'],
                'can_accept': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_accept'],
                'can_edit': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_edit'],
                'can_close': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_close'],
                'can_unlink': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_unlink'],
                'can_upvote': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_upvote'],
                'can_downvote': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_downvote'],
                'can_comment': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_comment'],
                'can_comment_convert': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_comment_convert'],
            })
        return res
_columns = {
'name': fields.char('Title'),
'forum_id': fields.many2one('forum.forum', 'Forum', required=True),
'content': fields.html('Content'),
'content_link': fields.char('URL', help="URL of Link Articles"),
'tag_ids': fields.many2many('forum.tag', 'forum_tag_rel', 'forum_id', 'forum_tag_id', 'Tags'),
'state': fields.selection([('active', 'Active'), ('close', 'Close'), ('offensive', 'Offensive')], 'Status'),
'views': fields.integer('Number of Views'),
'active': fields.boolean('Active'),
'type': fields.selection([('question', 'Question'), ('link', 'Article'), ('discussion', 'Discussion')], 'Type'),
'relevancy': fields.function(
_get_post_relevancy, string="Relevancy", type='float',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['vote_ids'], 10),
'forum.post.vote': (_get_post_from_vote, [], 10),
}),
'is_correct': fields.boolean('Valid Answer', help='Correct Answer or Answer on this question accepted.'),
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', ('model', '=', self._name), ('type', 'in', ['email', 'comment'])
],
string='Post Messages', help="Comments on forum post",
),
# history
'create_date': fields.datetime('Asked on', select=True, readonly=True),
'create_uid': fields.many2one('res.users', 'Created by', select=True, readonly=True),
'write_date': fields.datetime('Update on', select=True, readonly=True),
'write_uid': fields.many2one('res.users', 'Updated by', select=True, readonly=True),
# vote fields
'vote_ids': fields.one2many('forum.post.vote', 'post_id', 'Votes'),
'user_vote': fields.function(_get_user_vote, string='My Vote', type='integer'),
'vote_count': fields.function(
_get_vote_count, string="Votes", type='integer',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['vote_ids'], 10),
'forum.post.vote': (_get_post_from_vote, [], 10),
}),
# favorite fields
'favourite_ids': fields.many2many('res.users', string='Favourite'),
'user_favourite': fields.function(_get_user_favourite, string="My Favourite", type='boolean'),
'favourite_count': fields.function(
_get_favorite_count, string='Favorite Count', type='integer',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['favourite_ids'], 10),
}),
# hierarchy
'parent_id': fields.many2one('forum.post', 'Question', ondelete='cascade'),
'self_reply': fields.function(
_is_self_reply, 'Reply to own question', type='boolean',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['parent_id', 'create_uid'], 10),
}),
'child_ids': fields.one2many('forum.post', 'parent_id', 'Answers'),
'child_count': fields.function(
_get_child_count, string="Answers", type='integer',
store={
'forum.post': (_get_post_from_hierarchy, ['parent_id', 'child_ids'], 10),
}),
'uid_has_answered': fields.function(
_get_uid_answered, string='Has Answered', type='boolean',
),
'has_validated_answer': fields.function(
_get_has_validated_answer, string='Has a Validated Answered', type='boolean',
store={
'forum.post': (_get_post_from_hierarchy, ['parent_id', 'child_ids', 'is_correct'], 10),
}
),
# closing
'closed_reason_id': fields.many2one('forum.post.reason', 'Reason'),
'closed_uid': fields.many2one('res.users', 'Closed by', select=1),
'closed_date': fields.datetime('Closed on', readonly=True),
# karma
'karma_ask': fields.function(_get_post_karma_rights, string='Karma to ask', type='integer', multi='_get_post_karma_rights'),
'karma_answer': fields.function(_get_post_karma_rights, string='Karma to answer', type='integer', multi='_get_post_karma_rights'),
'karma_accept': fields.function(_get_post_karma_rights, string='Karma to accept this answer', type='integer', multi='_get_post_karma_rights'),
'karma_edit': fields.function(_get_post_karma_rights, string='Karma to edit', type='integer', multi='_get_post_karma_rights'),
'karma_close': fields.function(_get_post_karma_rights, string='Karma to close', type='integer', multi='_get_post_karma_rights'),
'karma_unlink': fields.function(_get_post_karma_rights, string='Karma to unlink', type='integer', multi='_get_post_karma_rights'),
'karma_upvote': fields.function(_get_post_karma_rights, string='Karma to upvote', type='integer', multi='_get_post_karma_rights'),
'karma_downvote': fields.function(_get_post_karma_rights, string='Karma to downvote', type='integer', multi='_get_post_karma_rights'),
'karma_comment': fields.function(_get_post_karma_rights, string='Karma to comment', type='integer', multi='_get_post_karma_rights'),
'karma_comment_convert': fields.function(_get_post_karma_rights, string='karma to convert as a comment', type='integer', multi='_get_post_karma_rights'),
# access rights
'can_ask': fields.function(_get_post_karma_rights, string='Can Ask', type='boolean', multi='_get_post_karma_rights'),
'can_answer': fields.function(_get_post_karma_rights, string='Can Answer', type='boolean', multi='_get_post_karma_rights'),
'can_accept': fields.function(_get_post_karma_rights, string='Can Accept', type='boolean', multi='_get_post_karma_rights'),
'can_edit': fields.function(_get_post_karma_rights, string='Can Edit', type='boolean', multi='_get_post_karma_rights'),
'can_close': fields.function(_get_post_karma_rights, string='Can Close', type='boolean', multi='_get_post_karma_rights'),
'can_unlink': fields.function(_get_post_karma_rights, string='Can Unlink', type='boolean', multi='_get_post_karma_rights'),
'can_upvote': fields.function(_get_post_karma_rights, string='Can Upvote', type='boolean', multi='_get_post_karma_rights'),
'can_downvote': fields.function(_get_post_karma_rights, string='Can Downvote', type='boolean', multi='_get_post_karma_rights'),
'can_comment': fields.function(_get_post_karma_rights, string='Can Comment', type='boolean', multi='_get_post_karma_rights'),
'can_comment_convert': fields.function(_get_post_karma_rights, string='Can Convert to Comment', type='boolean', multi='_get_post_karma_rights'),
}
_defaults = {
'state': 'active',
'views': 0,
'active': True,
'type': 'question',
'vote_ids': list(),
'favourite_ids': list(),
'child_ids': list(),
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
create_context = dict(context, mail_create_nolog=True)
post_id = super(Post, self).create(cr, uid, vals, context=create_context)
post = self.browse(cr, uid, post_id, context=context)
# deleted or closed questions
if post.parent_id and (post.parent_id.state == 'close' or post.parent_id.active == False):
osv.except_osv(_('Error !'), _('Posting answer on [Deleted] or [Closed] question is prohibited'))
# karma-based access
if not post.parent_id and not post.can_ask:
raise KarmaError('Not enough karma to create a new question')
elif post.parent_id and not post.can_answer:
raise KarmaError('Not enough karma to answer to a question')
# messaging and chatter
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
if post.parent_id:
body = _(
'<p>A new answer for <i>%s</i> has been posted. <a href="%s/forum/%s/question/%s">Click here to access the post.</a></p>' %
(post.parent_id.name, base_url, slug(post.parent_id.forum_id), slug(post.parent_id))
)
self.message_post(cr, uid, post.parent_id.id, subject=_('Re: %s') % post.parent_id.name, body=body, subtype='website_forum.mt_answer_new', context=context)
else:
body = _(
'<p>A new question <i>%s</i> has been asked on %s. <a href="%s/forum/%s/question/%s">Click here to access the question.</a></p>' %
(post.name, post.forum_id.name, base_url, slug(post.forum_id), slug(post))
)
self.message_post(cr, uid, post_id, subject=post.name, body=body, subtype='website_forum.mt_question_new', context=context)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_question_new, context=context)
return post_id
    def write(self, cr, uid, ids, vals, context=None):
        # Guard every sensitive field behind the matching karma right, apply
        # acceptance karma, then notify followers of content edits.
        posts = self.browse(cr, uid, ids, context=context)
        if 'state' in vals:
            if vals['state'] in ['active', 'close'] and any(not post.can_close for post in posts):
                raise KarmaError('Not enough karma to close or reopen a post.')
        if 'active' in vals:
            # active toggling == soft delete / reactivate
            if any(not post.can_unlink for post in posts):
                raise KarmaError('Not enough karma to delete or reactivate a post')
        if 'is_correct' in vals:
            if any(not post.can_accept for post in posts):
                raise KarmaError('Not enough karma to accept or refuse an answer')
            # update karma except for self-acceptance
            mult = 1 if vals['is_correct'] else -1
            for post in self.browse(cr, uid, ids, context=context):
                if vals['is_correct'] != post.is_correct and post.create_uid.id != uid:
                    self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id], post.forum_id.karma_gen_answer_accepted * mult, context=context)
                    self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_answer_accept * mult, context=context)
        # Any other field change requires edit rights.
        if any(key not in ['state', 'active', 'is_correct', 'closed_uid', 'closed_date', 'closed_reason_id'] for key in vals.keys()) and any(not post.can_edit for post in posts):
            raise KarmaError('Not enough karma to edit a post.')
        res = super(Post, self).write(cr, uid, ids, vals, context=context)
        # if post content modify, notify followers
        if 'content' in vals or 'name' in vals:
            for post in posts:
                if post.parent_id:
                    body, subtype = _('Answer Edited'), 'website_forum.mt_answer_edit'
                    obj_id = post.parent_id.id
                else:
                    body, subtype = _('Question Edited'), 'website_forum.mt_question_edit'
                    obj_id = post.id
                self.message_post(cr, uid, obj_id, body=body, subtype=subtype, context=context)
        return res
def close(self, cr, uid, ids, reason_id, context=None):
    """Close question posts with the given closing reason.

    Refuses (returns False) when any targeted post is an answer,
    i.e. has a parent question.
    """
    records = self.browse(cr, uid, ids, context=context)
    if any(rec.parent_id for rec in records):
        return False
    close_vals = {
        'state': 'close',
        'closed_uid': uid,
        'closed_date': datetime.today().strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT),
        'closed_reason_id': reason_id,
    }
    return self.pool['forum.post'].write(cr, uid, ids, close_vals, context=context)
def unlink(self, cr, uid, ids, context=None):
    """Override of ``unlink`` with a karma check; reverts karma granted
    for accepted answers before deletion.

    :raises KarmaError: if the user lacks unlink rights on any post.
    """
    posts = self.browse(cr, uid, ids, context=context)
    if any(not post.can_unlink for post in posts):
        raise KarmaError('Not enough karma to unlink a post')
    # if unlinking an answer with accepted answer: remove provided karma
    for post in posts:
        if post.is_correct:
            # reverse both sides of the acceptance karma grant
            self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id], post.forum_id.karma_gen_answer_accepted * -1, context=context)
            self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_answer_accept * -1, context=context)
    return super(Post, self).unlink(cr, uid, ids, context=context)
def vote(self, cr, uid, ids, upvote=True, context=None):
    """Register an up/down vote by the current user on the given posts.

    Voting again in the same direction cancels the existing vote
    (sets it to '0'); voting the other way flips it.

    :param upvote: True for an upvote, False for a downvote.
    :return: dict with the new ``vote_count`` of the first post and the
        resulting ``user_vote`` value ('1', '0' or '-1').
    """
    Vote = self.pool['forum.post.vote']
    vote_ids = Vote.search(cr, uid, [('post_id', 'in', ids), ('user_id', '=', uid)], context=context)
    new_vote = '1' if upvote else '-1'
    voted_forum_ids = set()
    if vote_ids:
        for vote in Vote.browse(cr, uid, vote_ids, context=context):
            # toggling: an existing opposite vote becomes neutral ('0')
            if upvote:
                new_vote = '0' if vote.vote == '-1' else '1'
            else:
                new_vote = '0' if vote.vote == '1' else '-1'
            Vote.write(cr, uid, vote_ids, {'vote': new_vote}, context=context)
            voted_forum_ids.add(vote.post_id.id)
    # BUG FIX: the original code had a second nested `for post_id in ids:`
    # loop here, which created a vote on EVERY post in `ids` once per
    # not-yet-voted post (duplicate votes). Create exactly one vote per
    # post that has no existing vote from this user.
    for post_id in set(ids) - voted_forum_ids:
        Vote.create(cr, uid, {'post_id': post_id, 'vote': new_vote}, context=context)
    return {'vote_count': self._get_vote_count(cr, uid, ids, None, None, context=context)[ids[0]], 'user_vote': new_vote}
def convert_answer_to_comment(self, cr, uid, id, context=None):
    """ Tools to convert an answer (forum.post) to a comment (mail.message).
    The original post is unlinked and a new comment is posted on the question
    using the post create_uid as the comment's author.

    :return: id of the created mail.message, or False if `id` is not an
        answer (has no parent question).
    :raises KarmaError: if the user lacks the comment-convert karma.
    """
    post = self.browse(cr, SUPERUSER_ID, id, context=context)
    if not post.parent_id:
        return False
    # karma-based action check: use the post field that computed own/all value
    if not post.can_comment_convert:
        raise KarmaError('Not enough karma to convert an answer to a comment')
    # post the message
    question = post.parent_id
    values = {
        'author_id': post.create_uid.partner_id.id,
        'body': html2plaintext(post.content),
        'type': 'comment',
        'subtype': 'mail.mt_comment',
        'date': post.create_date,
    }
    # NOTE(review): dict(context, ...) raises TypeError if context is None —
    # callers appear to always pass a context; confirm.
    message_id = self.pool['forum.post'].message_post(
        cr, uid, question.id,
        context=dict(context, mail_create_nosubcribe=True),
        **values)
    # unlink the original answer, using SUPERUSER_ID to avoid karma issues
    self.pool['forum.post'].unlink(cr, SUPERUSER_ID, [post.id], context=context)
    return message_id
def convert_comment_to_answer(self, cr, uid, message_id, default=None, context=None):
    """ Tool to convert a comment (mail.message) into an answer (forum.post).
    The original comment is unlinked and a new answer from the comment's author
    is created. Nothing is done if the comment's author already answered the
    question.

    :return: id of the created forum.post, or False when the comment has no
        linked user or the author already answered the question.
    :raises KarmaError: if the user lacks the comment-convert karma.
    """
    comment = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)
    post = self.pool['forum.post'].browse(cr, uid, comment.res_id, context=context)
    user = self.pool['res.users'].browse(cr, uid, uid, context=context)
    if not comment.author_id or not comment.author_id.user_ids:  # only comment posted by users can be converted
        return False
    # karma-based action check: must check the message's author to know if own / all
    karma_convert = comment.author_id.id == user.partner_id.id and post.forum_id.karma_comment_convert_own or post.forum_id.karma_comment_convert_all
    can_convert = uid == SUPERUSER_ID or user.karma >= karma_convert
    if not can_convert:
        raise KarmaError('Not enough karma to convert a comment to an answer')
    # check the message's author has not already an answer
    question = post.parent_id if post.parent_id else post
    post_create_uid = comment.author_id.user_ids[0]
    if any(answer.create_uid.id == post_create_uid.id for answer in question.child_ids):
        return False
    # create the new post
    post_values = {
        'forum_id': question.forum_id.id,
        'content': comment.body,
        'parent_id': question.id,
    }
    # done with the author user to have create_uid correctly set
    new_post_id = self.pool['forum.post'].create(cr, post_create_uid.id, post_values, context=context)
    # delete comment
    self.pool['mail.message'].unlink(cr, SUPERUSER_ID, [comment.id], context=context)
    return new_post_id
def unlink_comment(self, cr, uid, id, message_id, context=None):
    """Delete a comment attached to post `id`, with a karma check that
    distinguishes own comments from comments of other users.

    :return: result of mail.message unlink, or False if the message does
        not belong to this forum post.
    :raises KarmaError: if the user lacks the comment-unlink karma.
    """
    comment = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)
    post = self.pool['forum.post'].browse(cr, uid, id, context=context)
    user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
    if not comment.model == 'forum.post' or not comment.res_id == id:
        return False
    # karma-based action check: must check the message's author to know if own or all
    karma_unlink = comment.author_id.id == user.partner_id.id and post.forum_id.karma_comment_unlink_own or post.forum_id.karma_comment_unlink_all
    can_unlink = uid == SUPERUSER_ID or user.karma >= karma_unlink
    if not can_unlink:
        raise KarmaError('Not enough karma to unlink a comment')
    return self.pool['mail.message'].unlink(cr, SUPERUSER_ID, [message_id], context=context)
def set_viewed(self, cr, uid, ids, context=None):
    """Increment the view counter of the given posts.

    Uses raw SQL (parameterized) to avoid triggering write access rules
    for a simple counter bump.
    """
    # ROBUSTNESS: an empty `ids` would render as invalid SQL `IN ()`.
    if ids:
        cr.execute("""UPDATE forum_post SET views = views+1 WHERE id IN %s""", (tuple(ids),))
    return True
def _get_access_link(self, cr, uid, mail, partner, context=None):
    """Build the website URL of the forum post referenced by *mail*;
    answers link to their question with an #answer- anchor."""
    post = self.pool['forum.post'].browse(cr, uid, mail.res_id, context=context)
    if post.parent_id:
        res_id = "%s#answer-%s" % (post.parent_id.id, post.id)
    else:
        res_id = post.id
    return "/forum/%s/question/%s" % (post.forum_id.id, res_id)
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, type='notification', subtype=None, context=None, **kwargs):
    """Override of ``message_post``: posting a user comment on a forum
    post requires the comment karma.

    :raises KarmaError: when posting a comment without enough karma.
    """
    if thread_id and type == 'comment':  # user comments have a restriction on karma
        # thread_id may be a single id or a list of ids; check the first one
        if isinstance(thread_id, (list, tuple)):
            post_id = thread_id[0]
        else:
            post_id = thread_id
        post = self.browse(cr, uid, post_id, context=context)
        if not post.can_comment:
            raise KarmaError('Not enough karma to comment')
    return super(Post, self).message_post(cr, uid, thread_id, type=type, subtype=subtype, context=context, **kwargs)
class PostReason(osv.Model):
    """Closing reason selectable when a forum question is closed."""
    _name = "forum.post.reason"
    _description = "Post Closing Reason"
    _order = 'name'
    _columns = {
        # translated label of the closing reason
        'name': fields.char('Post Reason', required=True, translate=True),
    }
class Vote(osv.Model):
    """One vote ('1', '0' or '-1') of a user on a forum post.

    Creating or changing a vote updates the karma of the post's author
    (the ``recipient_id``) according to forum-configured generators.
    """
    _name = 'forum.post.vote'
    _description = 'Vote'
    _columns = {
        'post_id': fields.many2one('forum.post', 'Post', ondelete='cascade', required=True),
        'user_id': fields.many2one('res.users', 'User', required=True),
        # '0' represents a cancelled (neutral) vote
        'vote': fields.selection([('1', '1'), ('-1', '-1'), ('0', '0')], 'Vote', required=True),
        'create_date': fields.datetime('Create Date', select=True, readonly=True),
        # TODO master: store these two
        'forum_id': fields.related('post_id', 'forum_id', type='many2one', relation='forum.forum', string='Forum'),
        'recipient_id': fields.related('post_id', 'create_uid', type='many2one', relation='res.users', string='To', help="The user receiving the vote"),
    }
    _defaults = {
        'user_id': lambda self, cr, uid, ctx: uid,
        'vote': lambda *args: '1',
    }

    def _get_karma_value(self, old_vote, new_vote, up_karma, down_karma):
        """Karma delta for the post author on an old_vote -> new_vote
        transition; rows are old votes, columns new votes."""
        _karma_upd = {
            '-1': {'-1': 0, '0': -1 * down_karma, '1': -1 * down_karma + up_karma},
            '0': {'-1': 1 * down_karma, '0': 0, '1': up_karma},
            '1': {'-1': -1 * up_karma + down_karma, '0': -1 * up_karma, '1': 0}
        }
        return _karma_upd[old_vote][new_vote]

    def create(self, cr, uid, vals, context=None):
        """Create a vote, forbid self-votes, check karma and apply the
        karma delta to the post author."""
        vote_id = super(Vote, self).create(cr, uid, vals, context=context)
        vote = self.browse(cr, uid, vote_id, context=context)
        # own post check
        if vote.user_id.id == vote.post_id.create_uid.id:
            raise Warning('Not allowed to vote for its own post')
        # karma check
        if vote.vote == '1' and not vote.post_id.can_upvote:
            raise KarmaError('Not enough karma to upvote.')
        elif vote.vote == '-1' and not vote.post_id.can_downvote:
            raise KarmaError('Not enough karma to downvote.')
        # karma update: answers and questions use different generators
        if vote.post_id.parent_id:
            karma_value = self._get_karma_value('0', vote.vote, vote.forum_id.karma_gen_answer_upvote, vote.forum_id.karma_gen_answer_downvote)
        else:
            karma_value = self._get_karma_value('0', vote.vote, vote.forum_id.karma_gen_question_upvote, vote.forum_id.karma_gen_question_downvote)
        self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [vote.recipient_id.id], karma_value, context=context)
        return vote_id

    def write(self, cr, uid, ids, values, context=None):
        """On vote change, re-run the self-vote/karma checks and apply
        the karma delta for the old -> new transition."""
        if 'vote' in values:
            for vote in self.browse(cr, uid, ids, context=context):
                # own post check
                if vote.user_id.id == vote.post_id.create_uid.id:
                    raise Warning('Not allowed to vote for its own post')
                # karma check (cancelling a vote needs the opposite right)
                if (values['vote'] == '1' or vote.vote == '-1' and values['vote'] == '0') and not vote.post_id.can_upvote:
                    raise KarmaError('Not enough karma to upvote.')
                elif (values['vote'] == '-1' or vote.vote == '1' and values['vote'] == '0') and not vote.post_id.can_downvote:
                    raise KarmaError('Not enough karma to downvote.')
                # karma update
                if vote.post_id.parent_id:
                    karma_value = self._get_karma_value(vote.vote, values['vote'], vote.forum_id.karma_gen_answer_upvote, vote.forum_id.karma_gen_answer_downvote)
                else:
                    karma_value = self._get_karma_value(vote.vote, values['vote'], vote.forum_id.karma_gen_question_upvote, vote.forum_id.karma_gen_question_downvote)
                self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [vote.recipient_id.id], karma_value, context=context)
        res = super(Vote, self).write(cr, uid, ids, values, context=context)
        return res
class Tags(osv.Model):
    """Forum tag; posts_count is a stored function field recomputed when
    a post's tag_ids change."""
    _name = "forum.tag"
    _description = "Tag"
    _inherit = ['website.seo.metadata']

    def _get_posts_count(self, cr, uid, ids, field_name, arg, context=None):
        # one search_count per tag id
        return dict((tag_id, self.pool['forum.post'].search_count(cr, uid, [('tag_ids', 'in', tag_id)], context=context)) for tag_id in ids)

    def _get_tag_from_post(self, cr, uid, ids, context=None):
        # store trigger: map changed post ids to their tag ids
        return list(set(
            [tag.id for post in self.pool['forum.post'].browse(cr, SUPERUSER_ID, ids, context=context) for tag in post.tag_ids]
        ))

    _columns = {
        'name': fields.char('Name', required=True),
        'forum_id': fields.many2one('forum.forum', 'Forum', required=True),
        'post_ids': fields.many2many('forum.post', 'forum_tag_rel', 'tag_id', 'post_id', 'Posts'),
        'posts_count': fields.function(
            _get_posts_count, type='integer', string="Number of Posts",
            store={
                'forum.post': (_get_tag_from_post, ['tag_ids'], 10),
            }
        ),
        'create_uid': fields.many2one('res.users', 'Created by', readonly=True),
    }
|
agpl-3.0
|
PluginCafe/cinema4d_py_sdk
|
scripts/takesystem/takesystem_OverrideGroupMaterials.py
|
1
|
1057
|
import c4d
from c4d import gui
#Author: s_bach
#TakeSystem Example
def main():
    """Create one take per selected material; each take gets an override
    group containing the active object with a texture tag for that
    material (UVW projection).

    NOTE(review): `doc` and `c4d` are provided by the Cinema 4D script
    execution environment — this file is not runnable standalone.
    """
    takeData = doc.GetTakeData()
    if takeData is None:
        return
    # This code creates a take with an override group for each selected material and adds the object "object" to the newly created group.
    obj = doc.GetActiveObject()
    if obj is None:
        return
    materials = doc.GetActiveMaterials()
    for material in materials:
        take = takeData.AddTake(material.GetName(), None, None)
        if take is not None:
            group = take.AddOverrideGroup()
            if group is not None:
                group.AddToGroup(takeData, obj)
                tag = group.AddTag(takeData, c4d.Ttexture, material)
                if tag is not None:
                    tag.SetParameter(c4d.TEXTURETAG_PROJECTION, c4d.TEXTURETAG_PROJECTION_UVW, c4d.DESCFLAGS_SET_0)
    # refresh the Cinema 4D UI after the scene change
    c4d.EventAdd()


if __name__=='__main__':
    main()
|
apache-2.0
|
dotcool/zulip
|
api/integrations/svn/zulip_svn_config.py
|
124
|
2363
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Change these values to configure authentication for the plugin
ZULIP_USER = "svn-bot@example.com"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# commit_notice_destination() lets you customize where commit notices
# are sent to with the full power of a Python function.
#
# It takes the following arguments:
# * path = the path to the svn repository on the server
# * commit = the commit id
#
# Returns a dictionary encoding the stream and subject to send the
# notification to (or None to send no notification).
#
# The default code below will send every commit except for the "evil-master-plan"
# and "my-super-secret-repository" repos to
# * stream "commits"
# * topic "branch_name"
def commit_notice_destination(path, commit):
    """Return the Zulip destination for a commit notice.

    :param path: path to the svn repository on the server
    :param commit: the commit id (unused by the default policy)
    :return: dict with 'stream' and 'subject' keys, or None to suppress
        the notification for excluded repositories.
    """
    excluded_repos = ("evil-master-plan", "my-super-secret-repository")
    repo = path.rsplit('/', 1)[-1]
    if repo in excluded_repos:
        # Return None for cases where you don't want a notice sent
        return None
    return dict(stream = "commits",
                subject = u"%s" % (repo,))
## If properly installed, the Zulip API should be in your import
## path, but if not, set a custom path below
ZULIP_API_PATH = None
# Set this to your Zulip server's API URI
ZULIP_SITE = "https://api.zulip.com"
|
apache-2.0
|
sharhar/USB-Thing
|
UpdaterFiles/Lib/python-3.5.1.amd64/Lib/site-packages/pip/_vendor/distlib/util.py
|
224
|
51518
|
#
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = '(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
    """Parse a requirement string like 'foo >= 1.2, < 2.0 [bar, baz]'.

    :return: a Container with attributes ``name``, ``constraints`` (list of
        (op, version) tuples or None), ``extras``, ``requirement`` (normalized
        string), ``source`` (the input) and ``url`` (for 'from <url>' direct
        references), or None when the string does not match the grammar.
    """
    def get_constraint(m):
        # extract the (operator, version) pair from a RELOP_IDENT match
        d = m.groupdict()
        return d['op'], d['vn']
    result = None
    m = REQUIREMENT_RE.match(s)
    if m:
        d = m.groupdict()
        name = d['dn']
        cons = d['c1'] or d['c2']
        if not d['diref']:
            url = None
        else:
            # direct reference
            cons = None
            url = d['diref'].strip()
        if not cons:
            cons = None
            constr = ''  # NOTE(review): unused local, kept as-is
            rs = d['dn']
        else:
            # a leading bare version means '~=' (compatible release)
            if cons[0] not in '<>!=':
                cons = '~=' + cons
            iterator = RELOP_IDENT_RE.finditer(cons)
            cons = [get_constraint(m) for m in iterator]
            rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
        if not d['ex']:
            extras = None
        else:
            extras = COMMA_RE.split(d['ex'])
        result = Container(name=name, constraints=cons, extras=extras,
                           requirement=rs, source=s, url=url)
    return result
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files.

    :param rules: iterable of (base, glob-suffix, dest) triples; a ``dest``
        of None removes previously matched entries.
    :return: dict mapping resource file paths (relative to resources_root,
        '/'-separated) to their destination paths.

    NOTE(review): relies on an ``iglob`` defined elsewhere in this module
    (not the plain ``glob.iglob`` imported as ``std_iglob``).
    """
    def get_rel_path(base, path):
        # normalizes and returns a lstripped-/-separated path
        base = base.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(base)
        return path[len(base):].lstrip('/')
    destinations = {}
    for base, suffix, dest in rules:
        prefix = os.path.join(resources_root, base)
        for abs_base in iglob(prefix):
            abs_glob = os.path.join(abs_base, suffix)
            for abs_path in iglob(abs_glob):
                resource_file = get_rel_path(resources_root, abs_path)
                if dest is None:  # remove the entry if it was here
                    destinations.pop(resource_file, None)
                else:
                    rel_path = get_rel_path(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations
def in_venv():
    """Return True when running inside a virtual environment.

    Detects both classic virtualenv (which sets ``sys.real_prefix``) and
    PEP 405 venvs (where ``sys.prefix`` differs from ``sys.base_prefix``).
    """
    if hasattr(sys, 'real_prefix'):
        # virtualenv venvs
        return True
    # PEP 405 venvs
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
def get_executable():
    """Return the normalized path of the running Python interpreter.

    Historical note: an ``__PYVENV_LAUNCHER__`` environment-variable dance
    was once needed on OS X; stub-launcher changes made ``sys.executable``
    reliable, so the plain value is returned.
    """
    return os.path.normcase(sys.executable)
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Interactively prompt until the reply's first character (lowercased)
    is in *allowed_chars*; return that character.

    :param default: value used when the user enters an empty reply.
    :param error_prompt: extra text prepended to the prompt after an
        invalid reply.
    """
    p = prompt
    while True:
        s = raw_input(p)
        p = prompt
        if not s and default:
            s = default
        if s:
            c = s[0].lower()
            if c in allowed_chars:
                break
            if error_prompt:
                p = '%c: %s\n%s' % (c, error_prompt, prompt)
    return c
def extract_by_key(d, keys):
    """Return a new dict containing only the entries of *d* whose keys are
    listed in *keys*.

    :param keys: an iterable of keys, or a single whitespace-separated
        string of keys.
    """
    if isinstance(keys, string_types):
        keys = keys.split()
    return dict((k, d[k]) for k in keys if k in d)
def read_exports(stream):
    """Read an exports mapping from *stream*.

    Tries the JSON metadata format first and falls back to the legacy
    INI-style format. Returns a dict mapping group name to a dict of
    {entry name: ExportEntry}.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        data = json.load(stream)
        result = data['extensions']['python.exports']['exports']
        for group, entries in result.items():
            for k, v in entries.items():
                # convert each 'name = prefix:suffix [flags]' string
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        # legacy format: one INI section per export group
        stream.seek(0, 0)
        cp = configparser.ConfigParser()
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            # Python 2 / old configparser spelling
            cp.readfp(stream)
        result = {}
        for key in cp.sections():
            result[key] = entries = {}
            for name, value in cp.items(key):
                s = '%s = %s' % (name, value)
                entry = get_export_entry(s)
                assert entry is not None
                #entry.dist = self
                entries[name] = entry
        return result
def write_exports(exports, stream):
    """Write an exports mapping (group -> {name: ExportEntry}) to *stream*
    in the legacy INI-style format, one section per group."""
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            # serialize as 'prefix[:suffix] [flag, ...]'
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream)
@contextlib.contextmanager
def tempdir():
    """Context manager yielding a fresh temporary directory which is
    removed (with its contents) on exit."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextlib.contextmanager
def chdir(d):
    """Context manager that switches the working directory to *d* and
    restores the previous one on exit."""
    previous = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(previous)
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Context manager that temporarily sets the process-wide default
    socket timeout and restores the previous value on exit."""
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(saved)
class cached_property(object):
    """Descriptor computing a value once per instance and caching it.

    The computed value is stored under the function's name via
    ``object.__setattr__``, so subsequent attribute access bypasses the
    descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        #for attr in ('__name__', '__module__', '__doc__'):
        #    setattr(self, attr, getattr(func, attr, None))

    def __get__(self, obj, cls=None):
        # class-level access returns the descriptor itself
        if obj is None:
            return self
        value = self.func(obj)
        # bypass any __setattr__ override on the instance's class
        object.__setattr__(obj, self.func.__name__, value)
        #obj.__dict__[self.func.__name__] = value = self.func(obj)
        return value
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    Setup-script filenames are always Unix style ('/'-separated); convert
    them to the local convention. Raises ValueError on non-Unix-ish
    systems if 'pathname' either starts or ends with a slash.
    """
    if os.sep == '/':
        return pathname
    if not pathname:
        return pathname
    if pathname.startswith('/'):
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname.endswith('/'):
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # drop any '.' components before joining
    parts = [part for part in pathname.split('/') if part != os.curdir]
    if not parts:
        return os.curdir
    return os.path.join(*parts)
class FileOperator(object):
    """Performs file-system operations with optional dry-run mode and
    optional recording of written files / created directories (so a set
    of changes can be committed or rolled back)."""

    def __init__(self, dry_run=False):
        # when True, operations are logged but not performed
        self.dry_run = dry_run
        # directories already checked/created by ensure_dir
        self.ensured = set()
        self._init_record()

    def _init_record(self):
        """Reset recording state (recording off, empty change sets)."""
        self.record = False
        self.files_written = set()
        self.dirs_created = set()

    def record_as_written(self, path):
        # only tracked while self.record is True
        if self.record:
            self.files_written.add(path)

    def newer(self, source, target):
        """Tell if the target is newer than the source.
        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.
        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise PackagingFileError if 'source' does not exist.
        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True
        return os.stat(source).st_mtime > os.stat(target).st_mtime

    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                # refuse to clobber symlinks or non-regular files
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
                if msg:
                    raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)

    def copy_stream(self, instream, outfile, encoding=None):
        """Copy the contents of *instream* to the file *outfile*;
        *encoding* selects text vs. binary writing."""
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)

    def write_binary_file(self, path, data):
        """Write *data* (bytes) to *path*, creating parent dirs as needed."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)

    def write_text_file(self, path, data, encoding):
        """Write text *data* to *path* encoded with *encoding*."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data.encode(encoding))
        self.record_as_written(path)

    def set_mode(self, bits, mask, files):
        """OR *bits* into each file's mode, then AND with *mask* (POSIX only)."""
        if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)

    # convenience: mark files executable (r-x for all, within 0o7777)
    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)

    def ensure_dir(self, path):
        """Recursively create *path* if missing; memoized via self.ensured."""
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)

    def byte_compile(self, path, optimize=False, force=False, prefix=None):
        """Byte-compile *path* into the cache location and return the
        cached file path.

        NOTE(review): when not forced and the cache is up to date,
        ``diagpath`` is referenced without being assigned — confirm
        whether this latent NameError path is reachable in practice.
        """
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
            py_compile.compile(path, dpath, diagpath, True)     # raise error
        self.record_as_written(dpath)
        return dpath

    def ensure_removed(self, path):
        """Remove a file, link or directory tree if it exists, honoring
        dry-run and keeping the recorded change sets consistent."""
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)

    def is_writable(self, path):
        """Whether *path* (or its nearest existing ancestor) is writable."""
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            if parent == path:
                break
            path = parent
        return result

    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result

    def rollback(self):
        """Undo recorded changes: delete written files, then remove the
        created directories (which should be empty, except possibly for
        __pycache__ subdirectories)."""
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)     # should fail if non-empty
        self._init_record()
def resolve(module_name, dotted_path):
    """Import *module_name* (reusing sys.modules when possible) and walk
    the attribute chain given by *dotted_path*.

    :param dotted_path: 'attr.subattr...' relative to the module, or None
        to return the module itself.
    """
    try:
        mod = sys.modules[module_name]
    except KeyError:
        mod = __import__(module_name)
    if dotted_path is None:
        return mod
    parts = dotted_path.split('.')
    obj = getattr(mod, parts[0])
    for attr in parts[1:]:
        obj = getattr(obj, attr)
    return obj
class ExportEntry(object):
    """A parsed export specification: ``name = prefix:suffix [flags]``.

    ``value`` lazily resolves ``prefix:suffix`` to the actual object.
    """
    def __init__(self, name, prefix, suffix, flags):
        self.name = name
        self.prefix = prefix
        self.suffix = suffix
        self.flags = flags

    @cached_property
    def value(self):
        # resolved on first access, then cached on the instance
        return resolve(self.prefix, self.suffix)

    def __repr__(self):
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)

    def __eq__(self, other):
        if not isinstance(other, ExportEntry):
            result = False
        else:
            result = (self.name == other.name and
                      self.prefix == other.prefix and
                      self.suffix == other.suffix and
                      self.flags == other.flags)
        return result

    # keep identity-based hashing despite the custom __eq__
    __hash__ = object.__hash__
# Matches 'name = callable [flag, flag=value, ...]' export specifications;
# the callable part is 'prefix' optionally followed by ':suffix' or dotted
# attributes.
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
                      \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
                      ''', re.VERBOSE)


def get_export_entry(specification):
    """Parse *specification* into an ExportEntry, or return None when it
    does not look like an export at all.

    :raises DistlibException: for malformed specifications (stray
        brackets, more than one ':' in the callable part).
    """
    m = ENTRY_RE.search(specification)
    if not m:
        result = None
        # brackets without a valid entry indicate a malformed spec
        if '[' in specification or ']' in specification:
            raise DistlibException('Invalid specification '
                                   '%r' % specification)
    else:
        d = m.groupdict()
        name = d['name']
        path = d['callable']
        colons = path.count(':')
        if colons == 0:
            prefix, suffix = path, None
        else:
            if colons != 1:
                raise DistlibException('Invalid specification '
                                       '%r' % specification)
            prefix, suffix = path.split(':')
        flags = d['flags']
        if flags is None:
            if '[' in specification or ']' in specification:
                raise DistlibException('Invalid specification '
                                       '%r' % specification)
            flags = []
        else:
            flags = [f.strip() for f in flags.split(',')]
        result = ExportEntry(name, prefix, suffix, flags)
    return result
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.
    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.
    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        result = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        result = os.path.expanduser('~')
    # we use 'isdir' instead of 'exists', because we want to
    # fail if there's a file with that name
    if os.path.isdir(result):
        usable = os.access(result, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', result)
    else:
        try:
            os.makedirs(result)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', result, exc_info=True)
            usable = False
    if not usable:
        # fall back to a throwaway temp directory rather than failing
        result = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', result)
    return os.path.join(result, suffix)
def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.
    The algorithm used is:
    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    drive, tail = os.path.splitdrive(os.path.abspath(path))
    if drive:
        drive = drive.replace(':', '---')
    tail = tail.replace(os.sep, '--')
    return drive + tail + '.cache'
def ensure_slash(s):
    """Return *s*, appending a trailing '/' when one is missing."""
    return s if s.endswith('/') else s + '/'
def parse_credentials(netloc):
    """Split a URL netloc of the form '[user[:password]@]host' into
    (username, password, host); absent parts are None."""
    username = password = None
    if '@' in netloc:
        credentials, netloc = netloc.split('@', 1)
        if ':' in credentials:
            username, password = credentials.split(':', 1)
        else:
            username = credentials
    return username, password, netloc
def get_process_umask():
    """Return the process umask without changing it.

    os.umask only reports the old value when setting a new one, so the
    umask is set to a scratch value and immediately restored. Not
    thread-safe: another thread could observe the scratch value.
    """
    current = os.umask(0o22)
    os.umask(current)
    return current
def is_string_sequence(seq):
    """Return True when every element of *seq* is a string.

    NOTE(review): the assert makes an empty *seq* raise AssertionError
    rather than returning True/False — confirm callers never pass an
    empty sequence.
    """
    result = True
    i = None
    for i, s in enumerate(seq):
        if not isinstance(s, string_types):
            result = False
            break
    assert i is not None
    return result
# 'name-version' where name may contain '.'/'-' separated words and the
# version is the trailing dash-separated component
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
# trailing '-pyX[.Y]' marker
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')


def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)
    Return name, version, pyver or None
    """
    pyver = None
    m = PYTHON_VERSION.search(filename)
    if m:
        pyver = m.group(1)
        filename = filename[:m.start()]
    # prefer an exact project-name prefix when one is supplied
    if project_name and len(filename) > len(project_name) + 1:
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            n = m.end()
            return filename[:n], filename[n + 1:], pyver
    m = PROJECT_NAME_AND_VERSION.match(filename)
    if m:
        return m.group(1), m.group(3), pyver
    return None
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')


def parse_name_and_version(p):
    """
    A utility method used to get name and version from a string.
    From e.g. a Provides-Dist value.
    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
    """
    m = NAME_VERSION_RE.match(p)
    if m is None:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    groups = m.groupdict()
    return groups['name'].strip().lower(), groups['ver']
def get_extras(requested, available):
    """
    Resolve a set of requested extras against the declared ones.

    '*' expands to every available extra, '-name' removes an extra from
    the result, and a literal '-' is kept as-is.  Undeclared extras are
    warned about but still honoured.
    """
    wanted = set(requested or [])
    declared = set(available or [])
    result = set()
    if '*' in wanted:
        wanted.discard('*')
        result.update(declared)
    for entry in wanted:
        if entry == '-':
            result.add(entry)
            continue
        if entry.startswith('-'):
            name = entry[1:]
            if name not in declared:
                logger.warning('undeclared extra: %s' % name)
            result.discard(name)
        else:
            if entry not in declared:
                logger.warning('undeclared extra: %s' % entry)
            result.add(entry)
    return result
#
# Extended metadata functionality
#
def _get_external_data(url):
    """
    Fetch a JSON document from *url* and return it as a dict.

    Returns an empty dict (and logs the problem) on any failure, or when
    the response is not served as application/json.
    """
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        content_type = resp.info().get('Content-Type')
        if content_type != 'application/json':
            logger.debug('Unexpected response for JSON request')
        else:
            result = json.load(codecs.getreader('utf-8')(resp))
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result
def get_project_data(name):
    """Fetch the external project metadata JSON document for *name*."""
    url = ('https://www.red-dove.com/pypi/projects/'
           '%s/%s/project.json' % (name[0].upper(), name))
    return _get_external_data(url)
def get_package_data(name, version):
    """Fetch the external package metadata JSON for *name*/*version*."""
    base = 'https://www.red-dove.com/pypi/projects/'
    url = base + '%s/%s/package-%s.json' % (name[0].upper(), name, version)
    return _get_external_data(url)
class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file system
    e.g. shared libraries. This class was moved from resources to here because it
    could be used by other modules, e.g. the wheel module.
    """
    def __init__(self, base):
        """
        Initialise an instance.
        :param base: The base directory where the cache should be located.
        """
        # we use 'isdir' instead of 'exists', because we want to
        # fail if there's a file with that name
        if not os.path.isdir(base):
            os.makedirs(base)
        # 0o77 covers the group and other permission bits: if any are
        # set, users other than the owner can access the cache.
        if (os.stat(base).st_mode & 0o77) != 0:
            logger.warning('Directory \'%s\' is not private', base)
        self.base = os.path.abspath(os.path.normpath(base))
    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)
    def clear(self):
        """
        Clear the cache.

        :return: the list of paths that could not be removed.
        """
        not_removed = []
        for fn in os.listdir(self.base):
            fn = os.path.join(self.base, fn)
            try:
                if os.path.islink(fn) or os.path.isfile(fn):
                    os.remove(fn)
                elif os.path.isdir(fn):
                    shutil.rmtree(fn)
            except Exception:
                # best-effort removal: record failures instead of raising
                not_removed.append(fn)
        return not_removed
class EventMixin(object):
    """
    A very simple publish/subscribe system.

    Subscribers are plain callables invoked as subscriber(event, *args,
    **kwargs); they are kept per-event in a deque so that they can be
    prepended as well as appended.
    """
    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        queue = self._subscribers.setdefault(event, deque())
        if append:
            queue.append(subscriber)
        else:
            queue.appendleft(subscriber)

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be removed.
        :raises ValueError: if the event has no subscribers.
        """
        if event not in self._subscribers:
            raise ValueError('No subscribers: %r' % event)
        self._subscribers[event].remove(subscriber)

    def get_subscribers(self, event):
        """
        Return an iterator for the subscribers for an event.

        :param event: The event to return subscribers for.
        """
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Publish a event and return a list of values returned by its
        subscribers.  A subscriber that raises is logged and contributes
        None to the result.

        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        result = []
        for subscriber in self.get_subscribers(event):
            try:
                value = subscriber(event, *args, **kwargs)
            except Exception:
                logger.exception('Exception during event publication')
                value = None
            result.append(value)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, result)
        return result
#
# Simple sequencing
#
class Sequencer(object):
    """
    Maintain a DAG of steps (pred -> succ edges) plus isolated nodes,
    and compute an ordering of steps leading up to a given final step.
    """
    def __init__(self):
        self._preds = {}    # succ -> set of its predecessors
        self._succs = {}    # pred -> set of its successors
        self._nodes = set() # nodes with no preds/succs
    def add_node(self, node):
        # Register an isolated node (no edges).
        self._nodes.add(node)
    def remove_node(self, node, edges=False):
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            # Drop every edge touching the node, in both directions.
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]
    def add(self, pred, succ):
        # Add an edge pred -> succ (self-loops are disallowed).
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)
    def remove(self, pred, succ):
        """Remove the edge pred -> succ; raises ValueError if absent."""
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:
            raise ValueError('%r not a successor of %r' % (succ, pred))
    def is_step(self, step):
        # A step is known if it participates in any edge or is registered.
        return (step in self._preds or step in self._succs or
                step in self._nodes)
    def get_steps(self, final):
        """Return the steps leading to *final*, dependencies first (a
        reversed breadth-first walk over predecessor edges)."""
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)
    @property
    def strong_connections(self):
        """All strongly connected components of the successor graph,
        computed with Tarjan's algorithm."""
        #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []
        graph = self._succs
        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)
            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node],index[successor])
            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []
                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node: break
                component = tuple(connected_component)
                # storing the result
                result.append(component)
        for node in graph:
            if node not in lowlinks:
                strongconnect(node)
        return result
    @property
    def dot(self):
        """GraphViz 'digraph' source text for the current graph."""
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append('    %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append('    %s;' % node)
        result.append('}')
        return '\n'.join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
# Archive suffixes understood by unarchive() below.
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
                      '.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
    """
    Unpack an archive into *dest_dir*.

    :param archive_filename: Path of the archive to unpack.
    :param dest_dir: Directory to extract into.
    :param format: One of 'zip', 'tgz', 'tbz' or 'tar'.  If None, it is
                   inferred from the filename extension.
    :param check: If true, every member path is validated against
                  directory-traversal escapes before extraction.
    :raises ValueError: for an unknown format or an unsafe member path.
    """
    def check_path(path):
        # Reject archive members that would extract outside dest_dir.
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        # The len() guard avoids an IndexError when a member resolves
        # exactly to dest_dir (p[plen] would be out of range).
        if not p.startswith(dest_dir) or (len(p) > plen and p[plen] != os.sep):
            raise ValueError('path outside destination: %r' % p)

    dest_dir = os.path.abspath(dest_dir)
    plen = len(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:
            raise ValueError('Unknown format for %r' % archive_filename)
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        else:
            # Map format -> tarfile mode here, not only during extension
            # inference, so that an explicitly passed format no longer
            # hits an UnboundLocalError on 'mode'.
            if format == 'tgz':
                mode = 'r:gz'
            elif format == 'tbz':
                mode = 'r:bz2'
            elif format == 'tar':
                mode = 'r'
            else:
                raise ValueError('Unknown format for %r' % archive_filename)
            archive = tarfile.open(archive_filename, mode)
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)
    finally:
        if archive:
            archive.close()
def zip_dir(directory):
    """zip a directory tree into a BytesIO object"""
    buf = io.BytesIO()
    prefix_len = len(directory)
    with ZipFile(buf, "w") as zf:
        for root, _dirs, files in os.walk(directory):
            # archive paths are relative to *directory*
            arc_root = root[prefix_len:]
            for fname in files:
                zf.write(os.path.join(root, fname),
                         os.path.join(arc_root, fname))
    return buf
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')  # size-prefix labels used by Progress.speed
class Progress(object):
    """
    Track progress of a long-running operation: current/min/max values,
    elapsed time, and derived percentage / ETA / transfer-speed strings.
    A maxval of None means the total is unknown.
    """
    unknown = 'UNKNOWN'
    def __init__(self, minval=0, maxval=100):
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None     # set on the first update()
        self.elapsed = 0
        self.done = False
    def update(self, curval):
        # Record a new current value and refresh elapsed time.
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            self.started = now
        else:
            self.elapsed = now - self.started
    def increment(self, incr):
        # Advance the current value by a non-negative amount.
        assert incr >= 0
        self.update(self.cur + incr)
    def start(self):
        self.update(self.min)
        return self
    def stop(self):
        if self.max is not None:
            self.update(self.max)
        self.done = True
    @property
    def maximum(self):
        # The known maximum, or the string 'UNKNOWN'.
        return self.unknown if self.max is None else self.max
    @property
    def percentage(self):
        # '100 %', ' ?? %' (unknown total), or a right-aligned percent.
        if self.done:
            result = '100 %'
        elif self.max is None:
            result = ' ?? %'
        else:
            v = 100.0 * (self.cur - self.min) / (self.max - self.min)
            result = '%3d %%' % v
        return result
    def format_duration(self, duration):
        # NOTE(review): 'and' binds tighter than 'or', so this reads as
        # ((duration <= 0 and self.max is None) or self.cur == self.min)
        # -- confirm that grouping is intended.
        if (duration <= 0) and self.max is None or self.cur == self.min:
            result = '??:??:??'
        #elif duration < 1:
        #    result = '--:--:--'
        else:
            result = time.strftime('%H:%M:%S', time.gmtime(duration))
        return result
    @property
    def ETA(self):
        # 'Done: <elapsed>' when finished, otherwise 'ETA : <estimate>'
        # extrapolated linearly from progress so far.
        if self.done:
            prefix = 'Done'
            t = self.elapsed
            #import pdb; pdb.set_trace()
        else:
            prefix = 'ETA '
            if self.max is None:
                t = -1
            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                #import pdb; pdb.set_trace()
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))
    @property
    def speed(self):
        # Average rate scaled to the largest prefix < 1000 (e.g. 'KB/s');
        # 'unit' retains the last prefix reached by the loop.
        if self.elapsed == 0:
            result = 0.0
        else:
            result = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if result < 1000:
                break
            result /= 1000.0
        return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
# {a,b,c} alternation marker expanded by _iglob.
RICH_GLOB = re.compile(r'\{([^}]*)\}')
# '**' adjacent to anything other than a separator/brace is invalid.
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
# Detects an unbalanced '{' or '}' in the pattern.
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
    checks = (
        (_CHECK_RECURSIVE_GLOB,
         """invalid glob %r: recursive glob "**" must be used alone"""),
        (_CHECK_MISMATCH_SET,
         """invalid glob %r: mismatching set marker '{' or '}'"""),
    )
    for pattern, msg in checks:
        if pattern.search(path_glob):
            raise ValueError(msg % path_glob)
    return _iglob(path_glob)
def _iglob(path_glob):
    """Recursive worker for iglob: expands {a,b} sets and ** patterns."""
    pieces = RICH_GLOB.split(path_glob, 1)
    if len(pieces) > 1:
        assert len(pieces) == 3, pieces
        prefix, choices, suffix = pieces
        for choice in choices.split(','):
            for path in _iglob(''.join((prefix, choice, suffix))):
                yield path
    elif '**' not in path_glob:
        for item in std_iglob(path_glob):
            yield item
    else:
        prefix, radical = path_glob.split('**', 1)
        if prefix == '':
            prefix = '.'
        if radical == '':
            radical = '*'
        else:
            # we support both '/' and '\' separators after '**'
            radical = radical.lstrip('/')
            radical = radical.lstrip('\\')
        for path, dirnames, files in os.walk(prefix):
            path = os.path.normpath(path)
            for fn in _iglob(os.path.join(path, radical)):
                yield fn
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
    """
    HTTPSConnection that can verify the server certificate against a CA
    bundle (ca_certs) and optionally match the certificate's hostname.
    """
    ca_certs = None # set this to the path to the certs file (.pem)
    check_domain = True # only used if ca_certs is not None
    # noinspection PyPropertyAccess
    def connect(self):
        """Open the TCP socket and wrap it in SSL, verifying the peer
        certificate and hostname when configured to."""
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if getattr(self, '_tunnel_host', False):
            self.sock = sock
            self._tunnel()
        if not hasattr(ssl, 'SSLContext'):
            # For 2.x
            # require a valid cert chain only when a CA bundle was given
            if self.ca_certs:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        cert_reqs=cert_reqs,
                                        ssl_version=ssl.PROTOCOL_SSLv23,
                                        ca_certs=self.ca_certs)
        else:
            context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            context.options |= ssl.OP_NO_SSLv2
            if self.cert_file:
                context.load_cert_chain(self.cert_file, self.key_file)
            kwargs = {}
            if self.ca_certs:
                context.verify_mode = ssl.CERT_REQUIRED
                context.load_verify_locations(cafile=self.ca_certs)
            if getattr(ssl, 'HAS_SNI', False):
                kwargs['server_hostname'] = self.host
            self.sock = context.wrap_socket(sock, **kwargs)
        # verify the certificate's hostname matches, closing the socket
        # on mismatch so it cannot be (re)used
        if self.ca_certs and self.check_domain:
            try:
                match_hostname(self.sock.getpeercert(), self.host)
                logger.debug('Host verified: %s', self.host)
            except CertificateError:
                self.sock.shutdown(socket.SHUT_RDWR)
                self.sock.close()
                raise
class HTTPSHandler(BaseHTTPSHandler):
    """
    HTTPS handler which creates certificate-verifying HTTPSConnection
    instances (see above), configured with a CA bundle and optional
    hostname checking.
    """
    def __init__(self, ca_certs, check_domain=True):
        BaseHTTPSHandler.__init__(self)
        self.ca_certs = ca_certs
        self.check_domain = check_domain
    def _conn_maker(self, *args, **kwargs):
        """
        This is called to create a connection instance. Normally you'd
        pass a connection class to do_open, but it doesn't actually check for
        a class, and just expects a callable. As long as we behave just as a
        constructor would have, we should be OK. If it ever changes so that
        we *must* pass a class, we'll create an UnsafeHTTPSConnection class
        which just sets check_domain to False in the class definition, and
        choose which one to pass to do_open.
        """
        result = HTTPSConnection(*args, **kwargs)
        if self.ca_certs:
            result.ca_certs = self.ca_certs
            result.check_domain = self.check_domain
        return result
    def https_open(self, req):
        """Open the request, re-raising certificate verification errors
        as CertificateError for clearer diagnostics."""
        try:
            return self.do_open(self._conn_maker, req)
        except URLError as e:
            if 'certificate verify failed' in str(e.reason):
                raise CertificateError('Unable to verify server certificate '
                                       'for %s' % req.host)
            else:
                raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
    """
    A handler which refuses plain-HTTP requests outright, for use when
    all traffic must be HTTPS.  It inherits HTTPHandler so build_opener
    won't install a default HTTP handler alongside it.
    """
    def http_open(self, req):
        raise URLError('Unexpected HTTP request on what should be a secure '
                       'connection: %s' % req)
#
# XML-RPC with timeouts
#
# Compatibility shims: on Python 2.6 the httplib.HTTP(S) facade classes
# don't forward extra kwargs (e.g. timeout) to the connection class, so
# wrap them here for use by the Transport classes below.
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
    class HTTP(httplib.HTTP):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))
    class HTTPS(httplib.HTTPS):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
    """xmlrpclib Transport that applies a connection timeout (with a
    special path for Python 2.6, see the HTTP shim above)."""
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)
    def make_connection(self, host):
        h, eh, x509 = self.get_host_info(host)
        if _ver_info == (2, 6):
            result = HTTP(h, timeout=self.timeout)
        else:
            # cache the connection per host, mirroring the base class
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPConnection(h)
            result = self._connection[1]
        return result
class SafeTransport(xmlrpclib.SafeTransport):
    """HTTPS xmlrpclib transport that applies a connection timeout."""
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.SafeTransport.__init__(self, use_datetime)
    def make_connection(self, host):
        h, eh, kwargs = self.get_host_info(host)
        if not kwargs:
            kwargs = {}
        kwargs['timeout'] = self.timeout
        if _ver_info == (2, 6):
            # NOTE(review): uses the raw 'host' here while the 2.7+ path
            # uses the cleaned 'h' -- confirm this is intentional.
            result = HTTPS(host, None, **kwargs)
        else:
            # cache the connection per host, mirroring the base class
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPSConnection(h, None,
                                                                 **kwargs)
            result = self._connection[1]
        return result
class ServerProxy(xmlrpclib.ServerProxy):
    """xmlrpclib.ServerProxy accepting an optional 'timeout' kwarg that
    is honoured via the Transport/SafeTransport classes above."""
    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The above classes only come into play if a timeout
        # is specified
        if timeout is not None:
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            if scheme == 'https':
                tcls = SafeTransport
            else:
                tcls = Transport
            kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
    """Shared csv dialect defaults and context-manager plumbing for the
    reader/writer classes below."""
    # Native strs are required by the csv API (2.x rejects unicode here).
    defaults = {
        'delimiter': str(','),
        'quotechar': str('"'),
        'lineterminator': str('\n')
    }

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        # close the underlying stream set up by the subclass
        self.stream.close()
class CSVReader(CSVBase):
    """
    Iterate rows from a CSV source given either stream=<binary stream>
    or path=<filename>; on 2.x, byte cells are decoded to utf-8 text.
    """
    def __init__(self, **kwargs):
        if 'stream' in kwargs:
            stream = kwargs['stream']
            if sys.version_info[0] >= 3:
                # csv on 3.x needs a text stream
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)

    def __iter__(self):
        return self

    def next(self):
        row = next(self.reader)
        if sys.version_info[0] < 3:
            # normalise byte cells to text
            row = [cell if isinstance(cell, text_type)
                   else cell.decode('utf-8') for cell in row]
        return row

    __next__ = next
class CSVWriter(CSVBase):
    """Write rows to a csv file; on 2.x, text cells are encoded to
    utf-8 bytes before being handed to the csv module."""
    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        if sys.version_info[0] < 3:
            row = [cell.encode('utf-8') if isinstance(cell, text_type)
                   else cell for cell in row]
        self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
    """
    BaseConfigurator with support for instantiating objects described in
    the config dict (via a '()' key) and an 'inc://' converter that
    inlines JSON loaded from another file.
    """
    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'
    def __init__(self, config, base=None):
        super(Configurator, self).__init__(config)
        # base directory against which relative inc:// paths resolve
        self.base = base or os.getcwd()
    def configure_custom(self, config):
        """Instantiate the object described by *config*: '()' names the
        callable, '[]' supplies positional args, '.' supplies attributes
        to set afterwards, and remaining keys become keyword args."""
        def convert(o):
            # recursively convert containers and nested custom configs
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result
    def __getitem__(self, key):
        # lazily instantiate custom-config values on first access,
        # caching the result back into the config dict
        result = self.config[key]
        if isinstance(result, dict) and '()' in result:
            self.config[key] = result = self.configure_custom(result)
        return result
    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
    """
    Mixin that runs subprocesses while draining their stdout/stderr in
    reader threads, reporting progress via a callable or dots written
    to sys.stderr.
    """
    def __init__(self, verbose=False, progress=None):
        self.verbose = verbose
        self.progress = progress
    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        progress = self.progress
        verbose = self.verbose
        while True:
            s = stream.readline()
            if not s:
                break
            if progress is not None:
                progress(s, context)
            else:
                if not verbose:
                    sys.stderr.write('.')
                else:
                    sys.stderr.write(s.decode('utf-8'))
                sys.stderr.flush()
        stream.close()
    def run_command(self, cmd, **kwargs):
        """Run *cmd* to completion, streaming both output pipes through
        reader() threads; returns the finished Popen object."""
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, **kwargs)
        t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
        t1.start()
        t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
        t2.start()
        p.wait()
        t1.join()
        t2.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return p
|
apache-2.0
|
agoose77/hivesystem
|
hiveguilib/HQt/widgets/connectionHook.py
|
1
|
16335
|
# <license>
# Copyright (C) 2011 Andrea Interguglielmi, All rights reserved.
# This file is part of the coral repository downloaded from http://code.google.com/p/coral-repo.
#
# Modified for the Hive system by Sjoerd de Vries
# All modifications copyright (C) 2012 Sjoerd de Vries, All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# </license>
from __future__ import print_function, absolute_import
import copy
import weakref
from ..anyQt import QtGui, QtCore
#from ... import coralApp
#from ..._coral import ErrorObject
#from . import nodeView
class ConnectionHook(QtGui.QGraphicsItem):
    """
    Graphics item drawn on a node attribute that acts as the endpoint
    ("hook") for connections: it paints the socket, tracks the attached
    connections, and implements selection, reordering and interactive
    drag-to-connect behaviour.
    """
    def __init__(self,
                 parentAttributeUi, mode, shape, style,
                 parentItem=None, hoverText=None, orderDependent=False
                 ):
        # mode: "input"/"output"; shape: "circle"/"square";
        # style: "dot"/"dashed"/"solid"; hoverText: shown in the status
        # bar while hovering; orderDependent: allow reordering the
        # attached connections.
        if parentItem is None:  # parentItem is used by builtinUis.ContainedAttributeUiProxy
            parentItem = parentAttributeUi
        QtGui.QGraphicsItem.__init__(self, parentItem)
        # weakrefs avoid reference cycles with the owning UI objects
        self._parentNodeUi = weakref.ref(parentAttributeUi.parentNodeUi())
        self._parentAttributeUi = weakref.ref(parentAttributeUi)
        assert mode in ("input", "output"), mode
        self._mode = mode
        assert shape in ("circle", "square"), shape
        self._shape = shape
        assert style in ("dot", "dashed", "solid"), style
        self._style = style
        self._rect = QtCore.QRectF(0, 0, 12, 12)
        self._color = QtGui.QColor(200, 200, 200)
        self._brush = QtGui.QBrush(self.color())
        self._pen = QtGui.QPen(QtCore.Qt.NoPen)
        self._draggingConnection = None
        self._draggingConnectionEndHook = None
        self._connections = []
        self._hoverText = hoverText
        self._orderDependent = orderDependent
        self._mixedColor = False
        self.setFlag(QtGui.QGraphicsItem.ItemSendsScenePositionChanges, True)
        self._pen.setWidthF(1.0)
        self.setAcceptsHoverEvents(True)
        # index into self._connections, or None when nothing is selected
        self._selectedConnection = None
"""
def reparent(self, parentAttributeUi):
self._parentNodeUi = weakref.ref(parentAttributeUi.parentNodeUi())
self._parentAttributeUi = weakref.ref(parentAttributeUi)
pos = self.scenePos()
self.setParentItem(parentAttributeUi)
self.setPos(self.mapFromScene(pos))
"""
    def _tabKey(self):
        # Tab: cycle selection forward through this hook's connections.
        self._selectNextConnection()
    def _bspKey(self):
        # Backspace: cycle selection backwards.
        self._selectPrevConnection()
    def _deleteKey(self):
        """Delete the selected connection (or the only one, if exactly
        one exists), after asking the canvas for permission."""
        l = len(self._connections)
        if l == 0: return
        sel = self._selectedConnection
        if sel is None:
            if l == 1:
                nr = 0
            else:
                return  # ambiguous: several connections, none selected
        else:
            nr = self._selectedConnection
        inputConnection = self._connections[nr]
        canvas = self._parentNodeUi().scene()._hqt
        if canvas:
            # the canvas may veto the removal
            ok = canvas().gui_removes_connection(inputConnection)
            if not ok:
                return
        self.parentAttributeUi().parentNodeUi().update()
        inputConnection.deleteIt()
        self._selectConnection(None)
    def _plusKey(self):
        """Move the selected connection one slot later (wrapping)."""
        if not self._orderDependent: return
        if self._selectedConnection is None: return
        old_pos = self._selectedConnection
        new_pos = old_pos + 1
        if new_pos == len(self._connections): new_pos = 0
        self._rearrange_connection(old_pos, new_pos)
    def _minusKey(self):
        """Move the selected connection one slot earlier (wrapping)."""
        if not self._orderDependent: return
        if self._selectedConnection is None: return
        old_pos = self._selectedConnection
        new_pos = old_pos - 1
        if new_pos == -1: new_pos = len(self._connections) - 1
        self._rearrange_connection(old_pos, new_pos)
    def _numKey(self, num):
        """Move the selected connection to 1-based slot *num*, clamped
        to the last slot."""
        if not self._orderDependent: return
        if self._selectedConnection is None: return
        old_pos = self._selectedConnection
        new_pos = num - 1
        if new_pos >= len(self._connections):
            new_pos = len(self._connections) - 1
        self._rearrange_connection(old_pos, new_pos)
    def _rearrange_connection(self, old_pos, new_pos):
        """Move a connection between slots, asking the canvas first.

        The canvas is told whether the move lands 'before' or 'after'
        the connection currently at the target slot."""
        connection = self._connections[old_pos]
        mode = "before"
        new_pos2 = new_pos + 1
        if new_pos2 == len(self._connections):
            new_pos2 = new_pos
            mode = "after"
        other_connection = self._connections[new_pos2]
        ok = True
        canvas = self._parentNodeUi().scene()._hqt
        if canvas:
            ok = canvas().gui_rearranges_connection(
                connection, other_connection, mode
            )
        if not ok: return
        # remember the selected connection so it stays selected after
        # the list is reshuffled
        sel_con = self._connections[self._selectedConnection]
        connection = self._connections.pop(old_pos)
        if new_pos == len(self._connections):
            self._connections.append(connection)
        else:
            self._connections.insert(new_pos, connection)
        self._selectedConnection = self._connections.index(sel_con)
        for con in self._connections: con.updatePath()
def _selectNextConnection(self):
l = len(self._connections)
if l <= 1: return
if self._selectedConnection is None:
nr = 0
else:
nr = self._selectedConnection + 1
if nr == l: nr = 0
self._selectConnection(nr)
def _selectPrevConnection(self):
l = len(self._connections)
if l <= 1: return
if self._selectedConnection is None:
nr = l - 1
else:
nr = self._selectedConnection - 1
if nr == -1: nr = l - 1
self._selectConnection(nr)
    def _selectConnection(self, nr):
        """Select connection index *nr*; None clears the selection and
        re-activates every connection."""
        if nr is None:
            if self._selectedConnection is None: return
            for cnr, conn in enumerate(self.connections()):
                conn.setSelected(False)
                conn.setActive(True)
                conn.update()
        else:
            for cnr, conn in enumerate(self.connections()):
                conn.setSelected(cnr == nr)
                conn.update()
        self._selectedConnection = nr
    def isInput(self):
        # True for hooks that receive connections.
        return self._mode == "input"
    def isOutput(self):
        # True for hooks that originate connections.
        return self._mode == "output"
    def connections(self):
        # The live list of attached connections.
        return self._connections
    def hoverEnterEvent(self, event):
        """Take scene focus, light up all connections, and show the
        hook's hover text (if any) in the canvas status bar."""
        self.scene().setFocusedHook(self)
        self._selectedConnection = None
        for conn in self.connections():
            conn.setActive(True)
            conn.update()
        canvas = self._parentNodeUi().scene()._hqt
        if canvas:
            if self._hoverText is None:
                canvas().clear_statusbar_message()
            else:
                canvas().set_statusbar_message(self._hoverText)
    def hoverLeaveEvent(self, event):
        """Drop focus, de-highlight connections, clear the status bar."""
        self.scene().setFocusedHook(None)
        for conn in self.connections():
            conn.setActive(False)
            conn.update()
        canvas = self._parentNodeUi().scene()._hqt
        if canvas:
            canvas().clear_statusbar_message()
    def setMixedColor(self, value=True):
        # When set, paint() overlays a darker half-disc (see drawChord).
        self._mixedColor = value
    def setBorderEnabled(self, value=True):
        # Toggle the outline pen between solid and invisible.
        if value:
            self._pen.setStyle(QtCore.Qt.SolidLine)
        else:
            self._pen.setStyle(QtCore.Qt.NoPen)
    def updateToolTip(self):
        # Mirror the owning attribute UI's tooltip (empty when unset).
        tooltip = self._parentAttributeUi().toolTip()
        if tooltip is None: tooltip = ""
        self.setToolTip(tooltip)
    def itemChange(self, change, value):
        # QGraphicsItem callback: keep connection endpoints glued to the
        # hook whenever its scene position changes.
        if change == QtGui.QGraphicsItem.ItemScenePositionHasChanged:
            self.updateWorldPos()
        return value
def updateWorldPos(self):
if self._mode == "input":
for conn in self._connections:
conn.updateEndPos()
else:
for conn in self._connections:
conn.updateStartPos()
    def addConnection(self, connection):
        """Attach *connection* (must not already be attached) and
        reroute all connection paths."""
        self._selectConnection(None)
        assert connection not in self._connections
        self._connections.append(connection)
        for con in self._connections: con.updatePath()
    def connectionIndex(self, connection):
        # -> (position of *connection*, total connection count)
        return self._connections.index(connection), len(self._connections)
    def removeConnection(self, connection):
        """Detach *connection* and reroute the remaining paths."""
        self._selectConnection(None)
        self._connections.remove(connection)
        for con in self._connections: con.updatePath()
    def parentAttributeUi(self):
        # Dereference the weakref to the owning attribute UI.
        return self._parentAttributeUi()
    def parentNodeUi(self):
        # Dereference the weakref to the owning node UI.
        return self._parentNodeUi()
    def setColor(self, color):
        # Copy RGB into our own QColor; the pen gets a darker outline.
        self._color.setRgb(color.red(), color.green(), color.blue())
        self._brush.setColor(self._color)
        self._pen.setColor(self._color.darker(150))
    def color(self):
        # Return a detached copy of the current color.
        return QtGui.QColor(self._color)
    def mixedColor(self):
        return self._mixedColor
    def setColorRef(self, color):
        # Unlike setColor, this shares the caller's QColor instance.
        self._color = color
    def colorRef(self):
        return self._color
    def mousePressEvent(self, event):
        """Begin or modify a connection drag.

        Right-click during a drag inserts an interpolation point.  On an
        output hook a fresh connection drag starts; on an input hook the
        (selected) incoming connection is detached -- after canvas
        approval -- and re-dragged from its source hook.
        """
        from . import Connection
        if self._draggingConnection:
            if event.button() == QtCore.Qt.RightButton:
                mousePos = self._draggingConnection.mapFromScene(event.scenePos())
                self._draggingConnection.insertInterpoint(None, mousePos)
            event.accept()
            return
        if event.button() == QtCore.Qt.RightButton:
            event.ignore()
            return
        if self._mode == "output":
            self._selectConnection(None)
            self._draggingConnection = Connection(self)
            self._draggingConnection.setActive(False)
        elif self._mode == "input" and len(self._connections):
            # -1 picks the last connection when none is selected
            con_index = -1
            if self._selectedConnection is not None:
                con_index = self._selectedConnection
            inputConnection = self._connections[con_index]
            canvas = self._parentNodeUi().scene()._hqt
            if canvas:
                ok = canvas().gui_removes_connection(inputConnection)
                if not ok:
                    event.accept()
                    return
            self.parentAttributeUi().parentNodeUi().update()
            outHook = inputConnection.startHook()
            inputConnection.deleteIt()
            # continue dragging from the original source hook
            self._draggingConnection = Connection(outHook)
            self._draggingConnection.setActive(False)
        mousePos = self._draggingConnection.mapFromScene(event.scenePos())
        self._draggingConnection.endHook().setPos(mousePos)
        self._draggingConnection.updatePath()
    def _handleHover(self, item):
        """Synthesize a hoverEnterEvent on the first item colliding with
        *item* (the dragged connection's loose end hook)."""
        nodeHovered = None
        collidingItems = item.collidingItems(QtCore.Qt.IntersectsItemBoundingRect)
        if collidingItems:
            nodeHovered = collidingItems[0]
        if nodeHovered:
            nodeHovered.hoverEnterEvent(None)
        #elif nodeView.NodeView._lastHoveredItem:
        #    nodeView.NodeView._lastHoveredItem.hoverLeaveEvent(None)
    def mouseMoveEvent(self, event):
        # Forward pointer motion to drag() while a connection drag is
        # in progress.
        if self._draggingConnection:
            mousePos = self._draggingConnection.mapFromScene(event.scenePos())
            self.drag(mousePos)
    def drag(self, mousePos):
        """Track the pointer during a connection drag: move the loose
        end, hover-highlight whatever is underneath, and (with canvas
        approval) snap/activate onto the closest candidate hook."""
        connectionStartHook = self._draggingConnection.startHook()
        self._draggingConnection.setColor(connectionStartHook.color())
        connectionEndHook = self._draggingConnection.endHook()
        connectionEndHook.setPos(mousePos)
        self._handleHover(self._draggingConnection.endHook())
        endHook = self._draggingConnection.findClosestHook()
        self._draggingConnection.setActive(False)
        if endHook:
            ok = True
            canvas = self._parentNodeUi().scene()._hqt
            if canvas is not None:
                # ask the canvas whether this connection would be valid
                ok = canvas().gui_asks_connection(self._draggingConnection, endHook)
            if ok:
                self._draggingConnection.setActive(True)
                # snap the loose end onto the candidate hook's centre
                hookSize = endHook.boundingRect().bottomRight() / 2.0
                hookPos = self._draggingConnection.mapFromItem(endHook, hookSize.x(), hookSize.y())
                connectionEndHook.setPos(hookPos)
            else:
                self._draggingConnection.setActive(False)
        else:
            #TODO
            if QtGui.QToolTip.isVisible():
                QtGui.QToolTip.hideText()
        self._draggingConnection.updateEndPos()
    def mouseReleaseEvent(self, event):
        """Finish a connection drag on left-button release.

        If a hook is close enough, attach the dragged connection to it and
        let the canvas accept or reject the new connection (Ctrl forces it
        to be kept even when rejected); otherwise cancel the drag.
        """
        if event.button() == QtCore.Qt.LeftButton:
            if self._draggingConnection:
                endHook = self._draggingConnection.findClosestHook()
                if endHook:
                    # Replace the temporary free-floating end hook with the
                    # real target hook, discarding the placeholder item.
                    dummyEndHook = self._draggingConnection.endHook()
                    self._draggingConnection.setEndHook(endHook)
                    if dummyEndHook is not None and dummyEndHook is not endHook:
                        self.scene().removeItem(dummyEndHook)
                    hookSize = endHook.boundingRect().bottomRight() / 2.0
                    # NOTE(review): hookPos is computed but never used in this
                    # method (drag() uses the same expression to position the
                    # end) — confirm whether a setPos call went missing here.
                    hookPos = self._draggingConnection.mapFromItem(endHook, hookSize.x(), hookSize.y())
                    self._draggingConnection.setActive(True)
                    self._draggingConnection._isTempConnection = False
                    # Hand ownership of the finished connection to a local so
                    # the drag state can be cleared before notifying the canvas.
                    draggingConnection = self._draggingConnection
                    self._draggingConnection = None
                    canvas = self._parentNodeUi().scene()._hqt
                    # Holding Ctrl forces the connection even if rejected.
                    force = False
                    if event.modifiers() == QtCore.Qt.ControlModifier:
                        force = True
                    if canvas:
                        ok = canvas().gui_adds_connection(draggingConnection, force)
                        if not ok:
                            if not force:
                                draggingConnection.deleteIt()
                                self.parentAttributeUi().parentNodeUi().update()
                else:
                    self._cancelDraggingConnection()
def _cancelDraggingConnection(self):
startHook = self._draggingConnection.startHook()
self._draggingConnection.deleteIt()
self._draggingConnection = None
if self._draggingConnectionEndHook:
self._draggingConnectionEndHook = None
self.parentAttributeUi().parentNodeUi().update()
    def boundingRect(self):
        # QGraphicsItem interface: the hook's paintable area.
        return self._rect
    def paint(self, painter, option, widget):
        """Draw the hook as a circle or as a diamond (45°-rotated square),
        overlaying a darker lower half-chord when self._mixedColor is set.

        Raises ValueError for any other value of self._shape.
        """
        painter.setBrush(self._brush)
        painter.setPen(self._pen)
        if self._shape == "circle":
            painter.drawEllipse(self._rect)
        elif self._shape == "square":
            # Rotate the square about its own center and shrink it slightly
            # so the rotated corners stay inside the bounding rect.
            painter.save()
            c = self._rect.center()
            painter.translate(c)
            painter.rotate(45)
            painter.scale(0.8, 0.8)
            painter.drawRect(self._rect.translated(-c))
            painter.restore()
        else:
            raise ValueError(self._shape)
        if self._mixedColor:
            # Qt chord angles are in 1/16ths of a degree.
            painter.setBrush(painter.brush().color().darker(130))
            painter.drawChord(self._rect, 1 * 16, 180 * 16)
def deleteIt(self):
conns = list(self.connections())
for conn in conns:
conn.deleteIt()
|
bsd-2-clause
|
parag2489/Image-Quality
|
train_imageQuality_Estmn_multiPatchSmallNetwork.py
|
1
|
19578
|
import numpy as np
import pdb
import sys
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers.convolutional import Convolution1D, Convolution2D, MaxPooling2D
from keras.layers.convolutional import Convolution3D, MaxPooling3D
# from keras.layers.normalization import BatchNormalization
# from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import SGD, RMSprop, Adam
from keras.layers.core import Merge
from keras.regularizers import l2, activity_l2
import scipy
import theano
from keras.layers.convolutional import ZeroPadding2D
# from scipy import io
import h5py
from keras.utils import np_utils
import time
import cv2
import logging
from keras import callbacks
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras import backend as K
from decimal import Decimal
# --- Run configuration (module-level globals used throughout the script) ---
mySeed = sys.argv[1]  # RNG seed passed on the command line
np.random.seed(int(float(mySeed)))
doWeightLoadSaveTest = True  # sanity-check weight serialization after build
patchHeight = 23   # input patch height (pixels)
patchWidth = 31    # input patch width (pixels)
channels = 800     # input depth: feature maps per patch
learningRate = 0.005   # initial SGD learning rate (decayed in myCallback)
regularizer = 0.0005   # L2 weight-decay coefficient
initialization = "he_normal"  # weight init scheme for all layers
# leak = 1./3. # for PReLU()
Numepochs = 120        # maximum training epochs
batchSize = 20         # samples per mini-batch
validateAfterEpochs = 1
# numSamplesPerfile = 1590
# NumSamplesinValidation = 530
nb_output = 1  # single regression output (quality score)
# HDF5 data locations and log/weight output paths.
TrainFilesPath = '/media/ASUAD\pchandak/Seagate Expansion Drive/imageQuality_mulitPatchBackup_Apr23/imageQuality_HDF5Files_Apr20/hdf5Files_train/'
ValFilesPath = '/media/ASUAD\pchandak/Seagate Expansion Drive/imageQuality_mulitPatchBackup_Apr23/imageQuality_HDF5Files_Apr20/hdf5Files_val/'
TestFilesPath = '/media/ASUAD\pchandak/Seagate Expansion Drive/imageQuality_mulitPatchBackup_Apr23/imageQuality_HDF5Files_Apr20/hdf5Files_test/'
logger1Name = '/media/AccessParag/Code/DNN_imageQuality_Estmn_Apr23_consolidatedResults.txt'
logger2Name = '/media/AccessParag/Code/DNN_imageQuality_Estmn_finalResults.txt'
weightSavePath = '/media/AccessParag/Code/weights_QualityEstmn/'
class myCallback(callbacks.Callback):
    """Per-epoch bookkeeping for the quality-estimation training run.

    Relies on module-level globals: `model`, `valData`, `valLabels`,
    `testData`, `batchSize`, `weightSavePath`, `logger1`, `Numepochs`,
    `printing`. Each epoch it saves the latest weights, logs validation
    SROCC/PLCC/MAE, keeps the weights with the best SROCC+PLCC sum, and
    decays the learning rate by 0.7 every 5 epochs.
    """
    def on_epoch_begin(self, epoch, logs={}):
        # NOTE: mutable default `logs={}` mirrors the Keras-1 callback
        # signature; it is never mutated here.
        # pdb.set_trace()
        if epoch == 0:
            printing('------------------------------------- Multi-patch quality estimation started --------------------------------------',logger1)
            printing('-------------------------------------------------------------------------------------------------------------------',logger1)
            # Best combined correlation seen so far, and val_loss history.
            self.best_mean_corr = -np.inf
            self.metric = []
    def on_epoch_end(self, epoch, logs={}):
        # Always keep a copy of the most recent weights.
        model.save_weights(weightSavePath + "bestWeights_qualityEstmn_smallNetwork_latestModel.h5",overwrite=True)
        predictedScoresVal = np.ravel(model.predict(valData,batch_size=batchSize))
        # NOTE(review): test-set predictions are computed every epoch but only
        # used by the commented-out logging below.
        predictedScoresTest = np.ravel(model.predict(testData,batch_size=batchSize))
        sroccVal = scipy.stats.spearmanr(predictedScoresVal, valLabels)
        plccVal = scipy.stats.pearsonr(predictedScoresVal, valLabels)
        t_str_val = '\nSpearman corr for validation set is ' + str(sroccVal[0]) + '\nPearson corr for validation set is '+ str(plccVal[0]) + '\nMean absolute error for validation set is ' + str(np.mean(np.abs(predictedScoresVal-valLabels))) + '\n'
        printing(t_str_val,logger1)
        # sroccTest = scipy.stats.spearmanr(predictedScoresTest, testLabels)
        # plccTest = scipy.stats.pearsonr(predictedScoresTest, testLabels)
        # t_str_test = '\nSpearman corr for test set is ' + str(sroccTest[0]) + '\nPearson corr for test set is '+ str(plccTest[0]) + '\nMean absolute error for test set is ' + str(np.mean(np.abs(predictedScoresTest-testLabels))) + '\n'
        # printing(t_str_test)
        if epoch == Numepochs:
            printing('------------------------------------- Multi-patch quality estimation finished -------------------------------------',logger1)
            printing('-------------------------------------------------------------------------------------------------------------------',logger1)
        # Track the best combined correlation (SROCC + PLCC) on validation.
        mean_corr = sroccVal[0] + plccVal[0]
        if mean_corr > self.best_mean_corr:
            self.best_mean_corr = mean_corr
            model.save_weights(weightSavePath + "bestWeights_qualityEstmn_smallNetwork_bestCorr.h5",overwrite=True)
            printing("Best correlation loss model saved at Epoch " + str(epoch),logger1)
        self.metric.append(logs.get("val_loss"))
        # Step-decay: multiply the learning rate by 0.7 every 5 epochs.
        if epoch % 5 == 0:
            model.optimizer.lr.set_value(round(Decimal(0.7*model.optimizer.lr.get_value()),8))
        # learningRate = model.optimizer.lr.get_value()
        # printing("")
        # printing("The current learning rate is: " + str(learningRate))
        # if epoch > 0:
        #     # pdb.set_trace()
        #     metric_history = self.metric[-2:]
        #     metric_history_diff = np.diff(metric_history)
        #     testIncrease = np.any(metric_history_diff>=0)
        #     if testIncrease:
        #         model.optimizer.lr.set_value(round(Decimal(0.7*model.optimizer.lr.get_value()),8))
        #         learningRate = model.optimizer.lr.get_value()
        #         printing("")
        #         printing("The current learning rate is: " + str(learningRate))
def constructDNNModel(modelIndex):
    """Build and compile one of two CNN regressors for patch quality.

    modelIndex 1: two 64-filter conv layers, then two 3x128-filter conv
    blocks and two 1024-unit dense layers.  modelIndex 2: a deeper,
    thinner stack of 32/48-filter asymmetric convolutions with 400-unit
    dense layers.  Both end in a single linear output (`nb_output`) and
    are compiled with SGD + Nesterov momentum and the custom
    `linear_correlation_loss`.  Hyper-parameters come from module-level
    globals.  Trailing `# H x W` comments track the spatial size after
    each layer.
    """
    model = []
    if modelIndex == 1:
        model = Sequential()
        #
        model.add(Activation('linear',input_shape=(channels,patchHeight,patchWidth))) # 23 x 31
        model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 21 x 29
        model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 19 x 27
        model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 18 x 26
        #
        # # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        #
        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 11 x 19
        # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 2 x 6
        #
        # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        model.add(Flatten())
        # model.add(Reshape(1))
        # model.add(Dropout(0.25))
        model.add(Dense(1024, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
        model.add(Dropout(0.5))
        model.add(Dense(1024, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
        model.add(Dropout(0.5))
        model.add(Dense(nb_output, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "linear"))
        printing("Built the model")
        print("Model parameters = " + str(model.count_params()))
        # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        # Round-trip the weights through disk to catch serialization issues early.
        if doWeightLoadSaveTest:
            # pdb.set_trace()
            model.save_weights(weightSavePath + 'weightsLoadSaveTest.h5', overwrite=True)
            model.load_weights(weightSavePath + 'weightsLoadSaveTest.h5')
            printing("Weight load/save test passed...")
        # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        sgd = SGD(lr=learningRate, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss=linear_correlation_loss, optimizer=sgd)
        printing("Compilation Finished")
    elif modelIndex == 2:
        model = Sequential()
        model.add(Activation('linear',input_shape=(channels,patchHeight,patchWidth))) # 23 x 31
        model.add(Convolution2D(32, 1, 1, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 21 x 29
        model.add(Convolution2D(32, 2, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 19 x 27 22, 29
        model.add(Convolution2D(32, 3, 4, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 17 x 25 20, 26
        model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 16 x 24 19, 25
        #
        # # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        #
        model.add(Convolution2D(48, 1, 1, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 19, 25
        model.add(Convolution2D(48, 2, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 18, 23
        model.add(Convolution2D(48, 3, 4, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 16, 20
        model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 9 x 17 15, 19
        # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        model.add(Convolution2D(48, 1, 1, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 2, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 3, 4, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(MaxPooling2D(pool_size=(2,2), strides=(1,1))) # 1 x 5 11, 13
        #
        # # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        model.add(Convolution2D(48, 1, 1, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 2, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 3, 4, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(MaxPooling2D(pool_size=(2,2), strides=(1,1))) # 1 x 5 7, 7
        # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        model.add(Convolution2D(48, 1, 1, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu"))
        model.add(MaxPooling2D(pool_size=(2,2), strides=(1,1))) # 1 x 5 2, 2
        # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        model.add(Reshape((2 * 2 * 48,)))
        model.add(Dense(400, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
        model.add(Dropout(0.5))
        model.add(Dense(400, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
        model.add(Dropout(0.5))
        model.add(Dense(nb_output, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "linear"))
        printing("Built the model")
        # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        # Round-trip the weights through disk to catch serialization issues early.
        if doWeightLoadSaveTest:
            # pdb.set_trace()
            model.save_weights(weightSavePath + 'weightsLoadSaveTest.h5', overwrite=True)
            model.load_weights(weightSavePath + 'weightsLoadSaveTest.h5')
            printing("Weight load/save test passed...")
        # ------------------------------------------------------------------------------------------------------------------------------------------------ #
        sgd = SGD(lr=learningRate, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss=linear_correlation_loss, optimizer=sgd)
        printing("Compilation Finished")
    return model
def linear_correlation_loss(y_true, y_pred):
    """Loss combining mean absolute error with the Pearson correlation
    between predictions and targets, so the optimizer is pushed both
    towards small errors and towards high linear correlation."""
    eps = 1e-6  # guards against division by a zero standard deviation
    mu_true = K.mean(y_true)
    mu_pred = K.mean(y_pred)
    sigma_true = K.std(y_true) + eps
    sigma_pred = K.std(y_pred) + eps
    n = K.shape(y_true)[0]
    # Standardize both vectors, then take the sample Pearson correlation.
    z_true = (y_true - mu_true) / sigma_true
    z_pred = (y_pred - mu_pred) / sigma_pred
    corr = K.sum(z_true * z_pred) / (n - 1)
    corr = K.clip(corr, -1., 1.)
    mae = K.mean(K.abs(y_true - y_pred))
    return (1. / (0.1 + K.exp(-0.5 * K.log(mae)))) * (2 - corr)
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to append bare messages to *log_file*.

    Parameters:
        logger_name: name passed to logging.getLogger.
        log_file: path of the file that receives the messages (append mode,
            so results accumulate across runs).
        level: logging threshold, default logging.INFO.

    Returns the configured logger.  Returning it is backward compatible:
    existing callers ignore the return value and re-fetch the logger via
    logging.getLogger(logger_name).

    NOTE: calling this twice with the same name attaches a second handler
    and duplicates every message — call it once per logger.
    """
    l = logging.getLogger(logger_name)
    formatter = logging.Formatter('%(message)s')
    fileHandler = logging.FileHandler(log_file, mode='a')
    fileHandler.setFormatter(formatter)
    l.setLevel(level)
    l.addHandler(fileHandler)
    return l
def printing(str, logger=-1):
    """Print *str* to stdout and, unless *logger* is the -1 sentinel,
    also record it via logger.info.

    NOTE(review): the parameter shadows the builtin `str`; the name is kept
    so existing keyword callers stay compatible.
    """
    # Call form works under both Python 2 and Python 3; the original
    # `print str` statement is a syntax error on Python 3.
    print(str)
    if logger != -1:
        logger.info(str)
# --- Logger setup: logger1 = per-epoch results, logger2 = final results ---
setup_logger('log1',logger1Name)
setup_logger('log2',logger2Name)
logger1 = logging.getLogger('log1')
logger2 = logging.getLogger('log2')
# --- Echo the run configuration to stdout ---
printing('Parameters that will be used')
printing("---------------------------------------------------------------------------------")
printing("**Image Sizes**")
printing("Image Height : "+str(patchHeight))
printing("Image Width : "+str(patchWidth))
printing("Image Channels: "+str(channels))
printing("\n")
printing("**Network Parameters**")
printing("Learning Rate : "+str(learningRate))
printing("Regularizer : "+str(regularizer))
printing("Initialization : "+initialization)
printing("\n")
printing("**Run Variables**")
printing("Total # of epochs : "+str(Numepochs))
printing("# samples per batch : "+str(batchSize))
printing("Validate After Epochs : "+str(validateAfterEpochs))
printing("\n")
printing("**Files Path**")
printing("Trainig Files Path : "+TrainFilesPath)
printing("Valid Files Path : "+ValFilesPath)
printing("Weights Save Path : "+weightSavePath)
printing("\n")
printing("---------------------------------------------------------------------------------")
# --- Model construction and training callbacks (model index from argv[2]) ---
modelIndex = int(float(sys.argv[2]))
model = constructDNNModel(modelIndex)
# checkpointer = ModelCheckpoint(filepath = weightSavePath + "bestWeights_At_Epoch_{epoch:03d}.h5", monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
checkpointer = ModelCheckpoint(filepath = weightSavePath + "bestWeights_qualityEstmn_smallNetwork_bestLoss.h5", monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
cb = myCallback()
terminateTraining = EarlyStopping(monitor='val_loss', patience=30, verbose=1, mode='auto')
# --- Load train/val/test tensors from HDF5 (fully materialized in RAM) ---
hdfFileTrain = h5py.File(TrainFilesPath + "QualityEstmn_MultiPatchNetwork_data_Apr19.h5","r")
trainData = hdfFileTrain["data"][:]
trainLabels = hdfFileTrain["labels"][:]
# random selection to make the number of samples equal to numSamplesPerfile and/or NumSamplesinValidation
# randIndices = np.random.permutation(len(trainLabels))
# randIndices = randIndices[0:numSamplesPerfile]
# trainData = trainData[randIndices,...]
# trainLabels = trainLabels[randIndices,...]
hdfFileVal = h5py.File(ValFilesPath + "QualityEstmn_MultiPatchNetwork_data_Apr19.h5","r")
valData = hdfFileVal["data"][:]
valLabels = hdfFileVal["labels"][:]
# random selection to make the number of samples equal to numSamplesPerfile and/or NumSamplesinValidation
# randIndices = np.random.permutation(len(valLabels))
# randIndices = randIndices[0:NumSamplesinValidation]
# valData = valData[randIndices,...]
# valLabels = valLabels[randIndices,...]
hdfFileTest = h5py.File(TestFilesPath + "QualityEstmn_MultiPatchNetwork_data_Apr19.h5","r")
testData = hdfFileTest["data"][:]
testLabels = hdfFileTest["labels"][:]
# --- Train, then evaluate the best-val-loss weights on the test set ---
model.fit(trainData,trainLabels,batch_size=batchSize,nb_epoch=Numepochs,verbose=0,callbacks=[cb,checkpointer,terminateTraining],validation_data=(valData,valLabels),shuffle=True,show_accuracy=False)
# pdb.set_trace()
model.load_weights(weightSavePath + "bestWeights_qualityEstmn_smallNetwork_bestLoss.h5")
print "Best val loss weights loaded."
printing('------------------------------------- Multi-patch quality estimation started ---------------------------------------',logger2)
printing('--------------------------------------------------------------------------------------------------------------------',logger2)
printing('',logger2)
predictedTestMOS = np.ravel(model.predict(testData,batch_size=batchSize))
sroccTest = scipy.stats.spearmanr(predictedTestMOS, testLabels)
plccTest = scipy.stats.pearsonr(predictedTestMOS, testLabels)
t_str_test = '\nSpearman corr for test set is ' + str(sroccTest[0]) + '\nPearson corr for test set is '+ str(plccTest[0]) + '\nMean absolute error for test set is ' + str(np.mean(np.abs(predictedTestMOS-testLabels))) + '\n'
printing(t_str_test,logger2)
printing('',logger2)
printing('------------------------------------- Multi-patch quality estimation finished --------------------------------------',logger2)
printing('--------------------------------------------------------------------------------------------------------------------',logger2)
|
mit
|
Intel-Corporation/tensorflow
|
tensorflow/python/keras/layers/dense_attention.py
|
1
|
12205
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Attention layers that can be used in sequence DNN/CNN models.
This file follows the terminology of https://arxiv.org/abs/1706.03762 Figure 2.
Attention is formed by three tensors: Query, Key and Value.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
class BaseDenseAttention(Layer):
  """Base Attention class for Dense networks.

  This class is suitable for Dense or CNN networks, and not for RNN networks.
  Implementations of attention mechanisms should inherit from this class, and
  reuse the `_apply_scores()` method.

  Args:
    causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
      that position `i` cannot attend to positions `j > i`. This prevents the
      flow of information from the future towards the past.

  Call Arguments:
    inputs: List of the following tensors:
      * query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
      * value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
      * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
        given, will use `value` for both `key` and `value`, which is the
        most common case.
    mask: List of the following tensors:
      * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
        If given, the output will be zero at the positions where
        `mask==False`.
      * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
        If given, will apply the mask such that values at positions where
        `mask==False` do not contribute to the result.

  Output shape:
    Attention outputs of shape `[batch_size, Tq, dim]`.
  """

  def __init__(self, causal=False, **kwargs):
    super(BaseDenseAttention, self).__init__(**kwargs)
    self.causal = causal

  def _calculate_scores(self, query, key):
    """Calculates attention scores.

    Must be overridden by subclasses.

    Args:
      query: Query tensor of shape `[batch_size, Tq, dim]`.
      key: Key tensor of shape `[batch_size, Tv, dim]`.

    Returns:
      Tensor of shape `[batch_size, Tq, Tv]`.

    Raises:
      NotImplementedError: always; subclasses must implement this method.
    """
    # Bug fix: the original `return NotImplementedError` handed the exception
    # *class* back to the caller instead of signalling a missing override,
    # producing a confusing failure later in call().
    raise NotImplementedError

  def _apply_scores(self, scores, value, scores_mask=None):
    """Applies attention scores to the given value tensor.

    To use this method in your attention layer, follow the steps:

    * Use `query` tensor of shape `[batch_size, Tq, dim]` and `key` tensor of
      shape `[batch_size, Tv, dim]` to calculate the attention `scores`.
    * Pass `scores` and `value` tensors to this method. The method applies
      `scores_mask`, calculates `attention_distribution = softmax(scores)`,
      then returns `matmul(attention_distribution, value)`.
    * Apply `query_mask` and return the result.

    Args:
      scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.
      value: Value tensor of shape `[batch_size, Tv, dim]`.
      scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or
        `[batch_size, Tq, Tv]`. If given, scores at positions where
        `scores_mask==False` do not contribute to the result. It must contain
        at least one `True` value in each line along the last dimension.

    Returns:
      Tensor of shape `[batch_size, Tq, dim]`.
    """
    if scores_mask is not None:
      padding_mask = math_ops.logical_not(scores_mask)
      # Bias so padding positions do not contribute to attention distribution.
      scores -= 1.e9 * math_ops.cast(padding_mask, dtype=K.floatx())
    attention_distribution = nn.softmax(scores)
    return math_ops.matmul(attention_distribution, value)

  # TODO(b/125916026): Consider exposing a __call__ method with named args.
  def call(self, inputs, mask=None):
    self._validate_call_args(inputs=inputs, mask=mask)
    q = inputs[0]
    v = inputs[1]
    # Key defaults to value (self-attention over value).
    k = inputs[2] if len(inputs) > 2 else v
    q_mask = mask[0] if mask else None
    v_mask = mask[1] if mask else None
    scores = self._calculate_scores(query=q, key=k)
    if v_mask is not None:
      # Mask of shape [batch_size, 1, Tv].
      v_mask = array_ops.expand_dims(v_mask, axis=-2)
    if self.causal:
      # Creates a lower triangular mask, so position i cannot attend to
      # positions j>i. This prevents the flow of information from the future
      # into the past.
      scores_shape = array_ops.shape(scores)
      # causal_mask_shape = [1, Tq, Tv].
      causal_mask_shape = array_ops.concat(
          [array_ops.ones_like(scores_shape[:-2]), scores_shape[-2:]],
          axis=0)
      causal_mask = _lower_triangular_mask(causal_mask_shape)
    else:
      causal_mask = None
    scores_mask = _merge_masks(v_mask, causal_mask)
    result = self._apply_scores(scores=scores, value=v, scores_mask=scores_mask)
    if q_mask is not None:
      # Mask of shape [batch_size, Tq, 1]: zero out masked query positions.
      q_mask = array_ops.expand_dims(q_mask, axis=-1)
      result *= math_ops.cast(q_mask, dtype=result.dtype)
    return result

  def _validate_call_args(self, inputs, mask):
    """Validates arguments of the call method."""
    class_name = self.__class__.__name__
    if not isinstance(inputs, list):
      raise ValueError(
          '{} layer must be called on a list of inputs, namely [query, value] '
          'or [query, value, key].'.format(class_name))
    if len(inputs) < 2 or len(inputs) > 3:
      raise ValueError(
          '{} layer accepts inputs list of length 2 or 3, '
          'namely [query, value] or [query, value, key]. '
          'Given length: {}'.format(class_name, len(inputs)))
    if mask:
      if not isinstance(mask, list):
        raise ValueError(
            '{} layer mask must be a list, '
            'namely [query_mask, value_mask].'.format(class_name))
      if len(mask) != 2:
        raise ValueError(
            '{} layer mask must be a list of length 2, namely [query_mask, '
            'value_mask]. Given length: {}'.format(class_name, len(mask)))
@keras_export('keras.layers.Attention')
class Attention(BaseDenseAttention):
  """Dot-product attention layer, a.k.a. Luong-style attention.

  Inputs are `query` tensor of shape `[batch_size, Tq, dim]`, `value` tensor
  of shape `[batch_size, Tv, dim]` and `key` tensor of shape
  `[batch_size, Tv, dim]`. The calculation follows the steps:

  1. Calculate scores with shape `[batch_size, Tq, Tv]` as a `query`-`key` dot
     product: `scores = tf.matmul(query, key, transpose_b=True)`.
  2. Use scores to calculate a distribution with shape
     `[batch_size, Tq, Tv]`: `distribution = tf.nn.softmax(scores)`.
  3. Use `distribution` to create a linear combination of `value` with
     shape `[batch_size, Tq, dim]`:
     `return tf.matmul(distribution, value)`.

  Args:
    use_scale: If `True`, will create a scalar variable to scale the attention
      scores.
    causal: Boolean. Set to `True` for decoder self-attention. Adds a mask such
      that position `i` cannot attend to positions `j > i`. This prevents the
      flow of information from the future towards the past.

  Call Arguments:
    inputs: List of the following tensors:
      * query: Query `Tensor` of shape `[batch_size, Tq, dim]`.
      * value: Value `Tensor` of shape `[batch_size, Tv, dim]`.
      * key: Optional key `Tensor` of shape `[batch_size, Tv, dim]`. If not
        given, will use `value` for both `key` and `value`, which is the
        most common case.
    mask: List of the following tensors:
      * query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
        If given, the output will be zero at the positions where
        `mask==False`.
      * value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
        If given, will apply the mask such that values at positions where
        `mask==False` do not contribute to the result.

  Output shape:
    Attention outputs of shape `[batch_size, Tq, dim]`.

  The meaning of `query`, `value` and `key` depend on the application. In the
  case of text similarity, for example, `query` is the sequence embeddings of
  the first piece of text and `value` is the sequence embeddings of the second
  piece of text. `key` is usually the same tensor as `value`.

  Here is a code example for using `Attention` in a CNN+Attention network:

  ```python
  # Variable-length int sequences.
  query_input = tf.keras.Input(shape=(None,), dtype='int32')
  value_input = tf.keras.Input(shape=(None,), dtype='int32')

  # Embedding lookup.
  token_embedding = tf.keras.layers.Embedding(max_tokens, dimension)
  # Query embeddings of shape [batch_size, Tq, dimension].
  query_embeddings = token_embedding(query_input)
  # Value embeddings of shape [batch_size, Tv, dimension].
  value_embeddings = token_embedding(value_input)

  # CNN layer.
  cnn_layer = tf.keras.layers.Conv1D(
      filters=100,
      kernel_size=4,
      # Use 'same' padding so outputs have the same shape as inputs.
      padding='same')
  # Query encoding of shape [batch_size, Tq, filters].
  query_seq_encoding = cnn_layer(query_embeddings)
  # Value encoding of shape [batch_size, Tv, filters].
  value_seq_encoding = cnn_layer(value_embeddings)

  # Query-value attention of shape [batch_size, Tq, filters].
  query_value_attention_seq = tf.keras.layers.Attention()(
      [query_seq_encoding, value_seq_encoding])

  # Reduce over the sequence axis to produce encodings of shape
  # [batch_size, filters].
  query_encoding = tf.keras.layers.GlobalAveragePooling1D()(
      query_seq_encoding)
  query_value_attention = tf.keras.layers.GlobalAveragePooling1D()(
      query_value_attention_seq)

  # Concatenate query and document encodings to produce a DNN input layer.
  input_layer = tf.keras.layers.Concatenate()(
      [query_encoding, query_value_attention])

  # Add DNN layers, and create Model.
  # ...
  ```
  """

  def __init__(self, use_scale=False, **kwargs):
    super(Attention, self).__init__(**kwargs)
    self.use_scale = use_scale

  def build(self, input_shape):
    """Creates scale variable if use_scale==True."""
    if self.use_scale:
      self.scale = self.add_weight(
          name='scale',
          shape=(),
          initializer=init_ops.ones_initializer(),
          dtype=self.dtype,
          trainable=self.trainable)
    else:
      self.scale = None
    super(Attention, self).build(input_shape)

  def _calculate_scores(self, query, key):
    """Calculates attention scores as a query-key dot product.

    Args:
      query: Query tensor of shape `[batch_size, Tq, dim]`.
      key: Key tensor of shape `[batch_size, Tv, dim]`.

    Returns:
      Tensor of shape `[batch_size, Tq, Tv]`.
    """
    scores = math_ops.matmul(query, key, transpose_b=True)
    if self.scale is not None:
      scores *= self.scale
    return scores
def _lower_triangular_mask(shape):
  """Returns a boolean mask that is True on and below the main diagonal of
  the last two dimensions."""
  ones = array_ops.ones(shape=shape, dtype=dtypes.int32)
  # Cumulative sums over a ones tensor yield 1-based row/column indices.
  rows = math_ops.cumsum(ones, axis=-2)
  cols = math_ops.cumsum(ones, axis=-1)
  return math_ops.greater_equal(rows, cols)
def _merge_masks(x, y):
if x is None:
return y
if y is None:
return x
return math_ops.logical_and(x, y)
|
apache-2.0
|
transtone/transconfig
|
bin/classifier.py
|
1
|
2805
|
#!/usr/bin/env python
import os
import sys
import argparse
#list of known formats here can be added
"""
All format lists were taken from wikipedia, not all of them were added due to extensions
not being exclusive to one format such as webm, or raw
Audio - https://en.wikipedia.org/wiki/Audio_file_format
Images - https://en.wikipedia.org/wiki/Image_file_formats
Video - https://en.wikipedia.org/wiki/Video_file_format
Documents - https://en.wikipedia.org/wiki/List_of_Microsoft_Office_filename_extensions Majority of it is from MS Office
"""
def moveto(file, from_folder, to_folder):
    """Move *file* from *from_folder* into *to_folder*, creating the
    destination folder if it does not exist.

    :param file: bare file name (no directory component)
    :param from_folder: directory currently containing the file
    :param to_folder: destination directory (created on demand)
    """
    # exist_ok avoids the check-then-create race the original
    # `if not os.path.exists(...)` guard had.
    os.makedirs(to_folder, exist_ok=True)
    from_file = os.path.join(from_folder, file)
    to_file = os.path.join(to_folder, file)
    # NOTE(review): os.rename fails across filesystems; shutil.move would be
    # more robust, but rename is kept to preserve existing semantics.
    os.rename(from_file, to_file)
def classify(formats, output):
    """Move every file in the current directory into its category folder.

    :param formats: mapping of category folder name -> list of extensions
    :param output: root directory that will contain the category folders
    """
    print("Scanning Files")
    directory = os.getcwd()
    for entry in os.listdir(directory):
        _, ext = os.path.splitext(entry)
        ext = ext.lower()
        for category, extensions in list(formats.items()):
            target = os.path.join(output, category)
            if ext in extensions:
                moveto(entry, directory, target)
    print("Done!")
def main():
    """Parse command-line options and classify files in the cwd."""
    description = "Organize files in your directory instantly, "
    description += "by classifying them into different folders"
    parser = argparse.ArgumentParser(description = description)
    parser.add_argument("-st", "--specific-types", type=str, nargs='+',
                        help="Move all file extensions, given in the args list, in the current directory into the Specific Folder")
    parser.add_argument("-sf", "--specific-folder", type=str,
                        help="Folder to move Specific File Type")
    parser.add_argument("-o","--output", type=str,
                        help="Main directory to put organized folders")
    args = parser.parse_args()
    formats = {
        'Music' : ['.mp3','.aac','.flac','.ogg','.wma','.m4a','.aiff'],
        'Videos' : ['.flv','.ogv','.avi','.mp4','.mpg','.mpeg','.3gp'],
        'Pictures' : ['.png','.jpeg','.gif','.jpg','.bmp','.svg','.webp', '.psd'],
        'Archives' : ['.rar','.zip','.7z','.gz','.bz2','.tar','.dmg'],
        'Documents' : ['.txt', '.pdf','.doc','.docx','.xls','.xlsv','.xlsx',
                       '.ppt','.pptx','.ppsx','.odp','.odt','.ods','.md','.json','.csv']
    }
    has_folder = args.specific_folder is not None
    has_types = args.specific_types is not None
    # -sf and -st must be given together (or not at all).
    if has_folder != has_types:
        print('Specific Folder and Specific Types should be combined')
        sys.exit()
    if has_folder and has_types:
        formats = {args.specific_folder: args.specific_types}
    if args.output is None:
        args.output = os.getcwd()
    classify(formats, args.output)
    sys.exit()
|
mit
|
davidyezsetz/kuma
|
vendor/packages/ipython/IPython/external/mglob.py
|
7
|
7672
|
#!/usr/bin/env python
r""" mglob - enhanced file list expansion module
Use as stand-alone utility (for xargs, `backticks` etc.),
or a globbing library for own python programs. Globbing the sys.argv is something
that almost every Windows script has to perform manually, and this module is here
to help with that task. Also Unix users will benefit from enhanced modes
such as recursion, exclusion, directory omission...
Unlike glob.glob, directories are not included in the glob unless specified
with 'dir:'
'expand' is the function to use in python programs. Typical use
to expand argv (esp. in windows)::
try:
import mglob
files = mglob.expand(sys.argv[1:])
except ImportError:
print "mglob not found; try 'easy_install mglob' for extra features"
files = sys.argv[1:]
Note that for unix, shell expands *normal* wildcards (*.cpp, etc.) in argv.
Therefore, you might want to use quotes with normal wildcards to prevent this
expansion, in order for mglob to see the wildcards and get the wanted behaviour.
Not quoting the wildcards is harmless and typically has equivalent results, though.
Author: Ville Vainio <vivainio@gmail.com>
License: MIT Open Source license
"""
#Assigned in variable for "usage" printing convenience"
globsyntax = """\
This program allows specifying filenames with "mglob" mechanism.
Supported syntax in globs (wilcard matching patterns)::
*.cpp ?ellowo*
- obvious. Differs from normal glob in that dirs are not included.
Unix users might want to write this as: "*.cpp" "?ellowo*"
rec:/usr/share=*.txt,*.doc
- get all *.txt and *.doc under /usr/share,
recursively
rec:/usr/share
- All files under /usr/share, recursively
rec:*.py
- All .py files under current working dir, recursively
foo
- File or dir foo
!*.bak readme*
- readme*, exclude files ending with .bak
!.svn/ !.hg/ !*_Data/ rec:.
- Skip .svn, .hg, foo_Data dirs (and their subdirs) in recurse.
Trailing / is the key, \ does not work! Use !.*/ for all hidden.
dir:foo
- the directory foo if it exists (not files in foo)
dir:*
- all directories in current folder
foo.py bar.* !h* rec:*.py
- Obvious. !h* exclusion only applies for rec:*.py.
foo.py is *not* included twice.
@filelist.txt
- All files listed in 'filelist.txt' file, on separate lines.
"cont:class \wak:" rec:*.py
- Match files containing regexp. Applies to subsequent files.
note quotes because of whitespace.
"""
__version__ = "0.2"
import os,glob,fnmatch,sys,re
def expand(flist, exp_dirs=False):
    """ Expand the glob(s) in flist.

    flist may be either a whitespace-separated list of globs/files
    or an array of globs/files.

    if exp_dirs is true, directory names in glob are expanded to the files
    contained in them - otherwise, directory names are returned as is.
    """
    # Accept both a single string (split like a shell command line) and a
    # sequence of entries.  ``basestring`` only exists on Python 2; fall
    # back to ``str`` so the check also works on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if isinstance(flist, string_types):
        import shlex
        flist = shlex.split(flist)
    done_set = set()           # absolute paths already yielded (dedup)
    denied_set = set()         # '!' exclusion patterns
    cont_set = set()           # 'cont:' content regexps
    cur_rejected_dirs = set()  # dirs rejected during a recursive walk
    def recfind(p, pats=("*",)):
        """Recursively yield files under ``p`` matching any of ``pats``."""
        # Exclusion patterns ending in '/' denote directory names to skip.
        denied_dirs = [os.path.dirname(d) for d in denied_set if d.endswith("/")]
        for (dp, dnames, fnames) in os.walk(p):
            # do not traverse under already rejected dirs
            deny = False
            for d in cur_rejected_dirs:
                if dp.startswith(d):
                    deny = True
                    break
            if deny:
                continue
            # see if we should ignore the whole directory
            bname = os.path.basename(dp)
            for deny_pat in denied_dirs:
                if fnmatch.fnmatch(bname, deny_pat):
                    deny = True
                    cur_rejected_dirs.add(dp)
                    break
            if deny:
                continue
            for f in fnames:
                for pat in pats:
                    if fnmatch.fnmatch(f, pat):
                        yield os.path.join(dp, f)
                        break
    def once_filter(seq):
        """Yield entries not seen before and not denied/excluded."""
        for it in seq:
            p = os.path.abspath(it)
            if p in done_set:
                continue
            done_set.add(p)
            deny = False
            for deny_pat in denied_set:
                if fnmatch.fnmatch(os.path.basename(p), deny_pat):
                    deny = True
                    break
            if cont_set:
                try:
                    cont = open(p).read()
                except IOError:
                    # unreadable file -> deny
                    continue
                for pat in cont_set:
                    if not re.search(pat, cont, re.IGNORECASE):
                        deny = True
                        break
            if not deny:
                yield it
    res = []
    for ent in flist:
        ent = os.path.expanduser(os.path.expandvars(ent))
        if ent.lower().startswith('rec:'):
            fields = ent[4:].split('=')
            if len(fields) == 2:
                pth, patlist = fields
            elif len(fields) == 1:
                if os.path.isdir(fields[0]):
                    # single arg is dir
                    pth, patlist = fields[0], '*'
                else:
                    # single arg is pattern
                    pth, patlist = '.', fields[0]
            else:
                # BUG FIX: was ``pth, pathlist = '.','*'`` -- the typo left
                # ``patlist`` unbound.  This branch also now covers malformed
                # specs with more than one '=' instead of crashing below.
                pth, patlist = '.', '*'
            pats = patlist.split(',')
            res.extend(once_filter(recfind(pth, pats)))
        # filelist
        elif ent.startswith('@') and os.path.isfile(ent[1:]):
            res.extend(once_filter(open(ent[1:]).read().splitlines()))
        # exclusion
        elif ent.startswith('!'):
            denied_set.add(ent[1:])
        # glob only dirs
        elif ent.lower().startswith('dir:'):
            res.extend(once_filter(filter(os.path.isdir, glob.glob(ent[4:]))))
        elif ent.lower().startswith('cont:'):
            cont_set.add(ent[5:])
        # get all files in the specified dir
        elif os.path.isdir(ent) and exp_dirs:
            res.extend(once_filter(filter(os.path.isfile, glob.glob(ent + os.sep + "*"))))
        # glob only files
        elif '*' in ent or '?' in ent:
            res.extend(once_filter(filter(os.path.isfile, glob.glob(ent))))
        else:
            res.extend(once_filter([ent]))
    return res
def test():
    # Smoke test: a whitespace-separated string and the equivalent list of
    # entries must expand to exactly the same result.
    assert (
        expand("*.py ~/.ipython/*.py rec:/usr/share/doc-base") ==
        expand( ['*.py', '~/.ipython/*.py', 'rec:/usr/share/doc-base'] )
    )
def main():
    # Command-line entry point: expand argv globs and print matches.
    if len(sys.argv) < 2:
        # No patterns given - show the glob syntax reference.
        print globsyntax
        return
    # Trailing comma suppresses the final newline (Python 2 print statement).
    print "\n".join(expand(sys.argv[1:])),
def mglob_f(self, arg):
    # IPython %mglob magic: expand *arg* with mglob syntax and return an
    # SList of matches.  __doc__ is replaced with `globsyntax` at
    # registration time (see init_ipython).
    from IPython.genutils import SList
    if arg.strip():
        return SList(expand(arg))
    # No pattern given - print help text (returns None).
    print "Please specify pattern!"
    print globsyntax
def init_ipython(ip):
    """ register %mglob for IPython """
    # Use the full glob syntax reference as the magic's help text.
    mglob_f.__doc__ = globsyntax
    ip.expose_magic("mglob",mglob_f)
# test()
# Script entry point when run stand-alone (e.g. for xargs/backticks use).
if __name__ == "__main__":
    main()
|
mpl-2.0
|
erickt/hue
|
desktop/core/ext-py/python-openid-2.2.5/openid/yadis/xrires.py
|
157
|
4268
|
# -*- test-case-name: openid.test.test_xrires -*-
"""XRI resolution.
"""
from urllib import urlencode
from openid import fetchers
from openid.yadis import etxrd
from openid.yadis.xri import toURINormal
from openid.yadis.services import iterServices
DEFAULT_PROXY = 'http://proxy.xri.net/'
class ProxyResolver(object):
    """Python interface to a remote XRI proxy resolver.
    """
    def __init__(self, proxy_url=DEFAULT_PROXY):
        # Base URL of the proxy resolver service; query URLs are built by
        # appending the normalized XRI to it (see queryURL).
        self.proxy_url = proxy_url
    def queryURL(self, xri, service_type=None):
        """Build a URL to query the proxy resolver.
        @param xri: An XRI to resolve.
        @type xri: unicode
        @param service_type: The service type to resolve, if you desire
            service endpoint selection.  A service type is a URI.
        @type service_type: str
        @returns: a URL
        @returntype: str
        """
        # Trim off the xri:// prefix.  The proxy resolver didn't accept it
        # when this code was written, but that may (or may not) change for
        # XRI Resolution 2.0 Working Draft 11.
        qxri = toURINormal(xri)[6:]
        hxri = self.proxy_url + qxri
        args = {
            # XXX: If the proxy resolver will ensure that it doesn't return
            # bogus CanonicalIDs (as per Steve's message of 15 Aug 2006
            # 11:13:42), then we could ask for application/xrd+xml instead,
            # which would give us a bit less to process.
            '_xrd_r': 'application/xrds+xml',
            }
        if service_type:
            args['_xrd_t'] = service_type
        else:
            # Don't perform service endpoint selection.
            args['_xrd_r'] += ';sep=false'
        query = _appendArgs(hxri, args)
        return query
    def query(self, xri, service_types):
        """Resolve some services for an XRI.
        Note: I don't implement any service endpoint selection beyond what
        the resolver I'm querying does, so the Services I return may well
        include Services that were not of the types you asked for.
        May raise fetchers.HTTPFetchingError or L{etxrd.XRDSError} if
        the fetching or parsing don't go so well.
        @param xri: An XRI to resolve.
        @type xri: unicode
        @param service_types: A list of services types to query for.  Service
            types are URIs.
        @type service_types: list of str
        @returns: tuple of (CanonicalID, Service elements)
        @returntype: (unicode, list of C{ElementTree.Element}s)
        """
        # FIXME: No test coverage!
        services = []
        # Make a separate request to the proxy resolver for each service
        # type, as, if it is following Refs, it could return a different
        # XRDS for each.
        canonicalID = None
        for service_type in service_types:
            url = self.queryURL(xri, service_type)
            response = fetchers.fetch(url)
            if response.status not in (200, 206):
                # XXX: sucks to fail silently.
                # print "response not OK:", response
                continue
            et = etxrd.parseXRDS(response.body)
            # The last successfully parsed XRDS wins for the CanonicalID.
            canonicalID = etxrd.getCanonicalID(xri, et)
            some_services = list(iterServices(et))
            services.extend(some_services)
        # TODO:
        #  * If we do get hits for multiple service_types, we're almost
        #    certainly going to have duplicated service entries and
        #    broken priority ordering.
        return canonicalID, services
def _appendArgs(url, args):
"""Append some arguments to an HTTP query.
"""
# to be merged with oidutil.appendArgs when we combine the projects.
if hasattr(args, 'items'):
args = args.items()
args.sort()
if len(args) == 0:
return url
# According to XRI Resolution section "QXRI query parameters":
#
# """If the original QXRI had a null query component (only a leading
# question mark), or a query component consisting of only question
# marks, one additional leading question mark MUST be added when
# adding any XRI resolution parameters."""
if '?' in url.rstrip('?'):
sep = '&'
else:
sep = '?'
return '%s%s%s' % (url, sep, urlencode(args))
|
apache-2.0
|
glennq/scikit-learn
|
sklearn/datasets/__init__.py
|
72
|
3807
|
"""
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
# Explicit public API of sklearn.datasets: keep in sync with the imports
# above so `from sklearn.datasets import *` exposes every loader, fetcher
# and sample generator.
__all__ = ['clear_data_home',
           'dump_svmlight_file',
           'fetch_20newsgroups',
           'fetch_20newsgroups_vectorized',
           'fetch_lfw_pairs',
           'fetch_lfw_people',
           'fetch_mldata',
           'fetch_olivetti_faces',
           'fetch_species_distributions',
           'fetch_california_housing',
           'fetch_covtype',
           'fetch_rcv1',
           'fetch_kddcup99',
           'get_data_home',
           'load_boston',
           'load_diabetes',
           'load_digits',
           'load_files',
           'load_iris',
           'load_breast_cancer',
           'load_lfw_pairs',
           'load_lfw_people',
           'load_linnerud',
           'load_mlcomp',
           'load_sample_image',
           'load_sample_images',
           'load_svmlight_file',
           'load_svmlight_files',
           'make_biclusters',
           'make_blobs',
           'make_circles',
           'make_classification',
           'make_checkerboard',
           'make_friedman1',
           'make_friedman2',
           'make_friedman3',
           'make_gaussian_quantiles',
           'make_hastie_10_2',
           'make_low_rank_matrix',
           'make_moons',
           'make_multilabel_classification',
           'make_regression',
           'make_s_curve',
           'make_sparse_coded_signal',
           'make_sparse_spd_matrix',
           'make_sparse_uncorrelated',
           'make_spd_matrix',
           'make_swiss_roll',
           'mldata_filename']
|
bsd-3-clause
|
JonathanSeguin/Mariana
|
Mariana/tests/decorators_tests.py
|
1
|
1417
|
import unittest
import Mariana.layers as ML
import Mariana.initializations as MI
import Mariana.decorators as MD
import Mariana.costs as MC
import Mariana.regularizations as MR
import Mariana.scenari as MS
import Mariana.activations as MA
import theano.tensor as tt
import numpy
class DecoratorTests(unittest.TestCase):
    """Unit tests for Mariana layer decorators (batch norm and mask)."""

    def setUp(self) :
        pass

    def tearDown(self) :
        pass

    def test_batch_norm(self) :
        import theano, numpy

        def reference_batchnorm(W, b, data) :
            # NumPy re-implementation used as ground truth.
            normalized = (data - numpy.mean(data)) / numpy.std(data)
            return numpy.asarray(W * normalized + b, dtype=theano.config.floatX)

        data = numpy.random.randn(1, 100).astype(theano.config.floatX)
        inp = ML.Input(100, 'inp', decorators=[MD.BatchNormalization()])
        model = inp.network
        observed = numpy.mean(model.propagate(inp, inp=data)["outputs"])
        expected = numpy.mean(
            reference_batchnorm(inp.batchnorm_W.get_value(),
                                inp.batchnorm_b.get_value(), data))
        # Mean of the layer output must match the reference within tolerance.
        self.assertTrue((observed - expected) < 1e-6)

    def test_mask(self) :
        import theano, numpy
        # An all-zero mask must zero out every output unit.
        inp = ML.Input(100, 'inp', decorators=[MD.Mask(mask = numpy.zeros(100))])
        model = inp.network
        data = numpy.random.randn(1, 100).astype(theano.config.floatX)
        outputs = model.propagate(inp, inp=data)["outputs"]
        self.assertEqual(sum(outputs[0]), 0)
# Run the suite directly, silencing Mariana's verbose logging first.
if __name__ == '__main__' :
	import Mariana.settings as MSET
	MSET.VERBOSE = False
	unittest.main()
|
apache-2.0
|
cznweb/arkos
|
arkos/signals.py
|
2
|
2713
|
"""
Classes and functions to manage internal signal hooks and listeners.
arkOS Core
(c) 2016 CitizenWeb
Written by Jacob Cook
Licensed under GPLv3, see LICENSE.md
"""
from arkos import storage, logger
class Listener:
    """
    Class representing a signal listener.

    A signal listener is set up to track the emittance of certain signals
    and to execute a function based on the result of said emittance. These
    are good to use for cleanup after an item is removed from the system,
    or for making sure certain elements are established after loading a
    necessary component.
    """

    def __init__(self, by, id, sig, func):
        """
        Initialize the signal listener.

        :param str by: the name of the module that registered this listener
        :param str id: identifier for this listener
        :param str sig: signal ID to listen for
        :param func func: hook function to execute
        """
        self.id = id
        self.by = by
        self.sig = sig
        self.func = func

    def trigger(self, data, crit=True):
        """
        Trigger the hook function for this listener.

        :param data: parameter to provide to the hook function; any falsy
            value (None, 0, "") makes the hook be called with no arguments
        :param bool crit: Raise hook function exceptions?
        """
        try:
            if data:
                self.func(data)
            else:
                self.func()
        except Exception:
            # BUG FIX: was a bare ``except:`` which, with crit=False, also
            # swallowed KeyboardInterrupt and SystemExit.
            if crit:
                raise
def add(by, id, sig, func):
    """
    Register a new listener with the system.

    :param str by: the name of the module that registered this listener
    :param str id: identifier for this listener
    :param str sig: signal ID to listen for
    :param func func: hook function to execute
    """
    # Create the listener bucket for this id on first use.
    if not storage.signals.get(id):
        storage.signals[id] = []
    listener = Listener(by, id, sig, func)
    storage.signals[id].append(listener)
    logger.debug(
        "Sign", "Registered {0} to {1} for {2}".format(sig, id, by))
def emit(id, sig, data=None, crit=True):
    """
    Emit a signal.

    :param str id: name of the module emitting this signal
    :param str sig: signal ID
    :param data: parameter to pass to hook function (if necessary)
    :param bool crit: Raise hook function exceptions?
    """
    listeners = storage.signals.get(id)
    if not listeners:
        # Nobody registered for this module; nothing to do.
        return
    for listener in listeners:
        if listener.sig == sig:
            listener.trigger(data, crit)
def remove(by):
    """
    Deregister all listeners for this module.

    :param str by: name of the module to deregister listeners for
    """
    for x in storage.signals:
        # BUG FIX: was ``storage.signals.remove(y)`` -- removing the listener
        # from the id->list *mapping* instead of the listener list itself.
        # The matches are materialized first because removing from a list
        # while a lazy filter iterates it skips entries.
        for y in [z for z in storage.signals[x] if z.by == by]:
            storage.signals[x].remove(y)
|
gpl-3.0
|
tm-kn/django-pdf-model
|
django_pdf/tests/demosite/models.py
|
1
|
1859
|
from django.conf import settings
from django.db import models
from django.shortcuts import reverse
from django_pdf.models import PDFModelMixin
from django_pdf import pdf_fields
class Report(PDFModelMixin, models.Model):
    """A demo report model rendered to PDF via django_pdf's field list."""
    title = models.CharField(max_length=255)
    introduction = models.CharField(max_length=255)
    content = models.TextField()
    html_content = models.TextField()
    author = models.ForeignKey(settings.AUTH_USER_MODEL,
                               on_delete=models.PROTECT)
    # Ordered list of fields/attributes django_pdf renders into the PDF;
    # names may refer to model fields, properties or methods.
    pdf_field_list = [
        pdf_fields.TitlePDFField('title'),
        pdf_fields.HeadingPDFField('introduction', heading_level=3),
        pdf_fields.CharPDFField('content'),
        pdf_fields.CharPDFField('author_name'),
        pdf_fields.HTMLPDFField('html_content'),
        pdf_fields.HTMLPDFField('some_html_content'),
    ]
    @property
    def author_name(self):
        # Full name of the related user, exposed for the PDF field list.
        return self.author.get_full_name()
    def some_html_content(self):
        # Static HTML sample demonstrating HTMLPDFField rendering.
        return """
        <p>
            Lorem ipsum dolor sit amet, consectetur adipiscing elit.
            Nulla ut vestibulum lectus, ac pharetra lectus. Mauris hendrerit
            purus sapien, ac egestas ipsum aliquam vitae.
        </p>
        <u>Pellentesque porta
        varius eros. Sed at tincidunt nisl. Phasellus turpis lectus,
        aliquam sed facilisis et, dignissim et sem.</u>
        <a href="http://dgg.gg">Duck duck!</a>
        <strong>Sed venenatisconsectetur tellus, non rhoncus lorem scelerisque
        ut. Praesent diam orci, porttitor vel cursus quis, accumsan sed risus.
        Ut commodo, ex id ultrices pretium, eros nunc scelerisque risus, et
        sodales justo erat id risus. Nulla at condimentum elit.</strong>
        """
    def get_absolute_url(self):
        # URL of the PDF view for this report.
        return reverse('report-pdf', args=[str(self.pk)])
    def __str__(self):
        return self.title
|
bsd-2-clause
|
namjae/AutobahnTestSuite
|
autobahntestsuite/autobahntestsuite/case/case5_11.py
|
14
|
1495
|
###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case5_11(Case):
   # WebSocket protocol conformance case: a continuation frame with no
   # preceding fragmented message is a protocol error (RFC 6455).
   DESCRIPTION = """Send unfragmented Text Message after Continuation Frame with FIN = true, where there is nothing to continue, sent in octet-wise chops."""
   EXPECTATION = """The connection is failed immediately, since there is no message to continue."""
   def onOpen(self):
      self.expected[Case.OK] = []
      self.expectedClose = {"closedByMe":False,"closeCode":[self.p.CLOSE_STATUS_CODE_PROTOCOL_ERROR],"requireClean":False}
      # opcode 0 = continuation with nothing to continue; chopsize 1 sends
      # the frame one octet at a time.
      self.p.sendFrame(opcode = 0, fin = True, payload = "non-continuation payload", chopsize = 1)
      self.p.sendFrame(opcode = 1, fin = True, payload = "Hello, world!", chopsize = 1)
      self.p.killAfter(1)
|
apache-2.0
|
SaschaMester/delicium
|
tools/perf/page_sets/gmail_compose_discard.py
|
1
|
2495
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
def _CreateXpathFunction(xpath):
return ('document.evaluate("%s",'
'document,'
'null,'
'XPathResult.FIRST_ORDERED_NODE_TYPE,'
'null)'
'.singleNodeValue' % re.escape(xpath))
class GmailComposeDiscardPage(page_module.Page):
  """ Why: Compose and discard a new email """
  def __init__(self, page_set):
    super(GmailComposeDiscardPage, self).__init__(
      url='https://mail.google.com/mail/',
      page_set=page_set,
      shared_page_state_class=shared_page_state.SharedDesktopPageState,
      credentials_path = 'data/credentials.json')
    self.credentials = 'google'

  def RunNavigateSteps(self, action_runner):
    # Wait until the Gmail UI (gmonkey API and top bar) has finished loading.
    super(GmailComposeDiscardPage, self).RunNavigateSteps(action_runner)
    action_runner.WaitForJavaScriptCondition(
        'window.gmonkey !== undefined &&'
        'document.getElementById("gb") !== null')

  def ComposeClick(self, action_runner):
    # Synthesize mousedown/mouseup on the COMPOSE button; a plain click()
    # is not sufficient for Gmail's event handlers.
    action_runner.ExecuteJavaScript('''
        var button=document.evaluate('//div[text()="COMPOSE"]',
            document,null,XPathResult.FIRST_ORDERED_NODE_TYPE,null)
            .singleNodeValue;
        var mousedownevent=new MouseEvent('mousedown',true,true,window,0,0,0,0,0,
            false,false,false,false,0,null);
        var mouseupevent=new MouseEvent('mouseup',true,true,window,0,0,0,0,0,
            false,false,false,false,0,null);
        button.dispatchEvent(mousedownevent);
        button.dispatchEvent(mouseupevent);''')

  def RunEndure(self, action_runner):
    # One endurance iteration: open the compose window, then discard it.
    action_runner.WaitForElement(
        element_function=_CreateXpathFunction('//div[text()="COMPOSE"]'))
    self.ComposeClick(action_runner)
    action_runner.Wait(1)
    action_runner.WaitForElement(
        'div[class~="oh"][data-tooltip="Discard draft"]')
    action_runner.ClickElement('div[class~="oh"][data-tooltip="Discard draft"]')
    action_runner.Wait(1)
class GmailComposeDiscardPageSet(story.StorySet):

  """
  Description: Gmail endure test: compose and discard an email.
  """

  def __init__(self):
    super(GmailComposeDiscardPageSet, self).__init__()
    # Single-story set: the compose/discard endurance page above.
    self.AddUserStory(GmailComposeDiscardPage(self))
|
bsd-3-clause
|
1ukash/horizon
|
horizon/api/base.py
|
4
|
3984
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from horizon import exceptions
__all__ = ('APIResourceWrapper', 'APIDictWrapper',
'get_service_from_catalog', 'url_for',)
LOG = logging.getLogger(__name__)
class APIResourceWrapper(object):
    """ Simple wrapper for api objects

        Define _attrs on the child class and pass in the
        api object as the only argument to the constructor
    """
    _attrs = []

    def __init__(self, apiresource):
        self._apiresource = apiresource

    def __getattr__(self, attr):
        # Guard clause: anything not whitelisted in _attrs is an error.
        if attr not in self._attrs:
            msg = ('Attempted to access unknown attribute "%s" on '
                   'APIResource object of type "%s" wrapping resource of '
                   'type "%s".') % (attr, self.__class__,
                                    self._apiresource.__class__)
            LOG.debug(exceptions.error_color(msg))
            raise AttributeError(attr)
        # __getattr__ won't find properties
        return self._apiresource.__getattribute__(attr)
class APIDictWrapper(object):
    """ Simple wrapper for api dictionaries
        Some api calls return dictionaries.  This class provides identical
        behavior as APIResourceWrapper, except that it will also behave as a
        dictionary, in addition to attribute accesses.
        Attribute access is the preferred method of access, to be
        consistent with api resource objects from novclient.
    """
    def __init__(self, apidict):
        self._apidict = apidict
    def __getattr__(self, attr):
        # Attribute access falls through to the wrapped dictionary's keys.
        try:
            return self._apidict[attr]
        except KeyError:
            msg = 'Unknown attribute "%(attr)s" on APIResource object ' \
                  'of type "%(cls)s"' % {'attr': attr, 'cls': self.__class__}
            LOG.debug(exceptions.error_color(msg))
            raise AttributeError(msg)
    def __getitem__(self, item):
        # Dictionary-style access delegates to __getattr__ (Python 2 syntax).
        try:
            return self.__getattr__(item)
        except AttributeError, e:
            # caller is expecting a KeyError
            raise KeyError(e)
    def get(self, item, default=None):
        # dict.get() semantics: missing keys yield the default, not an error.
        try:
            return self.__getattr__(item)
        except AttributeError:
            return default
def get_service_from_catalog(catalog, service_type):
    """Return the first catalog entry matching *service_type*, else None.

    ``catalog`` may be None/empty, in which case None is returned.
    """
    if not catalog:
        return None
    for service in catalog:
        if service['type'] == service_type:
            return service
    return None
def url_for(request, service_type, admin=False, endpoint_type=None):
    """Return an endpoint URL for *service_type* from the user's catalog.

    Raises ServiceCatalogException when the service or endpoint is missing.
    """
    endpoint_type = endpoint_type or getattr(settings,
                                             'OPENSTACK_ENDPOINT_TYPE',
                                             'publicURL')
    service = get_service_from_catalog(request.user.service_catalog,
                                       service_type)
    if not service:
        raise exceptions.ServiceCatalogException(service_type)
    # adminURL takes precedence when the admin flag is set.
    key = 'adminURL' if admin else endpoint_type
    try:
        return service['endpoints'][0][key]
    except (IndexError, KeyError):
        raise exceptions.ServiceCatalogException(service_type)
|
apache-2.0
|
gmatteo/pymatgen
|
pymatgen/cli/pmg_config.py
|
3
|
6633
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Implementation for `pmg config` CLI.
"""
import glob
import os
import shutil
import subprocess
import sys
from urllib.request import urlretrieve
from monty.serialization import dumpfn, loadfn
from pymatgen.core import SETTINGS_FILE
def setup_potcars(args):
    """
    Setup POTCAR directory.

    :param args: args from command; ``args.potcar_dirs`` is a pair of
        (source psp directory, target resources directory).
    """
    pspdir, targetdir = [os.path.abspath(d) for d in args.potcar_dirs]
    try:
        os.makedirs(targetdir)
    except OSError:
        # Target already exists; let the user bail out before overwriting.
        r = input("Destination directory exists. Continue (y/n)? ")
        if r != "y":
            print("Exiting ...")
            sys.exit(0)
    print("Generating pymatgen resources directory...")
    # Map VASP's distribution folder names to pymatgen's functional names.
    name_mappings = {
        "potpaw_PBE": "POT_GGA_PAW_PBE",
        "potpaw_PBE_52": "POT_GGA_PAW_PBE_52",
        "potpaw_PBE_54": "POT_GGA_PAW_PBE_54",
        "potpaw_PBE.52": "POT_GGA_PAW_PBE_52",
        "potpaw_PBE.54": "POT_GGA_PAW_PBE_54",
        "potpaw_LDA": "POT_LDA_PAW",
        "potpaw_LDA.52": "POT_LDA_PAW_52",
        "potpaw_LDA.54": "POT_LDA_PAW_54",
        "potpaw_LDA_52": "POT_LDA_PAW_52",
        "potpaw_LDA_54": "POT_LDA_PAW_54",
        "potUSPP_LDA": "POT_LDA_US",
        "potpaw_GGA": "POT_GGA_PAW_PW91",
        "potUSPP_GGA": "POT_GGA_US_PW91",
    }
    for (parent, subdirs, files) in os.walk(pspdir):
        basename = os.path.basename(parent)
        basename = name_mappings.get(basename, basename)
        for subdir in subdirs:
            filenames = glob.glob(os.path.join(parent, subdir, "POTCAR*"))
            if len(filenames) > 0:
                try:
                    basedir = os.path.join(targetdir, basename)
                    if not os.path.exists(basedir):
                        os.makedirs(basedir)
                    fname = filenames[0]
                    dest = os.path.join(basedir, os.path.basename(fname))
                    shutil.copy(fname, dest)
                    # Decompress in place if the POTCAR was shipped zipped.
                    ext = fname.split(".")[-1]
                    if ext.upper() in ["Z", "GZ"]:
                        subprocess.Popen(["gunzip", dest]).communicate()
                    elif ext.upper() in ["BZ2"]:
                        subprocess.Popen(["bunzip2", dest]).communicate()
                    # Some distributions spell the Os directory "Osmium".
                    if subdir == "Osmium":
                        subdir = "Os"
                    dest = os.path.join(basedir, "POTCAR.{}".format(subdir))
                    shutil.move(os.path.join(basedir, "POTCAR"), dest)
                    # Stored gzipped to save space; pymatgen reads .gz files.
                    subprocess.Popen(["gzip", "-f", dest]).communicate()
                except Exception as ex:
                    print("An error has occured. Message is %s. Trying to " "continue... " % str(ex))
    print("")
    print(
        "PSP resources directory generated. It is recommended that you "
        "run 'pmg config --add PMG_VASP_PSP_DIR %s'" % os.path.abspath(targetdir)
    )
    print("Start a new terminal to ensure that your environment variables " "are properly set.")
def build_enum(fortran_command="gfortran"):
    """
    Build enum (clones and compiles msg-byu/enumlib).

    :param fortran_command: Fortran compiler executable name.
    :return: True on success, False if any build step failed.
    """
    currdir = os.getcwd()
    state = True
    try:
        subprocess.call(["git", "clone", "--recursive", "https://github.com/msg-byu/enumlib.git"])
        # symlib must be built before enumlib itself.
        os.chdir(os.path.join(currdir, "enumlib", "symlib", "src"))
        os.environ["F90"] = fortran_command
        subprocess.call(["make"])
        enumpath = os.path.join(currdir, "enumlib", "src")
        os.chdir(enumpath)
        subprocess.call(["make"])
        for f in ["enum.x", "makestr.x"]:
            subprocess.call(["make", f])
            # Copy the executables up to the original working directory.
            shutil.copy(f, os.path.join("..", ".."))
    except Exception as ex:
        print(str(ex))
        state = False
    finally:
        # Always restore the cwd and clean up the clone.
        os.chdir(currdir)
        shutil.rmtree("enumlib")
    return state
def build_bader(fortran_command="gfortran"):
    """
    Build bader package (downloads and compiles the Henkelman bader code).

    :param fortran_command: Fortran compiler executable name.
    :return: True on success, False if any build step failed.
    """
    bader_url = "http://theory.cm.utexas.edu/henkelman/code/bader/download/bader.tar.gz"
    currdir = os.getcwd()
    state = True
    try:
        urlretrieve(bader_url, "bader.tar.gz")
        subprocess.call(["tar", "-zxf", "bader.tar.gz"])
        os.chdir("bader")
        subprocess.call(["cp", "makefile.osx_" + fortran_command, "makefile"])
        subprocess.call(["make"])
        # Stash the binary outside the build tree before deleting it, then
        # rename it back to "bader" in the original directory.
        shutil.copy("bader", os.path.join("..", "bader_exe"))
        os.chdir("..")
        shutil.rmtree("bader")
        os.remove("bader.tar.gz")
        shutil.move("bader_exe", "bader")
    except Exception as ex:
        print(str(ex))
        state = False
    finally:
        os.chdir(currdir)
    return state
def install_software(args):
    """
    Install all optional external software.

    :param args: args from command; ``args.install`` selects "enumlib"
        or "bader".
    """
    # Probe for a Fortran compiler, preferring ifort over gfortran.
    try:
        subprocess.call(["ifort", "--version"])
        print("Found ifort")
        fortran_command = "ifort"
    except Exception:
        try:
            subprocess.call(["gfortran", "--version"])
            print("Found gfortran")
            fortran_command = "gfortran"
        except Exception as ex:
            print(str(ex))
            print("No fortran compiler found.")
            sys.exit(-1)
    enum = None
    bader = None
    if args.install == "enumlib":
        print("Building enumlib")
        enum = build_enum(fortran_command)
        print("")
    elif args.install == "bader":
        print("Building bader")
        bader = build_bader(fortran_command)
        print("")
    if bader or enum:
        # The executables are left in the cwd; remind the user about PATH.
        print(
            "Please add {} to your PATH or move the executables multinum.x, "
            "makestr.x and/or bader to a location in your PATH.".format(os.path.abspath("."))
        )
        print("")
def add_config_var(args):
    """
    Add configuration args.

    :param args: args from command; ``args.var_spec`` is a flat list of
        alternating keys and values.
    """
    config = {}
    if os.path.exists(SETTINGS_FILE):
        # Keep a backup of the previous settings before rewriting them.
        shutil.copy(SETTINGS_FILE, SETTINGS_FILE + ".bak")
        print("Existing %s backed up to %s" % (SETTINGS_FILE, SETTINGS_FILE + ".bak"))
        config = loadfn(SETTINGS_FILE)
    toks = args.var_spec
    if len(toks) % 2 != 0:
        print("Bad variable specification!")
        sys.exit(-1)
    # Pair up even-indexed keys with odd-indexed values.
    for key, value in zip(toks[::2], toks[1::2]):
        config[key] = value
    dumpfn(config, SETTINGS_FILE, default_flow_style=False)
    print("New %s written!" % (SETTINGS_FILE))
def configure_pmg(args):
    """Dispatch the ``pmg config`` subcommand to the matching handler.

    At most one action runs: ``potcar_dirs``, ``install`` and
    ``var_spec`` are checked in that order and the first truthy one
    wins.

    :param args: parsed CLI arguments.
    """
    dispatch = (("potcar_dirs", setup_potcars),
                ("install", install_software),
                ("var_spec", add_config_var))
    for attr, handler in dispatch:
        if getattr(args, attr):
            handler(args)
            return
|
mit
|
ApisSys/linux-analogdevicesinc-ap6
|
Documentation/sphinx/rstFlatTable.py
|
64
|
13304
|
#!/usr/bin/env python3
# -*- coding: utf-8; mode: python -*-
# pylint: disable=C0330, R0903, R0912
u"""
flat-table
~~~~~~~~~~
Implementation of the ``flat-table`` reST-directive.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see linux/COPYING for details.
The ``flat-table`` (:py:class:`FlatTable`) is a double-stage list similar to
the ``list-table`` with some additional features:
* *column-span*: with the role ``cspan`` a cell can be extended through
additional columns
* *row-span*: with the role ``rspan`` a cell can be extended through
additional rows
* *auto span* rightmost cell of a table row over the missing cells on the
right side of that table-row. With Option ``:fill-cells:`` this behavior
can changed from *auto span* to *auto fill*, which automaticly inserts
(empty) cells instead of spanning the last cell.
Options:
* header-rows: [int] count of header rows
* stub-columns: [int] count of stub columns
* widths: [[int] [int] ... ] widths of columns
* fill-cells: instead of autospann missing cells, insert missing cells
roles:
* cspan: [int] additionale columns (*morecols*)
* rspan: [int] additionale rows (*morerows*)
"""
# ==============================================================================
# imports
# ==============================================================================
import sys
from docutils import nodes
from docutils.parsers.rst import directives, roles
from docutils.parsers.rst.directives.tables import Table
from docutils.utils import SystemMessagePropagation
# ==============================================================================
# common globals
# ==============================================================================
# The version numbering follows numbering of the specification
# (Documentation/books/kernel-doc-HOWTO).
__version__ = '1.0'
# Python major-version flags used to pick compatibility shims below.
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
if PY3:
    # On Python 3 alias the removed py2 string types to ``str`` so the
    # rest of the module can reference them unconditionally.
    # pylint: disable=C0103, W0622
    unicode = str
    basestring = str
# ==============================================================================
def setup(app):
    """Sphinx extension entry point.

    Registers the ``flat-table`` directive and the ``cspan``/``rspan``
    roles, and declares the extension safe for parallel builds.
    """
    app.add_directive("flat-table", FlatTable)
    roles.register_local_role('cspan', c_span)
    roles.register_local_role('rspan', r_span)
    return {
        'version': __version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
# ==============================================================================
def c_span(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Handle the ``cspan`` role: mark a cell as spanning extra columns.

    The role text is the number of additional columns (*morecols*).
    """
    # pylint: disable=W0613
    if options is None:
        options = {}
    if content is None:
        content = []
    return [colSpan(span=int(text))], []
# ==============================================================================
def r_span(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Handle the ``rspan`` role: mark a cell as spanning extra rows.

    The role text is the number of additional rows (*morerows*).
    """
    # pylint: disable=W0613
    if options is None:
        options = {}
    if content is None:
        content = []
    return [rowSpan(span=int(text))], []
# ==============================================================================
# Marker node produced by the ``rspan`` role; carries the extra row
# count in its "span" attribute (consumed by ListTableBuilder).
class rowSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
# Marker node produced by the ``cspan`` role; carries the extra column
# count in its "span" attribute (consumed by ListTableBuilder).
class colSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
# ==============================================================================
# ==============================================================================
class FlatTable(Table):
# ==============================================================================
    u"""FlatTable (``flat-table``) directive"""
    # Recognized directive options (see the module docstring for semantics).
    option_spec = {
        'name': directives.unchanged
        , 'class': directives.class_option
        , 'header-rows': directives.nonnegative_int
        , 'stub-columns': directives.nonnegative_int
        , 'widths': directives.positive_int_list
        , 'fill-cells' : directives.flag }
    def run(self):
        """Parse the directive body and return the built table node."""
        if not self.content:
            error = self.state_machine.reporter.error(
                'The "%s" directive is empty; content required.' % self.name,
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            return [error]
        title, messages = self.make_title()
        node = nodes.Element() # anonymous container for parsing
        self.state.nested_parse(self.content, self.content_offset, node)
        # Delegate list-to-table conversion to the builder helper.
        tableBuilder = ListTableBuilder(self)
        tableBuilder.parseFlatTableNode(node)
        tableNode = tableBuilder.buildTableNode()
        # SDK.CONSOLE() # print --> tableNode.asdom().toprettyxml()
        if title:
            tableNode.insert(0, title)
        return [tableNode] + messages
# ==============================================================================
class ListTableBuilder(object):
# ==============================================================================
    u"""Builds a table from a double-stage list"""
    def __init__(self, directive):
        # ``directive`` is the FlatTable instance this builder serves;
        # ``rows`` holds (cspan, rspan, elements) cell tuples per row.
        self.directive = directive
        self.rows = []
        self.max_cols = 0
    def buildTableNode(self):
        """Assemble and return the docutils table node from self.rows."""
        colwidths = self.directive.get_column_widths(self.max_cols)
        stub_columns = self.directive.options.get('stub-columns', 0)
        header_rows = self.directive.options.get('header-rows', 0)
        table = nodes.table()
        tgroup = nodes.tgroup(cols=len(colwidths))
        table += tgroup
        for colwidth in colwidths:
            colspec = nodes.colspec(colwidth=colwidth)
            # FIXME: It seems, that the stub method only works well in the
            # absence of rowspan (observed by the html buidler, the docutils-xml
            # build seems OK). This is not extraordinary, because there exists
            # no table directive (except *this* flat-table) which allows to
            # define coexistent of rowspan and stubs (there was no use-case
            # before flat-table). This should be reviewed (later).
            if stub_columns:
                colspec.attributes['stub'] = 1
                stub_columns -= 1
            tgroup += colspec
        stub_columns = self.directive.options.get('stub-columns', 0)
        if header_rows:
            thead = nodes.thead()
            tgroup += thead
            for row in self.rows[:header_rows]:
                thead += self.buildTableRowNode(row)
        tbody = nodes.tbody()
        tgroup += tbody
        for row in self.rows[header_rows:]:
            tbody += self.buildTableRowNode(row)
        return table
    def buildTableRowNode(self, row_data, classes=None):
        """Build one docutils row node; None cells (span fillers) are skipped."""
        classes = [] if classes is None else classes
        row = nodes.row()
        for cell in row_data:
            if cell is None:
                continue
            cspan, rspan, cellElements = cell
            attributes = {"classes" : classes}
            if rspan:
                attributes['morerows'] = rspan
            if cspan:
                attributes['morecols'] = cspan
            entry = nodes.entry(**attributes)
            entry.extend(cellElements)
            row += entry
        return row
    def raiseError(self, msg):
        """Report *msg* through the directive's reporter and abort parsing."""
        error = self.directive.state_machine.reporter.error(
            msg
            , nodes.literal_block(self.directive.block_text
            , self.directive.block_text)
            , line = self.directive.lineno )
        raise SystemMessagePropagation(error)
    def parseFlatTableNode(self, node):
        u"""parses the node from a :py:class:`FlatTable` directive's body"""
        if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
            self.raiseError(
                'Error parsing content block for the "%s" directive: '
                'exactly one bullet list expected.' % self.directive.name )
        for rowNum, rowItem in enumerate(node[0]):
            row = self.parseRowItem(rowItem, rowNum)
            self.rows.append(row)
        self.roundOffTableDefinition()
    def roundOffTableDefinition(self):
        u"""Round off the table definition.
        This method rounds off the table definition in :py:member:`rows`.
        * This method inserts the needed ``None`` values for the missing cells
        arising from spanning cells over rows and/or columns.
        * recount the :py:member:`max_cols`
        * Autospan or fill (option ``fill-cells``) missing cells on the right
        side of the table-row
        """
        y = 0
        while y < len(self.rows):
            x = 0
            while x < len(self.rows[y]):
                cell = self.rows[y][x]
                if cell is None:
                    x += 1
                    continue
                cspan, rspan = cell[:2]
                # handle colspan in current row
                for c in range(cspan):
                    try:
                        self.rows[y].insert(x+c+1, None)
                    except: # pylint: disable=W0702
                        # the user sets ambiguous rowspans
                        pass # SDK.CONSOLE()
                # handle colspan in spanned rows
                for r in range(rspan):
                    for c in range(cspan + 1):
                        try:
                            self.rows[y+r+1].insert(x+c, None)
                        except: # pylint: disable=W0702
                            # the user sets ambiguous rowspans
                            pass # SDK.CONSOLE()
                x += 1
            y += 1
        # Insert the missing cells on the right side. For this, first
        # re-calculate the max columns.
        for row in self.rows:
            if self.max_cols < len(row):
                self.max_cols = len(row)
        # fill with empty cells or cellspan?
        fill_cells = False
        if 'fill-cells' in self.directive.options:
            fill_cells = True
        for row in self.rows:
            x = self.max_cols - len(row)
            if x and not fill_cells:
                if row[-1] is None:
                    row.append( ( x - 1, 0, []) )
                else:
                    cspan, rspan, content = row[-1]
                    row[-1] = (cspan + x, rspan, content)
            elif x and fill_cells:
                for i in range(x):
                    row.append( (0, 0, nodes.comment()) )
    def pprint(self):
        # for debugging: render self.rows as a readable nested-list string
        retVal = "[ "
        for row in self.rows:
            retVal += "[ "
            for col in row:
                if col is None:
                    retVal += ('%r' % col)
                    retVal += "\n , "
                else:
                    content = col[2][0].astext()
                    if len (content) > 30:
                        content = content[:30] + "..."
                    retVal += ('(cspan=%s, rspan=%s, %r)'
                               % (col[0], col[1], content))
                    retVal += "]\n , "
            retVal = retVal[:-2]
            retVal += "]\n , "
        retVal = retVal[:-2]
        return retVal + "]"
    def parseRowItem(self, rowItem, rowNum):
        """Parse one first-level list item (a table row) into cell tuples."""
        row = []
        childNo = 0
        error = False
        cell = None
        target = None
        for child in rowItem:
            if (isinstance(child , nodes.comment)
                or isinstance(child, nodes.system_message)):
                pass
            elif isinstance(child , nodes.target):
                target = child
            elif isinstance(child, nodes.bullet_list):
                childNo += 1
                cell = child
            else:
                error = True
                break
        if childNo != 1 or error:
            self.raiseError(
                'Error parsing content block for the "%s" directive: '
                'two-level bullet list expected, but row %s does not '
                'contain a second-level bullet list.'
                % (self.directive.name, rowNum + 1))
        for cellItem in cell:
            cspan, rspan, cellElements = self.parseCellItem(cellItem)
            if target is not None:
                cellElements.insert(0, target)
            row.append( (cspan, rspan, cellElements) )
        return row
    def parseCellItem(self, cellItem):
        # search and remove cspan, rspan colspec from the first element in
        # this listItem (field).
        cspan = rspan = 0
        if not len(cellItem):
            return cspan, rspan, []
        for elem in cellItem[0]:
            if isinstance(elem, colSpan):
                cspan = elem.get("span")
                elem.parent.remove(elem)
                continue
            if isinstance(elem, rowSpan):
                rspan = elem.get("span")
                elem.parent.remove(elem)
                continue
        return cspan, rspan, cellItem[:]
|
gpl-2.0
|
tsnoam/Flexget
|
flexget/plugins/modify/torrent_scrub.py
|
13
|
3968
|
""" Torrent Scrubber Plugin.
"""
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin, validator
from flexget.event import event
from flexget.plugins.modify.torrent import TorrentFilename
from flexget.utils import bittorrent
# Module-level logger for this plugin.
log = logging.getLogger('torrent_scrub')
class TorrentScrub(object):
    """Scrubs torrents from unwanted keys.

    Example:
      tasks:
        rutorrent-fast-resume-infected-task:
          torrent_scrub: resume
    """
    # Scrub at high level, but BELOW "torrent"
    SCRUB_PRIO = TorrentFilename.TORRENT_PRIO - 10
    # Scrubbing modes
    SCRUB_MODES = ("off", "on", "all", "resume", "rtorrent",)
    # Keys of rTorrent / ruTorrent session data
    RT_KEYS = ("libtorrent_resume", "log_callback", "err_callback", "rtorrent")
    # Config is a boolean, one of SCRUB_MODES, or an explicit key list.
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {'type': 'string', 'enum': list(SCRUB_MODES)},
            {'type': 'array', 'items': {'type': 'string'}} # list of keys to scrub
        ]
    }
    @plugin.priority(SCRUB_PRIO)
    def on_task_modify(self, task, config):
        """ Scrub items that are torrents, if they're affected.

        :param task: currently executing task (its entries are scanned)
        :param config: plugin configuration (see ``schema``)
        """
        if isinstance(config, list):
            mode = "fields"
        else:
            mode = str(config).lower()
        if mode in ("off", "false"):
            log.debug("Plugin configured, but disabled")
            return
        for entry in task.entries:
            # Skip non-torrents
            if "torrent" not in entry:
                continue
            # Scrub keys as configured
            modified = set()
            metainfo = entry["torrent"].content
            infohash = entry["torrent"].info_hash
            if mode in ("on", "all", "true"):
                modified = bittorrent.clean_meta(metainfo, including_info=(mode == "all"), logger=log.debug)
            elif mode in ("resume", "rtorrent"):
                # BUGFIX: the original did ``self.RT_KEYS = self.RT_KEYS[:1]``
                # for "resume" mode, permanently truncating the key tuple on
                # the plugin instance so a later "rtorrent" run would scrub
                # only the first key. Use a local view instead of mutating
                # shared state.
                rt_keys = self.RT_KEYS[:1] if mode == "resume" else self.RT_KEYS
                for key in rt_keys:
                    if key in metainfo:
                        log.debug("Removing key '%s'..." % (key,))
                        del metainfo[key]
                        modified.add(key)
            elif mode == "fields":
                # Scrub all configured fields
                for key in config:
                    fieldname = key # store for logging
                    key = bittorrent.Torrent.KEY_TYPE(key)
                    field = metainfo
                    # Walk dotted paths ("info.name") down into the metainfo dict.
                    while field and '.' in key:
                        name, key = key.split('.', 1)
                        try:
                            field = field[name]
                        except KeyError:
                            # Key not found in this entry
                            field = None
                    log.trace((key, field))
                    if field and key in field:
                        log.debug("Removing key '%s'..." % (fieldname,))
                        del field[key]
                        modified.add(fieldname)
            else:
                raise ValueError("INTERNAL ERROR: Unknown mode %r" % mode)
            # Commit any changes back into entry
            if modified:
                entry["torrent"].content = metainfo
                entry["torrent"].modified = True
                log.info((("Key %s was" if len(modified) == 1 else "Keys %s were") +
                          " scrubbed from torrent '%s'!") % (", ".join(sorted(modified)), entry['title']))
                new_infohash = entry["torrent"].info_hash
                if infohash != new_infohash:
                    log.warn("Info hash changed from #%s to #%s in '%s'" %
                             (infohash, new_infohash, entry['filename']))
@event('plugin.register')
def register_plugin():
    # Register TorrentScrub with FlexGet under the "torrent" group (API v2).
    plugin.register(TorrentScrub, groups=["torrent"], api_ver=2)
|
mit
|
IllusionRom-deprecated/android_platform_external_chromium_org_tools_grit
|
grit/tool/xmb.py
|
34
|
9656
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The 'grit xmb' tool.
"""
import getopt
import os
from xml.sax import saxutils
from grit import grd_reader
from grit import lazy_re
from grit import tclib
from grit import util
from grit.tool import interface
# Used to collapse presentable content to determine if
# xml:space="preserve" is needed.
_WHITESPACES_REGEX = lazy_re.compile(ur'\s\s*')
# See XmlEscape below.
_XML_QUOTE_ESCAPES = {
u"'": u''',
u'"': u'"',
}
_XML_BAD_CHAR_REGEX = lazy_re.compile(u'[^\u0009\u000A\u000D'
u'\u0020-\uD7FF\uE000-\uFFFD]')
def _XmlEscape(s):
    """Returns text escaped for XML in a way compatible with Google's
    internal Translation Console tool. May be used for attributes as
    well as for contents.
    """
    if type(s) != unicode:
        s = unicode(s)
    escaped = saxutils.escape(s, _XML_QUOTE_ESCAPES)
    return _XML_BAD_CHAR_REGEX.sub(u'', escaped).encode('utf-8')
def _WriteAttribute(file, name, value):
    """Writes an XML attribute ``name="value"`` to *file*.

    Nothing is written when *value* is falsy; the value is escaped via
    _XmlEscape before emission.

    Args:
      file: file to write to
      name: name of the attribute
      value: (unescaped) value of the attribute
    """
    if not value:
        return
    file.write(' %s="%s"' % (name, _XmlEscape(value)))
def _WriteMessage(file, message):
    """Writes one <msg> element for *message* to *file*.

    Placeholders are emitted as <ph> elements with their example text in
    an <ex> child. xml:space="preserve" is set when the presentable
    content carries significant whitespace.

    Args:
      file: file open for writing
      message: grit.tclib.Message instance
    """
    presentable_content = message.GetPresentableContent()
    # BUGFIX: the original assertion read
    #   type(message.parts[0] == tclib.Placeholder)
    # i.e. the type of a comparison result, which is always truthy, so the
    # check could never fire. Use isinstance to actually verify the single
    # part is a Placeholder.
    assert (type(presentable_content) == unicode or
            (len(message.parts) == 1 and
             isinstance(message.parts[0], tclib.Placeholder)))
    preserve_space = presentable_content != _WHITESPACES_REGEX.sub(
        u' ', presentable_content.strip())
    file.write('<msg')
    _WriteAttribute(file, 'desc', message.GetDescription())
    _WriteAttribute(file, 'id', message.GetId())
    _WriteAttribute(file, 'meaning', message.GetMeaning())
    if preserve_space:
        _WriteAttribute(file, 'xml:space', 'preserve')
    file.write('>')
    if not preserve_space:
        file.write('\n ')
    parts = message.GetContent()
    for part in parts:
        if isinstance(part, tclib.Placeholder):
            file.write('<ph')
            _WriteAttribute(file, 'name', part.GetPresentation())
            file.write('><ex>')
            file.write(_XmlEscape(part.GetExample()))
            file.write('</ex>')
            file.write(_XmlEscape(part.GetOriginal()))
            file.write('</ph>')
        else:
            file.write(_XmlEscape(part))
    if not preserve_space:
        file.write('\n')
    file.write('</msg>\n')
def WriteXmbFile(file, messages):
    """Writes the given grit.tclib.Message items to the specified open
    file-like object in the XMB format.
    """
    # The XML prolog and inline DTD are emitted verbatim; the messages
    # follow as <msg> children of <messagebundle>.
    file.write("""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE messagebundle [
<!ELEMENT messagebundle (msg)*>
<!ATTLIST messagebundle class CDATA #IMPLIED>
<!ELEMENT msg (#PCDATA|ph|source)*>
<!ATTLIST msg id CDATA #IMPLIED>
<!ATTLIST msg seq CDATA #IMPLIED>
<!ATTLIST msg name CDATA #IMPLIED>
<!ATTLIST msg desc CDATA #IMPLIED>
<!ATTLIST msg meaning CDATA #IMPLIED>
<!ATTLIST msg obsolete (obsolete) #IMPLIED>
<!ATTLIST msg xml:space (default|preserve) "default">
<!ATTLIST msg is_hidden CDATA #IMPLIED>
<!ELEMENT source (#PCDATA)>
<!ELEMENT ph (#PCDATA|ex)*>
<!ATTLIST ph name CDATA #REQUIRED>
<!ELEMENT ex (#PCDATA)>
]>
<messagebundle>
""")
    for message in messages:
        _WriteMessage(file, message)
    file.write('</messagebundle>')
class OutputXmb(interface.Tool):
    """Outputs all translateable messages in the .grd input file to an
    .xmb file, which is the format used to give source messages to
    Google's internal Translation Console tool. The format could easily
    be used for other systems.
    Usage: grit xmb [-i|-h] [-l LIMITFILE] OUTPUTPATH
    OUTPUTPATH is the path you want to output the .xmb file to.
    The -l option can be used to output only some of the resources to the .xmb file.
    LIMITFILE is the path to a file that is used to limit the items output to the
    xmb file. If the filename extension is .grd, the file must be a .grd file
    and the tool only output the contents of nodes from the input file that also
    exist in the limit file (as compared on the 'name' attribute). Otherwise it must
    contain a list of the IDs that output should be limited to, one ID per line, and
    the tool will only output nodes with 'name' attributes that match one of the
    IDs.
    The -i option causes 'grit xmb' to output an "IDs only" file instead of an XMB
    file. The "IDs only" file contains the message ID of each message that would
    normally be output to the XMB file, one message ID per line. It is designed for
    use with the 'grit transl2tc' tool's -l option.
    Other options:
    -D NAME[=VAL] Specify a C-preprocessor-like define NAME with optional
    value VAL (defaults to 1) which will be used to control
    conditional inclusion of resources.
    -E NAME=VALUE Set environment variable NAME to VALUE (within grit).
    """
    # The different output formats supported by this tool
    FORMAT_XMB = 0
    FORMAT_IDS_ONLY = 1
    def __init__(self, defines=None):
        # Default to full XMB output; -i switches to IDs-only in Run().
        super(OutputXmb, self).__init__()
        self.format = self.FORMAT_XMB
        self.defines = defines or {}
    def ShortDescription(self):
        # One-line description shown in the 'grit help' listing.
        return 'Exports all translateable messages into an XMB file.'
    def Run(self, opts, args):
        """Parse tool options, load the resource tree and write the output."""
        self.SetOptions(opts)
        limit_file = None
        limit_is_grd = False
        limit_file_dir = None
        own_opts, args = getopt.getopt(args, 'l:D:ih')
        for key, val in own_opts:
            if key == '-l':
                limit_file = open(val, 'r')
                limit_file_dir = util.dirname(val)
                if not len(limit_file_dir):
                    limit_file_dir = '.'
                limit_is_grd = os.path.splitext(val)[1] == '.grd'
            elif key == '-i':
                self.format = self.FORMAT_IDS_ONLY
            elif key == '-D':
                name, val = util.ParseDefine(val)
                self.defines[name] = val
            elif key == '-E':
                (env_name, env_value) = val.split('=', 1)
                os.environ[env_name] = env_value
        if not len(args) == 1:
            print ('grit xmb takes exactly one argument, the path to the XMB file '
                   'to output.')
            return 2
        xmb_path = args[0]
        res_tree = grd_reader.Parse(opts.input, debug=opts.extra_verbose)
        res_tree.SetOutputLanguage('en')
        res_tree.SetDefines(self.defines)
        res_tree.OnlyTheseTranslations([])
        res_tree.RunGatherers()
        with open(xmb_path, 'wb') as output_file:
            self.Process(
                res_tree, output_file, limit_file, limit_is_grd, limit_file_dir)
        if limit_file:
            limit_file.close()
        print "Wrote %s" % xmb_path
    def Process(self, res_tree, output_file, limit_file=None, limit_is_grd=False,
                dir=None):
        """Writes a document with the contents of res_tree into output_file,
        limiting output to the IDs specified in limit_file, which is a GRD file if
        limit_is_grd is true, otherwise a file with one ID per line.
        The format of the output document depends on this object's format attribute.
        It can be FORMAT_XMB or FORMAT_IDS_ONLY.
        The FORMAT_IDS_ONLY format causes this function to write just a list
        of the IDs of all messages that would have been added to the XMB file, one
        ID per line.
        The FORMAT_XMB format causes this function to output the (default) XMB
        format.
        Args:
        res_tree: base.Node()
        output_file: file open for writing
        limit_file: None or file open for reading
        limit_is_grd: True | False
        dir: Directory of the limit file
        """
        if limit_file:
            if limit_is_grd:
                # Collect the 'name' attributes present in the limit GRD.
                limit_list = []
                limit_tree = grd_reader.Parse(limit_file,
                                              dir=dir,
                                              debug=self.o.extra_verbose)
                for node in limit_tree:
                    if 'name' in node.attrs:
                        limit_list.append(node.attrs['name'])
            else:
                # Not a GRD file, so it's just a file with one ID per line
                limit_list = [item.strip() for item in limit_file.read().split('\n')]
        ids_already_done = {}
        messages = []
        for node in res_tree:
            if (limit_file and
                not ('name' in node.attrs and node.attrs['name'] in limit_list)):
                continue
            if not node.IsTranslateable():
                continue
            for clique in node.GetCliques():
                if not clique.IsTranslateable():
                    continue
                if not clique.GetMessage().GetRealContent():
                    continue
                # Some explanation is in order here. Note that we can have
                # many messages with the same ID.
                #
                # The way we work around this is to maintain a list of cliques
                # per message ID (in the UberClique) and select the "best" one
                # (the first one that has a description, or an arbitrary one
                # if there is no description) for inclusion in the XMB file.
                # The translations are all going to be the same for messages
                # with the same ID, although the way we replace placeholders
                # might be slightly different.
                id = clique.GetMessage().GetId()
                if id in ids_already_done:
                    continue
                ids_already_done[id] = 1
                message = node.UberClique().BestClique(id).GetMessage()
                messages += [message]
        # Ensure a stable order of messages, to help regression testing.
        messages.sort(key=lambda x:x.GetId())
        if self.format == self.FORMAT_IDS_ONLY:
            # We just print the list of IDs to the output file.
            for msg in messages:
                output_file.write(msg.GetId())
                output_file.write('\n')
        else:
            assert self.format == self.FORMAT_XMB
            WriteXmbFile(output_file, messages)
|
bsd-2-clause
|
maxalbert/ansible
|
v1/ansible/inventory/script.py
|
109
|
5971
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import os
import subprocess
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode
from ansible import utils
from ansible import errors
import sys
class InventoryScript(object):
    ''' Host inventory parser for ansible using external inventory scripts. '''
    def __init__(self, filename=C.DEFAULT_HOST_LIST):
        # Support inventory scripts that are not prefixed with some
        # path information but happen to be in the current working
        # directory when '.' is not in PATH.
        self.filename = os.path.abspath(filename)
        cmd = [ self.filename, "--list" ]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError, e:
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (stdout, stderr) = sp.communicate()
        if sp.returncode != 0:
            raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
        self.data = stdout
        # see comment about _meta below
        self.host_vars_from_top = None
        self.groups = self._parse(stderr)
    def _parse(self, err):
        # Parse the JSON emitted by the script's --list call into Group
        # objects; ``err`` (the script's stderr) is echoed on failure.
        all_hosts = {}
        # not passing from_remote because data from CMDB is trusted
        self.raw = utils.parse_json(self.data)
        self.raw = json_dict_bytes_to_unicode(self.raw)
        all = Group('all')
        groups = dict(all=all)
        group = None
        if 'failed' in self.raw:
            sys.stderr.write(err + "\n")
            raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
        for (group_name, data) in self.raw.items():
            # in Ansible 1.3 and later, a "_meta" subelement may contain
            # a variable "hostvars" which contains a hash for each host
            # if this "hostvars" exists at all then do not call --host for each
            # host. This is for efficiency and scripts should still return data
            # if called with --host for backwards compat with 1.2 and earlier.
            if group_name == '_meta':
                if 'hostvars' in data:
                    self.host_vars_from_top = data['hostvars']
                    continue
            if group_name != all.name:
                group = groups[group_name] = Group(group_name)
            else:
                group = all
            host = None
            if not isinstance(data, dict):
                data = {'hosts': data}
            # if data has none of those subkeys, it is the simplified syntax:
            # a group whose dict holds only host variables
            elif not any(k in data for k in ('hosts','vars','children')):
                data = {'hosts': [group_name], 'vars': data}
            if 'hosts' in data:
                if not isinstance(data['hosts'], list):
                    raise errors.AnsibleError("You defined a group \"%s\" with bad "
                        "data for the host list:\n %s" % (group_name, data))
                for hostname in data['hosts']:
                    if not hostname in all_hosts:
                        all_hosts[hostname] = Host(hostname)
                    host = all_hosts[hostname]
                    group.add_host(host)
            if 'vars' in data:
                if not isinstance(data['vars'], dict):
                    raise errors.AnsibleError("You defined a group \"%s\" with bad "
                        "data for variables:\n %s" % (group_name, data))
                for k, v in data['vars'].iteritems():
                    if group.name == all.name:
                        all.set_variable(k, v)
                    else:
                        group.set_variable(k, v)
        # Separate loop to ensure all groups are defined
        for (group_name, data) in self.raw.items():
            if group_name == '_meta':
                continue
            if isinstance(data, dict) and 'children' in data:
                for child_name in data['children']:
                    if child_name in groups:
                        groups[group_name].add_child_group(groups[child_name])
        for group in groups.values():
            if group.depth == 0 and group.name != 'all':
                all.add_child_group(group)
        return groups
    def get_host_variables(self, host):
        """ Runs <script> --host <hostname> to determine additional host variables """
        # When --list already supplied _meta.hostvars, use it and skip the
        # per-host subprocess call entirely.
        if self.host_vars_from_top is not None:
            got = self.host_vars_from_top.get(host.name, {})
            return got
        cmd = [self.filename, "--host", host.name]
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError, e:
            raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
        (out, err) = sp.communicate()
        if out.strip() == '':
            return dict()
        try:
            return json_dict_bytes_to_unicode(utils.parse_json(out))
        except ValueError:
            raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
|
gpl-3.0
|
dnjohnstone/hyperspy
|
hyperspy/defaults_parser.py
|
1
|
10780
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os.path
import configparser
import logging
import traits.api as t
from matplotlib.cm import cmap_d
from hyperspy.misc.config_dir import config_path, os_name, data_path
from hyperspy.misc.ipython_tools import turn_logging_on, turn_logging_off
from hyperspy.ui_registry import add_gui_method
# Path of the user's HyperSpy configuration file.
defaults_file = os.path.join(config_path, 'hyperspyrc')
# Bundled EELS generalized oscillator strength (GOS) tables archive.
eels_gos_files = os.path.join(data_path, 'EELS_GOS.tar.gz')
_logger = logging.getLogger(__name__)
def guess_gos_path():
    """Guess the directory holding the EELS GOS tables.

    On Windows, prefer the tables shipped with a Digital Micrograph
    installation (checking both Program Files locations); otherwise fall
    back to the ``EELS_GOS`` folder inside the HyperSpy config directory.
    """
    if os_name != 'windows':
        return os.path.join(config_path, 'EELS_GOS')
    # If DM is installed, use the GOS tables from the default
    # installation location in windows.
    dm_subdir = 'Gatan\\DigitalMicrograph\\EELS Reference Data\\H-S GOS Tables'
    candidate = os.path.join(os.environ['PROGRAMFILES'], dm_subdir)
    if not os.path.isdir(candidate) and 'PROGRAMFILES(X86)' in os.environ:
        # Try the 32-bit Program Files tree next.
        candidate = os.path.join(os.environ['PROGRAMFILES(X86)'], dm_subdir)
        if not os.path.isdir(candidate):
            # Else, use the default location in the .hyperspy folder.
            candidate = os.path.join(config_path, 'EELS_GOS')
    return candidate
if os.path.isfile(defaults_file):
    # Remove config file if obsolete
    with open(defaults_file) as f:
        # The legacy (pre-configparser) format is recognised by the marker
        # text 'Not really' on its first line.
        if 'Not really' in f.readline():
            # It is the old config file
            defaults_file_exists = False
        else:
            defaults_file_exists = True
    if not defaults_file_exists:
        # It actually exists, but is an obsolete unsupported version of it
        # so we delete it.
        _logger.info('Removing obsoleted config file')
        os.remove(defaults_file)
else:
    defaults_file_exists = False
# Defaults template definition starts#####################################
# This "section" is all that has to be modified to add or remove sections and
# options from the defaults
# Due to https://github.com/enthought/traitsui/issues/23 the desc text as
# displayed in the tooltip get "Specifies" prepended.
class GeneralConfig(t.HasTraits):
    """Traited preferences for general HyperSpy behaviour."""
    logger_on = t.CBool(
        False,
        label='Automatic logging (requires IPython)',
        desc='If enabled, HyperSpy will store a log in the current directory '
        'of all the commands typed')
    show_progressbar = t.CBool(
        True,
        label='Show progress bar',
        desc='If enabled, show a progress bar when available')
    dtb_expand_structures = t.CBool(
        True,
        label='Expand structures in DictionaryTreeBrowser',
        desc='If enabled, when printing DictionaryTreeBrowser (e.g. '
        'metadata), long lists and tuples will be expanded and any '
        'dictionaries in them will be printed similar to '
        'DictionaryTreeBrowser, but with double lines')
    logging_level = t.Enum(['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', ],
                           desc='the log level of all hyperspy modules.')
    parallel = t.CBool(
        True,
        desc='Use parallel threads for computations by default.'
    )
    nb_progressbar = t.CBool(
        True,
        desc='Attempt to use ipywidgets progressbar'
    )
    def _logger_on_changed(self, old, new):
        # Traits static change handler: toggle IPython command logging when
        # the ``logger_on`` preference flips.
        if new is True:
            turn_logging_on()
        else:
            turn_logging_off()
class EELSConfig(t.HasTraits):
    """Traited preferences for the EELS functionality."""
    eels_gos_files_path = t.Directory(
        guess_gos_path(),
        label='GOS directory',
        desc='The GOS files are required to create the EELS edge components')
class GUIs(t.HasTraits):
    """Traited preferences selecting which optional GUI toolkits to use."""
    enable_ipywidgets_gui = t.CBool(
        True,
        desc="Display ipywidgets in the Jupyter Notebook. "
        "Requires installing hyperspy_gui_ipywidgets.")
    enable_traitsui_gui = t.CBool(
        True,
        desc="Display traitsui user interface elements. "
        "Requires installing hyperspy_gui_traitsui.")
    warn_if_guis_are_missing = t.CBool(
        True,
        desc="Display warnings, if hyperspy_gui_ipywidgets or hyperspy_gui_traitsui are missing.")
class PlotConfig(t.HasTraits):
    """Traited preferences for plotting (colormaps, navigation keys)."""
    saturated_pixels = t.CFloat(0.05,
                                label='Saturated pixels',
                                desc='Set the default saturated pixels value '
                                'for plotting images.'
                                )
    cmap_navigator = t.Enum(list(cmap_d.keys()),
                            label='Color map navigator',
                            desc='Set the default color map for the navigator.',
                            )
    cmap_signal = t.Enum(list(cmap_d.keys()),
                         label='Color map signal',
                         desc='Set the default color map for the signal plot.',
                         )
    dims_024_increase = t.Str('right',
                              label='Navigate right'
                              )
    dims_024_decrease = t.Str('left',
                              label='Navigate left',
                              )
    dims_135_increase = t.Str('down',
                              label='Navigate down',
                              )
    dims_135_decrease = t.Str('up',
                              label='Navigate up',
                              )
    modifier_dims_01 = t.Enum(['ctrl', 'alt', 'shift', 'ctrl+alt', 'ctrl+shift', 'alt+shift',
                               'ctrl+alt+shift'], label='Modifier key for 1st and 2nd dimensions') # 0 elem is default
    modifier_dims_23 = t.Enum(['shift', 'alt', 'ctrl', 'ctrl+alt', 'ctrl+shift', 'alt+shift',
                               'ctrl+alt+shift'], label='Modifier key for 3rd and 4th dimensions') # 0 elem is default
    modifier_dims_45 = t.Enum(['alt', 'ctrl', 'shift', 'ctrl+alt', 'ctrl+shift', 'alt+shift',
                               'ctrl+alt+shift'], label='Modifier key for 5th and 6th dimensions') # 0 elem is default
class EDSConfig(t.HasTraits):
    """Preferences section for EDS analysis (detector geometry defaults)."""
    eds_mn_ka = t.CFloat(130.,
                         label='Energy resolution at Mn Ka (eV)',
                         desc='default value for FWHM of the Mn Ka peak in eV,'
                         'This value is used as a first approximation'
                         'of the energy resolution of the detector.')
    eds_tilt_stage = t.CFloat(
        0.,
        label='Stage tilt',
        desc='default value for the stage tilt in degree.')
    eds_detector_azimuth = t.CFloat(
        0.,
        label='Azimuth angle',
        desc='default value for the azimuth angle in degree. If the azimuth'
        ' is zero, the detector is perpendicular to the tilt axis.')
    eds_detector_elevation = t.CFloat(
        35.,
        label='Elevation angle',
        desc='default value for the elevation angle in degree.')
# Map of preference-section name -> traited configuration instance holding
# the hard-coded defaults. This is the in-memory "schema" that the on-disk
# config file is validated against below.
template = {
    'General': GeneralConfig(),
    'GUIs': GUIs(),
    'EELS': EELSConfig(),
    'EDS': EDSConfig(),
    'Plot': PlotConfig(),
}
# Set the enums defaults
template['General'].logging_level = 'WARNING'
template['Plot'].cmap_navigator = 'gray'
template['Plot'].cmap_signal = 'gray'
# Defaults template definition ends ######################################
def template2config(template, config):
    """Copy every traited section of *template* into *config*.

    Each section name becomes a ConfigParser section; every trait is stored
    under its own name with its value converted to a string.
    """
    for section_name, traited in template.items():
        config.add_section(section_name)
        traits = traited.trait_get()
        for trait_name in traits:
            config.set(section_name, trait_name, str(traits[trait_name]))
def config2template(template, config):
    """Load the values stored in *config* back into *template*'s sections.

    ConfigParser keeps everything as strings, so the literals 'True'/'False'
    are converted back to booleans and the 'fine_structure_smoothing' option
    back to a float before being applied with ``trait_set``.
    """
    _bool_literals = {'True': True, 'False': False}
    for section, traited_class in template.items():
        settings = {}
        for name, value in config.items(section):
            value = _bool_literals.get(value, value)
            if name == 'fine_structure_smoothing':
                value = float(value)
            settings[name] = value
        traited_class.trait_set(True, **settings)
def dictionary_from_template(template):
    """Return ``{section_name: trait-values-dict}`` for every section."""
    return {section: traited.get() for section, traited in template.items()}
# Build a config pre-populated with the defaults, then overlay any values
# found in the user's defaults file.
config = configparser.ConfigParser(allow_no_value=True)
template2config(template, config)
rewrite = False
if defaults_file_exists:
    # Parse the config file. It only copies to config the options that are
    # already defined. If the file contains any option that was not already
    # defined the config file is rewritten because it is obsolete.
    config2 = configparser.ConfigParser(allow_no_value=True)
    config2.read(defaults_file)
    for section in config2.sections():
        if config.has_section(section):
            for option in config2.options(section):
                if config.has_option(section, option):
                    config.set(section, option, config2.get(section, option))
                else:
                    # Unknown option: the on-disk file is stale.
                    rewrite = True
        else:
            # Unknown section: the on-disk file is stale.
            rewrite = True
if not defaults_file_exists or rewrite is True:
    _logger.info('Writing the config file')
    with open(defaults_file, "w") as df:
        config.write(df)
# Use the traited classes to cast the content of the ConfigParser
config2template(template, config)
@add_gui_method(toolkey="hyperspy.Preferences")
class Preferences(t.HasTraits):
    """Container exposing all user-configurable preference sections."""
    EELS = t.Instance(EELSConfig)
    EDS = t.Instance(EDSConfig)
    General = t.Instance(GeneralConfig)
    GUIs = t.Instance(GUIs)
    Plot = t.Instance(PlotConfig)

    def save(self):
        """Serialize the current preferences template to the defaults file."""
        config = configparser.ConfigParser(allow_no_value=True)
        template2config(template, config)
        # Use a context manager so the file handle is closed (and data
        # flushed) even if config.write() raises; the original leaked the
        # handle via config.write(open(defaults_file, 'w')).
        with open(defaults_file, 'w') as f:
            config.write(f)
# The module-level singleton Preferences instance, wired to the default
# template sections populated above.
preferences = Preferences(
    EELS=template['EELS'],
    EDS=template['EDS'],
    General=template['General'],
    GUIs=template['GUIs'],
    Plot=template['Plot'],
)
# Honour the saved logging preference at import time.
if preferences.General.logger_on:
    turn_logging_on(verbose=0)
def file_version(fname):
    """Return the raw right-hand side of the first ``__version__`` line.

    The value is returned exactly as written (quotes included); '0' is
    returned when no ``__version__`` assignment is found.
    """
    with open(fname, 'r') as fh:
        for line in fh:
            if '__version__' in line:
                return line[line.find('=') + 1:].strip()
    return '0'
|
gpl-3.0
|
mozilla/olympia
|
src/olympia/api/tests/test_pagination.py
|
4
|
4205
|
from unittest import mock
from rest_framework import generics, serializers, status
from rest_framework.test import APIRequestFactory
from olympia.amo.tests import TestCase
from olympia.api.pagination import (
CustomPageNumberPagination,
ESPageNumberPagination,
OneOrZeroPageNumberPagination,
)
class PassThroughSerializer(serializers.BaseSerializer):
    # Minimal serializer that returns queryset items unchanged, so the
    # pagination tests below can assert on raw integers rather than models.
    def to_representation(self, item):
        return item
class TestCustomPageNumberPagination(TestCase):
    """Paginate a plain list of 100 ints and assert the exact metadata."""
    def setUp(self):
        self.factory = APIRequestFactory()
        self.view = generics.ListAPIView.as_view(
            serializer_class=PassThroughSerializer,
            queryset=list(range(1, 101)),
            pagination_class=CustomPageNumberPagination,
        )
    def test_metadata_with_page_size(self):
        """Explicit page_size/page params are reflected in the metadata."""
        request = self.factory.get('/', {'page_size': 10, 'page': 2})
        response = self.view(request)
        assert response.status_code == status.HTTP_200_OK
        assert response.data == {
            'page_size': 10,
            'page_count': 10,
            'results': list(range(11, 21)),
            'previous': 'http://testserver/?page_size=10',
            'next': 'http://testserver/?page=3&page_size=10',
            'count': 100,
        }
    def test_metadata_with_default_page_size(self):
        """Without params the paginator defaults to page 1, page_size 25."""
        request = self.factory.get('/')
        response = self.view(request)
        assert response.status_code == status.HTTP_200_OK
        assert response.data == {
            'page_size': 25,
            'page_count': 4,
            'results': list(range(1, 26)),
            'previous': None,
            'next': 'http://testserver/?page=2',
            'count': 100,
        }
class TestESPageNumberPagination(TestCustomPageNumberPagination):
    # Inherits the plain-list tests and adds the Elasticsearch-specific
    # behaviour: page links are capped at ES's max_result_window.
    def test_next_page_never_exeeds_max_result_window(self):
        """'next' must become None once page * page_size hits 25000 hits."""
        # Mock an ES queryset whose slice reports 30000 total hits.
        mocked_qs = mock.MagicMock()
        mocked_qs.__getitem__().execute().hits.total = 30000
        view = generics.ListAPIView.as_view(
            serializer_class=PassThroughSerializer,
            queryset=mocked_qs,
            pagination_class=ESPageNumberPagination,
        )
        request = self.factory.get('/', {'page_size': 5, 'page': 4999})
        response = view(request)
        assert response.data == {
            'page_size': 5,
            'page_count': 5000,
            'results': mock.ANY,
            'previous': 'http://testserver/?page=4998&page_size=5',
            'next': 'http://testserver/?page=5000&page_size=5',
            'count': 30000,
        }
        request = self.factory.get('/', {'page_size': 5, 'page': 5000})
        response = view(request)
        assert response.data == {
            'page_size': 5,
            'page_count': 5000,
            'results': mock.ANY,
            'previous': 'http://testserver/?page=4999&page_size=5',
            'next': None,
            # We don't lie about the total count
            'count': 30000,
        }
class TestOneOrZeroPageNumberPagination(TestCase):
    """The one-or-zero paginator always returns at most a single result."""
    def setUp(self):
        self.factory = APIRequestFactory()
        self.view = generics.ListAPIView.as_view(
            serializer_class=PassThroughSerializer,
            queryset=list(range(1, 101)),
            pagination_class=OneOrZeroPageNumberPagination,
        )
    def test_response(self):
        # page size and page should be ignored.
        request = self.factory.get('/', {'page_size': 10, 'page': 2})
        response = self.view(request)
        assert response.data == {
            'page_size': 1,
            'page_count': 1,
            'results': list(range(1, 2)),
            'previous': None,
            'next': None,
            'count': 1,
        }
    def test_response_with_empty_queryset(self):
        """An empty queryset yields count 0 but still a single (empty) page."""
        self.view = generics.ListAPIView.as_view(
            serializer_class=PassThroughSerializer,
            queryset=[],
            pagination_class=OneOrZeroPageNumberPagination,
        )
        request = self.factory.get('/')
        response = self.view(request)
        assert response.data == {
            'page_size': 1,
            'page_count': 1,
            'results': [],
            'previous': None,
            'next': None,
            'count': 0,
        }
|
bsd-3-clause
|
alexteodor/odoo
|
addons/base_report_designer/openerp_sxw2rml/openerp_sxw2rml.py
|
301
|
14179
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c):
#
# 2005 pyopenoffice.py Martin Simon (http://www.bezirksreiter.de)
# 2005 Fabien Pinckaers, TINY SPRL. (http://tiny.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#!/usr/bin/python
"""
OpenERP SXW2RML - The OpenERP's report engine
OpenERP SXW2RML is part of the OpenERP Report Project.
OpenERP Report is a module that allows you to render high quality PDF document
from an OpenOffice template (.sxw) and any relational database.
"""
__version__ = '0.9'
import re
import string
import os
import zipfile
import xml.dom.minidom
from reportlab.lib.units import toLength
import base64
import copy
class DomApiGeneral:
    """General DOM API utilities.

    NOTE(review): this is legacy Python 2 code (``unicode`` below, and
    ``<>`` / ``has_key`` elsewhere in the file); only comments are added.
    """
    def __init__(self, content_string="", file=""):
        self.content_string = content_string
        # Matches a numeric value followed by a supported length unit.
        self.re_digits = re.compile(r"(.*?\d)(pt|cm|mm|inch|in)")
    def _unitTuple(self, string):
        """Split values and units to a tuple."""
        temp = self.re_digits.findall(string)
        if not temp:
            # No recognised unit: return the raw string with an empty unit.
            return (string,"")
        else:
            return (temp[0])
    def stringPercentToFloat(self, string):
        # "115%" -> 1.15
        temp = string.replace("""%""","")
        return float(temp)/100
    def findChildrenByName(self, parent, name, attr_dict=None):
        """Helper functions. Does not work recursively.
        Optional: also test for certain attribute/value pairs."""
        if attr_dict is None:
            attr_dict = {}
        children = []
        for c in parent.childNodes:
            if c.nodeType == c.ELEMENT_NODE and c.nodeName == name:
                children.append(c)
        if attr_dict == {}:
            return children
        else:
            return self._selectForAttributes(nodelist=children,attr_dict=attr_dict)
    def _selectForAttributes(self, nodelist, attr_dict):
        """Helper function: keep only nodes whose attributes all match."""
        selected_nodes = []
        for n in nodelist:
            check = 1
            for a in attr_dict.keys():
                if n.getAttribute(a) != attr_dict[a]:
                    # at least one incorrect attribute value?
                    check = 0
            if check:
                selected_nodes.append(n)
        return selected_nodes
    def _stringToTuple(self, s):
        """Helper function: parse "x,y" into an int pair, or None on error."""
        try:
            temp = string.split(s,",")
            return int(temp[0]),int(temp[1])
        except:
            return None
    def _tupleToString(self, t):
        # Inverse of _stringToTuple; returns None when t is not indexable.
        try:
            return self.openOfficeStringUtf8("%s,%s" % (t[0],t[1]))
        except:
            return None
    def _lengthToFloat(self, value):
        """Convert a length such as "2.5cm" to Reportlab points (rounded).

        Non-length strings are returned unchanged.
        """
        v = value
        if not self.re_digits.search(v):
            return v
        try:
            if v[-4:] == "inch":
                # OO files use "inch" instead of "in" in Reportlab units
                v = v[:-2]
        except:
            pass
        try:
            c = round(toLength(v))
            return c
        except:
            return v
    def openOfficeStringUtf8(self, string):
        # Python 2 only: unicode passes through re-encoded; byte strings are
        # assumed to be cp1252 and converted to UTF-8.
        if type(string) == unicode:
            return string.encode("utf-8")
        tempstring = unicode(string,"cp1252").encode("utf-8")
        return tempstring
class DomApi(DomApiGeneral):
    """This class provides a DOM-API for XML-Files from an SXW-Archive."""
    def __init__(self, xml_content, xml_styles):
        DomApiGeneral.__init__(self)
        self.content_dom = xml.dom.minidom.parseString(xml_content)
        self.styles_dom = xml.dom.minidom.parseString(xml_styles)
        body = self.content_dom.getElementsByTagName("office:body")
        self.body = body and body[0]
        # TODO:
        self.style_dict = {}
        self.style_properties_dict = {}
        # ******** always use the following order:
        self.buildStyleDict()
        self.buildStylePropertiesDict()
        # OOo 1.x uses style:page-master, ODF uses style:page-layout.
        # NOTE: `<>` is the Python 2 inequality operator.
        if self.styles_dom.getElementsByTagName("style:page-master").__len__()<>0:
            self.page_master = self.styles_dom.getElementsByTagName("style:page-master")[0]
        if self.styles_dom.getElementsByTagName("style:page-layout").__len__()<>0 :
            self.page_master = self.styles_dom.getElementsByTagName("style:page-layout")[0]
        self.document = self.content_dom.getElementsByTagName("office:document-content")[0]
    def buildStylePropertiesDict(self):
        # Resolve the (inherited) properties for every known style name.
        for s in self.style_dict.keys():
            self.style_properties_dict[s] = self.getStylePropertiesDict(s)
    def updateWithPercents(self, dict, updatedict):
        """Sometimes you find values like "115%" in the style hierarchy."""
        if not updatedict:
            # no style hierarchies for this style? =>
            return
        new_updatedict = copy.copy(updatedict)
        for u in new_updatedict.keys():
            try:
                if new_updatedict[u].find("""%""") != -1 and dict.has_key(u):
                    number = float(self.re_digits.search(dict[u]).group(1))
                    unit = self.re_digits.search(dict[u]).group(2)
                    new_number = self.stringPercentToFloat(new_updatedict[u]) * number
                    if unit == "pt":
                        new_number = int(new_number)
                        # no floats allowed for "pt"
                        # OOo just takes the int, does not round (try it out!)
                    new_updatedict[u] = "%s%s" % (new_number,unit)
                else:
                    dict[u] = new_updatedict[u]
            except:
                dict[u] = new_updatedict[u]
        dict.update(new_updatedict)
    def normalizeStyleProperties(self):
        """Transfer all style:style-properties attributes from the
        self.style_properties_hierarchical dict to the automatic-styles
        from content.xml. Use this function to preprocess content.xml for
        XSLT transformations etc.Do not try to implement this function
        with XSlT - believe me, it's a terrible task..."""
        styles_styles = self.styles_dom.getElementsByTagName("style:style")
        automatic_styles = self.content_dom.getElementsByTagName("office:automatic-styles")[0]
        for s in styles_styles:
            automatic_styles.appendChild(s.cloneNode(deep=1))
        content_styles = self.content_dom.getElementsByTagName("style:style")
        # these are the content_styles with styles_styles added!!!
        for s in content_styles:
            c = self.findChildrenByName(s,"style:properties")
            if c == []:
                # some derived automatic styles do not have "style:properties":
                temp = self.content_dom.createElement("style:properties")
                s.appendChild(temp)
                c = self.findChildrenByName(s,"style:properties")
            c = c[0]
            dict = self.style_properties_dict[(s.getAttribute("style:name")).encode("utf-8")] or {}
            for attribute in dict.keys():
                c.setAttribute(self.openOfficeStringUtf8(attribute),self.openOfficeStringUtf8(dict[attribute]))
    def transferStylesXml(self):
        """Transfer certain sub-trees from styles.xml to the normalized content.xml
        (see above). It is not necessary to do this - for example - with paragraph styles.
        the "normalized" style properties contain all information needed for
        further processing."""
        # TODO: What about table styles etc.?
        outline_styles = self.styles_dom.getElementsByTagName("text:outline-style")
        t = self.content_dom.createElement("transferredfromstylesxml")
        self.document.insertBefore(t,self.body)
        t_new = self.body.previousSibling
        try:
            page_master = self.page_master
            t_new.appendChild(page_master.cloneNode(deep=1))
            t_new.appendChild(outline_styles[0].cloneNode(deep=1))
        except:
            # page_master/outline styles may be absent; best-effort transfer.
            pass
    def normalizeLength(self):
        """Normalize all lengthes to floats (i.e: 1 inch = 72).
        Always use this after "normalizeContent" and "transferStyles"!"""
        # TODO: The complex attributes of table cell styles are not transferred yet.
        #all_styles = self.content_dom.getElementsByTagName("style:properties")
        #all_styles += self.content_dom.getElementsByTagName("draw:image")
        all_styles = self.content_dom.getElementsByTagName("*")
        for s in all_styles:
            for x in s._attrs.keys():
                v = s.getAttribute(x)
                s.setAttribute(x,"%s" % self._lengthToFloat(v))
                # convert float to string first!
    def normalizeTableColumns(self):
        """Handle this strange table:number-columns-repeated attribute."""
        columns = self.content_dom.getElementsByTagName("table:table-column")
        for c in columns:
            if c.hasAttribute("table:number-columns-repeated"):
                number = int(c.getAttribute("table:number-columns-repeated"))
                c.removeAttribute("table:number-columns-repeated")
                # Replace the repeat attribute with explicit column clones.
                for i in range(number-1):
                    (c.parentNode).insertBefore(c.cloneNode(deep=1),c)
    def buildStyleDict(self):
        """Store all style:style-nodes from content.xml and styles.xml in self.style_dict.
        Caution: in this dict the nodes from two dom apis are merged!"""
        for st in (self.styles_dom,self.content_dom):
            for s in st.getElementsByTagName("style:style"):
                name = s.getAttribute("style:name").encode("utf-8")
                self.style_dict[name] = s
        return True
    def toxml(self):
        return self.content_dom.toxml(encoding="utf-8")
    def getStylePropertiesDict(self, style_name):
        """Recursively merge properties from the parent-style chain."""
        res = {}
        if self.style_dict[style_name].hasAttribute("style:parent-style-name"):
            parent = self.style_dict[style_name].getAttribute("style:parent-style-name").encode("utf-8")
            res = self.getStylePropertiesDict(parent)
        children = self.style_dict[style_name].childNodes
        for c in children:
            if c.nodeType == c.ELEMENT_NODE and c.nodeName.find("properties")>0 :
                for attr in c._attrs.keys():
                    res[attr] = c.getAttribute(attr).encode("utf-8")
        return res
class PyOpenOffice(object):
    """This is the main class which provides all functionality."""
    def __init__(self, path='.', save_pict=False):
        self.path = path
        self.save_pict = save_pict
        # Maps picture name (archive path minus "Pictures/") -> raw bytes.
        self.images = {}
    def oo_read(self, fname):
        """Read content.xml and styles.xml out of the .sxw/.odt zip archive.

        Side effect: collects embedded pictures into self.images and, when
        save_pict is set, also writes them under self.path.
        """
        z = zipfile.ZipFile(fname,"r")
        content = z.read('content.xml')
        style = z.read('styles.xml')
        all = z.namelist()
        for a in all:
            if a[:9]=='Pictures/' and len(a)>10:
                pic_content = z.read(a)
                self.images[a[9:]] = pic_content
                if self.save_pict:
                    f=open(os.path.join(self.path, os.path.basename(a)),"wb")
                    f.write(pic_content)
                    f.close()
        z.close()
        return content,style
    def oo_replace(self, content):
        """Apply regex clean-ups to the raw content.xml string."""
        regex = [
            (r"<para[^>]*/>", ""),
            (r"<para(.*)>(.*?)<text:line-break[^>]*/>", "<para$1>$2</para><para$1>"),
        ]
        for key,val in regex:
            content = re.sub(key, val, content)
        return content
    def unpackNormalize(self, sourcefile):
        """Unzip *sourcefile* and return the normalized content.xml text."""
        c,s = self.oo_read(sourcefile)
        c = self.oo_replace(c)
        dom = DomApi(c,s)
        dom.normalizeStyleProperties()
        dom.transferStylesXml()
        dom.normalizeLength()
        dom.normalizeTableColumns()
        new_c = dom.toxml()
        return new_c
def sxw2rml(sxw_file, xsl, output='.', save_pict=False):
    """Convert an OpenOffice .sxw/.odt file to RML via the *xsl* stylesheet.

    Returns the RML document as a string (or the raw lxml result tree when
    string conversion fails). Embedded images are appended base64-encoded
    under /document/stylesheet/images.
    """
    from lxml import etree
    from StringIO import StringIO  # Python 2 module
    tool = PyOpenOffice(output, save_pict = save_pict)
    res = tool.unpackNormalize(sxw_file)
    f = StringIO(xsl)
    styledoc = etree.parse(f)
    style = etree.XSLT(styledoc)
    f = StringIO(res)
    doc = etree.parse(f)
    result = style(doc)
    root = etree.XPathEvaluator(result)("/document/stylesheet")
    if root:
        root=root[0]
        images = etree.Element("images")
        for img in tool.images:
            node = etree.Element('image', name=img)
            node.text = base64.encodestring(tool.images[img])
            images.append(node)
        root.append(images)
    try:
        xml = str(result)
        return xml
    except:
        return result
if __name__ == "__main__":
    import optparse
    parser = optparse.OptionParser(
        version="Odoo Report v%s" % __version__,
        usage = 'openerp_sxw2rml.py [options] file.sxw')
    parser.add_option("-v", "--verbose", default=False, dest="verbose", help="enable basic debugging")
    parser.add_option("-o", "--output", dest="output", default='.', help="directory of image output")
    (opt, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("Incorrect number of arguments.")
    import sys
    fname = sys.argv[1]
    f = fname
    # Choose the stylesheet matching the archive type
    # (OOo 1.x .sxw vs OASIS OpenDocument .odt).
    xsl_file = 'normalized_oo2rml.xsl'
    z = zipfile.ZipFile(fname,"r")
    mimetype = z.read('mimetype')
    if mimetype.split('/')[-1] == 'vnd.oasis.opendocument.text' :
        xsl_file = 'normalized_odt2rml.xsl'
    # NOTE: file() and the bare print statement below are Python 2 only.
    xsl = file(os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]), xsl_file)).read()
    result = sxw2rml(f, xsl, output=opt.output, save_pict=False)
    print result
|
agpl-3.0
|
apiaryio/snowcrash
|
tools/gyp/pylib/gyp/MSVSProject.py
|
2736
|
6387
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
  """A single Visual Studio tool entry."""

  def __init__(self, name, attrs=None):
    """Initializes the tool.

    Args:
      name: Tool name.
      attrs: Dict of tool attributes; may be None.
    """
    # Reuse the caller's dict when given (the original behaviour) and stamp
    # the tool name into it.
    self._attrs = attrs if attrs else {}
    self._attrs['Name'] = name

  def _GetSpecification(self):
    """Creates an element for the tool.

    Returns:
      A new xml.dom.Element for the tool.
    """
    return ['Tool', self._attrs]
class Filter(object):
  """Visual Studio filter - that is, a virtual folder."""

  def __init__(self, name, contents=None):
    """Initializes the folder.

    Args:
      name: Filter (folder) name.
      contents: List of filenames and/or Filter objects contained.
    """
    self.name = name
    # Copy so later mutation of the caller's list is not reflected here.
    self.contents = [] if contents is None else list(contents)
#------------------------------------------------------------------------------
class Writer(object):
  """Visual Studio XML project writer."""

  def __init__(self, project_path, version, name, guid=None, platforms=None):
    """Initializes the project.

    Args:
      project_path: Path to the project file.
      version: Format version to emit.
      name: Name of the project.
      guid: GUID to use for project, if not None.
      platforms: Array of string, the supported platforms. If null, ['Win32']
    """
    self.project_path = project_path
    self.version = version
    self.name = name
    self.guid = guid

    # Default to Win32 for platforms.
    if not platforms:
      platforms = ['Win32']

    # Initialize the specifications of the various sections.
    # Each section is a nested-list "easy_xml" specification that is mutated
    # in place by the Add* methods below and emitted by WriteIfChanged.
    self.platform_section = ['Platforms']
    for platform in platforms:
      self.platform_section.append(['Platform', {'Name': platform}])
    self.tool_files_section = ['ToolFiles']
    self.configurations_section = ['Configurations']
    self.files_section = ['Files']

    # Keep a dict keyed on filename to speed up access.
    self.files_dict = dict()

  def AddToolFile(self, path):
    """Adds a tool file to the project.

    Args:
      path: Relative path from project to tool file.
    """
    self.tool_files_section.append(['ToolFile', {'RelativePath': path}])

  def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
    """Returns the specification for a configuration.

    Args:
      config_type: Type of configuration node.
      config_name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    Returns:
      The nested-list specification for the configuration node.
    """
    # Handle defaults
    if not attrs:
      attrs = {}
    if not tools:
      tools = []

    # Add configuration node and its attributes
    node_attrs = attrs.copy()
    node_attrs['Name'] = config_name
    specification = [config_type, node_attrs]

    # Add tool nodes and their attributes
    if tools:
      for t in tools:
        if isinstance(t, Tool):
          specification.append(t._GetSpecification())
        else:
          # Bare strings are promoted to attribute-less Tool entries.
          specification.append(Tool(t)._GetSpecification())
    return specification

  def AddConfig(self, name, attrs=None, tools=None):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.
    """
    spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
    self.configurations_section.append(spec)

  def _AddFilesToNode(self, parent, files):
    """Adds files and/or filters to the parent node.

    Args:
      parent: Destination node
      files: A list of Filter objects and/or relative paths to files.

    Will call itself recursively, if the files list contains Filter objects.
    """
    for f in files:
      if isinstance(f, Filter):
        node = ['Filter', {'Name': f.name}]
        self._AddFilesToNode(node, f.contents)
      else:
        node = ['File', {'RelativePath': f}]
        self.files_dict[f] = node
      parent.append(node)

  def AddFiles(self, files):
    """Adds files to the project.

    Args:
      files: A list of Filter objects and/or relative paths to files.

    This makes a copy of the file/filter tree at the time of this call. If you
    later add files to a Filter object which was passed into a previous call
    to AddFiles(), it will not be reflected in this project.
    """
    self._AddFilesToNode(self.files_section, files)
    # TODO(rspangler) This also doesn't handle adding files to an existing
    # filter. That is, it doesn't merge the trees.

  def AddFileConfig(self, path, config, attrs=None, tools=None):
    """Adds a configuration to a file.

    Args:
      path: Relative path to the file.
      config: Name of configuration to add.
      attrs: Dict of configuration attributes; may be None.
      tools: List of tools (strings or Tool objects); may be None.

    Raises:
      ValueError: Relative path does not match any file added via AddFiles().
    """
    # Find the file node with the right relative path
    parent = self.files_dict.get(path)
    if not parent:
      raise ValueError('AddFileConfig: file "%s" not in project.' % path)

    # Add the config to the file node
    spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
                                         tools)
    parent.append(spec)

  def WriteIfChanged(self):
    """Writes the project file."""
    # First create XML content definition
    content = [
        'VisualStudioProject',
        {'ProjectType': 'Visual C++',
         'Version': self.version.ProjectVersion(),
         'Name': self.name,
         'ProjectGUID': self.guid,
         'RootNamespace': self.name,
         'Keyword': 'Win32Proj'
        },
        self.platform_section,
        self.tool_files_section,
        self.configurations_section,
        ['References'],  # empty section
        self.files_section,
        ['Globals']  # empty section
    ]
    easy_xml.WriteXmlIfChanged(content, self.project_path,
                               encoding="Windows-1252")
|
mit
|
jwren/intellij-community
|
python/helpers/typeshed/tests/check_consistent.py
|
5
|
8367
|
#!/usr/bin/env python3
# For security (and simplicity) reasons, only a limited kind of files can be
# present in /stdlib and /stubs directories, see README for detail. Here we
# verify these constraints.
# In addition, for various reasons we need the contents of certain files to be
# duplicated in two places, for example stdlib/@python2/builtins.pyi and
# stdlib/@python2/__builtin__.pyi must be identical. In the past we used
# symlinks but that doesn't always work on Windows, so now you must
# manually update both files, and this test verifies that they are
# identical. The list below indicates which sets of files must match.
import filecmp
import os
import toml
# Sets of files whose contents must be byte-identical (symlinks are not used
# because they are unreliable on Windows); verified by check_same_files().
consistent_files = [
    {"stdlib/@python2/builtins.pyi", "stdlib/@python2/__builtin__.pyi"},
    {"stdlib/threading.pyi", "stdlib/_dummy_threading.pyi"},
]
def assert_stubs_only(directory):
    """Check that given directory contains only valid stub files.

    Every file must be ``<identifier>.pyi`` and every (sub)directory must be
    a valid package name; any violation raises AssertionError.
    """
    top = directory.split(os.sep)[-1]
    assert top.isidentifier(), f"Bad directory name: {top}"
    for _, dirs, files in os.walk(directory):
        for file in files:
            name, ext = os.path.splitext(file)
            assert name.isidentifier(), f"Files must be valid modules, got: {name}"
            # Fixed typo in the error message ("stub flies" -> "stub files").
            assert ext == ".pyi", f"Only stub files allowed. Got: {file} in {directory}"
        for subdir in dirs:
            assert subdir.isidentifier(), f"Directories must be valid packages, got: {subdir}"
def check_stdlib():
    """Validate the layout of the stdlib/ directory.

    Files in the root must be VERSIONS or ``<identifier>.pyi``; other
    subdirectories (``@python2`` is handled by the second loop) must contain
    only stubs.
    """
    for entry in os.listdir("stdlib"):
        path = os.path.join("stdlib", entry)
        if not os.path.isfile(path):
            if entry != "@python2":
                assert_stubs_only(path)
            continue
        name, ext = os.path.splitext(entry)
        if ext != ".pyi":
            assert entry == "VERSIONS", f"Unexpected file in stdlib root: {entry}"
        assert name.isidentifier(), "Bad file name in stdlib"
    for entry in os.listdir("stdlib/@python2"):
        path = os.path.join("stdlib/@python2", entry)
        if not os.path.isfile(path):
            assert_stubs_only(path)
            continue
        name, ext = os.path.splitext(entry)
        assert name.isidentifier(), "Bad file name in stdlib"
        assert ext == ".pyi", "Unexpected file in stdlib/@python2 root"
def check_stubs():
    """Validate the layout of every distribution directory under stubs/."""
    allowed_non_stub = {"METADATA.toml", "README", "README.md", "README.rst"}
    for distribution in os.listdir("stubs"):
        # NB: mirrors the original check, which tests the bare name relative
        # to the current working directory.
        assert not os.path.isfile(distribution), f"Only directories allowed in stubs, got {distribution}"
        dist_dir = os.path.join("stubs", distribution)
        for entry in os.listdir(dist_dir):
            entry_path = os.path.join(dist_dir, entry)
            if not os.path.isfile(entry_path):
                if entry != "@python2":
                    assert_stubs_only(entry_path)
                continue
            name, ext = os.path.splitext(entry)
            if ext == ".pyi":
                assert name.isidentifier(), f"Bad file name '{entry}' in stubs"
            else:
                assert entry in allowed_non_stub, entry
        py2_dir = os.path.join(dist_dir, "@python2")
        if os.path.isdir(py2_dir):
            for entry in os.listdir(py2_dir):
                entry_path = os.path.join(py2_dir, entry)
                if not os.path.isfile(entry_path):
                    assert_stubs_only(entry_path)
                    continue
                name, ext = os.path.splitext(entry)
                assert name.isidentifier(), f"Bad file name '{entry}' in stubs"
                assert ext == ".pyi", f"Unexpected file {entry} in @python2 stubs"
def check_same_files():
    """Reject symlinked stubs and verify all consistent_files pairs match."""
    all_files = [
        os.path.join(root, file)
        for root, _dirs, files in os.walk(".")
        for file in files
    ]
    no_symlink = "You cannot use symlinks in typeshed, please copy {} to its link."
    for file in all_files:
        _, ext = os.path.splitext(file)
        if ext == ".pyi" and os.path.islink(file):
            raise ValueError(no_symlink.format(file))
    for file1, *others in consistent_files:
        f1 = os.path.join(os.getcwd(), file1)
        for file2 in others:
            f2 = os.path.join(os.getcwd(), file2)
            if not filecmp.cmp(f1, f2):
                raise ValueError(
                    "File {f1} does not match file {f2}. Please copy it to {f2}\n"
                    "Run either:\ncp {f1} {f2}\nOr:\ncp {f2} {f1}".format(f1=file1, f2=file2)
                )
def check_versions():
    """Validate stdlib/VERSIONS and check it matches the stdlib/ contents.

    Every (non-@python2) module in stdlib/ must have exactly one entry of
    the form ``module: major.minor``, and vice versa.
    """
    versions = {}
    with open("stdlib/VERSIONS") as f:
        data = f.read().splitlines()
    for line in data:
        if not line or line.lstrip().startswith("#"):
            continue
        assert ": " in line, f"Bad line in VERSIONS: {line}"
        module, version = line.split(": ")
        # Fixed missing space in the message ("version{version}" previously).
        msg = f"Unsupported Python version {version}"
        assert version.count(".") == 1, msg
        major, minor = version.split(".")
        assert major in {"2", "3"}, msg
        assert minor.isdigit(), msg
        assert module not in versions, f"Duplicate module {module} in VERSIONS"
        versions[module] = (int(major), int(minor))
    modules = set()
    for entry in os.listdir("stdlib"):
        if entry == "@python2" or entry == "VERSIONS":
            continue
        if os.path.isfile(os.path.join("stdlib", entry)):
            mod, _ = os.path.splitext(entry)
            modules.add(mod)
        else:
            modules.add(entry)
    # The VERSIONS entries and the stdlib/ contents must agree both ways.
    extra = modules - set(versions)
    assert not extra, f"Modules not in versions: {extra}"
    extra = set(versions) - modules
    assert not extra, f"Versions not in modules: {extra}"
def _strip_dep_version(dependency):
dep_version_pos = len(dependency)
for pos, c in enumerate(dependency):
if c in "=<>":
dep_version_pos = pos
break
stripped = dependency[:dep_version_pos]
rest = dependency[dep_version_pos:]
if not rest:
return stripped, "", ""
number_pos = 0
for pos, c in enumerate(rest):
if c not in "=<>":
number_pos = pos
break
relation = rest[:number_pos]
version = rest[number_pos:]
return stripped, relation, version
def check_metadata():
    """Validate METADATA.toml for every distribution under stubs/.

    Checks the version field, the allowed key set, the python2/python3
    flags, and that every dependency is a well-formed, known typeshed stub.
    """
    known_distributions = set(os.listdir("stubs"))
    for distribution in os.listdir("stubs"):
        with open(os.path.join("stubs", distribution, "METADATA.toml")) as f:
            data = toml.loads(f.read())
        assert "version" in data, f"Missing version for {distribution}"
        version = data["version"]
        msg = f"Unsupported Python version {version}"
        assert version.count(".") == 1, msg
        major, minor = version.split(".")
        assert major.isdigit() and minor.isdigit(), msg
        for key in data:
            assert key in {
                "version", "python2", "python3", "requires"
            }, f"Unexpected key {key} for {distribution}"
        assert isinstance(data.get("python2", False), bool), f"Invalid python2 value for {distribution}"
        assert isinstance(data.get("python3", True), bool), f"Invalid python3 value for {distribution}"
        assert isinstance(data.get("requires", []), list), f"Invalid requires value for {distribution}"
        for dep in data.get("requires", []):
            assert isinstance(dep, str), f"Invalid dependency {dep} for {distribution}"
            assert dep.startswith("types-"), f"Only stub dependencies supported, got {dep}"
            dep = dep[len("types-"):]
            for space in " \t\n":
                assert space not in dep, f"For consistency dependency should not have whitespace: {dep}"
            assert ";" not in dep, f"Semicolons in dependencies are not supported, got {dep}"
            stripped, relation, dep_version = _strip_dep_version(dep)
            assert stripped in known_distributions, f"Only dependencies from typeshed are supported, got {stripped}"
            if relation:
                # BUG FIX: these checks previously validated `version` (the
                # package's own version field) instead of the dependency's
                # version constraint returned by _strip_dep_version.
                msg = f"Bad version in dependency {dep}"
                assert relation in {"==", ">", ">=", "<", "<="}, msg
                assert dep_version.count(".") <= 2, msg
                for part in dep_version.split("."):
                    assert part.isnumeric(), msg
if __name__ == "__main__":
    # Run every consistency check; any violation raises AssertionError or
    # ValueError and fails the CI job.
    check_stdlib()
    check_versions()
    check_stubs()
    check_metadata()
    check_same_files()
|
apache-2.0
|
ChanderG/scipy
|
scipy/signal/tests/test_filter_design.py
|
63
|
96267
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_raises, assert_equal, assert_,
run_module_suite, assert_allclose, assert_warns)
from numpy import array, spacing, sin, pi, sort
from scipy.signal import (tf2zpk, zpk2tf, tf2sos, sos2tf, sos2zpk, zpk2sos,
BadCoefficients, freqz, normalize,
buttord, cheby1, cheby2, ellip, cheb1ord, cheb2ord,
ellipord, butter, bessel, buttap, besselap,
cheb1ap, cheb2ap, ellipap, iirfilter, freqs,
lp2lp, lp2hp, lp2bp, lp2bs, bilinear, group_delay,
firwin)
from scipy.signal.filter_design import _cplxreal, _cplxpair
class TestCplxPair(TestCase):
    """Tests for scipy.signal.filter_design._cplxpair (conjugate sorting)."""
    def test_trivial_input(self):
        assert_equal(_cplxpair([]).size, 0)
        assert_equal(_cplxpair(1), 1)
    def test_output_order(self):
        """Pairs are sorted: negative-imag first, then by real part."""
        assert_allclose(_cplxpair([1+1j, 1-1j]), [1-1j, 1+1j])
        a = [1+1j, 1+1j, 1, 1-1j, 1-1j, 2]
        b = [1-1j, 1+1j, 1-1j, 1+1j, 1, 2]
        assert_allclose(_cplxpair(a), b)
        # points spaced around the unit circle
        z = np.exp(2j*pi*array([4, 3, 5, 2, 6, 1, 0])/7)
        z1 = np.copy(z)
        np.random.shuffle(z)
        assert_allclose(_cplxpair(z), z1)
        np.random.shuffle(z)
        assert_allclose(_cplxpair(z), z1)
        np.random.shuffle(z)
        assert_allclose(_cplxpair(z), z1)
        # Should be able to pair up all the conjugates
        x = np.random.rand(10000) + 1j * np.random.rand(10000)
        y = x.conj()
        z = np.random.rand(10000)
        x = np.concatenate((x, y, z))
        np.random.shuffle(x)
        c = _cplxpair(x)
        # Every other element of head should be conjugates:
        assert_allclose(c[0:20000:2], np.conj(c[1:20000:2]))
        # Real parts of head should be in sorted order:
        assert_allclose(c[0:20000:2].real, np.sort(c[0:20000:2].real))
        # Tail should be sorted real numbers:
        assert_allclose(c[20000:], np.sort(c[20000:]))
    def test_real_integer_input(self):
        assert_array_equal(_cplxpair([2, 0, 1]), [0, 1, 2])
    def test_tolerances(self):
        """Values within *tol* of a conjugate match are still paired."""
        eps = spacing(1)
        assert_allclose(_cplxpair([1j, -1j, 1+1j*eps], tol=2*eps),
                        [-1j, 1j, 1+1j*eps])
        # sorting close to 0
        assert_allclose(_cplxpair([-eps+1j, +eps-1j]), [-1j, +1j])
        assert_allclose(_cplxpair([+eps+1j, -eps-1j]), [-1j, +1j])
        assert_allclose(_cplxpair([+1j, -1j]), [-1j, +1j])
    def test_unmatched_conjugates(self):
        """Inputs that cannot be fully paired must raise ValueError."""
        # 1+2j is unmatched
        assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j])
        # 1+2j and 1-3j are unmatched
        assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+2j, 1-3j])
        # 1+3j is unmatched
        assert_raises(ValueError, _cplxpair, [1+3j, 1-3j, 1+3j])
        # Not conjugates
        assert_raises(ValueError, _cplxpair, [4+5j, 4+5j])
        assert_raises(ValueError, _cplxpair, [1-7j, 1-7j])
        # No pairs
        assert_raises(ValueError, _cplxpair, [1+3j])
        assert_raises(ValueError, _cplxpair, [1-3j])
class TestCplxReal(TestCase):
    """Tests for ``_cplxreal``, which splits values into complex-conjugate
    pairs (returning one representative per pair) and purely real values."""

    def test_trivial_input(self):
        # Empty and scalar inputs: no complex part, scalar goes to reals.
        assert_equal(_cplxreal([]), ([], []))
        assert_equal(_cplxreal(1), ([], [1]))

    def test_output_order(self):
        zc, zr = _cplxreal(np.roots(array([1, 0, 0, 1])))
        assert_allclose(np.append(zc, zr), [1/2 + 1j*sin(pi/3), -1])

        eps = spacing(1)

        a = [0+1j, 0-1j, eps + 1j, eps - 1j, -eps + 1j, -eps - 1j,
             1, 4, 2, 3, 0, 0,
             2+3j, 2-3j,
             1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j,  # sorts out of order
             3+1j, 3+1j, 3+1j, 3-1j, 3-1j, 3-1j,
             2-3j, 2+3j]
        zc, zr = _cplxreal(a)
        # One representative (positive imaginary part) per conjugate pair,
        # sorted; reals sorted separately.
        assert_allclose(zc, [1j, 1j, 1j, 1+1j, 1+2j, 2+3j, 2+3j, 3+1j, 3+1j,
                             3+1j])
        assert_allclose(zr, [0, 0, 1, 2, 3, 4])

        z = array([1-eps + 1j, 1+2j, 1-2j, 1+eps - 1j, 1+eps+3j, 1-2*eps-3j,
                   0+1j, 0-1j, 2+4j, 2-4j, 2+3j, 2-3j, 3+7j, 3-7j, 4-eps+1j,
                   4+eps-2j, 4-1j, 4-eps+2j])
        zc, zr = _cplxreal(z)
        assert_allclose(zc, [1j, 1+1j, 1+2j, 1+3j, 2+3j, 2+4j, 3+7j, 4+1j,
                             4+2j])
        assert_equal(zr, [])

    def test_unmatched_conjugates(self):
        # 1+2j is unmatched
        assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j])
        # 1+2j and 1-3j are unmatched
        assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+2j, 1-3j])
        # 1+3j is unmatched
        assert_raises(ValueError, _cplxreal, [1+3j, 1-3j, 1+3j])
        # No pairs
        assert_raises(ValueError, _cplxreal, [1+3j])
        assert_raises(ValueError, _cplxreal, [1-3j])

    def test_real_integer_input(self):
        # All-real integer input: empty complex part, reals sorted.
        zc, zr = _cplxreal([2, 0, 1, 4])
        assert_array_equal(zc, [])
        assert_array_equal(zr, [0, 1, 2, 4])
class TestTf2zpk(TestCase):
    """Tests for tf2zpk (transfer-function -> zeros/poles/gain)."""

    def test_simple(self):
        z_r = np.array([0.5, -0.5])
        p_r = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
        # Sort the zeros/poles so that we don't fail the test if the order
        # changes
        z_r.sort()
        p_r.sort()

        b = np.poly(z_r)
        a = np.poly(p_r)

        z, p, k = tf2zpk(b, a)
        z.sort()
        p.sort()
        assert_array_almost_equal(z, z_r)
        assert_array_almost_equal(p, p_r)

    def test_bad_filter(self):
        """Regression test for #651: better handling of badly conditioned
        filter coefficients."""
        # Use catch_warnings so the process-wide warning-filter state is
        # restored no matter what.  The previous simplefilter/finally pair
        # "restored" to simplefilter("always", ...), clobbering whatever
        # filters the caller (or other tests) had installed.
        with warnings.catch_warnings():
            warnings.simplefilter("error", BadCoefficients)
            assert_raises(BadCoefficients, tf2zpk, [1e-15], [1.0, 1.0])
class TestZpk2Tf(TestCase):
    """Tests for zpk2tf (zeros/poles/gain -> transfer function)."""

    def test_identity(self):
        """An empty zero/pole set with unit gain is the identity filter."""
        num, den = zpk2tf([], [], 1.)
        expected = np.array([1.])  # desired result for both polynomials
        # The test for the *type* of the return values is a regression
        # test for ticket #1095.  In the case p=[], zpk2tf used to
        # return the scalar 1.0 instead of array([1.0]).
        for coeffs in (num, den):
            assert_array_equal(coeffs, expected)
            assert_(isinstance(coeffs, np.ndarray))
class TestSos2Zpk(TestCase):
    """Tests for sos2zpk (second-order sections -> zeros/poles/gain)."""

    def test_basic(self):
        sos = [[1, 0, 1, 1, 0, -0.81],
               [1, 0, 0, 1, 0, +0.49]]
        z, p, k = sos2zpk(sos)
        z2 = [1j, -1j, 0, 0]
        p2 = [0.9, -0.9, 0.7j, -0.7j]
        k2 = 1
        # sort() gives an order-independent comparison of root sets.
        assert_array_almost_equal(sort(z), sort(z2), decimal=4)
        assert_array_almost_equal(sort(p), sort(p2), decimal=4)
        assert_array_almost_equal(k, k2)

        sos = [[1.00000, +0.61803, 1.0000, 1.00000, +0.60515, 0.95873],
               [1.00000, -1.61803, 1.0000, 1.00000, -1.58430, 0.95873],
               [1.00000, +1.00000, 0.0000, 1.00000, +0.97915, 0.00000]]
        z, p, k = sos2zpk(sos)
        z2 = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j,
              0.8090 - 0.5878j, -1.0000 + 0.0000j, 0]
        p2 = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j,
              0.7922 - 0.5755j, -0.9791 + 0.0000j, 0]
        k2 = 1
        assert_array_almost_equal(sort(z), sort(z2), decimal=4)
        assert_array_almost_equal(sort(p), sort(p2), decimal=4)

        sos = array([[1, 2, 3, 1, 0.2, 0.3],
                     [4, 5, 6, 1, 0.4, 0.5]])
        z = array([-1 - 1.41421356237310j, -1 + 1.41421356237310j,
                   -0.625 - 1.05326872164704j, -0.625 + 1.05326872164704j])
        p = array([-0.2 - 0.678232998312527j, -0.2 + 0.678232998312527j,
                   -0.1 - 0.538516480713450j, -0.1 + 0.538516480713450j])
        k = 4
        z2, p2, k2 = sos2zpk(sos)
        # _cplxpair canonicalizes conjugate-pair ordering before comparing.
        assert_allclose(_cplxpair(z2), z)
        assert_allclose(_cplxpair(p2), p)
        assert_allclose(k2, k)
class TestSos2Tf(TestCase):
    """Tests for sos2tf (second-order sections -> transfer function)."""

    def test_basic(self):
        sections = [[1, 1, 1, 1, 0, -1],
                    [-2, 3, 1, 1, 10, 1]]
        num, den = sos2tf(sections)
        # Overall numerator/denominator are the polynomial products of
        # the per-section numerators and denominators.
        assert_array_almost_equal(num, [-2, 1, 2, 4, 1])
        assert_array_almost_equal(den, [1, 10, 0, -10, -1])
class TestTf2Sos(TestCase):
    """Tests for tf2sos (transfer function -> second-order sections)."""

    def test_basic(self):
        num = [2, 16, 44, 56, 32]
        den = [3, 3, -15, 18, -12]
        sos = tf2sos(num, den)
        sos2 = [[0.6667, 4.0000, 5.3333, 1.0000, +2.0000, -4.0000],
                [1.0000, 2.0000, 2.0000, 1.0000, -1.0000, +1.0000]]
        assert_array_almost_equal(sos, sos2, decimal=4)

        b = [1, -3, 11, -27, 18]
        a = [16, 12, 2, -4, -1]
        sos = tf2sos(b, a)
        sos2 = [[0.0625, -0.1875, 0.1250, 1.0000, -0.2500, -0.1250],
                [1.0000, +0.0000, 9.0000, 1.0000, +1.0000, +0.5000]]
        # NOTE(review): this assertion is deliberately left disabled in the
        # original source -- presumably tf2sos orders/pairs the sections
        # differently from the reference above.  Kept for documentation.
        # assert_array_almost_equal(sos, sos2, decimal=4)
class TestZpk2Sos(TestCase):
    """Tests for zpk2sos (zeros/poles/gain -> second-order sections),
    exercising both pole-zero pairing strategies.  Expected outputs come
    from Octave/MATLAB where noted."""

    def test_basic(self):
        for pairing in ('nearest', 'keep_odd'):
            #
            # Cases that match octave
            #
            z = [-1, -1]
            p = [0.57149 + 0.29360j, 0.57149 - 0.29360j]
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1, 2, 1, 1, -1.14298, 0.41280]]  # octave & MATLAB
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = [1j, -1j]
            p = [0.9, -0.9, 0.7j, -0.7j]
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1, 0, 1, 1, 0, +0.49],
                    [1, 0, 0, 1, 0, -0.81]]  # octave
            # sos2 = [[0, 0, 1, 1, -0.9, 0],
            #         [1, 0, 1, 1, 0.9, 0]]  # MATLAB
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = []
            p = [0.8, -0.5+0.25j, -0.5-0.25j]
            k = 1.
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1., 0., 0., 1., 1., 0.3125],
                    [1., 0., 0., 1., -0.8, 0.]]  # octave, MATLAB fails
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = [1., 1., 0.9j, -0.9j]
            p = [0.99+0.01j, 0.99-0.01j, 0.1+0.9j, 0.1-0.9j]
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1, 0, 0.81, 1, -0.2, 0.82],
                    [1, -2, 1, 1, -1.98, 0.9802]]  # octave
            # sos2 = [[1, -2, 1, 1, -0.2, 0.82],
            #         [1, 0, 0.81, 1, -1.98, 0.9802]]  # MATLAB
            assert_array_almost_equal(sos, sos2, decimal=4)

            # The two pairing strategies diverge for this odd-order case.
            z = [0.9+0.1j, 0.9-0.1j, -0.9]
            p = [0.75+0.25j, 0.75-0.25j, 0.9]
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            if pairing == 'keep_odd':
                sos2 = [[1, -1.8, 0.82, 1, -1.5, 0.625],
                        [1, 0.9, 0, 1, -0.9, 0]]  # octave; MATLAB fails
                assert_array_almost_equal(sos, sos2, decimal=4)
            else:  # pairing == 'nearest'
                sos2 = [[1, 0.9, 0, 1, -1.5, 0.625],
                        [1, -1.8, 0.82, 1, -0.9, 0]]  # our algorithm
                assert_array_almost_equal(sos, sos2, decimal=4)

            #
            # Cases that differ from octave:
            #
            z = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j,
                 +0.8090 - 0.5878j, -1.0000 + 0.0000j]
            p = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j,
                 +0.7922 - 0.5755j, -0.9791 + 0.0000j]
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            # sos2 = [[1, 0.618, 1, 1, 0.6052, 0.95870],
            #         [1, -1.618, 1, 1, -1.5844, 0.95878],
            #         [1, 1, 0, 1, 0.9791, 0]]  # octave, MATLAB fails
            sos2 = [[1, 1, 0, 1, +0.97915, 0],
                    [1, 0.61803, 1, 1, +0.60515, 0.95873],
                    [1, -1.61803, 1, 1, -1.58430, 0.95873]]
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = [-1 - 1.4142j, -1 + 1.4142j,
                 -0.625 - 1.0533j, -0.625 + 1.0533j]
            p = [-0.2 - 0.6782j, -0.2 + 0.6782j,
                 -0.1 - 0.5385j, -0.1 + 0.5385j]
            k = 4
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[4, 8, 12, 1, 0.2, 0.3],
                    [1, 1.25, 1.5, 1, 0.4, 0.5]]  # MATLAB
            # sos2 = [[4, 8, 12, 1, 0.4, 0.5],
            #         [1, 1.25, 1.5, 1, 0.2, 0.3]]  # octave
            assert_allclose(sos, sos2, rtol=1e-4, atol=1e-4)

            z = []
            p = [0.2, -0.5+0.25j, -0.5-0.25j]
            k = 1.
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1., 0., 0., 1., -0.2, 0.],
                    [1., 0., 0., 1., 1., 0.3125]]
            # sos2 = [[1., 0., 0., 1., 1., 0.3125],
            #         [1., 0., 0., 1., -0.2, 0]]  # octave, MATLAB fails
            assert_array_almost_equal(sos, sos2, decimal=4)

        # The next two examples are adapted from Leland B. Jackson,
        # "Digital Filters and Signal Processing (1995) p.400:
        # http://books.google.com/books?id=VZ8uabI1pNMC&lpg=PA400&ots=gRD9pi8Jua&dq=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&pg=PA400#v=onepage&q=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&f=false
        # NOTE(review): these use the default pairing (no `pairing` arg);
        # indentation reconstructed as outside the loop above -- confirm.

        deg2rad = np.pi / 180.
        k = 1.

        # first example
        thetas = [22.5, 45, 77.5]
        mags = [0.8, 0.6, 0.9]
        z = np.array([np.exp(theta * deg2rad * 1j) for theta in thetas])
        z = np.concatenate((z, np.conj(z)))
        p = np.array([mag * np.exp(theta * deg2rad * 1j)
                      for theta, mag in zip(thetas, mags)])
        p = np.concatenate((p, np.conj(p)))
        sos = zpk2sos(z, p, k)
        # sos2 = [[1, -0.43288, 1, 1, -0.38959, 0.81],  # octave,
        #         [1, -1.41421, 1, 1, -0.84853, 0.36],  # MATLAB fails
        #         [1, -1.84776, 1, 1, -1.47821, 0.64]]
        # Note that pole-zero pairing matches, but ordering is different
        sos2 = [[1, -1.41421, 1, 1, -0.84853, 0.36],
                [1, -1.84776, 1, 1, -1.47821, 0.64],
                [1, -0.43288, 1, 1, -0.38959, 0.81]]
        assert_array_almost_equal(sos, sos2, decimal=4)

        # second example
        z = np.array([np.exp(theta * deg2rad * 1j)
                      for theta in (85., 10.)])
        z = np.concatenate((z, np.conj(z), [1, -1]))
        sos = zpk2sos(z, p, k)

        # sos2 = [[1, -0.17431, 1, 1, -0.38959, 0.81],  # octave "wrong",
        #         [1, -1.96962, 1, 1, -0.84853, 0.36],  # MATLAB fails
        #         [1, 0, -1, 1, -1.47821, 0.64000]]
        # Our pole-zero pairing matches the text, Octave does not
        sos2 = [[1, 0, -1, 1, -0.84853, 0.36],
                [1, -1.96962, 1, 1, -1.47821, 0.64],
                [1, -0.17431, 1, 1, -0.38959, 0.81]]
        assert_array_almost_equal(sos, sos2, decimal=4)
class TestFreqz(TestCase):
    """Tests for freqz (frequency response of a digital filter)."""

    def test_ticket1441(self):
        """Regression test for ticket 1441."""
        # Because freqz previously used arange instead of linspace,
        # when N was large, it would return one more point than
        # requested.
        n_points = 100000
        w, h = freqz([1.0], worN=n_points)
        assert_equal(w.shape, (n_points,))

    def test_basic(self):
        # A unit-gain filter has a flat response over [0, pi).
        w, h = freqz([1.0], worN=8)
        expected_w = np.pi * np.arange(8.0) / 8
        assert_array_almost_equal(w, expected_w)
        assert_array_almost_equal(h, np.ones(8))

    def test_basic_whole(self):
        # whole=True extends the frequency grid to [0, 2*pi).
        w, h = freqz([1.0], worN=8, whole=True)
        expected_w = 2 * np.pi * np.arange(8.0) / 8
        assert_array_almost_equal(w, expected_w)
        assert_array_almost_equal(h, np.ones(8))

    def test_plot(self):
        def plot(w, h):
            assert_array_almost_equal(w, np.pi * np.arange(8.0) / 8)
            assert_array_almost_equal(h, np.ones(8))

        # An exception raised inside the plot callback propagates out...
        assert_raises(ZeroDivisionError,
                      freqz, [1.0], worN=8, plot=lambda w, h: 1 / 0)
        # ...and a well-behaved callback receives the (w, h) arrays.
        freqz([1.0], worN=8, plot=plot)
class TestNormalize(TestCase):
    """Tests for normalize (scale b/a so the leading `a` coefficient is 1)."""

    def test_allclose(self):
        """Test for false positive on allclose in normalize() in
        filter_design.py"""
        # Test to make sure the allclose call within signal.normalize does not
        # choose false positives. Then check against a known output from MATLAB
        # to make sure the fix doesn't break anything.

        # These are the coefficients returned from
        #   `[b,a] = cheby1(8, 0.5, 0.048)'
        # in MATLAB. There are at least 15 significant figures in each
        # coefficient, so it makes sense to test for errors on the order of
        # 1e-13 (this can always be relaxed if different platforms have
        # different rounding errors)
        b_matlab = np.array([2.150733144728282e-11, 1.720586515782626e-10,
                             6.022052805239190e-10, 1.204410561047838e-09,
                             1.505513201309798e-09, 1.204410561047838e-09,
                             6.022052805239190e-10, 1.720586515782626e-10,
                             2.150733144728282e-11])
        a_matlab = np.array([1.000000000000000e+00, -7.782402035027959e+00,
                             2.654354569747454e+01, -5.182182531666387e+01,
                             6.334127355102684e+01, -4.963358186631157e+01,
                             2.434862182949389e+01, -6.836925348604676e+00,
                             8.412934944449140e-01])

        # This is the input to signal.normalize after passing through the
        # equivalent steps in signal.iirfilter as was done for MATLAB
        b_norm_in = np.array([1.5543135865293012e-06, 1.2434508692234413e-05,
                              4.3520780422820447e-05, 8.7041560845640893e-05,
                              1.0880195105705122e-04, 8.7041560845640975e-05,
                              4.3520780422820447e-05, 1.2434508692234413e-05,
                              1.5543135865293012e-06])
        a_norm_in = np.array([7.2269025909127173e+04, -5.6242661430467968e+05,
                              1.9182761917308895e+06, -3.7451128364682454e+06,
                              4.5776121393762771e+06, -3.5869706138592605e+06,
                              1.7596511818472347e+06, -4.9409793515707983e+05,
                              6.0799461347219651e+04])

        b_output, a_output = normalize(b_norm_in, a_norm_in)

        # The test on b works for decimal=14 but the one for a does not. For
        # the sake of consistency, both of these are decimal=13. If something
        # breaks on another platform, it is probably fine to relax this lower.
        assert_array_almost_equal(b_matlab, b_output, decimal=13)
        assert_array_almost_equal(a_matlab, a_output, decimal=13)
class TestLp2lp(TestCase):
    """Tests for lp2lp (lowpass -> lowpass cutoff transform)."""

    def test_basic(self):
        # 2nd-order Butterworth prototype, retuned to a new cutoff.
        num = [1]
        den = [1, np.sqrt(2), 1]
        cutoff = 0.38574256627112119
        num_lp, den_lp = lp2lp(num, den, cutoff)
        assert_array_almost_equal(num_lp, [0.1488], decimal=4)
        assert_array_almost_equal(den_lp, [1, 0.5455, 0.1488], decimal=4)
class TestLp2hp(TestCase):
    """Tests for lp2hp (lowpass -> highpass frequency transform)."""

    def test_basic(self):
        # 3rd-order Chebyshev-style prototype moved to a 5 kHz highpass.
        num = [0.25059432325190018]
        den = [1, 0.59724041654134863, 0.92834805757524175,
               0.25059432325190018]
        num_hp, den_hp = lp2hp(num, den, 2 * np.pi * 5000)
        assert_allclose(num_hp, [1, 0, 0, 0])
        assert_allclose(den_hp, [1, 1.1638e5, 2.3522e9, 1.2373e14],
                        rtol=1e-4)
class TestLp2bp(TestCase):
    """Tests for lp2bp (lowpass -> bandpass frequency transform)."""

    def test_basic(self):
        # 3rd-order prototype to a bandpass centered at 4 kHz, 2 kHz wide.
        num, den = [1], [1, 2, 2, 1]
        center = 2 * np.pi * 4000
        bandwidth = 2 * np.pi * 2000
        num_bp, den_bp = lp2bp(num, den, center, bandwidth)
        assert_allclose(num_bp, [1.9844e12, 0, 0, 0], rtol=1e-6)
        assert_allclose(den_bp, [1, 2.5133e4, 2.2108e9, 3.3735e13,
                                 1.3965e18, 1.0028e22, 2.5202e26],
                        rtol=1e-4)
class TestLp2bs(TestCase):
    """Tests for lp2bs (lowpass -> bandstop frequency transform)."""

    def test_basic(self):
        # 1st-order prototype transformed into a 2nd-order bandstop.
        num, den = [1], [1, 1]
        center = 0.41722257286366754
        bandwidth = 0.18460575326152251
        num_bs, den_bs = lp2bs(num, den, center, bandwidth)
        assert_array_almost_equal(num_bs, [1, 0, 0.17407], decimal=5)
        assert_array_almost_equal(den_bs, [1, 0.18461, 0.17407], decimal=5)
class TestBilinear(TestCase):
    """Tests for bilinear (analog -> digital transform at sample rate fs)."""

    def test_basic(self):
        # Lowpass prototype, fs = 0.5.
        num = [0.14879732743343033]
        den = [1, 0.54552236880522209, 0.14879732743343033]
        num_z, den_z = bilinear(num, den, 0.5)
        assert_array_almost_equal(num_z, [0.087821, 0.17564, 0.087821],
                                  decimal=5)
        assert_array_almost_equal(den_z, [1, -1.0048, 0.35606], decimal=4)

        # Bandstop prototype, same sample rate.
        num = [1, 0, 0.17407467530697837]
        den = [1, 0.18460575326152251, 0.17407467530697837]
        num_z, den_z = bilinear(num, den, 0.5)
        assert_array_almost_equal(num_z, [0.86413, -1.2158, 0.86413],
                                  decimal=4)
        assert_array_almost_equal(den_z, [1, -1.2158, 0.72826],
                                  decimal=4)
class TestPrototypeType(TestCase):
    """Analog prototype functions must return ndarrays, not lists."""

    def test_output_type(self):
        # Prototypes should consistently output arrays, not lists
        # https://github.com/scipy/scipy/pull/441
        prototypes = (buttap,
                      besselap,
                      lambda N: cheb1ap(N, 1),
                      lambda N: cheb2ap(N, 20),
                      lambda N: ellipap(N, 1, 20))
        for make_prototype in prototypes:
            for order in range(7):
                z, p, k = make_prototype(order)
                assert_(isinstance(z, np.ndarray))
                assert_(isinstance(p, np.ndarray))
def dB(x):
    """Return the magnitude of *x* in decibels, i.e. ``20*log10(|x|)``."""
    return 20 * np.log10(np.abs(x))
class TestButtord(TestCase):
    """Tests for buttord (minimum Butterworth filter order selection).

    Each test picks pass/stop band edges and ripple/attenuation specs,
    designs the resulting filter, and verifies its response meets the
    specs; N and Wn are also pinned to regression values."""

    def test_lowpass(self):
        wp = 0.2
        ws = 0.3
        rp = 3
        rs = 60
        N, Wn = buttord(wp, ws, rp, rs, False)
        b, a = butter(N, Wn, 'lowpass', False)
        w, h = freqz(b, a)
        w /= np.pi
        # Response must stay above -rp in the passband and below -rs in
        # the stopband.
        assert_array_less(-rp, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs)

        assert_equal(N, 16)
        assert_allclose(Wn, 2.0002776782743284e-01, rtol=1e-15)

    def test_highpass(self):
        wp = 0.3
        ws = 0.2
        rp = 3
        rs = 70
        N, Wn = buttord(wp, ws, rp, rs, False)
        b, a = butter(N, Wn, 'highpass', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp, dB(h[wp <= w]))
        assert_array_less(dB(h[w <= ws]), -rs)

        assert_equal(N, 18)
        assert_allclose(Wn, 2.9996603079132672e-01, rtol=1e-15)

    def test_bandpass(self):
        wp = [0.2, 0.5]
        ws = [0.1, 0.6]
        rp = 3
        rs = 80
        N, Wn = buttord(wp, ws, rp, rs, False)
        b, a = butter(N, Wn, 'bandpass', False)
        w, h = freqz(b, a)
        w /= np.pi
        # 0.1 dB slack on both specs for this band-edge case.
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
                          -rs + 0.1)

        assert_equal(N, 18)
        assert_allclose(Wn, [1.9998742411409134e-01, 5.0002139595676276e-01],
                        rtol=1e-15)

    def test_bandstop(self):
        wp = [0.1, 0.6]
        ws = [0.2, 0.5]
        rp = 3
        rs = 90
        N, Wn = buttord(wp, ws, rp, rs, False)
        b, a = butter(N, Wn, 'bandstop', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp,
                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
                          -rs)

        assert_equal(N, 20)
        assert_allclose(Wn, [1.4759432329294042e-01, 5.9997365985276407e-01],
                        rtol=1e-6)

    def test_analog(self):
        wp = 200
        ws = 600
        rp = 3
        rs = 60
        N, Wn = buttord(wp, ws, rp, rs, True)
        b, a = butter(N, Wn, 'lowpass', True)
        w, h = freqs(b, a)
        assert_array_less(-rp, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs)

        assert_equal(N, 7)
        assert_allclose(Wn, 2.0006785355671877e+02, rtol=1e-15)

        n, Wn = buttord(1, 550/450, 1, 26, analog=True)
        assert_equal(n, 19)
        assert_allclose(Wn, 1.0361980524629517, rtol=1e-15)

        assert_equal(buttord(1, 1.2, 1, 80, analog=True)[0], 55)
class TestCheb1ord(TestCase):
    """Tests for cheb1ord (Chebyshev type I filter order selection);
    same structure as TestButtord."""

    def test_lowpass(self):
        wp = 0.2
        ws = 0.3
        rp = 3
        rs = 60
        N, Wn = cheb1ord(wp, ws, rp, rs, False)
        b, a = cheby1(N, rp, Wn, 'low', False)
        w, h = freqz(b, a)
        w /= np.pi
        # 0.1 dB slack since the equiripple response touches the limits.
        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs + 0.1)

        assert_equal(N, 8)
        assert_allclose(Wn, 0.2, rtol=1e-15)

    def test_highpass(self):
        wp = 0.3
        ws = 0.2
        rp = 3
        rs = 70
        N, Wn = cheb1ord(wp, ws, rp, rs, False)
        b, a = cheby1(N, rp, Wn, 'high', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
        assert_array_less(dB(h[w <= ws]), -rs + 0.1)

        assert_equal(N, 9)
        assert_allclose(Wn, 0.3, rtol=1e-15)

    def test_bandpass(self):
        wp = [0.2, 0.5]
        ws = [0.1, 0.6]
        rp = 3
        rs = 80
        N, Wn = cheb1ord(wp, ws, rp, rs, False)
        b, a = cheby1(N, rp, Wn, 'band', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
                          -rs + 0.1)

        assert_equal(N, 9)
        assert_allclose(Wn, [0.2, 0.5], rtol=1e-15)

    def test_bandstop(self):
        wp = [0.1, 0.6]
        ws = [0.2, 0.5]
        rp = 3
        rs = 90
        N, Wn = cheb1ord(wp, ws, rp, rs, False)
        b, a = cheby1(N, rp, Wn, 'stop', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
                          -rs + 0.1)

        assert_equal(N, 10)
        assert_allclose(Wn, [0.14758232569947785, 0.6], rtol=1e-5)

    def test_analog(self):
        wp = 700
        ws = 100
        rp = 3
        rs = 70
        N, Wn = cheb1ord(wp, ws, rp, rs, True)
        b, a = cheby1(N, rp, Wn, 'high', True)
        w, h = freqs(b, a)
        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
        assert_array_less(dB(h[w <= ws]), -rs + 0.1)

        assert_equal(N, 4)
        assert_allclose(Wn, 700, rtol=1e-15)

        assert_equal(cheb1ord(1, 1.2, 1, 80, analog=True)[0], 17)
class TestCheb2ord(TestCase):
    """Tests for cheb2ord (Chebyshev type II filter order selection);
    same structure as TestButtord."""

    def test_lowpass(self):
        wp = 0.2
        ws = 0.3
        rp = 3
        rs = 60
        N, Wn = cheb2ord(wp, ws, rp, rs, False)
        b, a = cheby2(N, rs, Wn, 'lp', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs + 0.1)

        assert_equal(N, 8)
        assert_allclose(Wn, 0.28647639976553163, rtol=1e-15)

    def test_highpass(self):
        wp = 0.3
        ws = 0.2
        rp = 3
        rs = 70
        N, Wn = cheb2ord(wp, ws, rp, rs, False)
        b, a = cheby2(N, rs, Wn, 'hp', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
        assert_array_less(dB(h[w <= ws]), -rs + 0.1)

        assert_equal(N, 9)
        assert_allclose(Wn, 0.20697492182903282, rtol=1e-15)

    def test_bandpass(self):
        wp = [0.2, 0.5]
        ws = [0.1, 0.6]
        rp = 3
        rs = 80
        N, Wn = cheb2ord(wp, ws, rp, rs, False)
        b, a = cheby2(N, rs, Wn, 'bp', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
                          -rs + 0.1)

        assert_equal(N, 9)
        assert_allclose(Wn, [0.14876937565923479, 0.59748447842351482],
                        rtol=1e-15)

    def test_bandstop(self):
        wp = [0.1, 0.6]
        ws = [0.2, 0.5]
        rp = 3
        rs = 90
        N, Wn = cheb2ord(wp, ws, rp, rs, False)
        b, a = cheby2(N, rs, Wn, 'bs', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
                          -rs + 0.1)

        assert_equal(N, 10)
        assert_allclose(Wn, [0.19926249974781743, 0.50125246585567362],
                        rtol=1e-6)

    def test_analog(self):
        wp = [20, 50]
        ws = [10, 60]
        rp = 3
        rs = 80
        N, Wn = cheb2ord(wp, ws, rp, rs, True)
        b, a = cheby2(N, rs, Wn, 'bp', True)
        w, h = freqs(b, a)
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
                          -rs + 0.1)

        assert_equal(N, 11)
        assert_allclose(Wn, [1.673740595370124e+01, 5.974641487254268e+01],
                        rtol=1e-15)
class TestEllipord(TestCase):
    """Tests for ellipord (elliptic filter order selection);
    same structure as TestButtord."""

    def test_lowpass(self):
        wp = 0.2
        ws = 0.3
        rp = 3
        rs = 60
        N, Wn = ellipord(wp, ws, rp, rs, False)
        b, a = ellip(N, rp, rs, Wn, 'lp', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1, dB(h[w <= wp]))
        assert_array_less(dB(h[ws <= w]), -rs + 0.1)

        assert_equal(N, 5)
        assert_allclose(Wn, 0.2, rtol=1e-15)

    def test_highpass(self):
        wp = 0.3
        ws = 0.2
        rp = 3
        rs = 70
        N, Wn = ellipord(wp, ws, rp, rs, False)
        b, a = ellip(N, rp, rs, Wn, 'hp', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1, dB(h[wp <= w]))
        assert_array_less(dB(h[w <= ws]), -rs + 0.1)

        assert_equal(N, 6)
        assert_allclose(Wn, 0.3, rtol=1e-15)

    def test_bandpass(self):
        wp = [0.2, 0.5]
        ws = [0.1, 0.6]
        rp = 3
        rs = 80
        N, Wn = ellipord(wp, ws, rp, rs, False)
        b, a = ellip(N, rp, rs, Wn, 'bp', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_and(wp[0] <= w, w <= wp[1])]))
        assert_array_less(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]),
                          -rs + 0.1)

        assert_equal(N, 6)
        assert_allclose(Wn, [0.2, 0.5], rtol=1e-15)

    def test_bandstop(self):
        wp = [0.1, 0.6]
        ws = [0.2, 0.5]
        rp = 3
        rs = 90
        N, Wn = ellipord(wp, ws, rp, rs, False)
        b, a = ellip(N, rp, rs, Wn, 'bs', False)
        w, h = freqz(b, a)
        w /= np.pi
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
                          -rs + 0.1)

        assert_equal(N, 7)
        assert_allclose(Wn, [0.14758232794342988, 0.6], rtol=1e-5)

    def test_analog(self):
        wp = [1000, 6000]
        ws = [2000, 5000]
        rp = 3
        rs = 90
        N, Wn = ellipord(wp, ws, rp, rs, True)
        b, a = ellip(N, rp, rs, Wn, 'bs', True)
        w, h = freqs(b, a)
        assert_array_less(-rp - 0.1,
                          dB(h[np.logical_or(w <= wp[0], wp[1] <= w)]))
        assert_array_less(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]),
                          -rs + 0.1)

        assert_equal(N, 8)
        assert_allclose(Wn, [1666.6666, 6000])

        assert_equal(ellipord(1, 1.2, 1, 80, analog=True)[0], 9)
class TestBessel(TestCase):
    """Tests for the bessel filter design function."""

    def test_degenerate(self):
        # 0-order filter is just a passthrough
        b, a = bessel(0, 1, analog=True)
        assert_array_equal(b, [1])
        assert_array_equal(a, [1])

        # 1-order filter is same for all types
        b, a = bessel(1, 1, analog=True)
        assert_array_equal(b, [1])
        assert_array_equal(a, [1, 1])

        z, p, k = bessel(1, 0.3, analog=True, output='zpk')
        assert_array_equal(z, [])
        assert_allclose(p, [-0.3], rtol=1e-14)
        assert_allclose(k, 0.3, rtol=1e-14)

    def test_high_order(self):
        # Poles are compared sorted by imaginary part so the test is
        # insensitive to the order in which roots are returned.
        # high even order
        z, p, k = bessel(24, 100, analog=True, output='zpk')
        z2 = []
        p2 = [
             -9.055312334014323e+01 + 4.844005815403969e+00j,
             -9.055312334014323e+01 - 4.844005815403969e+00j,
             -8.983105162681878e+01 + 1.454056170018573e+01j,
             -8.983105162681878e+01 - 1.454056170018573e+01j,
             -8.837357994162065e+01 + 2.426335240122282e+01j,
             -8.837357994162065e+01 - 2.426335240122282e+01j,
             -8.615278316179575e+01 + 3.403202098404543e+01j,
             -8.615278316179575e+01 - 3.403202098404543e+01j,
             -8.312326467067703e+01 + 4.386985940217900e+01j,
             -8.312326467067703e+01 - 4.386985940217900e+01j,
             -7.921695461084202e+01 + 5.380628489700191e+01j,
             -7.921695461084202e+01 - 5.380628489700191e+01j,
             -7.433392285433246e+01 + 6.388084216250878e+01j,
             -7.433392285433246e+01 - 6.388084216250878e+01j,
             -6.832565803501586e+01 + 7.415032695116071e+01j,
             -6.832565803501586e+01 - 7.415032695116071e+01j,
             -6.096221567378025e+01 + 8.470292433074425e+01j,
             -6.096221567378025e+01 - 8.470292433074425e+01j,
             -5.185914574820616e+01 + 9.569048385258847e+01j,
             -5.185914574820616e+01 - 9.569048385258847e+01j,
             -4.027853855197555e+01 + 1.074195196518679e+02j,
             -4.027853855197555e+01 - 1.074195196518679e+02j,
             -2.433481337524861e+01 + 1.207298683731973e+02j,
             -2.433481337524861e+01 - 1.207298683731973e+02j,
             ]
        k2 = 9.999999999999989e+47
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
        assert_allclose(k, k2, rtol=1e-14)

        # high odd order
        z, p, k = bessel(23, 1000, analog=True, output='zpk')
        z2 = []
        p2 = [
             -2.497697202208956e+02 + 1.202813187870698e+03j,
             -2.497697202208956e+02 - 1.202813187870698e+03j,
             -4.126986617510172e+02 + 1.065328794475509e+03j,
             -4.126986617510172e+02 - 1.065328794475509e+03j,
             -5.304922463809596e+02 + 9.439760364018479e+02j,
             -5.304922463809596e+02 - 9.439760364018479e+02j,
             -9.027564978975828e+02 + 1.010534334242318e+02j,
             -9.027564978975828e+02 - 1.010534334242318e+02j,
             -8.909283244406079e+02 + 2.023024699647598e+02j,
             -8.909283244406079e+02 - 2.023024699647598e+02j,
             -8.709469394347836e+02 + 3.039581994804637e+02j,
             -8.709469394347836e+02 - 3.039581994804637e+02j,
             -8.423805948131370e+02 + 4.062657947488952e+02j,
             -8.423805948131370e+02 - 4.062657947488952e+02j,
             -8.045561642249877e+02 + 5.095305912401127e+02j,
             -8.045561642249877e+02 - 5.095305912401127e+02j,
             -7.564660146766259e+02 + 6.141594859516342e+02j,
             -7.564660146766259e+02 - 6.141594859516342e+02j,
             -6.965966033906477e+02 + 7.207341374730186e+02j,
             -6.965966033906477e+02 - 7.207341374730186e+02j,
             -6.225903228776276e+02 + 8.301558302815096e+02j,
             -6.225903228776276e+02 - 8.301558302815096e+02j,
             -9.066732476324988e+02]
        k2 = 9.999999999999983e+68
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
        assert_allclose(k, k2, rtol=1e-14)
class TestButter(TestCase):
    def test_degenerate(self):
        """Order-0 and order-1 edge cases of butter()."""
        # 0-order filter is just a passthrough
        b, a = butter(0, 1, analog=True)
        assert_array_equal(b, [1])
        assert_array_equal(a, [1])

        # 1-order filter is same for all types
        b, a = butter(1, 1, analog=True)
        assert_array_almost_equal(b, [1])
        assert_array_almost_equal(a, [1, 1])

        z, p, k = butter(1, 0.3, output='zpk')
        assert_array_equal(z, [-1])
        assert_allclose(p, [3.249196962329063e-01], rtol=1e-14)
        assert_allclose(k, 3.375401518835469e-01, rtol=1e-14)
    def test_basic(self):
        """Structural properties of analog/digital designs plus regression
        coefficients for several orders."""
        # analog s-plane
        for N in range(25):
            wn = 0.01
            z, p, k = butter(N, wn, 'low', analog=True, output='zpk')
            assert_array_almost_equal([], z)
            assert_(len(p) == N)
            # All poles should be at distance wn from origin
            assert_array_almost_equal(wn, abs(p))
            assert_(all(np.real(p) <= 0))  # No poles in right half of S-plane
            assert_array_almost_equal(wn**N, k)

        # digital z-plane
        for N in range(25):
            wn = 0.01
            z, p, k = butter(N, wn, 'high', analog=False, output='zpk')
            assert_array_equal(np.ones(N), z)  # All zeros exactly at DC
            assert_(all(np.abs(p) <= 1))  # No poles outside unit circle

        b1, a1 = butter(2, 1, analog=True)
        assert_array_almost_equal(b1, [1])
        assert_array_almost_equal(a1, [1, np.sqrt(2), 1])

        b2, a2 = butter(5, 1, analog=True)
        assert_array_almost_equal(b2, [1])
        assert_array_almost_equal(a2, [1, 3.2361, 5.2361,
                                       5.2361, 3.2361, 1], decimal=4)

        b3, a3 = butter(10, 1, analog=True)
        assert_array_almost_equal(b3, [1])
        assert_array_almost_equal(a3, [1, 6.3925, 20.4317, 42.8021, 64.8824,
                                       74.2334, 64.8824, 42.8021, 20.4317,
                                       6.3925, 1], decimal=4)

        b2, a2 = butter(19, 1.0441379169150726, analog=True)
        assert_array_almost_equal(b2, [2.2720], decimal=4)
        assert_array_almost_equal(a2, 1.0e+004 * np.array([
                        0.0001, 0.0013, 0.0080, 0.0335, 0.1045, 0.2570,
                        0.5164, 0.8669, 1.2338, 1.5010, 1.5672, 1.4044,
                        1.0759, 0.6986, 0.3791, 0.1681, 0.0588, 0.0153,
                        0.0026, 0.0002]), decimal=0)

        b, a = butter(5, 0.4)
        assert_array_almost_equal(b, [0.0219, 0.1097, 0.2194,
                                      0.2194, 0.1097, 0.0219], decimal=4)
        assert_array_almost_equal(a, [1.0000, -0.9853, 0.9738,
                                      -0.3864, 0.1112, -0.0113], decimal=4)
    def test_highpass(self):
        """Regression values for high-order digital highpass designs;
        poles are compared sorted by imaginary part."""
        # highpass, high even order
        z, p, k = butter(28, 0.43, 'high', output='zpk')
        z2 = np.ones(28)
        p2 = [
            2.068257195514592e-01 + 9.238294351481734e-01j,
            2.068257195514592e-01 - 9.238294351481734e-01j,
            1.874933103892023e-01 + 8.269455076775277e-01j,
            1.874933103892023e-01 - 8.269455076775277e-01j,
            1.717435567330153e-01 + 7.383078571194629e-01j,
            1.717435567330153e-01 - 7.383078571194629e-01j,
            1.588266870755982e-01 + 6.564623730651094e-01j,
            1.588266870755982e-01 - 6.564623730651094e-01j,
            1.481881532502603e-01 + 5.802343458081779e-01j,
            1.481881532502603e-01 - 5.802343458081779e-01j,
            1.394122576319697e-01 + 5.086609000582009e-01j,
            1.394122576319697e-01 - 5.086609000582009e-01j,
            1.321840881809715e-01 + 4.409411734716436e-01j,
            1.321840881809715e-01 - 4.409411734716436e-01j,
            1.262633413354405e-01 + 3.763990035551881e-01j,
            1.262633413354405e-01 - 3.763990035551881e-01j,
            1.214660449478046e-01 + 3.144545234797277e-01j,
            1.214660449478046e-01 - 3.144545234797277e-01j,
            1.104868766650320e-01 + 2.771505404367791e-02j,
            1.104868766650320e-01 - 2.771505404367791e-02j,
            1.111768629525075e-01 + 8.331369153155753e-02j,
            1.111768629525075e-01 - 8.331369153155753e-02j,
            1.125740630842972e-01 + 1.394219509611784e-01j,
            1.125740630842972e-01 - 1.394219509611784e-01j,
            1.147138487992747e-01 + 1.963932363793666e-01j,
            1.147138487992747e-01 - 1.963932363793666e-01j,
            1.176516491045901e-01 + 2.546021573417188e-01j,
            1.176516491045901e-01 - 2.546021573417188e-01j,
            ]
        k2 = 1.446671081817286e-06
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-7)
        assert_allclose(k, k2, rtol=1e-10)

        # highpass, high odd order
        z, p, k = butter(27, 0.56, 'high', output='zpk')
        z2 = np.ones(27)
        p2 = [
            -1.772572785680147e-01 + 9.276431102995948e-01j,
            -1.772572785680147e-01 - 9.276431102995948e-01j,
            -1.600766565322114e-01 + 8.264026279893268e-01j,
            -1.600766565322114e-01 - 8.264026279893268e-01j,
            -1.461948419016121e-01 + 7.341841939120078e-01j,
            -1.461948419016121e-01 - 7.341841939120078e-01j,
            -1.348975284762046e-01 + 6.493235066053785e-01j,
            -1.348975284762046e-01 - 6.493235066053785e-01j,
            -1.256628210712206e-01 + 5.704921366889227e-01j,
            -1.256628210712206e-01 - 5.704921366889227e-01j,
            -1.181038235962314e-01 + 4.966120551231630e-01j,
            -1.181038235962314e-01 - 4.966120551231630e-01j,
            -1.119304913239356e-01 + 4.267938916403775e-01j,
            -1.119304913239356e-01 - 4.267938916403775e-01j,
            -1.069237739782691e-01 + 3.602914879527338e-01j,
            -1.069237739782691e-01 - 3.602914879527338e-01j,
            -1.029178030691416e-01 + 2.964677964142126e-01j,
            -1.029178030691416e-01 - 2.964677964142126e-01j,
            -9.978747500816100e-02 + 2.347687643085738e-01j,
            -9.978747500816100e-02 - 2.347687643085738e-01j,
            -9.743974496324025e-02 + 1.747028739092479e-01j,
            -9.743974496324025e-02 - 1.747028739092479e-01j,
            -9.580754551625957e-02 + 1.158246860771989e-01j,
            -9.580754551625957e-02 - 1.158246860771989e-01j,
            -9.484562207782568e-02 + 5.772118357151691e-02j,
            -9.484562207782568e-02 - 5.772118357151691e-02j,
            -9.452783117928215e-02
            ]
        k2 = 9.585686688851069e-09
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-8)
        assert_allclose(k, k2)
    def test_bandpass(self):
        """Band-pass Butterworth designs (digital and analog) match
        precomputed zeros/poles/gain regression values."""
        # Digital: order-8 bandpass doubles to 16 poles; the zeros split
        # evenly between z = +1 and z = -1.
        z, p, k = butter(8, [0.25, 0.33], 'band', output='zpk')
        z2 = [1, 1, 1, 1, 1, 1, 1, 1,
              -1, -1, -1, -1, -1, -1, -1, -1]
        p2 = [
            4.979909925436156e-01 + 8.367609424799387e-01j,
            4.979909925436156e-01 - 8.367609424799387e-01j,
            4.913338722555539e-01 + 7.866774509868817e-01j,
            4.913338722555539e-01 - 7.866774509868817e-01j,
            5.035229361778706e-01 + 7.401147376726750e-01j,
            5.035229361778706e-01 - 7.401147376726750e-01j,
            5.307617160406101e-01 + 7.029184459442954e-01j,
            5.307617160406101e-01 - 7.029184459442954e-01j,
            5.680556159453138e-01 + 6.788228792952775e-01j,
            5.680556159453138e-01 - 6.788228792952775e-01j,
            6.100962560818854e-01 + 6.693849403338664e-01j,
            6.100962560818854e-01 - 6.693849403338664e-01j,
            6.904694312740631e-01 + 6.930501690145245e-01j,
            6.904694312740631e-01 - 6.930501690145245e-01j,
            6.521767004237027e-01 + 6.744414640183752e-01j,
            6.521767004237027e-01 - 6.744414640183752e-01j,
            ]
        k2 = 3.398854055800844e-08
        assert_array_equal(z, z2)
        # Root ordering is not guaranteed, so compare the poles sorted
        # by imaginary part.
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-13)
        # bandpass analog
        z, p, k = butter(4, [90.5, 110.5], 'bp', analog=True, output='zpk')
        z2 = np.zeros(4)
        p2 = [
            -4.179137760733086e+00 + 1.095935899082837e+02j,
            -4.179137760733086e+00 - 1.095935899082837e+02j,
            -9.593598668443835e+00 + 1.034745398029734e+02j,
            -9.593598668443835e+00 - 1.034745398029734e+02j,
            -8.883991981781929e+00 + 9.582087115567160e+01j,
            -8.883991981781929e+00 - 9.582087115567160e+01j,
            -3.474530886568715e+00 + 9.111599925805801e+01j,
            -3.474530886568715e+00 - 9.111599925805801e+01j,
            ]
        k2 = 1.600000000000001e+05
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
        assert_allclose(k, k2, rtol=1e-15)
    def test_bandstop(self):
        """Band-stop Butterworth design matches precomputed z/p/k values."""
        z, p, k = butter(7, [0.45, 0.56], 'stop', output='zpk')
        # All 14 zeros are the same conjugate pair on the unit circle,
        # repeated 7 times (one pair per prototype pole).
        z2 = [-1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j,
              -1.594474531383421e-02 + 9.998728744679880e-01j,
              -1.594474531383421e-02 - 9.998728744679880e-01j]
        p2 = [-1.766850742887729e-01 + 9.466951258673900e-01j,
              -1.766850742887729e-01 - 9.466951258673900e-01j,
              1.467897662432886e-01 + 9.515917126462422e-01j,
              1.467897662432886e-01 - 9.515917126462422e-01j,
              -1.370083529426906e-01 + 8.880376681273993e-01j,
              -1.370083529426906e-01 - 8.880376681273993e-01j,
              1.086774544701390e-01 + 8.915240810704319e-01j,
              1.086774544701390e-01 - 8.915240810704319e-01j,
              -7.982704457700891e-02 + 8.506056315273435e-01j,
              -7.982704457700891e-02 - 8.506056315273435e-01j,
              5.238812787110331e-02 + 8.524011102699969e-01j,
              5.238812787110331e-02 - 8.524011102699969e-01j,
              -1.357545000491310e-02 + 8.382287744986582e-01j,
              -1.357545000491310e-02 - 8.382287744986582e-01j]
        k2 = 4.577122512960063e-01
        # Root ordering is not guaranteed; sort by imaginary part before
        # comparing.
        assert_allclose(sorted(z, key=np.imag), sorted(z2, key=np.imag))
        assert_allclose(sorted(p, key=np.imag), sorted(p2, key=np.imag))
        assert_allclose(k, k2, rtol=1e-14)
def test_ba_output(self):
b, a = butter(4, [100, 300], 'bandpass', analog=True)
b2 = [1.6e+09, 0, 0, 0, 0]
a2 = [1.000000000000000e+00, 5.226251859505511e+02,
2.565685424949238e+05, 6.794127417357160e+07,
1.519411254969542e+10, 2.038238225207147e+12,
2.309116882454312e+14, 1.411088002066486e+16,
8.099999999999991e+17]
assert_allclose(b, b2, rtol=1e-14)
assert_allclose(a, a2, rtol=1e-14)
class TestCheby1(TestCase):
    """Tests for `cheby1` (Chebyshev type I IIR filter design).

    The expected zeros/poles/gains and coefficient arrays are
    precomputed reference values used as regression data.
    """
    def test_degenerate(self):
        """Order-0 and order-1 designs reduce to the documented forms."""
        # 0-order filter is just a passthrough
        # Even-order filters have DC gain of -rp dB
        b, a = cheby1(0, 10*np.log10(2), 1, analog=True)
        assert_array_almost_equal(b, [1/np.sqrt(2)])
        assert_array_equal(a, [1])
        # 1-order filter is same for all types
        b, a = cheby1(1, 10*np.log10(2), 1, analog=True)
        assert_array_almost_equal(b, [1])
        assert_array_almost_equal(a, [1, 1])
        z, p, k = cheby1(1, 0.1, 0.3, output='zpk')
        assert_array_equal(z, [-1])
        assert_allclose(p, [-5.390126972799615e-01], rtol=1e-14)
        assert_allclose(k, 7.695063486399808e-01, rtol=1e-14)
    def test_basic(self):
        """Stability across many orders plus coefficient regression checks."""
        # Analog lowpass: all-pole design, stable (poles in left half-plane).
        for N in range(25):
            wn = 0.01
            z, p, k = cheby1(N, 1, wn, 'low', analog=True, output='zpk')
            assert_array_almost_equal([], z)
            assert_(len(p) == N)
            assert_(all(np.real(p) <= 0))  # No poles in right half of S-plane
        # Digital highpass: zeros pinned at z = 1 (DC), stable poles.
        for N in range(25):
            wn = 0.01
            z, p, k = cheby1(N, 1, wn, 'high', analog=False, output='zpk')
            assert_array_equal(np.ones(N), z)  # All zeros exactly at DC
            assert_(all(np.abs(p) <= 1))  # No poles outside unit circle
        # Same test as TestNormalize
        b, a = cheby1(8, 0.5, 0.048)
        assert_array_almost_equal(b, [
            2.150733144728282e-11, 1.720586515782626e-10,
            6.022052805239190e-10, 1.204410561047838e-09,
            1.505513201309798e-09, 1.204410561047838e-09,
            6.022052805239190e-10, 1.720586515782626e-10,
            2.150733144728282e-11], decimal=14)
        assert_array_almost_equal(a, [
            1.000000000000000e+00, -7.782402035027959e+00,
            2.654354569747454e+01, -5.182182531666387e+01,
            6.334127355102684e+01, -4.963358186631157e+01,
            2.434862182949389e+01, -6.836925348604676e+00,
            8.412934944449140e-01], decimal=14)
        b, a = cheby1(4, 1, [0.4, 0.7], btype='band')
        assert_array_almost_equal(b, [0.0084, 0, -0.0335, 0, 0.0502, 0,
                                      -0.0335, 0, 0.0084], decimal=4)
        assert_array_almost_equal(a, [1.0, 1.1191, 2.862, 2.2986, 3.4137,
                                      1.8653, 1.8982, 0.5676, 0.4103],
                                  decimal=4)
        b2, a2 = cheby1(5, 3, 1, analog=True)
        assert_array_almost_equal(b2, [0.0626], decimal=4)
        assert_array_almost_equal(a2, [1, 0.5745, 1.4150, 0.5489, 0.4080,
                                       0.0626], decimal=4)
        b, a = cheby1(8, 0.5, 0.1)
        assert_array_almost_equal(b, 1.0e-006 * np.array([
            0.00703924326028, 0.05631394608227, 0.19709881128793,
            0.39419762257586, 0.49274702821983, 0.39419762257586,
            0.19709881128793, 0.05631394608227, 0.00703924326028]),
            decimal=13)
        assert_array_almost_equal(a, [
            1.00000000000000, -7.44912258934158, 24.46749067762108,
            -46.27560200466141, 55.11160187999928, -42.31640010161038,
            20.45543300484147, -5.69110270561444, 0.69770374759022],
            decimal=13)
        b, a = cheby1(8, 0.5, 0.25)
        assert_array_almost_equal(b, 1.0e-003 * np.array([
            0.00895261138923, 0.07162089111382, 0.25067311889837,
            0.50134623779673, 0.62668279724591, 0.50134623779673,
            0.25067311889837, 0.07162089111382, 0.00895261138923]),
            decimal=13)
        assert_array_almost_equal(a, [1.00000000000000, -5.97529229188545,
                                      16.58122329202101, -27.71423273542923,
                                      30.39509758355313, -22.34729670426879,
                                      10.74509800434910, -3.08924633697497,
                                      0.40707685889802], decimal=13)
    def test_highpass(self):
        """High-order highpass z/p/k regression checks (digital and analog).

        Poles are compared sorted by imaginary part since root ordering
        is not guaranteed.
        """
        # high even order
        z, p, k = cheby1(24, 0.7, 0.2, 'high', output='zpk')
        z2 = np.ones(24)
        p2 = [-6.136558509657073e-01 + 2.700091504942893e-01j,
              -6.136558509657073e-01 - 2.700091504942893e-01j,
              -3.303348340927516e-01 + 6.659400861114254e-01j,
              -3.303348340927516e-01 - 6.659400861114254e-01j,
              8.779713780557169e-03 + 8.223108447483040e-01j,
              8.779713780557169e-03 - 8.223108447483040e-01j,
              2.742361123006911e-01 + 8.356666951611864e-01j,
              2.742361123006911e-01 - 8.356666951611864e-01j,
              4.562984557158206e-01 + 7.954276912303594e-01j,
              4.562984557158206e-01 - 7.954276912303594e-01j,
              5.777335494123628e-01 + 7.435821817961783e-01j,
              5.777335494123628e-01 - 7.435821817961783e-01j,
              6.593260977749194e-01 + 6.955390907990932e-01j,
              6.593260977749194e-01 - 6.955390907990932e-01j,
              7.149590948466562e-01 + 6.559437858502012e-01j,
              7.149590948466562e-01 - 6.559437858502012e-01j,
              7.532432388188739e-01 + 6.256158042292060e-01j,
              7.532432388188739e-01 - 6.256158042292060e-01j,
              7.794365244268271e-01 + 6.042099234813333e-01j,
              7.794365244268271e-01 - 6.042099234813333e-01j,
              7.967253874772997e-01 + 5.911966597313203e-01j,
              7.967253874772997e-01 - 5.911966597313203e-01j,
              8.069756417293870e-01 + 5.862214589217275e-01j,
              8.069756417293870e-01 - 5.862214589217275e-01j]
        k2 = 6.190427617192018e-04
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-10)
        assert_allclose(k, k2, rtol=1e-10)
        # high odd order
        z, p, k = cheby1(23, 0.8, 0.3, 'high', output='zpk')
        z2 = np.ones(23)
        p2 = [-7.676400532011010e-01,
              -6.754621070166477e-01 + 3.970502605619561e-01j,
              -6.754621070166477e-01 - 3.970502605619561e-01j,
              -4.528880018446727e-01 + 6.844061483786332e-01j,
              -4.528880018446727e-01 - 6.844061483786332e-01j,
              -1.986009130216447e-01 + 8.382285942941594e-01j,
              -1.986009130216447e-01 - 8.382285942941594e-01j,
              2.504673931532608e-02 + 8.958137635794080e-01j,
              2.504673931532608e-02 - 8.958137635794080e-01j,
              2.001089429976469e-01 + 9.010678290791480e-01j,
              2.001089429976469e-01 - 9.010678290791480e-01j,
              3.302410157191755e-01 + 8.835444665962544e-01j,
              3.302410157191755e-01 - 8.835444665962544e-01j,
              4.246662537333661e-01 + 8.594054226449009e-01j,
              4.246662537333661e-01 - 8.594054226449009e-01j,
              4.919620928120296e-01 + 8.366772762965786e-01j,
              4.919620928120296e-01 - 8.366772762965786e-01j,
              5.385746917494749e-01 + 8.191616180796720e-01j,
              5.385746917494749e-01 - 8.191616180796720e-01j,
              5.855636993537203e-01 + 8.060680937701062e-01j,
              5.855636993537203e-01 - 8.060680937701062e-01j,
              5.688812849391721e-01 + 8.086497795114683e-01j,
              5.688812849391721e-01 - 8.086497795114683e-01j]
        k2 = 1.941697029206324e-05
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-10)
        assert_allclose(k, k2, rtol=1e-10)
        # Analog highpass: zeros move to the origin of the s-plane.
        z, p, k = cheby1(10, 1, 1000, 'high', analog=True, output='zpk')
        z2 = np.zeros(10)
        p2 = [-3.144743169501551e+03 + 3.511680029092744e+03j,
              -3.144743169501551e+03 - 3.511680029092744e+03j,
              -5.633065604514602e+02 + 2.023615191183945e+03j,
              -5.633065604514602e+02 - 2.023615191183945e+03j,
              -1.946412183352025e+02 + 1.372309454274755e+03j,
              -1.946412183352025e+02 - 1.372309454274755e+03j,
              -7.987162953085479e+01 + 1.105207708045358e+03j,
              -7.987162953085479e+01 - 1.105207708045358e+03j,
              -2.250315039031946e+01 + 1.001723931471477e+03j,
              -2.250315039031946e+01 - 1.001723931471477e+03j]
        k2 = 8.912509381337453e-01
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-15)
    def test_bandpass(self):
        """Band-pass design matches precomputed z/p/k regression values."""
        z, p, k = cheby1(8, 1, [0.3, 0.4], 'bp', output='zpk')
        z2 = [1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1]
        p2 = [3.077784854851463e-01 + 9.453307017592942e-01j,
              3.077784854851463e-01 - 9.453307017592942e-01j,
              3.280567400654425e-01 + 9.272377218689016e-01j,
              3.280567400654425e-01 - 9.272377218689016e-01j,
              3.677912763284301e-01 + 9.038008865279966e-01j,
              3.677912763284301e-01 - 9.038008865279966e-01j,
              4.194425632520948e-01 + 8.769407159656157e-01j,
              4.194425632520948e-01 - 8.769407159656157e-01j,
              4.740921994669189e-01 + 8.496508528630974e-01j,
              4.740921994669189e-01 - 8.496508528630974e-01j,
              5.234866481897429e-01 + 8.259608422808477e-01j,
              5.234866481897429e-01 - 8.259608422808477e-01j,
              5.844717632289875e-01 + 8.052901363500210e-01j,
              5.844717632289875e-01 - 8.052901363500210e-01j,
              5.615189063336070e-01 + 8.100667803850766e-01j,
              5.615189063336070e-01 - 8.100667803850766e-01j]
        k2 = 5.007028718074307e-09
        assert_array_equal(z, z2)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-13)
    def test_bandstop(self):
        """Band-stop design matches precomputed z/p/k regression values."""
        z, p, k = cheby1(7, 1, [0.5, 0.6], 'stop', output='zpk')
        # All 14 zeros are one conjugate pair on the unit circle,
        # repeated 7 times.
        z2 = [-1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j,
              -1.583844403245361e-01 + 9.873775210440450e-01j,
              -1.583844403245361e-01 - 9.873775210440450e-01j]
        p2 = [-8.942974551472813e-02 + 3.482480481185926e-01j,
              -8.942974551472813e-02 - 3.482480481185926e-01j,
              1.293775154041798e-01 + 8.753499858081858e-01j,
              1.293775154041798e-01 - 8.753499858081858e-01j,
              3.399741945062013e-02 + 9.690316022705607e-01j,
              3.399741945062013e-02 - 9.690316022705607e-01j,
              4.167225522796539e-04 + 9.927338161087488e-01j,
              4.167225522796539e-04 - 9.927338161087488e-01j,
              -3.912966549550960e-01 + 8.046122859255742e-01j,
              -3.912966549550960e-01 - 8.046122859255742e-01j,
              -3.307805547127368e-01 + 9.133455018206508e-01j,
              -3.307805547127368e-01 - 9.133455018206508e-01j,
              -3.072658345097743e-01 + 9.443589759799366e-01j,
              -3.072658345097743e-01 - 9.443589759799366e-01j]
        k2 = 3.619438310405028e-01
        assert_allclose(sorted(z, key=np.imag),
                        sorted(z2, key=np.imag), rtol=1e-13)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-15)
    def test_ba_output(self):
        """Analog band-stop design in (b, a) transfer-function form
        matches precomputed coefficients."""
        # with transfer function conversion,  without digital conversion
        b, a = cheby1(5, 0.9, [210, 310], 'stop', analog=True)
        b2 = [1.000000000000006e+00, 0,
              3.255000000000020e+05, 0,
              4.238010000000026e+10, 0,
              2.758944510000017e+15, 0,
              8.980364380050052e+19, 0,
              1.169243442282517e+24
              ]
        a2 = [1.000000000000000e+00, 4.630555945694342e+02,
              4.039266454794788e+05, 1.338060988610237e+08,
              5.844333551294591e+10, 1.357346371637638e+13,
              3.804661141892782e+15, 5.670715850340080e+17,
              1.114411200988328e+20, 8.316815934908471e+21,
              1.169243442282517e+24
              ]
        assert_allclose(b, b2, rtol=1e-14)
        assert_allclose(a, a2, rtol=1e-14)
class TestCheby2(TestCase):
    """Tests for `cheby2` (Chebyshev type II / inverse Chebyshev design).

    Expected zeros/poles/gains and coefficient arrays below are
    precomputed reference values used as regression data.
    """
    def test_degenerate(self):
        """Order-0 and order-1 designs reduce to the documented forms."""
        # 0-order filter is just a passthrough
        # Stopband ripple factor doesn't matter
        b, a = cheby2(0, 123.456, 1, analog=True)
        assert_array_equal(b, [1])
        assert_array_equal(a, [1])
        # 1-order filter is same for all types
        b, a = cheby2(1, 10*np.log10(2), 1, analog=True)
        assert_array_almost_equal(b, [1])
        assert_array_almost_equal(a, [1, 1])
        z, p, k = cheby2(1, 50, 0.3, output='zpk')
        assert_array_equal(z, [-1])
        assert_allclose(p, [9.967826460175649e-01], rtol=1e-14)
        assert_allclose(k, 1.608676991217512e-03, rtol=1e-14)
    def test_basic(self):
        """Stability across many orders plus a coefficient regression check."""
        # Analog lowpass prototypes must be stable.
        for N in range(25):
            wn = 0.01
            z, p, k = cheby2(N, 40, wn, 'low', analog=True, output='zpk')
            assert_(len(p) == N)
            assert_(all(np.real(p) <= 0))  # No poles in right half of S-plane
        # Digital highpass designs must be stable.
        for N in range(25):
            wn = 0.01
            z, p, k = cheby2(N, 40, wn, 'high', analog=False, output='zpk')
            assert_(all(np.abs(p) <= 1))  # No poles outside unit circle
        B, A = cheby2(18, 100, 0.5)
        assert_array_almost_equal(B, [
            0.00167583914216, 0.01249479541868, 0.05282702120282,
            0.15939804265706, 0.37690207631117, 0.73227013789108,
            1.20191856962356, 1.69522872823393, 2.07598674519837,
            2.21972389625291, 2.07598674519838, 1.69522872823395,
            1.20191856962359, 0.73227013789110, 0.37690207631118,
            0.15939804265707, 0.05282702120282, 0.01249479541868,
            0.00167583914216], decimal=13)
        assert_array_almost_equal(A, [
            1.00000000000000, -0.27631970006174, 3.19751214254060,
            -0.15685969461355, 4.13926117356269, 0.60689917820044,
            2.95082770636540, 0.89016501910416, 1.32135245849798,
            0.51502467236824, 0.38906643866660, 0.15367372690642,
            0.07255803834919, 0.02422454070134, 0.00756108751837,
            0.00179848550988, 0.00033713574499, 0.00004258794833,
            0.00000281030149], decimal=13)
    def test_highpass(self):
        """High-order highpass z/p/k regression checks.

        Zeros and poles are compared sorted by angle since root ordering
        is not guaranteed.
        """
        # high even order
        z, p, k = cheby2(26, 60, 0.3, 'high', output='zpk')
        z2 = [9.981088955489852e-01 + 6.147058341984388e-02j,
              9.981088955489852e-01 - 6.147058341984388e-02j,
              9.832702870387426e-01 + 1.821525257215483e-01j,
              9.832702870387426e-01 - 1.821525257215483e-01j,
              9.550760158089112e-01 + 2.963609353922882e-01j,
              9.550760158089112e-01 - 2.963609353922882e-01j,
              9.162054748821922e-01 + 4.007087817803773e-01j,
              9.162054748821922e-01 - 4.007087817803773e-01j,
              8.700619897368064e-01 + 4.929423232136168e-01j,
              8.700619897368064e-01 - 4.929423232136168e-01j,
              5.889791753434985e-01 + 8.081482110427953e-01j,
              5.889791753434985e-01 - 8.081482110427953e-01j,
              5.984900456570295e-01 + 8.011302423760501e-01j,
              5.984900456570295e-01 - 8.011302423760501e-01j,
              6.172880888914629e-01 + 7.867371958365343e-01j,
              6.172880888914629e-01 - 7.867371958365343e-01j,
              6.448899971038180e-01 + 7.642754030030161e-01j,
              6.448899971038180e-01 - 7.642754030030161e-01j,
              6.804845629637927e-01 + 7.327624168637228e-01j,
              6.804845629637927e-01 - 7.327624168637228e-01j,
              8.202619107108660e-01 + 5.719881098737678e-01j,
              8.202619107108660e-01 - 5.719881098737678e-01j,
              7.228410452536148e-01 + 6.910143437705678e-01j,
              7.228410452536148e-01 - 6.910143437705678e-01j,
              7.702121399578629e-01 + 6.377877856007792e-01j,
              7.702121399578629e-01 - 6.377877856007792e-01j]
        p2 = [7.365546198286450e-01 + 4.842085129329526e-02j,
              7.365546198286450e-01 - 4.842085129329526e-02j,
              7.292038510962885e-01 + 1.442201672097581e-01j,
              7.292038510962885e-01 - 1.442201672097581e-01j,
              7.151293788040354e-01 + 2.369925800458584e-01j,
              7.151293788040354e-01 - 2.369925800458584e-01j,
              6.955051820787286e-01 + 3.250341363856910e-01j,
              6.955051820787286e-01 - 3.250341363856910e-01j,
              6.719122956045220e-01 + 4.070475750638047e-01j,
              6.719122956045220e-01 - 4.070475750638047e-01j,
              6.461722130611300e-01 + 4.821965916689270e-01j,
              6.461722130611300e-01 - 4.821965916689270e-01j,
              5.528045062872224e-01 + 8.162920513838372e-01j,
              5.528045062872224e-01 - 8.162920513838372e-01j,
              5.464847782492791e-01 + 7.869899955967304e-01j,
              5.464847782492791e-01 - 7.869899955967304e-01j,
              5.488033111260949e-01 + 7.520442354055579e-01j,
              5.488033111260949e-01 - 7.520442354055579e-01j,
              6.201874719022955e-01 + 5.500894392527353e-01j,
              6.201874719022955e-01 - 5.500894392527353e-01j,
              5.586478152536709e-01 + 7.112676877332921e-01j,
              5.586478152536709e-01 - 7.112676877332921e-01j,
              5.958145844148228e-01 + 6.107074340842115e-01j,
              5.958145844148228e-01 - 6.107074340842115e-01j,
              5.747812938519067e-01 + 6.643001536914696e-01j,
              5.747812938519067e-01 - 6.643001536914696e-01j]
        k2 = 9.932997786497189e-02
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-13)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-12)
        assert_allclose(k, k2, rtol=1e-11)
        # high odd order
        z, p, k = cheby2(25, 80, 0.5, 'high', output='zpk')
        z2 = [9.690690376586687e-01 + 2.467897896011971e-01j,
              9.690690376586687e-01 - 2.467897896011971e-01j,
              9.999999999999492e-01,
              8.835111277191199e-01 + 4.684101698261429e-01j,
              8.835111277191199e-01 - 4.684101698261429e-01j,
              7.613142857900539e-01 + 6.483830335935022e-01j,
              7.613142857900539e-01 - 6.483830335935022e-01j,
              6.232625173626231e-01 + 7.820126817709752e-01j,
              6.232625173626231e-01 - 7.820126817709752e-01j,
              4.864456563413621e-01 + 8.737108351316745e-01j,
              4.864456563413621e-01 - 8.737108351316745e-01j,
              3.618368136816749e-01 + 9.322414495530347e-01j,
              3.618368136816749e-01 - 9.322414495530347e-01j,
              2.549486883466794e-01 + 9.669545833752675e-01j,
              2.549486883466794e-01 - 9.669545833752675e-01j,
              1.676175432109457e-01 + 9.858520980390212e-01j,
              1.676175432109457e-01 - 9.858520980390212e-01j,
              1.975218468277521e-03 + 9.999980492540941e-01j,
              1.975218468277521e-03 - 9.999980492540941e-01j,
              1.786959496651858e-02 + 9.998403260399917e-01j,
              1.786959496651858e-02 - 9.998403260399917e-01j,
              9.967933660557139e-02 + 9.950196127985684e-01j,
              9.967933660557139e-02 - 9.950196127985684e-01j,
              5.013970951219547e-02 + 9.987422137518890e-01j,
              5.013970951219547e-02 - 9.987422137518890e-01j]
        p2 = [4.218866331906864e-01,
              4.120110200127552e-01 + 1.361290593621978e-01j,
              4.120110200127552e-01 - 1.361290593621978e-01j,
              3.835890113632530e-01 + 2.664910809911026e-01j,
              3.835890113632530e-01 - 2.664910809911026e-01j,
              3.399195570456499e-01 + 3.863983538639875e-01j,
              3.399195570456499e-01 - 3.863983538639875e-01j,
              2.855977834508353e-01 + 4.929444399540688e-01j,
              2.855977834508353e-01 - 4.929444399540688e-01j,
              2.255765441339322e-01 + 5.851631870205766e-01j,
              2.255765441339322e-01 - 5.851631870205766e-01j,
              1.644087535815792e-01 + 6.637356937277153e-01j,
              1.644087535815792e-01 - 6.637356937277153e-01j,
              -7.293633845273095e-02 + 9.739218252516307e-01j,
              -7.293633845273095e-02 - 9.739218252516307e-01j,
              1.058259206358626e-01 + 7.304739464862978e-01j,
              1.058259206358626e-01 - 7.304739464862978e-01j,
              -5.703971947785402e-02 + 9.291057542169088e-01j,
              -5.703971947785402e-02 - 9.291057542169088e-01j,
              5.263875132656864e-02 + 7.877974334424453e-01j,
              5.263875132656864e-02 - 7.877974334424453e-01j,
              -3.007943405982616e-02 + 8.846331716180016e-01j,
              -3.007943405982616e-02 - 8.846331716180016e-01j,
              6.857277464483946e-03 + 8.383275456264492e-01j,
              6.857277464483946e-03 - 8.383275456264492e-01j]
        k2 = 6.507068761705037e-03
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-13)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-12)
        assert_allclose(k, k2, rtol=1e-11)
    def test_bandpass(self):
        """Band-pass design matches precomputed z/p/k regression values."""
        z, p, k = cheby2(9, 40, [0.07, 0.2], 'pass', output='zpk')
        z2 = [-9.999999999999999e-01,
              3.676588029658514e-01 + 9.299607543341383e-01j,
              3.676588029658514e-01 - 9.299607543341383e-01j,
              7.009689684982283e-01 + 7.131917730894889e-01j,
              7.009689684982283e-01 - 7.131917730894889e-01j,
              7.815697973765858e-01 + 6.238178033919218e-01j,
              7.815697973765858e-01 - 6.238178033919218e-01j,
              8.063793628819866e-01 + 5.913986160941200e-01j,
              8.063793628819866e-01 - 5.913986160941200e-01j,
              1.000000000000001e+00,
              9.944493019920448e-01 + 1.052168511576739e-01j,
              9.944493019920448e-01 - 1.052168511576739e-01j,
              9.854674703367308e-01 + 1.698642543566085e-01j,
              9.854674703367308e-01 - 1.698642543566085e-01j,
              9.762751735919308e-01 + 2.165335665157851e-01j,
              9.762751735919308e-01 - 2.165335665157851e-01j,
              9.792277171575134e-01 + 2.027636011479496e-01j,
              9.792277171575134e-01 - 2.027636011479496e-01j]
        p2 = [8.143803410489621e-01 + 5.411056063397541e-01j,
              8.143803410489621e-01 - 5.411056063397541e-01j,
              7.650769827887418e-01 + 5.195412242095543e-01j,
              7.650769827887418e-01 - 5.195412242095543e-01j,
              6.096241204063443e-01 + 3.568440484659796e-01j,
              6.096241204063443e-01 - 3.568440484659796e-01j,
              6.918192770246239e-01 + 4.770463577106911e-01j,
              6.918192770246239e-01 - 4.770463577106911e-01j,
              6.986241085779207e-01 + 1.146512226180060e-01j,
              6.986241085779207e-01 - 1.146512226180060e-01j,
              8.654645923909734e-01 + 1.604208797063147e-01j,
              8.654645923909734e-01 - 1.604208797063147e-01j,
              9.164831670444591e-01 + 1.969181049384918e-01j,
              9.164831670444591e-01 - 1.969181049384918e-01j,
              9.630425777594550e-01 + 2.317513360702271e-01j,
              9.630425777594550e-01 - 2.317513360702271e-01j,
              9.438104703725529e-01 + 2.193509900269860e-01j,
              9.438104703725529e-01 - 2.193509900269860e-01j]
        k2 = 9.345352824659604e-03
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-13)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-11)
    def test_bandstop(self):
        """Band-stop design matches precomputed z/p/k regression values."""
        z, p, k = cheby2(6, 55, [0.1, 0.9], 'stop', output='zpk')
        z2 = [6.230544895101009e-01 + 7.821784343111114e-01j,
              6.230544895101009e-01 - 7.821784343111114e-01j,
              9.086608545660115e-01 + 4.175349702471991e-01j,
              9.086608545660115e-01 - 4.175349702471991e-01j,
              9.478129721465802e-01 + 3.188268649763867e-01j,
              9.478129721465802e-01 - 3.188268649763867e-01j,
              -6.230544895100982e-01 + 7.821784343111109e-01j,
              -6.230544895100982e-01 - 7.821784343111109e-01j,
              -9.086608545660116e-01 + 4.175349702472088e-01j,
              -9.086608545660116e-01 - 4.175349702472088e-01j,
              -9.478129721465784e-01 + 3.188268649763897e-01j,
              -9.478129721465784e-01 - 3.188268649763897e-01j]
        p2 = [-9.464094036167638e-01 + 1.720048695084344e-01j,
              -9.464094036167638e-01 - 1.720048695084344e-01j,
              -8.715844103386737e-01 + 1.370665039509297e-01j,
              -8.715844103386737e-01 - 1.370665039509297e-01j,
              -8.078751204586425e-01 + 5.729329866682983e-02j,
              -8.078751204586425e-01 - 5.729329866682983e-02j,
              9.464094036167665e-01 + 1.720048695084332e-01j,
              9.464094036167665e-01 - 1.720048695084332e-01j,
              8.078751204586447e-01 + 5.729329866683007e-02j,
              8.078751204586447e-01 - 5.729329866683007e-02j,
              8.715844103386721e-01 + 1.370665039509331e-01j,
              8.715844103386721e-01 - 1.370665039509331e-01j]
        k2 = 2.917823332763358e-03
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-13)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-13)
        assert_allclose(k, k2, rtol=1e-11)
    def test_ba_output(self):
        """Analog band-stop design in (b, a) transfer-function form
        matches precomputed coefficients.

        The odd-index numerator coefficients are analytically zero;
        MATLAB's tiny nonzero values are noted inline for reference.
        """
        # with transfer function conversion, without digital conversion
        b, a = cheby2(5, 20, [2010, 2100], 'stop', True)
        b2 = [1.000000000000000e+00, 0,  # Matlab: 6.683253076978249e-12,
              2.111512500000000e+07, 0,  # Matlab: 1.134325604589552e-04,
              1.782966433781250e+14, 0,  # Matlab: 7.216787944356781e+02,
              7.525901316990656e+20, 0,  # Matlab: 2.039829265789886e+09,
              1.587960565565748e+27, 0,  # Matlab: 2.161236218626134e+15,
              1.339913493808585e+33]
        a2 = [1.000000000000000e+00, 1.849550755473371e+02,
              2.113222918998538e+07, 3.125114149732283e+09,
              1.785133457155609e+14, 1.979158697776348e+16,
              7.535048322653831e+20, 5.567966191263037e+22,
              1.589246884221346e+27, 5.871210648525566e+28,
              1.339913493808590e+33]
        assert_allclose(b, b2, rtol=1e-14)
        assert_allclose(a, a2, rtol=1e-14)
class TestEllip(TestCase):
def test_degenerate(self):
# 0-order filter is just a passthrough
# Even-order filters have DC gain of -rp dB
# Stopband ripple factor doesn't matter
b, a = ellip(0, 10*np.log10(2), 123.456, 1, analog=True)
assert_array_almost_equal(b, [1/np.sqrt(2)])
assert_array_equal(a, [1])
# 1-order filter is same for all types
b, a = ellip(1, 10*np.log10(2), 1, 1, analog=True)
assert_array_almost_equal(b, [1])
assert_array_almost_equal(a, [1, 1])
z, p, k = ellip(1, 1, 55, 0.3, output='zpk')
assert_allclose(z, [-9.999999999999998e-01], rtol=1e-14)
assert_allclose(p, [-6.660721153525525e-04], rtol=1e-10)
assert_allclose(k, 5.003330360576763e-01, rtol=1e-14)
def test_basic(self):
for N in range(25):
wn = 0.01
z, p, k = ellip(N, 1, 40, wn, 'low', analog=True, output='zpk')
assert_(len(p) == N)
assert_(all(np.real(p) <= 0)) # No poles in right half of S-plane
for N in range(25):
wn = 0.01
z, p, k = ellip(N, 1, 40, wn, 'high', analog=False, output='zpk')
assert_(all(np.abs(p) <= 1)) # No poles outside unit circle
b3, a3 = ellip(5, 3, 26, 1, analog=True)
assert_array_almost_equal(b3, [0.1420, 0, 0.3764, 0,
0.2409], decimal=4)
assert_array_almost_equal(a3, [1, 0.5686, 1.8061, 0.8017, 0.8012,
0.2409], decimal=4)
b, a = ellip(3, 1, 60, [0.4, 0.7], 'stop')
assert_array_almost_equal(b, [0.3310, 0.3469, 1.1042, 0.7044, 1.1042,
0.3469, 0.3310], decimal=4)
assert_array_almost_equal(a, [1.0000, 0.6973, 1.1441, 0.5878, 0.7323,
0.1131, -0.0060], decimal=4)
    def test_highpass(self):
        """High-order elliptic highpass z/p/k regression checks.

        Zeros and poles are compared sorted by angle since root ordering
        is not guaranteed; tolerances are loose because high-order
        elliptic root finding is numerically sensitive.
        """
        # high even order
        z, p, k = ellip(24, 1, 80, 0.3, 'high', output='zpk')
        z2 = [9.761875332501075e-01 + 2.169283290099910e-01j,
              9.761875332501075e-01 - 2.169283290099910e-01j,
              8.413503353963494e-01 + 5.404901600661900e-01j,
              8.413503353963494e-01 - 5.404901600661900e-01j,
              7.160082576305009e-01 + 6.980918098681732e-01j,
              7.160082576305009e-01 - 6.980918098681732e-01j,
              6.456533638965329e-01 + 7.636306264739803e-01j,
              6.456533638965329e-01 - 7.636306264739803e-01j,
              6.127321820971366e-01 + 7.902906256703928e-01j,
              6.127321820971366e-01 - 7.902906256703928e-01j,
              5.983607817490196e-01 + 8.012267936512676e-01j,
              5.983607817490196e-01 - 8.012267936512676e-01j,
              5.922577552594799e-01 + 8.057485658286990e-01j,
              5.922577552594799e-01 - 8.057485658286990e-01j,
              5.896952092563588e-01 + 8.076258788449631e-01j,
              5.896952092563588e-01 - 8.076258788449631e-01j,
              5.886248765538837e-01 + 8.084063054565607e-01j,
              5.886248765538837e-01 - 8.084063054565607e-01j,
              5.881802711123132e-01 + 8.087298490066037e-01j,
              5.881802711123132e-01 - 8.087298490066037e-01j,
              5.879995719101164e-01 + 8.088612386766461e-01j,
              5.879995719101164e-01 - 8.088612386766461e-01j,
              5.879354086709576e-01 + 8.089078780868164e-01j,
              5.879354086709576e-01 - 8.089078780868164e-01j]
        p2 = [-3.184805259081650e-01 + 4.206951906775851e-01j,
              -3.184805259081650e-01 - 4.206951906775851e-01j,
              1.417279173459985e-01 + 7.903955262836452e-01j,
              1.417279173459985e-01 - 7.903955262836452e-01j,
              4.042881216964651e-01 + 8.309042239116594e-01j,
              4.042881216964651e-01 - 8.309042239116594e-01j,
              5.128964442789670e-01 + 8.229563236799665e-01j,
              5.128964442789670e-01 - 8.229563236799665e-01j,
              5.569614712822724e-01 + 8.155957702908510e-01j,
              5.569614712822724e-01 - 8.155957702908510e-01j,
              5.750478870161392e-01 + 8.118633973883931e-01j,
              5.750478870161392e-01 - 8.118633973883931e-01j,
              5.825314018170804e-01 + 8.101960910679270e-01j,
              5.825314018170804e-01 - 8.101960910679270e-01j,
              5.856397379751872e-01 + 8.094825218722543e-01j,
              5.856397379751872e-01 - 8.094825218722543e-01j,
              5.869326035251949e-01 + 8.091827531557583e-01j,
              5.869326035251949e-01 - 8.091827531557583e-01j,
              5.874697218855733e-01 + 8.090593298213502e-01j,
              5.874697218855733e-01 - 8.090593298213502e-01j,
              5.876904783532237e-01 + 8.090127161018823e-01j,
              5.876904783532237e-01 - 8.090127161018823e-01j,
              5.877753105317594e-01 + 8.090050577978136e-01j,
              5.877753105317594e-01 - 8.090050577978136e-01j]
        k2 = 4.918081266957108e-02
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-4)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-4)
        assert_allclose(k, k2, rtol=1e-3)
        # high odd order
        z, p, k = ellip(23, 1, 70, 0.5, 'high', output='zpk')
        z2 = [9.999999999998661e-01,
              6.603717261750994e-01 + 7.509388678638675e-01j,
              6.603717261750994e-01 - 7.509388678638675e-01j,
              2.788635267510325e-01 + 9.603307416968041e-01j,
              2.788635267510325e-01 - 9.603307416968041e-01j,
              1.070215532544218e-01 + 9.942567008268131e-01j,
              1.070215532544218e-01 - 9.942567008268131e-01j,
              4.049427369978163e-02 + 9.991797705105507e-01j,
              4.049427369978163e-02 - 9.991797705105507e-01j,
              1.531059368627931e-02 + 9.998827859909265e-01j,
              1.531059368627931e-02 - 9.998827859909265e-01j,
              5.808061438534933e-03 + 9.999831330689181e-01j,
              5.808061438534933e-03 - 9.999831330689181e-01j,
              2.224277847754599e-03 + 9.999975262909676e-01j,
              2.224277847754599e-03 - 9.999975262909676e-01j,
              8.731857107534554e-04 + 9.999996187732845e-01j,
              8.731857107534554e-04 - 9.999996187732845e-01j,
              3.649057346914968e-04 + 9.999999334218996e-01j,
              3.649057346914968e-04 - 9.999999334218996e-01j,
              1.765538109802615e-04 + 9.999999844143768e-01j,
              1.765538109802615e-04 - 9.999999844143768e-01j,
              1.143655290967426e-04 + 9.999999934602630e-01j,
              1.143655290967426e-04 - 9.999999934602630e-01j]
        p2 = [-6.322017026545028e-01,
              -4.648423756662754e-01 + 5.852407464440732e-01j,
              -4.648423756662754e-01 - 5.852407464440732e-01j,
              -2.249233374627773e-01 + 8.577853017985717e-01j,
              -2.249233374627773e-01 - 8.577853017985717e-01j,
              -9.234137570557621e-02 + 9.506548198678851e-01j,
              -9.234137570557621e-02 - 9.506548198678851e-01j,
              -3.585663561241373e-02 + 9.821494736043981e-01j,
              -3.585663561241373e-02 - 9.821494736043981e-01j,
              -1.363917242312723e-02 + 9.933844128330656e-01j,
              -1.363917242312723e-02 - 9.933844128330656e-01j,
              -5.131505238923029e-03 + 9.975221173308673e-01j,
              -5.131505238923029e-03 - 9.975221173308673e-01j,
              -1.904937999259502e-03 + 9.990680819857982e-01j,
              -1.904937999259502e-03 - 9.990680819857982e-01j,
              -6.859439885466834e-04 + 9.996492201426826e-01j,
              -6.859439885466834e-04 - 9.996492201426826e-01j,
              -2.269936267937089e-04 + 9.998686250679161e-01j,
              -2.269936267937089e-04 - 9.998686250679161e-01j,
              -5.687071588789117e-05 + 9.999527573294513e-01j,
              -5.687071588789117e-05 - 9.999527573294513e-01j,
              -6.948417068525226e-07 + 9.999882737700173e-01j,
              -6.948417068525226e-07 - 9.999882737700173e-01j]
        k2 = 1.220910020289434e-02
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-4)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-4)
        assert_allclose(k, k2, rtol=1e-3)
    def test_bandpass(self):
        """Check elliptic band-pass design (digital and analog) in zpk form
        against precomputed reference zeros/poles/gain."""
        z, p, k = ellip(7, 1, 40, [0.07, 0.2], 'pass', output='zpk')
        z2 = [-9.999999999999991e-01,
              6.856610961780020e-01 + 7.279209168501619e-01j,
              6.856610961780020e-01 - 7.279209168501619e-01j,
              7.850346167691289e-01 + 6.194518952058737e-01j,
              7.850346167691289e-01 - 6.194518952058737e-01j,
              7.999038743173071e-01 + 6.001281461922627e-01j,
              7.999038743173071e-01 - 6.001281461922627e-01j,
              9.999999999999999e-01,
              9.862938983554124e-01 + 1.649980183725925e-01j,
              9.862938983554124e-01 - 1.649980183725925e-01j,
              9.788558330548762e-01 + 2.045513580850601e-01j,
              9.788558330548762e-01 - 2.045513580850601e-01j,
              9.771155231720003e-01 + 2.127093189691258e-01j,
              9.771155231720003e-01 - 2.127093189691258e-01j]
        p2 = [8.063992755498643e-01 + 5.858071374778874e-01j,
              8.063992755498643e-01 - 5.858071374778874e-01j,
              8.050395347071724e-01 + 5.639097428109795e-01j,
              8.050395347071724e-01 - 5.639097428109795e-01j,
              8.113124936559144e-01 + 4.855241143973142e-01j,
              8.113124936559144e-01 - 4.855241143973142e-01j,
              8.665595314082394e-01 + 3.334049560919331e-01j,
              8.665595314082394e-01 - 3.334049560919331e-01j,
              9.412369011968871e-01 + 2.457616651325908e-01j,
              9.412369011968871e-01 - 2.457616651325908e-01j,
              9.679465190411238e-01 + 2.228772501848216e-01j,
              9.679465190411238e-01 - 2.228772501848216e-01j,
              9.747235066273385e-01 + 2.178937926146544e-01j,
              9.747235066273385e-01 - 2.178937926146544e-01j]
        k2 = 8.354782670263239e-03
        # Sort roots by angle so corresponding conjugate pairs line up
        # before the element-wise comparison.
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-4)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-4)
        assert_allclose(k, k2, rtol=1e-3)

        # Analog band-pass variant (analog=True); roots are compared sorted
        # by imaginary part since they lie far from the unit circle.
        z, p, k = ellip(5, 1, 75, [90.5, 110.5], 'pass', True, 'zpk')
        z2 = [-5.583607317695175e-14 + 1.433755965989225e+02j,
              -5.583607317695175e-14 - 1.433755965989225e+02j,
              5.740106416459296e-14 + 1.261678754570291e+02j,
              5.740106416459296e-14 - 1.261678754570291e+02j,
              -2.199676239638652e-14 + 6.974861996895196e+01j,
              -2.199676239638652e-14 - 6.974861996895196e+01j,
              -3.372595657044283e-14 + 7.926145989044531e+01j,
              -3.372595657044283e-14 - 7.926145989044531e+01j,
              0]
        p2 = [-8.814960004852743e-01 + 1.104124501436066e+02j,
              -8.814960004852743e-01 - 1.104124501436066e+02j,
              -2.477372459140184e+00 + 1.065638954516534e+02j,
              -2.477372459140184e+00 - 1.065638954516534e+02j,
              -3.072156842945799e+00 + 9.995404870405324e+01j,
              -3.072156842945799e+00 - 9.995404870405324e+01j,
              -2.180456023925693e+00 + 9.379206865455268e+01j,
              -2.180456023925693e+00 - 9.379206865455268e+01j,
              -7.230484977485752e-01 + 9.056598800801140e+01j,
              -7.230484977485752e-01 - 9.056598800801140e+01j]
        k2 = 3.774571622827070e-02
        assert_allclose(sorted(z, key=np.imag),
                        sorted(z2, key=np.imag), rtol=1e-4)
        assert_allclose(sorted(p, key=np.imag),
                        sorted(p2, key=np.imag), rtol=1e-6)
        assert_allclose(k, k2, rtol=1e-3)
    def test_bandstop(self):
        """Check elliptic band-stop design in zpk form against precomputed
        reference zeros/poles/gain."""
        z, p, k = ellip(8, 1, 65, [0.2, 0.4], 'stop', output='zpk')
        z2 = [3.528578094286510e-01 + 9.356769561794296e-01j,
              3.528578094286510e-01 - 9.356769561794296e-01j,
              3.769716042264783e-01 + 9.262248159096587e-01j,
              3.769716042264783e-01 - 9.262248159096587e-01j,
              4.406101783111199e-01 + 8.976985411420985e-01j,
              4.406101783111199e-01 - 8.976985411420985e-01j,
              5.539386470258847e-01 + 8.325574907062760e-01j,
              5.539386470258847e-01 - 8.325574907062760e-01j,
              6.748464963023645e-01 + 7.379581332490555e-01j,
              6.748464963023645e-01 - 7.379581332490555e-01j,
              7.489887970285254e-01 + 6.625826604475596e-01j,
              7.489887970285254e-01 - 6.625826604475596e-01j,
              7.913118471618432e-01 + 6.114127579150699e-01j,
              7.913118471618432e-01 - 6.114127579150699e-01j,
              7.806804740916381e-01 + 6.249303940216475e-01j,
              7.806804740916381e-01 - 6.249303940216475e-01j]
        p2 = [-1.025299146693730e-01 + 5.662682444754943e-01j,
              -1.025299146693730e-01 - 5.662682444754943e-01j,
              1.698463595163031e-01 + 8.926678667070186e-01j,
              1.698463595163031e-01 - 8.926678667070186e-01j,
              2.750532687820631e-01 + 9.351020170094005e-01j,
              2.750532687820631e-01 - 9.351020170094005e-01j,
              3.070095178909486e-01 + 9.457373499553291e-01j,
              3.070095178909486e-01 - 9.457373499553291e-01j,
              7.695332312152288e-01 + 2.792567212705257e-01j,
              7.695332312152288e-01 - 2.792567212705257e-01j,
              8.083818999225620e-01 + 4.990723496863960e-01j,
              8.083818999225620e-01 - 4.990723496863960e-01j,
              8.066158014414928e-01 + 5.649811440393374e-01j,
              8.066158014414928e-01 - 5.649811440393374e-01j,
              8.062787978834571e-01 + 5.855780880424964e-01j,
              8.062787978834571e-01 - 5.855780880424964e-01j]
        k2 = 2.068622545291259e-01
        # Sort by angle so corresponding conjugate pairs line up.
        assert_allclose(sorted(z, key=np.angle),
                        sorted(z2, key=np.angle), rtol=1e-6)
        assert_allclose(sorted(p, key=np.angle),
                        sorted(p2, key=np.angle), rtol=1e-5)
        assert_allclose(k, k2, rtol=1e-5)
    def test_ba_output(self):
        """Check analog elliptic band-stop design in transfer-function (ba)
        form against MATLAB reference coefficients."""
        # with transfer function conversion, without digital conversion
        b, a = ellip(5, 1, 40, [201, 240], 'stop', True)
        # Odd-index numerator terms are analytically zero; MATLAB returns
        # tiny non-zero residues for them (noted inline).
        b2 = [
            1.000000000000000e+00, 0,  # Matlab: 1.743506051190569e-13,
            2.426561778314366e+05, 0,  # Matlab: 3.459426536825722e-08,
            2.348218683400168e+10, 0,  # Matlab: 2.559179747299313e-03,
            1.132780692872241e+15, 0,  # Matlab: 8.363229375535731e+01,
            2.724038554089566e+19, 0,  # Matlab: 1.018700994113120e+06,
            2.612380874940186e+23
            ]
        a2 = [
            1.000000000000000e+00, 1.337266601804649e+02,
            2.486725353510667e+05, 2.628059713728125e+07,
            2.436169536928770e+10, 1.913554568577315e+12,
            1.175208184614438e+15, 6.115751452473410e+16,
            2.791577695211466e+19, 7.241811142725384e+20,
            2.612380874940182e+23
            ]
        assert_allclose(b, b2, rtol=1e-6)
        assert_allclose(a, a2, rtol=1e-4)
def test_sos_consistency():
    """Verify output='sos' agrees with the 'ba'/'zpk' outputs for the
    specialized IIR design functions (bessel, butter, cheby1/2, ellip)."""
    specs = [(bessel, (0.1,)),
             (butter, (0.1,)),
             (cheby1, (45.0, 0.1)),
             (cheby2, (0.087, 0.1)),
             (ellip, (0.087, 45, 0.1))]
    for design, extra in specs:
        label = design.__name__

        # Order 2 yields exactly one second-order section: it must equal
        # the concatenated (b, a) transfer-function coefficients.
        num, den = design(2, *extra, output='ba')
        assert_allclose(design(2, *extra, output='sos'),
                        [np.hstack((num, den))], err_msg="%s(2,...)" % label)

        # Higher orders: the sos output must match converting zpk -> sos.
        for order in (3, 4):
            reference = zpk2sos(*design(order, *extra, output='zpk'))
            assert_allclose(design(order, *extra, output='sos'), reference,
                            err_msg="%s(%d,...)" % (label, order))
class TestIIRFilter(TestCase):
    """Generic checks of iirfilter() across all built-in design types."""

    def test_symmetry(self):
        # All built-in IIR filters are real, so should have perfectly
        # symmetrical poles and zeros. Then ba representation (using
        # numpy.poly) will be purely real instead of having negligible
        # imaginary parts.
        for N in np.arange(1, 26):
            for ftype in ('butter', 'bessel', 'cheby1', 'cheby2', 'ellip'):
                z, p, k = iirfilter(N, 1.1, 1, 20, 'low', analog=True,
                                    ftype=ftype, output='zpk')
                # Roots must come in conjugate pairs and the gain be real.
                assert_array_equal(sorted(z), sorted(z.conj()))
                assert_array_equal(sorted(p), sorted(p.conj()))
                assert_equal(k, np.real(k))

                b, a = iirfilter(N, 1.1, 1, 20, 'low', analog=True,
                                 ftype=ftype, output='ba')
                # ba coefficients must be purely real-valued float arrays.
                assert_(issubclass(b.dtype.type, np.floating))
                assert_(issubclass(a.dtype.type, np.floating))

    def test_int_inputs(self):
        # Using integer frequency arguments and large N should not produce
        # np.ints that wraparound to negative numbers
        k = iirfilter(24, 100, btype='low', analog=True, ftype='bessel',
                      output='zpk')[2]
        k2 = 9.999999999999989e+47
        assert_allclose(k, k2)

    def test_invalid_wn_size(self):
        # low and high have 1 Wn, band and stop have 2 Wn
        assert_raises(ValueError, iirfilter, 1, [0.1, 0.9], btype='low')
        assert_raises(ValueError, iirfilter, 1, [0.2, 0.5], btype='high')
        assert_raises(ValueError, iirfilter, 1, 0.2, btype='bp')
        assert_raises(ValueError, iirfilter, 1, 400, btype='bs', analog=True)

    def test_invalid_wn_range(self):
        # For digital filters, 0 <= Wn <= 1
        assert_raises(ValueError, iirfilter, 1, 2, btype='low')
        assert_raises(ValueError, iirfilter, 1, -1, btype='high')
        assert_raises(ValueError, iirfilter, 1, [1, 2], btype='band')
        assert_raises(ValueError, iirfilter, 1, [10, 20], btype='stop')
class TestGroupDelay(TestCase):
    """Tests for group_delay() on FIR, IIR, identity and singular filters."""

    def test_identity_filter(self):
        # An identity filter (b = a = 1) has zero group delay everywhere.
        w, gd = group_delay((1, 1))
        assert_array_almost_equal(w, pi * np.arange(512) / 512)
        assert_array_almost_equal(gd, np.zeros(512))
        w, gd = group_delay((1, 1), whole=True)
        assert_array_almost_equal(w, 2 * pi * np.arange(512) / 512)
        assert_array_almost_equal(gd, np.zeros(512))

    def test_fir(self):
        # Let's design linear phase FIR and check that the group delay
        # is constant.
        N = 100
        b = firwin(N + 1, 0.1)
        w, gd = group_delay((b, 1))
        # Linear-phase FIR of length N+1 has constant group delay N/2.
        assert_allclose(gd, 0.5 * N)

    def test_iir(self):
        # Let's design Butterworth filter and test the group delay at
        # some points against MATLAB answer.
        b, a = butter(4, 0.1)
        w = np.linspace(0, pi, num=10, endpoint=False)
        w, gd = group_delay((b, a), w=w)
        matlab_gd = np.array([8.249313898506037, 11.958947880907104,
                              2.452325615326005, 1.048918665702008,
                              0.611382575635897, 0.418293269460578,
                              0.317932917836572, 0.261371844762525,
                              0.229038045801298, 0.212185774208521])
        assert_array_almost_equal(gd, matlab_gd)

    def test_singular(self):
        # Let's create a filter with zeros and poles on the unit circle and
        # check if warning is raised and the group delay is set to zero at
        # these frequencies.
        z1 = np.exp(1j * 0.1 * pi)
        z2 = np.exp(1j * 0.25 * pi)
        p1 = np.exp(1j * 0.5 * pi)
        p2 = np.exp(1j * 0.8 * pi)
        b = np.convolve([1, -z1], [1, -z2])
        a = np.convolve([1, -p1], [1, -p2])
        w = np.array([0.1 * pi, 0.25 * pi, -0.5 * pi, -0.8 * pi])
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # First call only checks that the singularity warning fires;
            # second call retrieves the (zeroed) group delay values.
            assert_warns(UserWarning, group_delay, (b, a), w=w)
            w, gd = group_delay((b, a), w=w)
        assert_allclose(gd, 0)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    run_module_suite()
|
bsd-3-clause
|
jvrsantacruz/XlsxWriter
|
xlsxwriter/test/utility/test_xl_rowcol_to_cell.py
|
8
|
2341
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...utility import xl_rowcol_to_cell
from ...utility import xl_rowcol_to_cell_fast
class TestUtility(unittest.TestCase):
    """
    Test xl_rowcol_to_cell() utility function.
    """

    def test_xl_rowcol_to_cell(self):
        """Test xl_rowcol_to_cell()"""
        # Each case is (row, col, expected A1-notation string).
        cases = [
            (0, 0, 'A1'),
            (0, 1, 'B1'),
            (0, 2, 'C1'),
            (0, 9, 'J1'),
            (1, 0, 'A2'),
            (2, 0, 'A3'),
            (9, 0, 'A10'),
            (1, 24, 'Y2'),
            (7, 25, 'Z8'),
            (9, 26, 'AA10'),
            (1, 254, 'IU2'),
            (1, 255, 'IV2'),
            (1, 256, 'IW2'),
            (0, 16383, 'XFD1'),
            (1048576, 16384, 'XFE1048577'),
        ]

        for row, col, expected in cases:
            self.assertEqual(xl_rowcol_to_cell(row, col), expected)

    def test_xl_rowcol_to_cell_abs(self):
        """Test xl_rowcol_to_cell() with absolute references"""
        # Each case is (row, col, row_abs, col_abs, expected string); the
        # abs flags insert '$' before the row and/or column component.
        cases = [
            (0, 0, 0, 0, 'A1'),
            (0, 0, 1, 0, 'A$1'),
            (0, 0, 0, 1, '$A1'),
            (0, 0, 1, 1, '$A$1'),
        ]

        for row, col, row_abs, col_abs, expected in cases:
            self.assertEqual(
                xl_rowcol_to_cell(row, col, row_abs, col_abs), expected)

    def test_xl_rowcol_to_cell_fast(self):
        """Test xl_rowcol_to_cell_fast()"""
        # Same expectations as the non-fast variant above.
        cases = [
            (0, 0, 'A1'),
            (0, 1, 'B1'),
            (0, 2, 'C1'),
            (0, 9, 'J1'),
            (1, 0, 'A2'),
            (2, 0, 'A3'),
            (9, 0, 'A10'),
            (1, 24, 'Y2'),
            (7, 25, 'Z8'),
            (9, 26, 'AA10'),
            (1, 254, 'IU2'),
            (1, 255, 'IV2'),
            (1, 256, 'IW2'),
            (0, 16383, 'XFD1'),
            (1048576, 16384, 'XFE1048577'),
        ]

        for row, col, expected in cases:
            self.assertEqual(xl_rowcol_to_cell_fast(row, col), expected)
|
bsd-2-clause
|
wikimedia/operations-debs-kubernetes
|
cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/common.py
|
365
|
1084
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
def get_version(bin_name):
    """Get the version of an installed Kubernetes binary.

    :param str bin_name: Name of binary
    :return: 3-tuple version (maj, min, patch)

    Example::

        >>> get_version('kubelet')
        (1, 6, 0)
    """
    # Build the command as an argument list rather than splitting a
    # formatted string, so binary paths containing spaces still work.
    cmd = [bin_name, '--version']
    version_string = subprocess.check_output(cmd).decode('utf-8')
    # Extract the first three integer fields from the version output,
    # e.g. "Kubernetes v1.6.0" -> (1, 6, 0).
    return tuple(int(q) for q in re.findall(r"[0-9]+", version_string)[:3])
|
apache-2.0
|
donspaulding/adspygoogle
|
examples/adspygoogle/dfp/v201302/contact_service/create_contacts.py
|
3
|
2467
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new contacts.
To determine which contacts exist, run get_all_contacts.py.
Tags: ContactService.createContacts
"""
__author__ = 'Vincent Tsao'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Set the ID of the advertiser company this contact is associated with.
ADVERTISER_COMPANY_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
# Set the ID of the agency company this contact is associated with.
AGENCY_COMPANY_ID = 'INSERT_AGENCY_COMPANY_ID_HERE'
def main(client, advertiser_company_id, agency_company_id):
  """Create one advertiser contact and one agency contact via the DFP
  ContactService and print the resulting contact IDs and names."""
  # Initialize appropriate service.
  contact_service = client.GetService('ContactService', version='v201302')

  # Create an advertiser contact.
  advertiser_contact = {
      'name': 'Mr. Advertiser #%s' % Utils.GetUniqueName(),
      'email': 'advertiser@advertising.com',
      'companyId': advertiser_company_id
  }

  # Create an agency contact.
  agency_contact = {
      'name': 'Ms. Agency #%s' % Utils.GetUniqueName(),
      'email': 'agency@agencies.com',
      'companyId': agency_company_id
  }

  # Create the contacts on the server.
  contacts = contact_service.CreateContacts([advertiser_contact,
                                             agency_contact])

  # Display results.
  for contact in contacts:
    print ('Contact with ID \'%s\' name \'%s\' was created.'
           % (contact['id'], contact['name']))
# Script entry point: build a DfpClient from the pickled auth credentials
# up the directory tree and run the example with the placeholder IDs.
if __name__ == '__main__':
  # Initialize client object.
  dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
  main(dfp_client, ADVERTISER_COMPANY_ID, AGENCY_COMPANY_ID)
|
apache-2.0
|
donspaulding/adspygoogle
|
examples/adspygoogle/dfp/v201208/create_video_line_item.py
|
4
|
4536
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates a new line item to serve to video content. This
feature is only available to DFP premium solution networks. To determine which
line items exist, run get_all_line_items.py. To determine which orders exist,
run get_all_orders.py. To create a video ad unit, run create_video_ad_unit.py.
To create criteria for categories, run
create_custom_targeting_keys_and_values.py"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
from datetime import date
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))

# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v201208')

# Set order that all created line item will belong to and the video ad unit id
# to target.
order_id = 'INSERT_ORDER_ID_HERE'
targeted_video_ad_unit_id = 'INSERT_TARGETED_VIDEO_AD_UNIT_ID_HERE'

# Set the custom targeting key ID and value ID representing the metadata on the
# content to target. This would typically be a key representing a 'genre' and
# a value representing something like 'comedy'.
content_custom_targeting_key_id = 'INSERT_CONTENT_CUSTOM_TARGETING_KEY_ID_HERE'
content_custom_targeting_value_id = \
    'INSERT_CONTENT_CUSTOM_TARGETING_VALUE_ID_HERE'

# create custom criteria for the content metadata targeting.
custom_criteria = {
    'xsi_type': 'CustomCriteria',
    'keyId': content_custom_targeting_key_id,
    'valueIds': [content_custom_targeting_value_id],
    'operator': 'IS'
}

# Create the custom criteria set.
top_set = {
    'xsi_type': 'CustomCriteriaSet',
    'logicalOperator': 'OR',
    'children': [custom_criteria]
}

# Create line item object. Targets PREROLL positions in the given video ad
# unit, with a 400x300 master creative and two companion sizes.
line_item = {
    'name': 'Line item #%s' % Utils.GetUniqueName(),
    'orderId': order_id,
    'targeting': {
        'customTargeting': top_set,
        'inventoryTargeting': {
            'targetedAdUnits': [{'adUnitId': targeted_video_ad_unit_id,
                                 'includeDescendants': 'True'}]
        },
        'videoPositionTargeting': {
            'targetedVideoPositions': ['PREROLL']
        }
    },
    'creativePlaceholders': [
        {
            'size': {
                'width': '400',
                'height': '300'
            },
            'companions': [
                {
                    'size': {
                        'width': '300',
                        'height': '250'
                    },
                },
                {
                    'size': {
                        'width': '728',
                        'height': '90'
                    },
                }
            ]
        }
    ],
    'environmentType': 'VIDEO_PLAYER',
    'companionDeliveryOption': 'OPTIONAL',
    'startDateTimeType': 'IMMEDIATELY',
    'lineItemType': 'SPONSORSHIP',
    # Runs until September 30 of next year.
    'endDateTime': {
        'date': {
            'year': str(date.today().year + 1),
            'month': '9',
            'day': '30'
        },
        'hour': '0',
        'minute': '0',
        'second': '0'
    },
    'costType': 'CPM',
    'costPerUnit': {
        'currencyCode': 'USD',
        'microAmount': '2000000'
    },
    'creativeRotationType': 'OPTIMIZED',
    'discountType': 'PERCENTAGE',
    'unitsBought': '100',
    'unitType': 'IMPRESSIONS',
    'allowOverbook': 'True'
}

# Add line item.
line_item = line_item_service.CreateLineItem(line_item)[0]

# Display results.
print ('Video line item with id \'%s\', belonging to order id \'%s\', and named'
       ' \'%s\' was created.' % (line_item['id'], line_item['orderId'],
                                 line_item['name']))
|
apache-2.0
|
wuxue/altanalyze
|
SQLInterace.py
|
2
|
3693
|
### SQL Interface
import string
import time
import random
import math
import sys, os
import sqlite3
import export
def cleanUpLine(line):
    """Strip line-ending artifacts (\\n, \\r), literal '\\c' sequences and
    double quotes from a line read from a text file.

    Uses the str.replace method (available on both Python 2 and 3) instead
    of the Python 2-only ``string.replace`` module function.
    """
    data = line.replace('\n', '')
    data = data.replace('\c', '')
    data = data.replace('\r', '')
    data = data.replace('"', '')
    return data
def filepath(filename):
    """Resolve *filename* to a usable path, delegating to AltAnalyze's
    ``unique.filepath`` when available, otherwise falling back to a
    best-effort local resolution."""
    try:
        import unique ### local to AltAnalyze
        fn = unique.filepath(filename)
    except Exception:
        ### Should work fine when run as a script with this (AltAnalyze code is specific for packaging with AltAnalyze)
        # NOTE(review): 'dirfile' is never imported or defined in this module,
        # so this line raises NameError when the fallback runs — probably
        # intended to be os.path.dirname(__file__); confirm against the
        # original AltAnalyze source.
        dir=os.path.dirname(dirfile.__file__)
        try: dir_list = os.listdir(filename); fn = filename ### test to see if the path can be found (then it is the full path)
        except Exception: fn=os.path.join(dir,filename)
    return fn
##### SQLite Database Access ######
def createSchemaTextFile(species,platform,schema_text,DBname):
    """Write *schema_text* to AltDatabase/<species>/<platform>/<DBname>_schema.sql
    using the project's export module."""
    schema_filename = filepath('AltDatabase/'+species+'/'+platform+'/'+DBname+'_schema.sql')
    export_data = export.ExportFile(schema_filename)
    ### We will need to augment the database with protein feature annotations for
    export_data.write(schema_text)
    export_data.close()
def populateSQLite(species,platform,DBname,schema_text=None):
    # Open (creating and initializing on first use) the SQLite database for
    # the given species/platform and return the open connection.
    # NOTE: this module uses Python 2 print statements throughout.
    global conn
    """ Since we wish to work with only one gene at a time which can be associated with a lot of data
    it would be more memory efficient transfer this data to a propper relational database for each query """
    db_filename = filepath('AltDatabase/'+species+'/'+platform+'/'+DBname+'.db') ### store in user directory
    schema_filename = filepath('AltDatabase/'+species+'/'+platform+'/'+DBname+'_schema.sql')
    ### Check to see if the database exists already and if not creat it
    db_is_new = not os.path.exists(db_filename)
    with sqlite3.connect(db_filename) as conn:
        if db_is_new:
            # First use: write the schema file, then execute it to create
            # the tables.
            createSchemaTextFile(species,platform,schema_text,DBname)
            print 'Creating schema'
            with open(schema_filename, 'rt') as f:
                schema = f.read()
                #print schema
                conn.executescript(schema)
        else:
            print 'Database exists, assume schema does too.'
            #sys.exit()
    return conn ### User must now add data to the empty SQLite database
def connectToDB(species,platform,DBname):
    """Open and return a connection to an existing AltDatabase SQLite file."""
    db_filename = filepath('AltDatabase/'+species+'/'+platform+'/'+DBname+'.db') ### store in user directory
    # NOTE: 'with' on a sqlite3 connection is a transaction context, not a
    # closing one — the connection stays open (as intended, since it is
    # returned), and any pending transaction is committed on exit.
    with sqlite3.connect(db_filename) as conn:
        return conn
def retreiveDatabaseFields(conn,ids,query):
    """ Retreive data from specific fields from the database """
    # Parameterized execution: '?' placeholders in *query* are bound to
    # the values in *ids*, so no manual string formatting is needed.
    db_cursor = conn.cursor()
    db_cursor.execute(query, ids)
    # Return all matching rows as a list of tuples.
    return [record for record in db_cursor.fetchall()]
def bulkLoading():
    """Demo: bulk-load rows from a CSV file (path given as the first
    command-line argument) into the 'task' table of todo.db using
    executemany with named-parameter binding."""
    import csv
    import sqlite3
    import sys
    db_filename = 'todo.db'
    data_filename = sys.argv[1]
    # Named ':param' placeholders map directly onto the DictReader's
    # per-row dictionaries.
    SQL = """insert into task (details, priority, status, deadline, project)
    values (:details, :priority, 'active', :deadline, :project)
    """
    with open(data_filename, 'rt') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        with sqlite3.connect(db_filename) as conn:
            cursor = conn.cursor()
            cursor.executemany(SQL, csv_reader)
|
apache-2.0
|
themrmax/scikit-learn
|
doc/conf.py
|
10
|
9807
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpy_ext.numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
]
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2007 - 2017, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'backreferences_dir': os.path.join('modules', 'generated'),
'reference_url': {
'sklearn': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.8.1',
'scipy': 'http://docs.scipy.org/doc/scipy-0.13.3/reference'}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
    """produces the final resized carousel images"""
    # Sphinx 'build-finished' handler; a non-None exception means the build
    # failed, so skip thumbnail generation.
    if exception is not None:
        return
    print('Preparing carousel images')
    image_dir = os.path.join(app.builder.outdir, '_images')
    for glr_plot, max_width in carousel_thumbs.items():
        image = os.path.join(image_dir, glr_plot)
        if os.path.exists(image):
            # Replace the '.png' suffix with '_carousel.png'; thumbnails are
            # resized to max_width x 190.
            c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
            sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}'
issues_github_path = 'scikit-learn/scikit-learn'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
    """Sphinx extension hook: register static JS and build-finished handler."""
    # to hide/show the prompt in code examples:
    app.add_javascript('js/copybutton.js')
    # Generate resized carousel thumbnails once the HTML build completes.
    app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
# (resolves each documented object to its file/line on the scikit-learn repo
# at the currently checked-out revision).
linkcode_resolve = make_linkcode_resolve('sklearn',
                                         u'https://github.com/scikit-learn/'
                                         'scikit-learn/blob/{revision}/'
                                         '{package}/{path}#L{lineno}')
|
bsd-3-clause
|
drglove/SickRage
|
lib/github/StatsParticipation.py
|
74
|
2654
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
class StatsParticipation(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents statistics of participation. The reference can be found here http://developer.github.com/v3/repos/statistics/#get-the-weekly-commit-count-for-the-repo-owner-and-everyone-else
    """

    # Names of the JSON attributes this object consumes; both map to
    # "_<name>" instance attributes holding list-of-int wrappers.
    _ATTRIBUTE_NAMES = ("all", "owner")

    def _initAttributes(self):
        # Every attribute starts out unset; _useAttributes fills them in.
        for attr in self._ATTRIBUTE_NAMES:
            setattr(self, "_" + attr, github.GithubObject.NotSet)

    def _useAttributes(self, attributes):
        for attr in self._ATTRIBUTE_NAMES:  # pragma no branch
            if attr in attributes:
                setattr(self, "_" + attr, self._makeListOfIntsAttribute(attributes[attr]))

    @property
    def all(self):
        """
        :type: list of int
        """
        return self._all.value

    @property
    def owner(self):
        """
        :type: list of int
        """
        return self._owner.value
|
gpl-3.0
|
orgito/ansible
|
lib/ansible/modules/system/seboolean.py
|
2
|
10133
|
#!/usr/bin/python
# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: seboolean
short_description: Toggles SELinux booleans
description:
- Toggles SELinux booleans.
version_added: "0.7"
options:
name:
description:
- Name of the boolean to configure.
required: true
persistent:
description:
- Set to C(yes) if the boolean setting should survive a reboot.
type: bool
default: 'no'
state:
description:
- Desired boolean value
type: bool
required: true
notes:
- Not tested on any Debian based system.
requirements:
- libselinux-python
- libsemanage-python
author:
- Stephen Fromm (@sfromm)
'''
EXAMPLES = '''
- name: Set httpd_can_network_connect flag on and keep it persistent across reboots
seboolean:
name: httpd_can_network_connect
state: yes
persistent: yes
'''
import os
try:
import selinux
HAVE_SELINUX = True
except ImportError:
HAVE_SELINUX = False
try:
import semanage
HAVE_SEMANAGE = True
except ImportError:
HAVE_SEMANAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import binary_type
from ansible.module_utils._text import to_bytes, to_text
def has_boolean_value(module, name):
    """Return True when `name` is a known SELinux boolean on this host."""
    bools = []
    try:
        rc, bools = selinux.security_get_boolean_names()
    except OSError:
        module.fail_json(msg="Failed to get list of boolean names")
    # work around for selinux who changed its API, see
    # https://github.com/ansible/ansible/issues/25651
    # (newer bindings return bytes, so compare bytes against bytes)
    if bools and isinstance(bools[0], binary_type):
        name = to_bytes(name)
    return name in bools
def get_boolean_value(module, name):
    """Return the current runtime value of SELinux boolean `name` as a bool."""
    state = 0
    try:
        state = selinux.security_get_boolean_active(name)
    except OSError:
        module.fail_json(msg="Failed to determine current state for boolean %s" % name)
    # security_get_boolean_active() reports 1 when the boolean is on.
    return state == 1
def semanage_get_handle(module):
    """Create and connect a semanage library handle, failing the module on error.

    Fails with a targeted message when the policy store isn't managed or
    the caller lacks privileges; always frees the handle before failing.
    """
    handle = semanage.semanage_handle_create()
    if not handle:
        module.fail_json(msg="Failed to create semanage library handle")
    managed = semanage.semanage_is_managed(handle)
    if managed <= 0:
        # No usable policy store: release the handle before reporting why.
        semanage.semanage_handle_destroy(handle)
        if managed < 0:
            module.fail_json(msg="Failed to determine whether policy is manage")
        if managed == 0:
            # Distinguish "impossible" from "need root" for a clearer hint.
            if os.getuid() == 0:
                module.fail_json(msg="Cannot set persistent booleans without managed policy")
            else:
                module.fail_json(msg="Cannot set persistent booleans; please try as root")
    if semanage.semanage_connect(handle) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to connect to semanage")
    return handle
def semanage_begin_transaction(module, handle):
    """Open a semanage transaction on `handle`, failing the module on error."""
    if semanage.semanage_begin_transaction(handle) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to begin semanage transaction")
def semanage_set_boolean_value(module, handle, name, value):
    """Set boolean `name` to `value` (0/1) in the persistent policy.

    Follows the libsemanage create -> key-extract -> exists -> query ->
    modify/activate sequence; every failure path destroys the handle first.
    """
    rc, t_b = semanage.semanage_bool_create(handle)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to create seboolean with semanage")
    if semanage.semanage_bool_set_name(handle, t_b, name) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to set seboolean name with semanage")
    # A key object is required to look the boolean up in the policy store.
    rc, boolkey = semanage.semanage_bool_key_extract(handle, t_b)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to extract boolean key with semanage")
    rc, exists = semanage.semanage_bool_exists(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to check if boolean is defined")
    if not exists:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="SELinux boolean %s is not defined in persistent policy" % name)
    rc, sebool = semanage.semanage_bool_query(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to query boolean in persistent policy")
    semanage.semanage_bool_set_value(sebool, value)
    # Update both the pending local modification and the active value.
    if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to modify boolean key with semanage")
    if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to set boolean key active with semanage")
    # Free the intermediate semanage objects on the success path.
    semanage.semanage_bool_key_free(boolkey)
    semanage.semanage_bool_free(t_b)
    semanage.semanage_bool_free(sebool)
def semanage_get_boolean_value(module, handle, name):
    """Return the persistent-policy value (0/1) of boolean `name`.

    Mirrors the lookup sequence of semanage_set_boolean_value; every
    failure path destroys the handle before failing the module.
    """
    rc, t_b = semanage.semanage_bool_create(handle)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to create seboolean with semanage")
    if semanage.semanage_bool_set_name(handle, t_b, name) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to set seboolean name with semanage")
    # A key object is required to look the boolean up in the policy store.
    rc, boolkey = semanage.semanage_bool_key_extract(handle, t_b)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to extract boolean key with semanage")
    rc, exists = semanage.semanage_bool_exists(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to check if boolean is defined")
    if not exists:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="SELinux boolean %s is not defined in persistent policy" % name)
    rc, sebool = semanage.semanage_bool_query(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to query boolean in persistent policy")
    value = semanage.semanage_bool_get_value(sebool)
    # Free the intermediate semanage objects on the success path.
    semanage.semanage_bool_key_free(boolkey)
    semanage.semanage_bool_free(t_b)
    semanage.semanage_bool_free(sebool)
    return value
def semanage_commit(module, handle, load=0):
    """Commit the pending semanage transaction; `load` controls policy reload."""
    semanage.semanage_set_reload(handle, load)
    if semanage.semanage_commit(handle) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to commit changes to semanage")
def semanage_destroy_handle(module, handle):
    """Disconnect from semanage and free the handle, failing on disconnect error."""
    rc = semanage.semanage_disconnect(handle)
    # Destroy unconditionally so the handle is never leaked.
    semanage.semanage_handle_destroy(handle)
    if rc < 0:
        module.fail_json(msg="Failed to disconnect from semanage")
# The following method implements what setsebool.c does to change
# a boolean and make it persist after reboot..
def semanage_boolean_value(module, name, state):
    """Persistently set boolean `name` to `state`; return True if it changed.

    In check mode the change is only detected, not written; the transaction
    is still committed (a no-op) and the handle cleaned up.
    """
    value = 0
    changed = False
    if state:
        value = 1
    try:
        handle = semanage_get_handle(module)
        semanage_begin_transaction(module, handle)
        cur_value = semanage_get_boolean_value(module, handle, name)
        if cur_value != value:
            changed = True
            if not module.check_mode:
                semanage_set_boolean_value(module, handle, name, value)
        semanage_commit(module, handle)
        semanage_destroy_handle(module, handle)
    except Exception as e:
        module.fail_json(msg=u"Failed to manage policy for boolean %s: %s" % (name, to_text(e)))
    return changed
def set_boolean_value(module, name, state):
    """Set the runtime value of boolean `name`; return True on success."""
    value = 1 if state else 0
    rc = 0
    try:
        rc = selinux.security_set_boolean(name, value)
    except OSError:
        module.fail_json(msg="Failed to set boolean %s to %s" % (name, value))
    # libselinux returns 0 on success.
    return rc == 0
def main():
    """Module entry point: toggle an SELinux boolean at runtime or persistently."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            persistent=dict(type='bool', default=False),
            state=dict(type='bool', required=True),
        ),
        supports_check_mode=True,
    )
    # Both python bindings and a live SELinux are hard requirements.
    if not HAVE_SELINUX:
        module.fail_json(msg="This module requires libselinux-python support")
    if not HAVE_SEMANAGE:
        module.fail_json(msg="This module requires libsemanage-python support")
    if not selinux.is_selinux_enabled():
        module.fail_json(msg="SELinux is disabled on this host.")
    name = module.params['name']
    persistent = module.params['persistent']
    state = module.params['state']
    result = dict(
        name=name,
        persistent=persistent,
        state=state
    )
    changed = False
    if hasattr(selinux, 'selinux_boolean_sub'):
        # selinux_boolean_sub allows sites to rename a boolean and alias the old name
        # Feature only available in selinux library since 2012.
        name = selinux.selinux_boolean_sub(name)
    if not has_boolean_value(module, name):
        module.fail_json(msg="SELinux boolean %s does not exist." % name)
    if persistent:
        # Persistent changes go through libsemanage.
        changed = semanage_boolean_value(module, name, state)
    else:
        # Runtime-only change via libselinux.
        cur_value = get_boolean_value(module, name)
        if cur_value != state:
            changed = True
            if not module.check_mode:
                # set_boolean_value returns a success flag, not a change flag.
                changed = set_boolean_value(module, name, state)
                if not changed:
                    module.fail_json(msg="Failed to set boolean %s to %s" % (name, state))
                try:
                    selinux.security_commit_booleans()
                except Exception:
                    module.fail_json(msg="Failed to commit pending boolean %s value" % name)
    result['changed'] = changed
    module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
Evervolv/android_external_chromium_org
|
ppapi/native_client/tools/browser_tester/browsertester/rpclistener.py
|
170
|
2297
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import time
class RPCListener(object):
    """Handles RPC callbacks from the in-browser test harness.

    Tracks overall pass/fail state in `ever_failed` and delegates the
    decision to keep the browser alive to `shutdown_callback`.
    """

    def __init__(self, shutdown_callback):
        self.shutdown_callback = shutdown_callback
        self.prefix = '|||| '
        self.ever_failed = False
        self.start_time = time.time()

    def Log(self, message):
        # Display the number of milliseconds since startup.
        # This gives us additional data for debugging bot behavior.
        elapsed_ms = int((time.time() - self.start_time) * 1000)
        stamped_prefix = '[%6s ms] ' % elapsed_ms + self.prefix
        chunks = []
        for raw_line in message.split('\n'):
            chunks.append('%s%s\n' % (stamped_prefix, raw_line.rstrip()))
        sys.stdout.write(''.join(chunks))

    def TestLog(self, message):
        self.Log(message)
        return 'OK'

    # Something went very wrong on the server side, everything is horked?
    # Only called locally.
    def ServerError(self, message):
        self.Log('\n[SERVER_ERROR] %s' % (message,))
        self.ever_failed = True
        self._TestingDone()
        return 'OK'

    # Does nothing. Called to prevent timeouts. (The server resets the timeout
    # every time it receives a GET request.)
    def Ping(self):
        return 'OK'

    # This happens automatically, as long as the renderer process has not crashed.
    def JavaScriptIsAlive(self):
        return 'OK'

    def Shutdown(self, message, passed):
        self.Log(message)
        # Everything but passed.lower() == 'true' is a failure, so garbage from
        # the test runner also fails. In interactive mode this may run several
        # times; ever_failed is set and never reset so any failing run makes
        # the whole session fail. The tester is deliberately biased towards
        # screaming "FAIL" unless things are 100% correct.
        if passed.lower() != 'true':
            self.ever_failed = True
        if self._TestingDone():
            return 'Die, please'
        return 'OK'

    def _TestingDone(self):
        return self.shutdown_callback()
|
bsd-3-clause
|
altsen/diandiyun-platform
|
common/test/acceptance/fixtures/discussion.py
|
8
|
2178
|
"""
Tools for creating discussion content fixture data.
"""
from datetime import datetime
import json
import factory
import requests
from . import COMMENTS_STUB_URL
class ContentFactory(factory.Factory):
    """Base factory for dict-shaped discussion content stubs."""
    FACTORY_FOR = dict  # factory_boy builds plain dicts, not model instances
    id = None
    user_id = "dummy-user-id"
    username = "dummy-username"
    course_id = "dummy-course-id"
    commentable_id = "dummy-commentable-id"
    anonymous = False
    anonymous_to_peers = False
    at_position_list = []
    abuse_flaggers = []
    # Timestamps are frozen at class-definition time; fine for fixture data.
    created_at = datetime.utcnow().isoformat()
    updated_at = datetime.utcnow().isoformat()
    endorsed = False
    closed = False
    votes = {"up_count": 0}
class Thread(ContentFactory):
    """Factory for a top-level discussion thread dict."""
    comments_count = 0
    unread_comments_count = 0
    title = "dummy thread title"
    body = "dummy thread body"
    type = "thread"
    group_id = None
    pinned = False
    read = False
class Comment(ContentFactory):
    """Factory for a comment dict (depth 0 = direct child of a thread)."""
    thread_id = None
    depth = 0
    type = "comment"
    body = "dummy comment body"
class Response(Comment):
    """Factory for a first-level response (a comment at depth 1)."""
    depth = 1
    body = "dummy response body"
class SingleThreadViewFixture(object):
    """Builds a single discussion thread with nested responses/comments and
    pushes it to the stub comments service."""

    def __init__(self, thread):
        self.thread = thread

    def addResponse(self, response, comments=None):
        """Attach `response` to the thread, with `comments` as its children.

        Bug fix: the original used a mutable default (`comments=[]`); that
        single shared list would be attached to every no-comment response,
        so a later mutation of one response's children leaked into all of
        them. Default to None and create a fresh list per call instead.
        """
        if comments is None:
            comments = []
        response['children'] = comments
        self.thread.setdefault('children', []).append(response)
        self.thread['comments_count'] += len(comments) + 1

    def _get_comment_map(self):
        """
        Generate a dict mapping each response/comment in the thread
        by its `id`.
        """
        def _visit(obj):
            # Depth-first walk collecting (id, node) pairs.
            res = []
            for child in obj.get('children', []):
                res.append((child['id'], child))
                if 'children' in child:
                    res += _visit(child)
            return res
        return dict(_visit(self.thread))

    def push(self):
        """
        Push the data to the stub comments service.
        """
        requests.put(
            '{}/set_config'.format(COMMENTS_STUB_URL),
            data={
                "threads": json.dumps({self.thread['id']: self.thread}),
                "comments": json.dumps(self._get_comment_map())
            }
        )
|
agpl-3.0
|
Novasoft-India/OperERP-AM-Motors
|
openerp/addons/point_of_sale/wizard/pos_box.py
|
49
|
2231
|
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.addons.account.wizard.pos_box import CashBox
class PosBox(CashBox):
    """Shared logic for PoS cash-in/cash-out wizards (extends account CashBox)."""
    _register = False  # abstract base: not registered as a concrete osv model
    def run(self, cr, uid, ids, context=None):
        # When launched from a pos.session, operate on that session's cash
        # register statement; otherwise fall back to the generic CashBox flow.
        if not context:
            context = dict()
        active_model = context.get('active_model', False) or False
        active_ids = context.get('active_ids', []) or []
        if active_model == 'pos.session':
            records = self.pool.get(active_model).browse(cr, uid, active_ids, context=context)
            bank_statements = [record.cash_register_id for record in records if record.cash_register_id]
            if not bank_statements:
                raise osv.except_osv(_('Error!'),
                                     _("There is no cash register for this PoS Session"))
            return self._run(cr, uid, ids, bank_statements, context=context)
        else:
            return super(PosBox, self).run(cr, uid, ids, context=context)
class PosBoxIn(PosBox):
    """Cash-in wizard: tags the statement line with the PoS session name."""
    _inherit = 'cash.box.in'
    def _compute_values_for_statement_line(self, cr, uid, box, record, context=None):
        if context is None:
            context = {}
        values = super(PosBoxIn, self)._compute_values_for_statement_line(cr, uid, box, record, context=context)
        active_model = context.get('active_model', False) or False
        active_ids = context.get('active_ids', []) or []
        if active_model == 'pos.session':
            # Use the session name as the statement line reference.
            session = self.pool.get(active_model).browse(cr, uid, active_ids, context=context)[0]
            values['ref'] = session.name
        return values
class PosBoxOut(PosBox):
    """Cash-out wizard: tags the statement line with the PoS session name."""
    _inherit = 'cash.box.out'
    def _compute_values_for_statement_line(self, cr, uid, box, record, context=None):
        # Bug fix: unlike PosBoxIn, this method did not guard against
        # context=None before calling context.get() below, which would raise
        # AttributeError when invoked without a context.
        if context is None:
            context = {}
        values = super(PosBoxOut, self)._compute_values_for_statement_line(cr, uid, box, record, context=context)
        active_model = context.get('active_model', False) or False
        active_ids = context.get('active_ids', []) or []
        if active_model == 'pos.session':
            # Use the session name as the statement line reference.
            session = self.pool.get(active_model).browse(cr, uid, active_ids, context=context)[0]
            values['ref'] = session.name
        return values
|
agpl-3.0
|
knowsis/django
|
django/utils/timezone.py
|
90
|
9180
|
"""
Timezone-related classes and functions.
This module uses pytz when it's available and fallbacks when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import sys
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.utils import six
__all__ = [
'utc',
'get_default_timezone', 'get_default_timezone_name',
'get_current_timezone', 'get_current_timezone_name',
'activate', 'deactivate', 'override',
'localtime', 'now',
'is_aware', 'is_naive', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)  # shared zero offset used by the fallback tzinfo classes
class UTC(tzinfo):
    """
    UTC implementation taken from Python's docs.
    Used only when pytz isn't available.
    """
    def __repr__(self):
        return "<UTC>"
    def utcoffset(self, dt):
        # UTC is a fixed-offset zone: always zero.
        return ZERO
    def tzname(self, dt):
        return "UTC"
    def dst(self, dt):
        # UTC never observes daylight saving time.
        return ZERO
class ReferenceLocalTimezone(tzinfo):
    """
    Local time implementation taken from Python's docs.
    Used only when pytz isn't available, and most likely inaccurate. If you're
    having trouble with this class, don't waste your time, just install pytz.
    Kept identical to the reference version. Subclasses contain improvements.
    """
    def __init__(self):
        # This code is moved in __init__ to execute it as late as possible
        # See get_default_timezone().
        self.STDOFFSET = timedelta(seconds=-_time.timezone)
        if _time.daylight:
            self.DSTOFFSET = timedelta(seconds=-_time.altzone)
        else:
            self.DSTOFFSET = self.STDOFFSET
        self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
        tzinfo.__init__(self)
    def __repr__(self):
        return "<LocalTimezone>"
    def utcoffset(self, dt):
        if self._isdst(dt):
            return self.DSTOFFSET
        else:
            return self.STDOFFSET
    def dst(self, dt):
        if self._isdst(dt):
            return self.DSTDIFF
        else:
            return ZERO
    def tzname(self, dt):
        # time.tzname is a (standard name, DST name) pair indexed by bool.
        is_dst = False if dt is None else self._isdst(dt)
        return _time.tzname[is_dst]
    def _isdst(self, dt):
        # Round-trip through mktime/localtime so the C library decides
        # whether DST applies to this wall-clock time.
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, 0)
        stamp = _time.mktime(tt)
        tt = _time.localtime(stamp)
        return tt.tm_isdst > 0
class LocalTimezone(ReferenceLocalTimezone):
    """
    Slightly improved local time implementation focusing on correctness.
    It still crashes on dates before 1970 or after 2038, but at least the
    error message is helpful.
    """
    def _isdst(self, dt):
        try:
            return super(LocalTimezone, self)._isdst(dt)
        except (OverflowError, ValueError) as exc:
            # mktime() can't represent the date; re-raise the same exception
            # type with an actionable message, preserving the original cause
            # and traceback.
            exc_type = type(exc)
            exc_value = exc_type(
                "Unsupported value: %r. You should install pytz." % dt)
            exc_value.__cause__ = exc
            six.reraise(exc_type, exc_value, sys.exc_info()[2])
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
_localtime = None
def get_default_timezone():
    """
    Returns the default time zone as a tzinfo instance.
    This is the time zone defined by settings.TIME_ZONE.
    See also :func:`get_current_timezone`.
    """
    global _localtime
    if _localtime is None:
        # Build and cache the tzinfo object lazily on first access, so
        # settings are not touched at import time.
        if isinstance(settings.TIME_ZONE, six.string_types) and pytz is not None:
            _localtime = pytz.timezone(settings.TIME_ZONE)
        else:
            # This relies on os.environ['TZ'] being set to settings.TIME_ZONE.
            _localtime = LocalTimezone()
    return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
    """
    Returns the name of the default time zone.
    """
    # Name resolution is shared with get_current_timezone_name().
    return _get_timezone_name(get_default_timezone())
_active = local()  # thread-local holder for the currently activated time zone
def get_current_timezone():
    """
    Returns the currently active time zone as a tzinfo instance.
    """
    # Falls back to the default zone when activate() hasn't been called
    # in this thread.
    return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
    """
    Returns the name of the currently active time zone.
    """
    # Thread-local counterpart of get_default_timezone_name().
    return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
return timezone.tzname(None)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
    """
    Sets the time zone for the current thread.
    The ``timezone`` argument must be an instance of a tzinfo subclass or a
    time zone name. If it is a time zone name, pytz is required.
    """
    if isinstance(timezone, tzinfo):
        _active.value = timezone
    elif isinstance(timezone, six.string_types) and pytz is not None:
        # pytz is the only name -> tzinfo resolver available here.
        _active.value = pytz.timezone(timezone)
    else:
        raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
    """
    Unsets the time zone for the current thread.
    Django will then use the time zone defined by settings.TIME_ZONE.
    """
    # Removing the thread-local value makes get_current_timezone() fall
    # back to the default zone.
    if hasattr(_active, "value"):
        del _active.value
class override(object):
    """
    Temporarily set the time zone for the current thread.
    This is a context manager that uses ``~django.utils.timezone.activate()``
    to set the timezone on entry, and restores the previously active timezone
    on exit.
    The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If is it a time zone name, pytz is required.
    If it is ``None``, Django enables the default time zone.
    """
    def __init__(self, timezone):
        self.timezone = timezone
        # Remember what was active when the manager was *created*, so it can
        # be restored on exit.
        self.old_timezone = getattr(_active, 'value', None)
    def __enter__(self):
        if self.timezone is None:
            deactivate()
        else:
            activate(self.timezone)
    def __exit__(self, exc_type, exc_value, traceback):
        # old_timezone is None both when nothing was active and when the
        # default was in effect; deactivate() covers both cases.
        if self.old_timezone is None:
            deactivate()
        else:
            _active.value = self.old_timezone
# Templates
def template_localtime(value, use_tz=None):
    """
    Checks if value is a datetime and converts it to local time if necessary.
    If use_tz is provided and is not None, that will force the value to
    be converted (or not), overriding the value of settings.USE_TZ.
    This function is designed for use by the template engine.
    """
    # 'convert_to_local_time' lets individual datetime values opt out of
    # template-time conversion; naive datetimes are never converted.
    should_convert = (isinstance(value, datetime)
                      and (settings.USE_TZ if use_tz is None else use_tz)
                      and not is_naive(value)
                      and getattr(value, 'convert_to_local_time', True))
    return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
    """
    Converts an aware datetime.datetime to local time.
    Local time is defined by the current time zone, unless another time zone
    is specified.
    """
    if timezone is None:
        timezone = get_current_timezone()
    value = value.astimezone(timezone)
    if hasattr(timezone, 'normalize'):
        # available for pytz time zones: fixes the offset after a DST change
        value = timezone.normalize(value)
    return value
def now():
    """
    Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
    """
    if settings.USE_TZ:
        # timeit shows that datetime.now(tz=utc) is 24% slower
        return datetime.utcnow().replace(tzinfo=utc)
    else:
        # Naive local time when time zone support is disabled.
        return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
    """
    Determines if a given datetime.datetime is aware.
    The logic is described in Python's docs:
    http://docs.python.org/library/datetime.html#datetime.tzinfo
    """
    tz = value.tzinfo
    if tz is None:
        return False
    # A tzinfo whose utcoffset() is None still counts as naive.
    return tz.utcoffset(value) is not None
def is_naive(value):
    """
    Determines if a given datetime.datetime is naive.
    The logic is described in Python's docs:
    http://docs.python.org/library/datetime.html#datetime.tzinfo
    """
    tz = value.tzinfo
    if tz is None:
        return True
    # A tzinfo whose utcoffset() is None still counts as naive.
    return tz.utcoffset(value) is None
def make_aware(value, timezone):
    """
    Makes a naive datetime.datetime in a given time zone aware.
    """
    localize = getattr(timezone, 'localize', None)
    if localize is not None:
        # pytz time zones: localize() raises on ambiguous/nonexistent times.
        return localize(value, is_dst=None)
    # Regular tzinfo: attach the zone directly; may be wrong around DST changes.
    return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
    """
    Makes an aware datetime.datetime naive in a given time zone.
    """
    converted = value.astimezone(timezone)
    normalize = getattr(timezone, 'normalize', None)
    if normalize is not None:
        # available for pytz time zones
        converted = normalize(converted)
    return converted.replace(tzinfo=None)
|
bsd-3-clause
|
Mzero2010/MaxZone
|
plugin.video.Mzero/servers/vodbeast.py
|
4
|
1968
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para vodbeast
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
from core import logger
from core import scrapertools
def test_video_exists( page_url ):
    """Check whether the video behind page_url still exists on vodbeast.

    Returns (True, "") when it does, or (False, reason) when it is gone.
    """
    logger.info("pelisalacarta.servers.vodbeast test_video_exists(page_url='%s')" % page_url)
    data = scrapertools.cache_page( page_url )
    # Bug fix: the original condition `("File was deleted" or "Not Found") in data`
    # short-circuits to just "File was deleted", so "Not Found" pages were never
    # detected. Test each marker substring explicitly.
    if "File was deleted" in data or "Not Found" in data:
        return False, "[Vodbeast] El archivo no existe o ha sido borrado"
    return True, ""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Extract the direct media URLs from a vodbeast page.

    Returns a list of [label, url] pairs; extra parameters exist for
    interface compatibility with other server connectors.
    """
    logger.info("pelisalacarta.servers.vodbeast url="+page_url)
    data = scrapertools.cache_page( page_url )
    # Each playable source appears as ,{file: "<url>" in the embedded player config.
    media_urls = scrapertools.find_multiple_matches(data,',{file:\s+"([^"]+)"')
    video_urls = []
    for media_url in media_urls:
        # Label each entry with the file extension (last 4 chars of the filename).
        video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [vodbeast]",media_url])
    for video_url in video_urls:
        logger.info("pelisalacarta.servers.vodbeast %s - %s" % (video_url[0],video_url[1]))
    return video_urls
# Finds this server's videos in the given text
def find_videos(data):
    """Scan `data` for vodbeast links; returns [title, url, server] triples."""
    encontrados = set()  # URLs already emitted, used to skip duplicates
    devuelve = []
    # http://vodbeast.com/jdfscsa5uoy4
    patronvideos = "vodbeast.com/(?:embed-|)([a-z0-9]+)"
    logger.info("pelisalacarta.servers.vodbeast find_videos #"+patronvideos+"#")
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    for match in matches:
        titulo = "[vodbeast]"
        # Normalize every hit to the embed URL form.
        url = "http://vodbeast.com/embed-%s.html" % match
        if url not in encontrados:
            logger.info("  url="+url)
            devuelve.append( [ titulo , url , 'vodbeast' ] )
            encontrados.add(url)
        else:
            logger.info("  url duplicada="+url)
    return devuelve
|
gpl-3.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.