repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
franga2000/django-machina | tests/unit/permission/test_models.py | 1 | 1651 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from django.core.exceptions import ValidationError
from machina.core.db.models import get_model
from machina.test.factories import ForumPermissionFactory
from machina.test.factories import UserFactory
from machina.test.factories import UserForumPermissionFactory
# Resolve model classes through machina's registry so that swapped/custom
# models are picked up in place of the defaults.
Forum = get_model('forum', 'Forum')
ForumPermission = get_model('forum_permission', 'ForumPermission')
UserForumPermission = get_model('forum_permission', 'UserForumPermission')
@pytest.mark.django_db
class TestForumPermission(object):
    def test_cannot_be_cleaned_without_local_or_global_flag(self):
        """A permission flagged neither local nor global must fail validation."""
        with pytest.raises(ValidationError):
            invalid_perm = ForumPermissionFactory.build(is_local=False, is_global=False)
            invalid_perm.clean()
@pytest.mark.django_db
class TestUserForumPermission(object):
    def test_cannot_target_an_anonymous_user_and_a_registered_user(self):
        """A user permission may not point at a registered user and the
        anonymous user at the same time."""
        registered_user = UserFactory.create()
        with pytest.raises(ValidationError):
            forum_perm = ForumPermissionFactory.create(is_local=True, is_global=True)
            bad_perm = UserForumPermissionFactory.build(
                permission=forum_perm, user=registered_user, anonymous_user=True)
            bad_perm.clean()

    def test_cannot_be_saved_without_forum_if_the_permission_is_not_global(self):
        """A non-global permission with no forum attached must fail validation."""
        with pytest.raises(ValidationError):
            forum_perm = ForumPermissionFactory.create(is_global=False)
            bad_perm = UserForumPermissionFactory.build(permission=forum_perm)
            bad_perm.clean()
| bsd-3-clause |
USGSDenverPychron/pychron | pychron/remote_hardware/registry.py | 1 | 9895 | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
from traits.has_traits import MetaHasTraits
from pychron.core.helpers.logger_setup import new_logger
from pychron.core.helpers.strtools import camel_case, to_list
# Maps public command name -> (handler function name, optional postprocess
# callable); populated by the @register decorator at class-definition time.
REGISTRY = {}
# Maps public command name -> (bound method, optional postprocess callable);
# populated per-instance by RHMixin.register_functions().
FUNC_REGISTRY = {}
logger = new_logger('DeviceFunctionRegistry')
class DeviceFunctionRegistry(object):
    """Decorator that registers a device method under a public command name.

    The decorated function is recorded in the module-level ``REGISTRY`` as
    ``name -> (function name, postprocess)``; ``RHMixin.register_functions``
    later binds the entry to a concrete device instance.
    """

    def __init__(self, name=None, camel_case=False, postprocess=None):
        # Optional callable applied to the function's return value by the
        # handler wrapper (see make_wrapper), e.g. ','.join for list results.
        self.postprocess = postprocess
        # Explicit public name; when None the function's own name is used.
        self.name = name
        # When true and no explicit name is given, CamelCase the derived name.
        self.camel_case = camel_case

    def __call__(self, func):
        name = self.name
        if name is None:
            # Fix: func.__name__ is portable across Python 2 and 3; the
            # legacy func.func_name attribute was removed in Python 3.
            name = func.__name__
            if self.camel_case:
                name = camel_case(name)
        logger.debug('register function {} as {}'.format(func.__name__, name))
        REGISTRY[name] = (func.__name__, self.postprocess)
        return func
# Conventional lowercase alias so device methods can be decorated with @register(...).
register = DeviceFunctionRegistry
class RegisteredFunction(object):
    """Decorator for client-side proxy methods that forward to ``obj.ask``.

    The decorated method body is ignored; a call is translated into
    ``obj.ask(cmd)`` where ``cmd`` defaults to the function's name
    (optionally CamelCased).  The raw reply may be coerced with
    ``returntype``.
    """

    def __init__(self, cmd=None, camel_case=False, returntype=None):
        # Explicit command string; when None the function's own name is used.
        self.cmd = cmd
        # When true and no explicit cmd is given, CamelCase the derived name.
        self.camel_case = camel_case
        # Optional callable (e.g. int, float, to_list) applied to the reply.
        self.returntype = returntype

    def __call__(self, func):
        def wrapper(obj, *args, **kw):
            cmd = self.cmd
            if cmd is None:
                # Fix: func.__name__ is portable across Python 2 and 3; the
                # legacy func.func_name attribute was removed in Python 3.
                cmd = func.__name__
                if self.camel_case:
                    cmd = camel_case(cmd)
            r = obj.ask(cmd)
            if self.returntype:
                try:
                    r = self.returntype(r)
                except BaseException:
                    # Conversion failures fall back to the raw response.
                    pass
            return r

        return wrapper
# Lowercase alias mirroring ``register`` for client-side proxy methods.
registered_function = RegisteredFunction
def make_wrapper(func, postprocess):
    """Adapt a registered device function to the remote-handler calling
    convention ``(self, manager, *args, sender)``.

    The first positional argument after ``manager`` is dropped before
    delegating; ``postprocess``, when truthy, transforms the result.
    """
    def _handler(obj, manager, *args, **kw):
        result = func(*args[1:], **kw)
        return postprocess(result) if postprocess else result

    return _handler
class MetaHandler(MetaHasTraits):
    # Metaclass that, each time the handler class is instantiated, attaches
    # every entry of FUNC_REGISTRY to the class as a method before deferring
    # to the normal HasTraits construction.
    def __call__(cls, *args, **kw):
        # NOTE(review): methods are (re)bound on every instantiation; assumes
        # FUNC_REGISTRY is fully populated before the first Handler() call --
        # confirm against callers.
        for k, v in FUNC_REGISTRY.items():
            setattr(cls, k, make_wrapper(*v))
        return MetaHasTraits.__call__(cls, *args, **kw)
class RHMixin(object):
    """Mixin that publishes a device's registered functions.

    ``register_functions`` walks the module-level ``REGISTRY`` and, for every
    registered function this instance implements, stores the bound method in
    ``FUNC_REGISTRY`` under the public command name.
    """

    def register_functions(self):
        for k, (fname, p) in REGISTRY.items():
            if hasattr(self, fname):
                # Fix: FUNC_REGISTRY is keyed by the public name ``k``, but
                # the duplicate check previously tested ``fname`` -- so
                # camel-cased entries (where k != fname) were silently
                # re-registered and overwritten on every call.
                if k not in FUNC_REGISTRY:
                    func = getattr(self, fname)
                    if func is not None:
                        FUNC_REGISTRY[k] = (func, p)
                        # NOTE(review): assumes the concrete device defines a
                        # ``name`` attribute -- confirm against subclasses.
                        logger.debug('Function register {} {}:{}'.format(self.name, k, fname))
if __name__ == '__main__':
    # Demonstration: a Handler class grows one method per registered device
    # function via the MetaHandler metaclass (Python 2 ``__metaclass__``).
    class Handler(object):
        __metaclass__ = MetaHandler
    class Device2(RHMixin):
        def __init__(self):
            self.coolant = -1234
            self.register_functions()
        @register()
        def get_coolant(self):
            return self.coolant
    class Device(RHMixin):
        def __init__(self):
            self.output = 101.43
            self.register_functions()
        # camel_case=True publishes this as 'GetCoolantOutTemperature'.
        @register(camel_case=True)
        def get_coolant_out_temperature(self):
            return self.output
        # postprocess joins the fault list into one comma-separated string.
        @register(camel_case=True, postprocess=','.join)
        def get_faults(self):
            return ['foo', 'bar']
    class Device3(RHMixin):
        def __init__(self):
            self.register_functions()
        # An explicit public name overrides the function name.
        @register('MyFunc')
        def foobar(self):
            return 'asdf'
    # Instantiating the devices populates FUNC_REGISTRY...
    d = Device()
    d2 = Device2()
    d3 = Device3()
    # print d.get_coolant_out_temperature()
    # ...so Handler() picks the registered functions up at construction time.
    h = Handler()
    print 'handler get coolant out temp', h.GetCoolantOutTemperature(None)
    print 'handler get faults', h.GetFaults(None)
    # print h.get_coolant_out_temperature(None)
    # print h.get_faults(None)
    print 'handler get coolant', h.get_coolant(None)
    print 'handler foobar', h.MyFunc(None)
    print
    # Client side: RemoteDevice proxies calls through ask() using the same
    # camel-cased command names, coercing replies with returntype.
    class RemoteDevice(object):
        @registered_function(camel_case=True, returntype=to_list)
        def get_faults(self):
            pass
        @registered_function(camel_case=True, returntype=float)
        def get_coolant_out_temperature(self):
            pass
        def ask(self, cmd):
            print 'Asking {}'.format(cmd)
            if cmd == 'GetCoolantOutTemperature':
                return '1'
            elif cmd == 'GetFaults':
                return 'Too Hot,Tank Low'
    rd = RemoteDevice()
    v = rd.get_coolant_out_temperature()
    print 'remote device coolant', v, type(v)
    v = rd.get_faults()
    print 'remote device faults', v, type(v)
# ============= EOF =============================================
# # ===============================================================================
# # Copyright 2015 Jake Ross
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# # ===============================================================================
#
# # ============= enthought library imports =======================
# import weakref
# # ============= standard library imports ========================
# # ============= local library imports ==========================
# from pychron.core.helpers.strtools import camel_case, to_list
#
# REGISTRY = {}
# FUNC_REGISTRY = {}
#
#
# class Registry(object):
# def __init__(self, name=None, camel_case=False, postprocess=None):
# self.postprocess = postprocess
# self.name = name
# self.camel_case = camel_case
#
# def __call__(self, func):
#
# name = self.name
# if name is None:
# name = func.func_name
# if self.camel_case:
# name = camel_case(name)
#
# if self.postprocess:
# nfunc = lambda *args, **kw: self.postprocess(func(*args, **kw))
# REGISTRY[name] = nfunc
# return nfunc
# else:
# REGISTRY[name] = func
# return func
#
#
# register = Registry
#
#
# class RegisteredFunction(object):
# def __init__(self, camel_case=False, returntype=None):
# self.camel_case = camel_case
# self.returntype = returntype
#
# def __call__(self, func):
# def wrapper(obj, *args, **kw):
# cmd = func.func_name
# if self.camel_case:
# cmd = camel_case(cmd)
#
# r = obj.ask(cmd)
# if self.returntype:
# try:
# r = self.returntype(r)
# except BaseException:
# pass
#
# return r
#
# return wrapper
#
#
# registered_function = RegisteredFunction
#
#
# class RHHandleMixin(object):
# def registry_commands(self):
# for k, v in REGISTRY.items():
# FUNC_REGISTRY[k] = (v, weakref.ref(self)())
#
#
# if __name__ == '__main__':
#
# class Handler(object):
# def __getattr__(self, item):
# if item in FUNC_REGISTRY:
# func, obj = FUNC_REGISTRY[item]
# return lambda manager, *args, **kw: func(obj, *args, **kw)
#
# class Device(object):
# def __init__(self):
# self.output = 10.3
# for k, v in REGISTRY.items():
# FUNC_REGISTRY[k] = (v, weakref.ref(self)())
#
# @register(camel_case=True, postprocess=','.join)
# def get_faults(self):
# return ['tank low', 'too hot']
#
# @register(camel_case=True)
# def get_coolant_out_temperature(self):
# return self.output
#
# @register()
# def amyfunc(self, a):
# return 'return of myfunction b {}'.format(a)
#
# class RemoteDevice(object):
#
# @registered_function(camel_case=True, returntype=to_list)
# def get_faults(self):
# pass
#
# @registered_function(camel_case=True, returntype=float)
# def get_coolant_out_temperature(self):
# pass
#
# def ask(self, cmd):
# print 'Asking {}'.format(cmd)
# if cmd == 'GetCoolantOutTemperature':
# return '1'
# elif cmd == 'GetFaults':
# return 'Too Hot,Tank Low'
#
#
# d = Device()
# # d2 = Device()
# # d2.output = 100234
# a = Handler()
# # print a.myfunc(None, 'aa')
# # print a.amyfunc(None, 'abaa')
# # print a.GetCoolantOutTemperature(None)
# # print a.GetFaults(None)
#
# rd = RemoteDevice()
# v = rd.get_coolant_out_temperature()
# print v, type(v)
#
# v = rd.get_faults()
# print v, type(v)
#
# # ============= EOF =============================================
#
#
#
| apache-2.0 |
ahmetabdi/SickRage | lib/hachoir_parser/archive/cab.py | 90 | 4627 | """
Microsoft Cabinet (CAB) archive.
Author: Victor Stinner
Creation date: 31 January 2007
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, Enum,
CString, String,
UInt16, UInt32, Bit, Bits, PaddingBits, NullBits,
DateTimeMSDOS32, RawBytes)
from lib.hachoir_parser.common.msdos import MSDOSFileAttr16
from lib.hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
from lib.hachoir_core.endian import LITTLE_ENDIAN
# Sanity bound used by CabFile.validate(): more folders than this is treated
# as an implausible/corrupt header.
MAX_NB_FOLDER = 30

COMPRESSION_NONE = 0
# Display names for the folder compression method (low 4 bits of compr_method).
COMPRESSION_NAME = {
    0: "Uncompressed",
    1: "Deflate",
    2: "Quantum",
    3: "LZX",
}
class Folder(FieldSet):
    """One folder entry: where its data lives and how it is compressed."""

    def createFields(self):
        yield UInt32(self, "off_data", "Offset of data")
        yield UInt16(self, "cf_data")
        yield Enum(Bits(self, "compr_method", 4, "Compression method"), COMPRESSION_NAME)
        yield Bits(self, "compr_level", 5, "Compression level")
        yield PaddingBits(self, "padding", 7)

    def createDescription(self):
        method = self["compr_method"]
        desc = "Folder: compression %s" % method.display
        if method.value == COMPRESSION_NONE:
            return desc
        return desc + " (level %u)" % self["compr_level"].value
class File(FieldSet):
    """One file entry describing a single archived file."""

    def createFields(self):
        yield filesizeHandler(UInt32(self, "filesize", "Uncompressed file size"))
        yield UInt32(self, "offset", "File offset after decompression")
        yield UInt16(self, "iFolder", "file control id")
        yield DateTimeMSDOS32(self, "timestamp")
        yield MSDOSFileAttr16(self, "attributes")
        yield CString(self, "filename", charset="ASCII")

    def createDescription(self):
        name = self["filename"].display
        size = self["filesize"].display
        return "File %s (%s)" % (name, size)
class Reserved(FieldSet):
    """Optional reserved area: a 32-bit length followed by raw payload."""

    def createFields(self):
        yield UInt32(self, "size")
        nbytes = self["size"].value
        if not nbytes:
            return
        yield RawBytes(self, "data", nbytes)
class Flags(FieldSet):
    # Cabinet header flag word; fixed 16-bit field.
    static_size = 16
    def createFields(self):
        # Bits are read LSB-first from the little-endian stream; the three
        # low bits are meaningful, the remaining 13 are reserved padding.
        yield Bit(self, "has_previous")
        yield Bit(self, "has_next")
        yield Bit(self, "has_reserved")
        yield NullBits(self, "padding", 13)
class CabFile(Parser):
    """Parser for a Microsoft Cabinet (.cab) archive.

    Reads the cabinet header, the folder table and the file table; the
    compressed folder data itself is left as trailing raw bytes.
    """
    endian = LITTLE_ENDIAN
    MAGIC = "MSCF"
    PARSER_TAGS = {
        "id": "cab",
        "category": "archive",
        "file_ext": ("cab",),
        "mime": (u"application/vnd.ms-cab-compressed",),
        "magic": ((MAGIC, 0),),
        "min_size": 1*8, # header + file entry
        "description": "Microsoft Cabinet archive"
    }
    def validate(self):
        # Cheap structural checks; returns an error string on failure and
        # True when the stream looks like a plausible cabinet.
        if self.stream.readBytes(0, 4) != self.MAGIC:
            return "Invalid magic"
        if self["cab_version"].value != 0x0103:
            return "Unknown version (%s)" % self["cab_version"].display
        if not (1 <= self["nb_folder"].value <= MAX_NB_FOLDER):
            return "Invalid number of folder (%s)" % self["nb_folder"].value
        return True
    def createFields(self):
        # Header: field order is fixed by the on-disk format -- do not reorder.
        yield String(self, "magic", 4, "Magic (MSCF)", charset="ASCII")
        yield textHandler(UInt32(self, "hdr_checksum", "Header checksum (0 if not used)"), hexadecimal)
        yield filesizeHandler(UInt32(self, "filesize", "Cabinet file size"))
        yield textHandler(UInt32(self, "fld_checksum", "Folders checksum (0 if not used)"), hexadecimal)
        yield UInt32(self, "off_file", "Offset of first file")
        yield textHandler(UInt32(self, "files_checksum", "Files checksum (0 if not used)"), hexadecimal)
        yield textHandler(UInt16(self, "cab_version", "Cabinet version"), hexadecimal)
        yield UInt16(self, "nb_folder", "Number of folders")
        yield UInt16(self, "nb_files", "Number of files")
        yield Flags(self, "flags")
        yield UInt16(self, "setid")
        yield UInt16(self, "number", "Zero-based cabinet number")
        # --- TODO: Support flags
        if self["flags/has_reserved"].value:
            yield Reserved(self, "reserved")
        #(3) Previous cabinet name, if CAB_HEADER.flags & CAB_FLAG_HASPREV
        #(4) Previous disk name, if CAB_HEADER.flags & CAB_FLAG_HASPREV
        #(5) Next cabinet name, if CAB_HEADER.flags & CAB_FLAG_HASNEXT
        #(6) Next disk name, if CAB_HEADER.flags & CAB_FLAG_HASNEXT
        # ----
        # Folder table, then file table, sized by the header counts.
        for index in xrange(self["nb_folder"].value):
            yield Folder(self, "folder[]")
        for index in xrange(self["nb_files"].value):
            yield File(self, "file[]")
        # Everything after the tables (the compressed data) is kept raw.
        end = self.seekBit(self.size, "endraw")
        if end:
            yield end
    def createContentSize(self):
        # The header's filesize field is in bytes; the parser works in bits.
        return self["filesize"].value * 8
| gpl-3.0 |
jeasoft/odoo | addons/account/wizard/account_state_open.py | 341 | 1785 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_state_open(osv.osv_memory):
    """Wizard that re-opens the invoice selected in the active context."""
    _name = 'account.state.open'
    _description = 'Account State Open'

    def change_inv_state(self, cr, uid, ids, context=None):
        """Fire the 'open_test' workflow signal on the first active invoice.

        Refuses to act on an invoice that has already been reconciled, then
        closes the wizard window.
        """
        if context is None:
            context = {}
        invoice_pool = self.pool.get('account.invoice')
        active_ids = context.get('active_ids')
        if isinstance(active_ids, list):
            record = invoice_pool.browse(cr, uid, active_ids[0], context=context)
            if record.reconciled:
                raise osv.except_osv(_('Warning!'), _('Invoice is already reconciled.'))
            record.signal_workflow('open_test')
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
v-iam/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/compute/v2016_03_30/operations/virtual_machines_operations.py | 2 | 48648 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class VirtualMachinesOperations(object):
"""VirtualMachinesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-03-30".
"""
    def __init__(self, client, config, serializer, deserializer):
        # Request pipeline and msrest (de)serializers are injected by the
        # generated service client; api_version is fixed for this API surface.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2016-03-30"
        self.config = config
    def capture(
            self, resource_group_name, vm_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Captures the VM by copying virtual hard disks of the VM and outputs
        a template that can be used to create similar VMs.

        Long-running operation: unless ``raw=True`` the call returns an
        ``AzureOperationPoller`` that polls until the service reaches a
        terminal state.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the Capture Virtual Machine
         operation (``VirtualMachineCaptureParameters``).
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype: :class:`AzureOperationPoller
         <msrestazure.azure_operation.AzureOperationPoller>` yielding
         ``VirtualMachineCaptureResult``, or ``ClientRawResponse`` if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'VirtualMachineCaptureParameters')

        # Construct and send request
        def long_running_send():
            # Initial POST that starts the capture operation.
            request = self._client.post(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status URL handed back by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 carries the final capture result; 202 means still running.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('VirtualMachineCaptureResult', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        if raw:
            # raw=True bypasses polling: send once, interpret immediately.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def create_or_update(
            self, resource_group_name, vm_name, parameters, custom_headers=None, raw=False, **operation_config):
        """The operation to create or update a virtual machine.

        Long-running operation: unless ``raw=True`` the call returns an
        ``AzureOperationPoller`` that polls until the service reaches a
        terminal state.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the Create Virtual Machine
         operation (``VirtualMachine`` model).
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype: :class:`AzureOperationPoller
         <msrestazure.azure_operation.AzureOperationPoller>` yielding
         ``VirtualMachine``, or ``ClientRawResponse`` if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'VirtualMachine')

        # Construct and send request
        def long_running_send():
            # Initial PUT that creates or updates the VM.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status URL handed back by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 = updated, 201 = created; both carry the VM model.
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('VirtualMachine', response)
            if response.status_code == 201:
                deserialized = self._deserialize('VirtualMachine', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        if raw:
            # raw=True bypasses polling: send once, interpret immediately.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def delete(
            self, resource_group_name, vm_name, custom_headers=None, raw=False, **operation_config):
        """The operation to delete a virtual machine.

        Long-running operation: unless ``raw=True`` the call returns an
        ``AzureOperationPoller`` that polls until the service reaches a
        terminal state.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype: :class:`AzureOperationPoller
         <msrestazure.azure_operation.AzureOperationPoller>` yielding
         ``OperationStatusResponse``, or ``ClientRawResponse`` if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # Initial DELETE; no request body.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status URL handed back by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 carries a status payload; 202 = in progress, 204 = no content.
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('OperationStatusResponse', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        if raw:
            # raw=True bypasses polling: send once, interpret immediately.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def get(
            self, resource_group_name, vm_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Retrieves information about the model view or the instance view of
        a virtual machine.

        Synchronous call (no long-running polling).

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param expand: The expand expression to apply on the operation.
         Possible values include: 'instanceView'
        :type expand: str or ``InstanceViewTypes``
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: ``VirtualMachine``, or ``ClientRawResponse`` if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'InstanceViewTypes')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachine', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def deallocate(
            self, resource_group_name, vm_name, custom_headers=None, raw=False, **operation_config):
        """Shuts down the virtual machine and releases the compute resources.
        You are not billed for the compute resources that this virtual
        machine uses.

        Long-running operation: unless ``raw=True`` the call returns an
        ``AzureOperationPoller`` that polls until the service reaches a
        terminal state.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype: :class:`AzureOperationPoller
         <msrestazure.azure_operation.AzureOperationPoller>` yielding
         ``OperationStatusResponse``, or ``ClientRawResponse`` if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # Initial POST that starts the deallocation; no request body.
            request = self._client.post(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status URL handed back by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 carries a status payload; 202 means still in progress.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('OperationStatusResponse', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        if raw:
            # raw=True bypasses polling: send once, interpret immediately.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def generalize(
        self, resource_group_name, vm_name, custom_headers=None, raw=False, **operation_config):
    """Sets the state of the virtual machine to generalized.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine.
    :type vm_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`OperationStatusResponse
     <azure.mgmt.compute.compute.v2016_03_30.models.OperationStatusResponse>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Expand the route template with the serialized path parameters.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize',
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        vmName=self._serialize.url("vm_name", vm_name, 'str'),
        subscriptionId=self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'))

    # Query string carries only the API version.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # Assemble request headers: content type, optional client request id,
    # caller-supplied headers, then the accept-language preference.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header(
            "self.config.accept_language", self.config.accept_language, 'str')

    # Issue the POST and fail fast on anything but HTTP 200.
    request = self._client.post(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    result = None
    if response.status_code == 200:
        result = self._deserialize('OperationStatusResponse', response)
    if raw:
        return ClientRawResponse(result, response)
    return result
def list(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Lists all of the virtual machines in the specified resource group. Use
    the nextLink property in the response to get the next page of virtual
    machines.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
    deserialized response
    :param operation_config: :ref:`Operation configuration
    overrides<msrest:optionsforoperations>`.
    :rtype: :class:`VirtualMachinePaged
    <azure.mgmt.compute.compute.v2016_03_30.models.VirtualMachinePaged>`
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Page fetcher invoked lazily by the Paged collection: the first call
    # (next_link is None) builds the list URL; subsequent calls follow the
    # service-provided next_link verbatim with no extra query parameters.
    def internal_paging(next_link=None, raw=False):
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines'
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        else:
            url = next_link
            query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, **operation_config)
        # Only HTTP 200 is a valid page response.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        return response
    # Deserialize response: the Paged object drives internal_paging on demand.
    deserialized = models.VirtualMachinePaged(internal_paging, self._deserialize.dependencies)
    if raw:
        header_dict = {}
        client_raw_response = models.VirtualMachinePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response
    return deserialized
def list_all(
        self, custom_headers=None, raw=False, **operation_config):
    """Lists all of the virtual machines in the specified subscription. Use
    the nextLink property in the response to get the next page of virtual
    machines.
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
    deserialized response
    :param operation_config: :ref:`Operation configuration
    overrides<msrest:optionsforoperations>`.
    :rtype: :class:`VirtualMachinePaged
    <azure.mgmt.compute.compute.v2016_03_30.models.VirtualMachinePaged>`
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Page fetcher: first call targets the subscription-wide VM endpoint,
    # later calls follow the service-provided next_link verbatim.
    def internal_paging(next_link=None, raw=False):
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines'
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        else:
            url = next_link
            query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, **operation_config)
        # Only HTTP 200 is a valid page response.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        return response
    # Deserialize response: the Paged collection pulls pages lazily.
    deserialized = models.VirtualMachinePaged(internal_paging, self._deserialize.dependencies)
    if raw:
        header_dict = {}
        client_raw_response = models.VirtualMachinePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response
    return deserialized
def list_available_sizes(
        self, resource_group_name, vm_name, custom_headers=None, raw=False, **operation_config):
    """Lists all available virtual machine sizes to which the specified
    virtual machine can be resized.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine.
    :type vm_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
    deserialized response
    :param operation_config: :ref:`Operation configuration
    overrides<msrest:optionsforoperations>`.
    :rtype: :class:`VirtualMachineSizePaged
    <azure.mgmt.compute.compute.v2016_03_30.models.VirtualMachineSizePaged>`
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Page fetcher: first call targets the per-VM vmSizes endpoint, later
    # calls follow the service-provided next_link verbatim.
    def internal_paging(next_link=None, raw=False):
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes'
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'vmName': self._serialize.url("vm_name", vm_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        else:
            url = next_link
            query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, **operation_config)
        # Only HTTP 200 is a valid page response.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        return response
    # Deserialize response: the Paged collection pulls pages lazily.
    deserialized = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies)
    if raw:
        header_dict = {}
        client_raw_response = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response
    return deserialized
def power_off(
        self, resource_group_name, vm_name, custom_headers=None, raw=False, **operation_config):
    """The operation to power off (stop) a virtual machine. The virtual
    machine can be restarted with the same provisioned resources. You are
    still charged for this virtual machine.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine.
    :type vm_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
    deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
    instance that returns :class:`OperationStatusResponse
    <azure.mgmt.compute.compute.v2016_03_30.models.OperationStatusResponse>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vmName': self._serialize.url("vm_name", vm_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    # Three closures handed to AzureOperationPoller: initial POST, status
    # polling via the operation status link, and final response -> model.
    def long_running_send():
        request = self._client.post(url, query_parameters)
        return self._client.send(request, header_parameters, **operation_config)
    def get_long_running_status(status_link, headers=None):
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)
    def get_long_running_output(response):
        # 200 carries a final status body; 202 means accepted, no body yet.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    if raw:
        # raw=True performs a single synchronous round trip without polling.
        response = long_running_send()
        return get_long_running_output(response)
    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def restart(
        self, resource_group_name, vm_name, custom_headers=None, raw=False, **operation_config):
    """The operation to restart a virtual machine.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine.
    :type vm_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
    deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
    instance that returns :class:`OperationStatusResponse
    <azure.mgmt.compute.compute.v2016_03_30.models.OperationStatusResponse>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vmName': self._serialize.url("vm_name", vm_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    # Three closures handed to AzureOperationPoller: initial POST, status
    # polling via the operation status link, and final response -> model.
    def long_running_send():
        request = self._client.post(url, query_parameters)
        return self._client.send(request, header_parameters, **operation_config)
    def get_long_running_status(status_link, headers=None):
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)
    def get_long_running_output(response):
        # 200 carries a final status body; 202 means accepted, no body yet.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    if raw:
        # raw=True performs a single synchronous round trip without polling.
        response = long_running_send()
        return get_long_running_output(response)
    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def start(
        self, resource_group_name, vm_name, custom_headers=None, raw=False, **operation_config):
    """The operation to start a virtual machine.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine.
    :type vm_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
    deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
    instance that returns :class:`OperationStatusResponse
    <azure.mgmt.compute.compute.v2016_03_30.models.OperationStatusResponse>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vmName': self._serialize.url("vm_name", vm_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    # Three closures handed to AzureOperationPoller: initial POST, status
    # polling via the operation status link, and final response -> model.
    def long_running_send():
        request = self._client.post(url, query_parameters)
        return self._client.send(request, header_parameters, **operation_config)
    def get_long_running_status(status_link, headers=None):
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)
    def get_long_running_output(response):
        # 200 carries a final status body; 202 means accepted, no body yet.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    if raw:
        # raw=True performs a single synchronous round trip without polling.
        response = long_running_send()
        return get_long_running_output(response)
    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def redeploy(
        self, resource_group_name, vm_name, custom_headers=None, raw=False, **operation_config):
    """The operation to redeploy a virtual machine.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param vm_name: The name of the virtual machine.
    :type vm_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
    deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
    instance that returns :class:`OperationStatusResponse
    <azure.mgmt.compute.compute.v2016_03_30.models.OperationStatusResponse>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vmName': self._serialize.url("vm_name", vm_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct and send request
    # Three closures handed to AzureOperationPoller: initial POST, status
    # polling via the operation status link, and final response -> model.
    def long_running_send():
        request = self._client.post(url, query_parameters)
        return self._client.send(request, header_parameters, **operation_config)
    def get_long_running_status(status_link, headers=None):
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)
    def get_long_running_output(response):
        # 200 carries a final status body; 202 means accepted, no body yet.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    if raw:
        # raw=True performs a single synchronous round trip without polling.
        response = long_running_send()
        return get_long_running_output(response)
    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
| mit |
rvykydal/anaconda | pyanaconda/network.py | 3 | 22327 | #
# network.py - network configuration install data
#
# Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Red Hat, Inc.
# 2008, 2009, 2017
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import shutil
import socket
import itertools
import os
import time
import threading
import re
import ipaddress
from dasbus.typing import get_native
from pyanaconda.anaconda_loggers import get_module_logger
from pyanaconda.core import util, constants
from pyanaconda.core.i18n import _
from pyanaconda.core.kernel import kernel_arguments
from pyanaconda.core.regexes import HOSTNAME_PATTERN_WITHOUT_ANCHORS, \
IPV6_ADDRESS_IN_DRACUT_IP_OPTION, MAC_OCTET
from pyanaconda.core.configuration.anaconda import conf
from pyanaconda.core.constants import TIME_SOURCE_SERVER
from pyanaconda.modules.common.constants.services import NETWORK, TIMEZONE, STORAGE
from pyanaconda.modules.common.constants.objects import FCOE
from pyanaconda.modules.common.task import sync_run_task
from pyanaconda.modules.common.structures.network import NetworkDeviceInfo
from pyanaconda.modules.common.structures.timezone import TimeSourceData
from pyanaconda.modules.common.util import is_module_available
import gi
gi.require_version("NM", "1.0")
from gi.repository import NM
log = get_module_logger(__name__)
network_connected = None
network_connected_condition = threading.Condition()
_nm_client = None
__all__ = ["get_supported_devices", "status_message", "wait_for_connectivity",
"wait_for_connecting_NM_thread", "wait_for_network_devices", "wait_for_connected_NM",
"initialize_network", "copy_resolv_conf_to_root", "prefix_to_netmask",
"netmask_to_prefix", "get_first_ip_address", "is_valid_hostname", "check_ip_address",
"get_nm_client", "write_configuration"]
def get_nm_client():
    """Return a process-wide NetworkManager client, creating it lazily.

    :returns: the cached NM.Client instance, or None when the environment
        provides no system bus (e.g. image builds, dir installs)
    """
    global _nm_client
    if not conf.system.provides_system_bus:
        log.debug("NetworkManager client not available (system does not provide it).")
        return None
    # Create the client on first use and keep it for subsequent callers.
    if not _nm_client:
        _nm_client = NM.Client.new(None)
    return _nm_client
def check_ip_address(address, version=None):
    """Check if the given IP address is valid in given version if set.

    :param str address: IP address for testing
    :param int version: ``4`` for IPv4, ``6`` for IPv6 or
                        ``None`` to allow either format
    :returns: ``True`` if IP address is valid or ``False`` if not
    :rtype: bool
    """
    # Pick the parser matching the requested IP version.
    if version == 4:
        parser = ipaddress.IPv4Address
    elif version == 6:
        parser = ipaddress.IPv6Address
    elif not version:
        # No specific version requested - accept either format.
        parser = ipaddress.ip_address
    else:
        log.error("IP version %s is not supported", version)
        return False
    # The parsers raise ValueError for anything that is not a valid address.
    try:
        parser(address)
    except ValueError:
        return False
    return True
def is_valid_hostname(hostname, local=False):
    """Check whether the given string is a syntactically valid host name.

    :param str hostname: the candidate host name
    :param bool local: True when the name is the static name of this system,
        False when it is a name on the network
    :returns: a (valid, error message) pair; the message is "" when valid
    :rtype: (bool, str)
    """
    # Guard clauses, cheapest checks first; each failure carries its own
    # translated explanation for the UI.
    if not hostname:
        return False, _("Host name cannot be None or an empty string.")

    if len(hostname) > 64:
        return False, _("Host name must be 64 or fewer characters in length.")

    if local and hostname.endswith("."):
        return False, _("Local host name must not end with period '.'.")

    if not re.match('^' + HOSTNAME_PATTERN_WITHOUT_ANCHORS + '$', hostname):
        return False, _("Host names can only contain the characters 'a-z', "
                        "'A-Z', '0-9', '-', or '.', parts between periods "
                        "must contain something and cannot start or end with "
                        "'-'.")

    return True, ""
def get_ip_addresses():
    """Return a list of IP addresses for all active devices."""
    v4_list = []
    v6_list = []
    for dev in get_activated_devices(get_nm_client()):
        v4_list.extend(get_device_ip_addresses(dev, version=4))
        v6_list.extend(get_device_ip_addresses(dev, version=6))
    # IPv4 addresses come first - they are preferred over IPv6.
    return v4_list + v6_list
def get_first_ip_address():
    """Return the first non-local IP of active devices.

    :return: IP address assigned to an active device
    :rtype: str or None
    """
    # Skip loopback addresses; None when nothing else is configured.
    return next(
        (ip for ip in get_ip_addresses() if ip not in ("127.0.0.1", "::1")),
        None
    )
def netmask_to_prefix(netmask):
    """Convert a dotted-quad netmask to its prefix length (CIDR bits).

    The mask is compared against every contiguous prefix mask computed
    directly from bit arithmetic, instead of calling prefix_to_netmask()
    up to 33 times as the previous linear search did.

    :param str netmask: netmask such as "255.255.255.0"
    :returns: the prefix length 0-32, or 33 when the string is not a
        valid contiguous netmask (historical behavior, preserved)
    :rtype: int
    """
    for prefix in range(33):
        # Contiguous mask with `prefix` leading one-bits, as a 32-bit int.
        mask = (0xffffffff << (32 - prefix)) & 0xffffffff
        dotted = ".".join(str((mask >> shift) & 0xff) for shift in (24, 16, 8, 0))
        if dotted == netmask:
            return prefix
    # No contiguous mask matched - keep the legacy out-of-range sentinel.
    return 33
def prefix_to_netmask(prefix):
    """Convert prefix (CIDR bits) to a dotted-quad netmask string."""
    octets = []
    remaining = prefix
    for _ in range(4):
        # Consume up to 8 bits of the prefix for this octet.
        bits = min(remaining, 8) if remaining > 0 else 0
        octets.append(256 - 2 ** (8 - bits))
        remaining -= bits
    return ".".join(str(octet) for octet in octets)
def hostname_from_cmdline(kernel_args):
    """Get host name defined by installer boot options.

    :param kernel_args: structure holding installer boot options
    :type kernel_args: KernelArguments
    :returns: host name from boot options, or "" when none is given
    :rtype: str
    """
    # The legacy hostname= option is the fallback value.
    hostname = kernel_args.get('hostname', "")

    # ip= option (man dracut.cmdline), possibly several space-separated values:
    # ens3:dhcp 10.34.102.244::10.34.102.54:255.255.255.0:myhostname:ens9:none
    ip_options = kernel_args.get('ip')
    if not ip_options:
        return hostname

    for option in ip_options.split(" "):
        if option.startswith("["):
            # Strip bracketed ipv6 addresses so the colon-split below works:
            # [fd00:10:100::84:5]::[fd00:10:100::86:49]:80:myhostname:ens9:none
            option = IPV6_ADDRESS_IN_DRACUT_IP_OPTION.sub('', option)
        fields = option.split(':')
        # Hostname can be defined only in option having more than 6 elements.
        # But filter out auto ip= with mac address set by MAC_OCTET matching, eg:
        # ip=<interface>:dhcp::52:54:00:12:34:56
        # where the 4th element is not hostname.
        if len(fields) > 6 and not re.match(MAC_OCTET, fields[6]):
            hostname = fields[4]
    return hostname
def iface_for_host_ip(host_ip):
    """Return the name of the interface used to reach the given host IP.

    :param str host_ip: IP address of the host
    :returns: the interface name, or "" when it cannot be determined
    :rtype: str
    """
    output = util.execWithCapture("ip", ["route", "get", "to", host_ip])
    if not output:
        log.error("Could not get interface for route to %s", host_ip)
        return ""

    fields = output.split()
    # Sanity-check the reply shape: "<ip> ... dev <iface> ..." with the
    # "dev" keyword among the first four tokens.
    reply_ok = (
        fields[0] == host_ip
        and len(fields) >= 5
        and "dev" in fields
        and fields.index("dev") <= 3
    )
    if not reply_ok:
        log.error('Unexpected "ip route get to %s" reply: %s', host_ip, fields)
        return ""

    return fields[fields.index("dev") + 1]
def copy_resolv_conf_to_root(root="/"):
    """Copy /etc/resolv.conf into the given system root.

    Does nothing when the source is missing or the target already exists.

    :param str root: path to the system root
    """
    src = "/etc/resolv.conf"
    dst = os.path.join(root, src.lstrip('/'))

    # Nothing to copy.
    if not os.path.isfile(src):
        log.debug("%s does not exist", src)
        return

    # Never overwrite an existing target configuration.
    if os.path.isfile(dst):
        log.debug("%s already exists", dst)
        return

    parent = os.path.dirname(dst)
    if not os.path.isdir(parent):
        util.mkdirChain(parent)
    shutil.copyfile(src, dst)
def run_network_initialization_task(task_path):
    """Run a network initialization DBus task and log its result.

    :param str task_path: object path of the task to run
    """
    task = NETWORK.get_proxy(task_path)
    log.debug("Running task %s", task.Name)
    sync_run_task(task)
    # Convert the DBus variant to a native value for readable logging.
    log.debug("%s result: %s" % (task.Name, get_native(task.GetResult())))
def initialize_network():
    """Initialize networking.

    Applies kickstart network configuration, generates missing config
    files, picks up a host name from boot options and creates device
    configuration tracking in the network DBus module. The steps must
    run in this order. No-op when the network cannot be configured
    (e.g. image or dir installations).
    """
    if not conf.system.can_configure_network:
        return
    network_proxy = NETWORK.get_proxy()
    msg = "Initialization started."
    log.debug(msg)
    # Record the state also in the module's log for debugging.
    network_proxy.LogConfigurationState(msg)
    log.debug("Devices found: %s",
              [dev.device_name for dev in get_supported_devices()])
    # Apply kickstart first, then create config files for any devices
    # that still lack one.
    run_network_initialization_task(network_proxy.ApplyKickstartWithTask())
    run_network_initialization_task(network_proxy.DumpMissingConfigFilesWithTask())
    # Boot options set the host name only when kickstart did not.
    if not network_proxy.Hostname:
        bootopts_hostname = hostname_from_cmdline(kernel_arguments)
        if bootopts_hostname:
            log.debug("Updating host name from boot options: %s", bootopts_hostname)
            network_proxy.SetHostname(bootopts_hostname)
    # Create device configuration tracking in the module.
    # It will be used to generate kickstart from persistent network configuration
    # managed by NM (having config files) and updated by NM signals on device
    # configuration changes.
    log.debug("Creating network configurations.")
    network_proxy.CreateDeviceConfigurations()
    log.debug("Initialization finished.")
def write_configuration(overwrite=False):
    """Install network configuration to target system.

    :param overwrite: overwrite existing configuration files
    """
    # Make sure FCoE NICs are configured to activate on boot.
    fcoe_proxy = STORAGE.get_proxy(FCOE)
    fcoe_nics = fcoe_proxy.GetNics()
    fcoe_ifaces = [dev.device_name for dev in get_supported_devices()
                   if dev.device_name in fcoe_nics]

    network_proxy = NETWORK.get_proxy()

    task_path = network_proxy.ConfigureActivationOnBootWithTask(fcoe_ifaces)
    task_proxy = NETWORK.get_proxy(task_path)
    sync_run_task(task_proxy)

    # Copy the network configuration files into the target root.
    task_path = network_proxy.InstallNetworkWithTask(overwrite)
    task_proxy = NETWORK.get_proxy(task_path)
    sync_run_task(task_proxy)

    # Write the hostname configuration into the target root.
    task_path = network_proxy.ConfigureHostnameWithTask(overwrite)
    task_proxy = NETWORK.get_proxy(task_path)
    sync_run_task(task_proxy)

    # Also apply the hostname to the running system when permitted.
    if conf.system.can_change_hostname:
        hostname = network_proxy.Hostname
        if hostname:
            network_proxy.SetCurrentHostname(hostname)
def _set_ntp_servers_from_dhcp():
    """Set NTP servers of timezone module from dhcp if not set by kickstart."""
    # FIXME - do it only if they will be applied (the guard at the end of the function)
    if not is_module_available(TIMEZONE):
        return

    timezone_proxy = TIMEZONE.get_proxy()
    ntp_servers = get_ntp_servers_from_dhcp(get_nm_client())
    log.info("got %d NTP servers from DHCP", len(ntp_servers))

    # Prefer host names over raw addresses where reverse DNS resolves.
    hostnames = []
    for server_address in ntp_servers:
        try:
            hostname = socket.gethostbyaddr(server_address)[0]
        except socket.error:
            # getting hostname failed, just use the address returned from DHCP
            log.debug("getting NTP server host name failed for address: %s",
                      server_address)
            hostname = server_address
        hostnames.append(hostname)

    # check if some NTP servers were specified from kickstart
    if not timezone_proxy.TimeSources and conf.target.is_hardware:
        # no NTP servers were specified, add those from DHCP
        servers = []

        for hostname in hostnames:
            server = TimeSourceData()
            server.type = TIME_SOURCE_SERVER
            server.hostname = hostname
            server.options = ["iburst"]
            servers.append(server)

        timezone_proxy.SetTimeSources(
            TimeSourceData.to_structure_list(servers)
        )
def wait_for_connected_NM(timeout=constants.NETWORK_CONNECTION_TIMEOUT, only_connecting=False):
    """Wait for NM being connected.

    If only_connecting is set, wait only if NM is in connecting state and
    return immediately after leaving this state (regardless of the new state).
    Used to wait for dhcp configuration in progress.

    :param timeout: timeout in seconds
    :type timeout: int
    :param only_connecting: wait only for the result of NM being connecting
    :type only_connecting: bool
    :return: NM is connected
    :rtype: bool
    """
    network_proxy = NETWORK.get_proxy()
    if network_proxy.Connected:
        return True

    if only_connecting:
        if network_proxy.IsConnecting():
            log.debug("waiting for connecting NM (dhcp in progress?), timeout=%d", timeout)
        else:
            return False
    else:
        log.debug("waiting for connected NM, timeout=%d", timeout)

    # Poll the module state until connected or the timeout elapses.
    i = 0
    while i < timeout:
        i += constants.NETWORK_CONNECTED_CHECK_INTERVAL
        time.sleep(constants.NETWORK_CONNECTED_CHECK_INTERVAL)
        if network_proxy.Connected:
            log.debug("NM connected, waited %d seconds", i)
            return True
        elif only_connecting:
            # The connecting phase ended (either way); stop waiting.
            if not network_proxy.IsConnecting():
                break

    log.debug("NM not connected, waited %d seconds", i)
    return False
def wait_for_network_devices(devices, timeout=constants.NETWORK_CONNECTION_TIMEOUT):
    """Wait for all given network devices to be activated with a connection.

    :param devices: names of the devices to wait for
    :param timeout: how many seconds to keep polling
    :return: True when every device got activated in time, False otherwise
    """
    wanted = set(devices)
    log.debug("waiting for connection of devices %s for iscsi", wanted)
    # Poll once per second, up to `timeout` attempts.
    for _attempt in range(timeout):
        network_proxy = NETWORK.get_proxy()
        activated = set(network_proxy.GetActivatedInterfaces())
        if wanted.issubset(activated):
            return True
        time.sleep(1)
    return False
def wait_for_connecting_NM_thread():
    """Wait for connecting NM in thread, do some work and signal connectivity.

    This function is called from a thread which is run at startup to wait for
    NetworkManager being in connecting state (eg getting IP from DHCP). When NM
    leaves connecting state do some actions and signal new state if NM becomes
    connected.
    """
    connected = wait_for_connected_NM(only_connecting=True)
    if connected:
        _set_ntp_servers_from_dhcp()
    # Publish the result and wake up any wait_for_connectivity() callers.
    with network_connected_condition:
        global network_connected
        network_connected = connected
        network_connected_condition.notify_all()
def wait_for_connectivity(timeout=constants.NETWORK_CONNECTION_TIMEOUT):
    """Wait for network connectivity to become available.

    :param timeout: how long to wait in seconds
    :type timeout: integer or float
    :return: the connectivity result (None when the check never finished)
    """
    # Use the condition as a context manager so the lock is released even
    # if wait() raises; the previous manual acquire()/release() pair would
    # leak the lock in that case.
    with network_connected_condition:
        # network_connected is None while the connectivity check has not
        # yet run or is still in progress, so wait for it to finish.
        if network_connected is None:
            # wait() releases the lock and reacquires it once notified
            # (or once the timeout expires).
            network_connected_condition.wait(timeout=timeout)
        connected = network_connected
    return connected
def get_activated_devices(nm_client):
    """Return NetworkManager devices belonging to activated connections.

    :param nm_client: NetworkManager client instance
    :return: list of device objects
    """
    devices = []
    for connection in nm_client.get_active_connections():
        if connection.get_state() == NM.ActiveConnectionState.ACTIVATED:
            devices.extend(connection.get_devices())
    return devices
def status_message(nm_client):
    """A short string describing which devices are connected."""
    msg = _("Unknown")

    if not nm_client:
        msg = _("Status not available")
        return msg

    state = nm_client.get_state()
    if state == NM.State.CONNECTING:
        msg = _("Connecting...")
    elif state == NM.State.DISCONNECTING:
        msg = _("Disconnecting...")
    else:
        # Ignore internal libvirt bridges; they don't represent real
        # host connectivity.
        active_devs = [d for d in get_activated_devices(nm_client)
                       if not is_libvirt_device(d.get_ip_iface() or d.get_iface())]
        if active_devs:
            ports = {}
            ssids = {}
            nonports = []
            # first find ports and wireless aps
            for device in active_devs:
                device_ports = []
                if hasattr(device, 'get_slaves'):
                    device_ports = [port_dev.get_iface() for port_dev in device.get_slaves()]
                iface = device.get_iface()
                ports[iface] = device_ports
                if device.get_device_type() == NM.DeviceType.WIFI:
                    ssid = ""
                    ap = device.get_active_access_point()
                    if ap:
                        ssid = ap.get_ssid().get_data().decode()
                    ssids[iface] = ssid
            # Devices that are not a port of a bond/team/bridge are the
            # ones worth reporting to the user.
            all_ports = set(itertools.chain.from_iterable(ports.values()))
            nonports = [dev for dev in active_devs if dev.get_iface() not in all_ports]
            if len(nonports) == 1:
                device = nonports[0]
                iface = device.get_ip_iface() or device.get_iface()
                device_type = device.get_device_type()
                if device_type_is_supported_wired(device_type):
                    msg = _("Wired (%(interface_name)s) connected") \
                        % {"interface_name": iface}
                elif device_type == NM.DeviceType.WIFI:
                    msg = _("Wireless connected to %(access_point)s") \
                        % {"access_point": ssids[iface]}
                elif device_type == NM.DeviceType.BOND:
                    msg = _("Bond %(interface_name)s (%(list_of_ports)s) connected") \
                        % {"interface_name": iface,
                           "list_of_ports": ",".join(ports[iface])}
                elif device_type == NM.DeviceType.TEAM:
                    msg = _("Team %(interface_name)s (%(list_of_ports)s) connected") \
                        % {"interface_name": iface,
                           "list_of_ports": ",".join(ports[iface])}
                elif device_type == NM.DeviceType.BRIDGE:
                    msg = _("Bridge %(interface_name)s (%(list_of_ports)s) connected") \
                        % {"interface_name": iface,
                           "list_of_ports": ",".join(ports[iface])}
                elif device_type == NM.DeviceType.VLAN:
                    parent = device.get_parent()
                    vlanid = device.get_vlan_id()
                    msg = _("VLAN %(interface_name)s (%(parent_device)s, ID %(vlanid)s) connected") \
                        % {"interface_name": iface, "parent_device": parent, "vlanid": vlanid}
            elif len(nonports) > 1:
                # Several top-level devices: list them in a compact form.
                devlist = []
                for device in nonports:
                    iface = device.get_ip_iface() or device.get_iface()
                    device_type = device.get_device_type()
                    if device_type_is_supported_wired(device_type):
                        devlist.append("%s" % iface)
                    elif device_type == NM.DeviceType.WIFI:
                        devlist.append("%s" % ssids[iface])
                    elif device_type == NM.DeviceType.BOND:
                        devlist.append("%s (%s)" % (iface, ",".join(ports[iface])))
                    elif device_type == NM.DeviceType.TEAM:
                        devlist.append("%s (%s)" % (iface, ",".join(ports[iface])))
                    elif device_type == NM.DeviceType.BRIDGE:
                        devlist.append("%s (%s)" % (iface, ",".join(ports[iface])))
                    elif device_type == NM.DeviceType.VLAN:
                        devlist.append("%s" % iface)
                msg = _("Connected: %(list_of_interface_names)s") % {"list_of_interface_names": ", ".join(devlist)}
        else:
            msg = _("Not connected")

    if not get_supported_devices():
        msg = _("No network devices available")

    return msg
def get_supported_devices():
    """Get existing network devices supported by the installer.

    :return: basic information about the devices
    :rtype: list(NetworkDeviceInfo)
    """
    proxy = NETWORK.get_proxy()
    structures = proxy.GetSupportedDevices()
    return NetworkDeviceInfo.from_structure_list(structures)
def get_ntp_servers_from_dhcp(nm_client):
    """Return IPs of NTP servers obtained by DHCP.

    :param nm_client: instance of NetworkManager client
    :type nm_client: NM.Client
    :return: IPs of NTP servers obtained by DHCP
    :rtype: list of str
    """
    servers = []
    if not nm_client:
        return servers
    for device in get_activated_devices(nm_client):
        dhcp4_config = device.get_dhcp4_config()
        if not dhcp4_config:
            continue
        raw_servers = dhcp4_config.get_options().get("ntp_servers")
        if raw_servers:
            servers.extend(raw_servers.split(" "))
    # NetworkManager does not request NTP/SNTP options for DHCP6.
    return servers
def get_device_ip_addresses(device, version=4):
    """Get IP addresses of the device.

    Ignores ipv6 link-local addresses.

    :param device: NetworkManager device object
    :type device: NMDevice
    :param version: IP version (4 or 6)
    :type version: int
    :return: list of address strings (empty for no config / other versions)
    """
    if version == 4:
        config = device.get_ip4_config()
        if not config:
            return []
        return [entry.get_address() for entry in config.get_addresses()]
    if version == 6:
        config = device.get_ip6_config()
        if not config:
            return []
        result = []
        for entry in config.get_addresses():
            value = entry.get_address()
            # Skip link-local addresses; they are not useful to report.
            if not value.startswith("fe80:"):
                result.append(value)
        return result
    return []
def is_libvirt_device(iface):
    """Check whether the interface name is an internal libvirt bridge.

    :param iface: interface name (may be None or empty)
    :return: bool

    The original expression ``iface and iface.startswith("virbr")``
    leaked its falsy operand (None or "") instead of returning a bool;
    normalize so callers always get True/False.
    """
    return bool(iface and iface.startswith("virbr"))
def device_type_is_supported_wired(device_type):
    """Is the NM device type a wired type supported by the installer?"""
    supported = (NM.DeviceType.ETHERNET, NM.DeviceType.INFINIBAND)
    return device_type in supported
| gpl-2.0 |
thomasvs/pychecker | pychecker2/utest/format.py | 11 | 3277 | from pychecker2 import TestSupport
from pychecker2 import FormatStringChecks
class FormatTestCase(TestSupport.WarningTester):
    # Exercises FormatStringChecks: feed tiny code snippets to the checker
    # and assert which format-string warnings (if any) are produced.
    # NOTE(review): this is Python 2 test code (backtick repr below).

    def testGoodFormats(self):
        # Well-formed % formats must produce no warnings at all.
        self.silent('def f(x):\n'
                    ' return "%s" % x\n')
        self.silent('def f(x):\n'
                    " return ('%s' + '%s') % (x, x)\n")
        self.silent("def f(x):\n"
                    " return (('%s' + '%s') * 8) % ((x,) * 16)\n")
        self.silent("def f(x):\n"
                    " y = 2\n"
                    " return '%(x)f %(y)s' % locals()\n")
        self.silent("y = 1\n"
                    "def f():\n"
                    " return '%(y)s' % globals()\n")
        self.silent("def f():\n"
                    " return '%*.s %*.*s %*f' % locals()\n")
        self.silent("def f():\n"
                    " return '%s %%' % ('',)\n")
        self.silent("def f(t):\n"
                    " return '%s %f' % t\n")
        self.silent("def f(t):\n"
                    " return ('%s %f' + t) % (1, 2)\n")
        self.silent("def f(t):\n"
                    " return '%s' % `t`\n")
        self.silent("def f(t):\n"
                    " return '%s' * ((7 - 1) / 2) % (t,t,t)\n")

    def testBadFormats(self):
        # Malformed conversion specifiers must raise badFormat.
        w = FormatStringChecks.FormatStringCheck.badFormat
        self.warning("def f():\n"
                     " return '%' % locals()\n", 2, w, 0, '%')
        self.warning("def f():\n"
                     " return '%z a kookie format, yah' % locals()\n",
                     2, w, 0, '%z a kooki...')
        self.warning("def f():\n"
                     " return '%(foo)*.*s' % {'foo': 'bar'}\n",
                     2, w, 0, '%(foo)*.*s')

    def testMixed(self):
        # Mixing named and positional specifiers raises mixedFormat.
        w = FormatStringChecks.FormatStringCheck.mixedFormat
        self.warning("def f():\n"
                     " return '%(mi)x %up' % locals()\n", 2, w, '(mi)')
        self.warning("def f():\n"
                     " return '%up %(mi)x' % (1, 2)\n", 2, w, '(mi)')

    def testFormatCount(self):
        # Argument count must match the number of conversion specifiers.
        w = FormatStringChecks.FormatStringCheck.formatCount
        self.warning("def f():\n"
                     " return '%s %d %f' % ('', 2)\n",
                     2, w, 2, 3)

    def testUselessModifier(self):
        # The C length modifier 'l' is meaningless in Python formats.
        w = FormatStringChecks.FormatStringCheck.uselessModifier
        self.warning("def f(t):\n"
                     " return '%s %lf' % (t, t)\n",
                     2, w, 'l')

    def testFormatConstants(self):
        # Errors while folding constant operands raise badConstant.
        w = FormatStringChecks.FormatStringCheck.badConstant
        self.warning("def f():\n"
                     " return ('%s' * 6) % ((1, 2) + 3 * 7)\n",
                     2, w, 'can only concatenate tuple (not "int") to tuple')
        self.warning("def f():\n"
                     " return ('%s' + 6) % ((1, 2) * 3)\n",
                     2, w, "cannot concatenate 'str' and 'int' objects")

    def testUnknownName(self):
        # Names absent from locals()/globals() raise unknownFormatName.
        w = FormatStringChecks.FormatStringCheck.unknownFormatName
        self.warning("def f():\n"
                     " return '%(unknown)s' % globals()\n",
                     2, w, "unknown", "globals")
        self.warning("def f():\n"
                     " return '%(unknown)s' % locals()\n",
                     2, w, "unknown", "locals")
marknca/cling | dependencies/docutils/parsers/rst/languages/cs.py | 128 | 4857 | # $Id: cs.py 7119 2011-09-02 13:00:23Z milde $
# Author: Marek Blaha <mb@dat.cz>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Czech-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'


# Czech directive names -> canonical directive names registered in
# docutils.parsers.rst.directives. Entries marked "(translation
# required)" still await a Czech translation upstream.
directives = {
      # language-dependent: fixed
      u'pozor': 'attention',
      u'caution (translation required)': 'caution', # jak rozlisit caution a warning?
      u'code (translation required)': 'code',
      u'nebezpe\u010D\u00ED': 'danger',
      u'chyba': 'error',
      u'rada': 'hint',
      u'd\u016Fle\u017Eit\u00E9': 'important',
      u'pozn\u00E1mka': 'note',
      u'tip (translation required)': 'tip',
      u'varov\u00E1n\u00ED': 'warning',
      u'admonition (translation required)': 'admonition',
      u'sidebar (translation required)': 'sidebar',
      u't\u00E9ma': 'topic',
      u'line-block (translation required)': 'line-block',
      u'parsed-literal (translation required)': 'parsed-literal',
      u'odd\u00EDl': 'rubric',
      u'moto': 'epigraph',
      u'highlights (translation required)': 'highlights',
      u'pull-quote (translation required)': 'pull-quote',
      u'compound (translation required)': 'compound',
      u'container (translation required)': 'container',
      #'questions': 'questions',
      #'qa': 'questions',
      #'faq': 'questions',
      u'table (translation required)': 'table',
      u'csv-table (translation required)': 'csv-table',
      u'list-table (translation required)': 'list-table',
      u'math (translation required)': 'math',
      u'meta (translation required)': 'meta',
      #'imagemap': 'imagemap',
      u'image (translation required)': 'image', # obrazek
      u'figure (translation required)': 'figure', # a tady?
      u'include (translation required)': 'include',
      u'raw (translation required)': 'raw',
      u'replace (translation required)': 'replace',
      u'unicode (translation required)': 'unicode',
      u'datum': 'date',
      u't\u0159\u00EDda': 'class',
      u'role (translation required)': 'role',
      u'default-role (translation required)': 'default-role',
      u'title (translation required)': 'title',
      u'obsah': 'contents',
      u'sectnum (translation required)': 'sectnum',
      u'section-numbering (translation required)': 'sectnum',
      u'header (translation required)': 'header',
      u'footer (translation required)': 'footer',
      #'footnotes': 'footnotes',
      #'citations': 'citations',
      u'target-notes (translation required)': 'target-notes',
      u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Czech name to registered (in directives/__init__.py) directive name
mapping."""

roles = {
    # language-dependent: fixed
    u'abbreviation (translation required)': 'abbreviation',
    u'ab (translation required)': 'abbreviation',
    u'acronym (translation required)': 'acronym',
    u'ac (translation required)': 'acronym',
    u'code (translation required)': 'code',
    u'index (translation required)': 'index',
    u'i (translation required)': 'index',
    u'subscript (translation required)': 'subscript',
    u'sub (translation required)': 'subscript',
    u'superscript (translation required)': 'superscript',
    u'sup (translation required)': 'superscript',
    u'title-reference (translation required)': 'title-reference',
    u'title (translation required)': 'title-reference',
    u't (translation required)': 'title-reference',
    u'pep-reference (translation required)': 'pep-reference',
    u'pep (translation required)': 'pep-reference',
    u'rfc-reference (translation required)': 'rfc-reference',
    u'rfc (translation required)': 'rfc-reference',
    u'emphasis (translation required)': 'emphasis',
    u'strong (translation required)': 'strong',
    u'literal (translation required)': 'literal',
    u'math (translation required)': 'math',
    u'named-reference (translation required)': 'named-reference',
    u'anonymous-reference (translation required)': 'anonymous-reference',
    u'footnote-reference (translation required)': 'footnote-reference',
    u'citation-reference (translation required)': 'citation-reference',
    u'substitution-reference (translation required)': 'substitution-reference',
    u'target (translation required)': 'target',
    u'uri-reference (translation required)': 'uri-reference',
    u'uri (translation required)': 'uri-reference',
    u'url (translation required)': 'uri-reference',
    u'raw (translation required)': 'raw',}
"""Mapping of Czech role names to canonical role names for interpreted text.
"""
| apache-2.0 |
abstract-open-solutions/OCB | doc/_themes/odoodoc/odoo_pygments.py | 129 | 1723 | # -*- coding: utf-8 -*-
import imp
import sys
from pygments.style import Style
from pygments.token import *
# extracted from getbootstrap.com
class OdooStyle(Style):
    # Pygments highlighting style for the Odoo documentation theme
    # (token colors extracted from getbootstrap.com).
    background_color = '#ffffcc'
    highlight_color = '#fcf8e3'
    styles = {
        Whitespace: '#BBB',
        Error: 'bg:#FAA #A00',
        Keyword: '#069',
        Keyword.Type: '#078',
        Name.Attribute: '#4F9FCF',
        Name.Builtin: '#366',
        Name.Class: '#0A8',
        Name.Constant: '#360',
        Name.Decorator: '#99F',
        Name.Entity: '#999',
        Name.Exception: '#C00',
        Name.Function: '#C0F',
        Name.Label: '#99F',
        Name.Namespace: '#0CF',
        Name.Tag: '#2F6F9F',
        Name.Variable: '#033',
        String: '#d44950',
        String.Backtick: '#C30',
        String.Char: '#C30',
        String.Doc: 'italic #C30',
        String.Double: '#C30',
        String.Escape: '#C30',
        String.Heredoc: '#C30',
        # NOTE(review): "Interol" looks like a typo for String.Interpol;
        # as written it defines a harmless extra token. Confirm upstream.
        String.Interol: '#C30',
        String.Other: '#C30',
        String.Regex: '#3AA',
        String.Single: '#C30',
        String.Symbol: '#FC3',
        Number: '#F60',
        Operator: '#555',
        Operator.Word: '#000',
        Comment: '#999',
        Comment.Preproc: '#099',
        Generic.Deleted: 'bg:#FCC border:#c00',
        Generic.Emph: 'italic',
        Generic.Error: '#F00',
        Generic.Heading: '#030',
        Generic.Inserted: 'bg:#CFC border:#0C0',
        Generic.Output: '#AAA',
        Generic.Prompt: '#009',
        Generic.Strong: '',
        Generic.Subheading: '#030',
        Generic.Traceback: '#9C6',
    }
# Register the style under "pygments.styles.odoo" so Pygments/Sphinx can
# look it up by module path. types.ModuleType replaces the deprecated
# imp.new_module() (the imp module was removed in Python 3.12); the
# behavior is identical.
import types

modname = 'pygments.styles.odoo'
m = types.ModuleType(modname)
m.OdooStyle = OdooStyle
sys.modules[modname] = m
| agpl-3.0 |
LockScreen/Backend | venv/lib/python2.7/site-packages/boto/dynamodb/types.py | 97 | 12477 | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
Some utility functions to deal with mapping Amazon DynamoDB types to
Python types and vice-versa.
"""
import base64
from decimal import (Decimal, DecimalException, Context,
Clamped, Overflow, Inexact, Underflow, Rounded)
from collections import Mapping
from boto.dynamodb.exceptions import DynamoDBNumberError
from boto.compat import filter, map, six, long_type
# Decimal context matching DynamoDB's numeric limits: 38 digits of
# precision with exponent range [-128, 126]. Any loss of precision
# (Inexact/Rounded) raises instead of silently truncating.
DYNAMODB_CONTEXT = Context(
    Emin=-128, Emax=126, rounding=None, prec=38,
    traps=[Clamped, Overflow, Inexact, Rounded, Underflow])
# python2.6 cannot convert floats directly to
# Decimals. This is taken from:
# http://docs.python.org/release/2.6.7/library/decimal.html#decimal-faq
def float_to_decimal(f):
    # Convert *f* to a Decimal without precision loss: divide the float's
    # exact integer ratio, doubling the context precision until the
    # division comes out exact.
    # NOTE(review): this permanently raises DYNAMODB_CONTEXT.prec as a
    # side effect when more precision is needed -- confirm intentional.
    n, d = f.as_integer_ratio()
    numerator, denominator = Decimal(n), Decimal(d)
    ctx = DYNAMODB_CONTEXT
    result = ctx.divide(numerator, denominator)
    while ctx.flags[Inexact]:
        ctx.flags[Inexact] = False
        ctx.prec *= 2
        result = ctx.divide(numerator, denominator)
    return result
def is_num(n, boolean_as_int=True):
    # True when n is a numeric *instance* or one of the numeric *types*
    # themselves (the "n in types" part supports passing e.g. int as a
    # schema marker). bool counts as numeric unless boolean_as_int is
    # False, since bool subclasses int.
    if boolean_as_int:
        types = (int, long_type, float, Decimal, bool)
    else:
        types = (int, long_type, float, Decimal)
    return isinstance(n, types) or n in types
# Version-specific predicates: on Python 2 text means ``basestring`` and
# binary data must be wrapped in Binary; on Python 3 text means ``str``
# and Binary subclasses ``bytes``. Both is_str variants also accept the
# type itself (schema markers), mirroring is_num.
if six.PY2:
    def is_str(n):
        return (isinstance(n, basestring) or
                isinstance(n, type) and issubclass(n, basestring))

    def is_binary(n):
        return isinstance(n, Binary)

else:  # PY3
    def is_str(n):
        return (isinstance(n, str) or
                isinstance(n, type) and issubclass(n, str))

    def is_binary(n):
        return isinstance(n, bytes)  # Binary is subclass of bytes.
def serialize_num(val):
    """Serialize a number to its DynamoDB wire string.

    Booleans are sent as "0"/"1" rather than "False"/"True"; every
    other numeric type is rendered with str().
    """
    return str(int(val)) if isinstance(val, bool) else str(val)
def convert_num(s):
    """Parse a DynamoDB numeric string into a float (if it has a
    decimal point) or an int (otherwise)."""
    return float(s) if '.' in s else int(s)
def convert_binary(n):
    """Decode a base64 payload received from DynamoDB into a Binary."""
    raw = base64.b64decode(n)
    return Binary(raw)
def get_dynamodb_type(val, use_boolean=True):
    """
    Take a scalar Python value and return a string representing
    the corresponding Amazon DynamoDB type. If the value passed in is
    not a supported type, raise a TypeError.

    :param use_boolean: map bool to the native 'BOOL' type instead of 'N'
    """
    dynamodb_type = None
    if val is None:
        dynamodb_type = 'NULL'
    elif is_num(val):
        # bool subclasses int so it matches is_num(); map it to BOOL
        # unless the caller asked for legacy numeric encoding.
        if isinstance(val, bool) and use_boolean:
            dynamodb_type = 'BOOL'
        else:
            dynamodb_type = 'N'
    elif is_str(val):
        dynamodb_type = 'S'
    elif isinstance(val, (set, frozenset)):
        # Sets must be homogeneous: all numbers, all strings or all
        # binary values; mixed sets fall through to the TypeError below.
        if False not in map(is_num, val):
            dynamodb_type = 'NS'
        elif False not in map(is_str, val):
            dynamodb_type = 'SS'
        elif False not in map(is_binary, val):
            dynamodb_type = 'BS'
    elif is_binary(val):
        dynamodb_type = 'B'
    elif isinstance(val, Mapping):
        dynamodb_type = 'M'
    elif isinstance(val, list):
        dynamodb_type = 'L'
    if dynamodb_type is None:
        msg = 'Unsupported type "%s" for value "%s"' % (type(val), val)
        raise TypeError(msg)
    return dynamodb_type
def dynamize_value(val):
    """
    Take a scalar Python value and return a dict consisting
    of the Amazon DynamoDB type specification and the value that
    needs to be sent to Amazon DynamoDB. If the type of the value
    is not supported, raise a TypeError
    """
    dynamodb_type = get_dynamodb_type(val)
    if dynamodb_type == 'N':
        val = {dynamodb_type: serialize_num(val)}
    elif dynamodb_type == 'S':
        val = {dynamodb_type: val}
    elif dynamodb_type == 'NS':
        val = {dynamodb_type: list(map(serialize_num, val))}
    elif dynamodb_type == 'SS':
        val = {dynamodb_type: [n for n in val]}
    elif dynamodb_type == 'B':
        if isinstance(val, bytes):
            # Wrap raw bytes so that .encode() below base64-encodes them.
            val = Binary(val)
        val = {dynamodb_type: val.encode()}
    elif dynamodb_type == 'BS':
        val = {dynamodb_type: [n.encode() for n in val]}
    return val
if six.PY2:
    class Binary(object):
        # Wrapper that marks a value as DynamoDB binary data; stores raw
        # bytes and base64-encodes them for the wire via encode().
        def __init__(self, value):
            if not isinstance(value, (bytes, six.text_type)):
                raise TypeError('Value must be a string of binary data!')
            if not isinstance(value, bytes):
                value = value.encode("utf-8")

            self.value = value

        def encode(self):
            # Base64 form as text, ready to embed in a JSON request.
            return base64.b64encode(self.value).decode('utf-8')

        def __eq__(self, other):
            # Compare to another Binary by payload, or directly to bytes.
            if isinstance(other, Binary):
                return self.value == other.value
            else:
                return self.value == other

        def __ne__(self, other):
            return not self.__eq__(other)

        def __repr__(self):
            return 'Binary(%r)' % self.value

        def __str__(self):
            return self.value

        def __hash__(self):
            return hash(self.value)
else:
    class Binary(bytes):
        def encode(self):
            # Deliberately shadows bytes.encode: emit the base64 form.
            return base64.b64encode(self).decode('utf-8')

        @property
        def value(self):
            # This matches the public API of the Python 2 version,
            # but just returns itself since it is already a bytes
            # instance.
            return bytes(self)

        def __repr__(self):
            return 'Binary(%r)' % self.value
def item_object_hook(dct):
    """
    A custom object hook for use when decoding JSON item bodies.
    This hook will transform Amazon DynamoDB JSON responses to something
    that maps directly to native Python types.
    """
    # More than one key cannot be a DynamoDB type wrapper; pass through.
    # (len(dct) replaces len(dct.keys()), which built a throwaway key
    # list on Python 2 -- the result is identical.)
    if len(dct) > 1:
        return dct
    if 'S' in dct:
        return dct['S']
    if 'N' in dct:
        return convert_num(dct['N'])
    if 'SS' in dct:
        return set(dct['SS'])
    if 'NS' in dct:
        return set(map(convert_num, dct['NS']))
    if 'B' in dct:
        return convert_binary(dct['B'])
    if 'BS' in dct:
        return set(map(convert_binary, dct['BS']))
    # Single key but not a known type tag: return the dict unchanged.
    return dct
class Dynamizer(object):
    """Control serialization/deserialization of types.

    This class controls the encoding of python types to the
    format that is expected by the DynamoDB API, as well as
    taking DynamoDB types and constructing the appropriate
    python types.

    If you want to customize this process, you can subclass
    this class and override the encoding/decoding of
    specific types.  For example::

        'foo' (Python type)
            |
            v
        encode('foo')
            |
            v
        _encode_s('foo')
            |
            v
        {'S': 'foo'} (Encoding sent to/received from DynamoDB)
            |
            V
        decode({'S': 'foo'})
            |
            v
        _decode_s({'S': 'foo'})
            |
            v
        'foo' (Python type)
    """
    def _get_dynamodb_type(self, attr):
        # Hook point: subclasses tweak type detection (e.g. bool policy).
        return get_dynamodb_type(attr)

    def encode(self, attr):
        """
        Encodes a python type to the format expected
        by DynamoDB.
        """
        dynamodb_type = self._get_dynamodb_type(attr)
        try:
            # Dispatch to _encode_<type tag> by name.
            encoder = getattr(self, '_encode_%s' % dynamodb_type.lower())
        except AttributeError:
            raise ValueError("Unable to encode dynamodb type: %s" %
                             dynamodb_type)
        return {dynamodb_type: encoder(attr)}

    def _encode_n(self, attr):
        try:
            if isinstance(attr, float) and not hasattr(Decimal, 'from_float'):
                # python2.6 does not support creating Decimals directly
                # from floats so we have to do this ourself.
                n = str(float_to_decimal(attr))
            else:
                n = str(DYNAMODB_CONTEXT.create_decimal(attr))
            # DynamoDB has no representation for infinities or NaN.
            if list(filter(lambda x: x in n, ('Infinity', 'NaN'))):
                raise TypeError('Infinity and NaN not supported')
            return n
        except (TypeError, DecimalException) as e:
            msg = '{0} numeric for `{1}`\n{2}'.format(
                e.__class__.__name__, attr, str(e) or '')
            raise DynamoDBNumberError(msg)

    def _encode_s(self, attr):
        # Normalize bytes and arbitrary objects to text.
        if isinstance(attr, bytes):
            attr = attr.decode('utf-8')
        elif not isinstance(attr, six.text_type):
            attr = str(attr)
        return attr

    def _encode_ns(self, attr):
        return list(map(self._encode_n, attr))

    def _encode_ss(self, attr):
        return [self._encode_s(n) for n in attr]

    def _encode_b(self, attr):
        if isinstance(attr, bytes):
            attr = Binary(attr)
        return attr.encode()

    def _encode_bs(self, attr):
        return [self._encode_b(n) for n in attr]

    def _encode_null(self, attr):
        return True

    def _encode_bool(self, attr):
        return attr

    def _encode_m(self, attr):
        return dict([(k, self.encode(v)) for k, v in attr.items()])

    def _encode_l(self, attr):
        return [self.encode(i) for i in attr]

    def decode(self, attr):
        """
        Takes the format returned by DynamoDB and constructs
        the appropriate python type.
        """
        # Only a single-key dict can be a DynamoDB type wrapper.
        if len(attr) > 1 or not attr:
            return attr
        dynamodb_type = list(attr.keys())[0]
        if dynamodb_type.lower() == dynamodb_type:
            # It's not an actual type, just a single character attr that
            # overlaps with the DDB types. Return it.
            return attr
        try:
            decoder = getattr(self, '_decode_%s' % dynamodb_type.lower())
        except AttributeError:
            return attr
        return decoder(attr[dynamodb_type])

    def _decode_n(self, attr):
        return DYNAMODB_CONTEXT.create_decimal(attr)

    def _decode_s(self, attr):
        return attr

    def _decode_ns(self, attr):
        return set(map(self._decode_n, attr))

    def _decode_ss(self, attr):
        return set(map(self._decode_s, attr))

    def _decode_b(self, attr):
        return convert_binary(attr)

    def _decode_bs(self, attr):
        return set(map(self._decode_b, attr))

    def _decode_null(self, attr):
        return None

    def _decode_bool(self, attr):
        return attr

    def _decode_m(self, attr):
        return dict([(k, self.decode(v)) for k, v in attr.items()])

    def _decode_l(self, attr):
        return [self.decode(i) for i in attr]
class NonBooleanDynamizer(Dynamizer):
    """Casting boolean type to numeric types.

    This class is provided for backward compatibility.
    """
    def _get_dynamodb_type(self, attr):
        # Encode bool as 'N' ("0"/"1") instead of the native BOOL type.
        return get_dynamodb_type(attr, use_boolean=False)
class LossyFloatDynamizer(NonBooleanDynamizer):
    """Use float/int instead of Decimal for numeric types.

    This class is provided for backwards compatibility.  Instead of
    using Decimals for the 'N', 'NS' types it uses ints/floats.

    This class is deprecated and its usage is not encouraged,
    as doing so may result in loss of precision.  Use the
    `Dynamizer` class instead.
    """
    def _encode_n(self, attr):
        return serialize_num(attr)

    def _encode_ns(self, attr):
        return [str(i) for i in attr]

    def _decode_n(self, attr):
        # Lossy: parses to native int/float rather than exact Decimal.
        return convert_num(attr)

    def _decode_ns(self, attr):
        return set(map(self._decode_n, attr))
| mit |
archfan/xu4-linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# futex(2) operation codes and flags, mirrored from <linux/futex.h>.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

NSECS_PER_SEC = 1000000000


def avg(total, n):
    """Return the mean of a running total over n samples."""
    return total / n


def nsecs(secs, nsecs):
    """Combine a (secs, nsecs) pair into a single nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs


def nsecs_secs(nsecs):
    """Return the whole-seconds part of a nanosecond count."""
    # Floor division keeps the result an int on Python 3 as well
    # (plain "/" silently becomes float division there).
    return nsecs // NSECS_PER_SEC


def nsecs_nsecs(nsecs):
    """Return the sub-second remainder of a nanosecond count."""
    return nsecs % NSECS_PER_SEC


def nsecs_str(nsecs):
    """Format a nanosecond count as "sssss.nnnnnnnnn"."""
    # The original had a stray trailing comma here, which made the
    # function return a 1-tuple instead of a string (and it shadowed
    # the str() builtin).
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold value into dict[key], tracking (min, max, avg, count).

    Note the parameter is named "dict" (shadowing the builtin) to keep
    the historical interface of this helper intact.
    """
    # "key not in dict" works on Python 2 and 3; dict.has_key() was
    # removed in Python 3.
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        # This is a running pairwise average, not a true mean of all
        # samples (kept for compatibility with existing output).
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)
def clear_term():
    """Clear the terminal via ANSI escapes (cursor home + erase display)."""
    home_and_wipe = "\x1b[H" + "\x1b[2J"
    print(home_and_wipe)
audit_package_warned = False
try:
    import audit
    # Map uname machine names to audit machine identifiers so syscall
    # numbers can be translated for the running architecture.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha': audit.MACH_ALPHA,
        'ia64': audit.MACH_IA64,
        'ppc': audit.MACH_PPC,
        'ppc64': audit.MACH_PPC64,
        's390': audit.MACH_S390,
        's390x': audit.MACH_S390X,
        'i386': audit.MACH_X86,
        'i586': audit.MACH_X86,
        'i686': audit.MACH_X86,
    }
    try:
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        # Older audit bindings do not define MACH_ARMEB.
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    # audit is optional; without it syscall numbers are shown raw.
    # (Bare except on purpose: ImportError, or KeyError on an
    # architecture the map does not know.)
    if not audit_package_warned:
        audit_package_warned = True
        # print() as a function so this also parses on Python 3 (the
        # original used a Python 2 print statement).
        print("Install the audit-libs-python package to get syscall names")
def syscall_name(id):
    # Translate a syscall number to its name. Falls back to the raw
    # number when the optional audit module (and thus machine_id) is
    # unavailable -- the deliberately broad except also covers the
    # NameError raised in that case.
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except:
        return str(id)
def strerror(nr):
    """Return the errno symbol (e.g. "ENOENT") for nr, sign-insensitive.

    Unknown errno values yield "Unknown <nr> errno".
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # Narrowed from a bare except: only an unknown errno should
        # trigger the fallback text.
        return "Unknown %d errno" % nr
| gpl-2.0 |
fr34k8/OmnigraffleScripts-1 | install.py | 2 | 1333 | #!/usr/bin/env python
# encoding: utf-8
"""
install.py
Created by Charles-Axel Dein on 2011-10-03.
Copyright (c) 2011 Charles-Axel Dein. All rights reserved.
"""
import shutil
import os
import logging
# Where compiled scripts are installed for OmniGraffle Pro to pick up.
INSTALL_DIRECTORY = "~/Library/Scripts/Applications/Omnigraffle Pro/"
# Script bundles to build; each lives in a directory of the same name.
SCRIPTS = ("ExportAllFiles", "ExportAllLayers", "ResetPrototype", "ExportForiPad")
def main():
    """Compile each AppleScript source, install the compiled .scpt files
    for OmniGraffle Pro, and remove the local build artifacts."""
    install_directory = os.path.expanduser(INSTALL_DIRECTORY)
    # creates INSTALL_DIRECTORY if necessary
    if not os.path.exists(install_directory):
        os.makedirs(install_directory)

    for s in SCRIPTS:
        applescript_filename = os.path.join(s, s + ".applescript")
        scpt_filename = os.path.join(s, s + ".scpt")
        # print() as a function so this also runs on Python 3 (the
        # original used Python 2 print statements).
        print("Compiling %s" % scpt_filename)
        os.system('osacompile -o "%s" "%s"' %
                  (scpt_filename, applescript_filename))
        print("Installing %s" % scpt_filename)
        shutil.copy(scpt_filename, install_directory)
        logging.debug("Copied %s to %s" % (scpt_filename, install_directory))

    print("Cleaning up...")
    for s in SCRIPTS:
        scpt_filename = os.path.join(s, s + ".scpt")
        # Fixed: the original logged the stale applescript_filename left
        # over from the loop above while actually deleting the .scpt.
        logging.debug("Deleting %s" % scpt_filename)
        os.remove(scpt_filename)

    print("\nInstallation finished.")


if __name__ == '__main__':
    main()
| bsd-2-clause |
louyihua/edx-platform | common/djangoapps/monkey_patch/__init__.py | 49 | 2801 | """
Monkey-patch the edX platform
Here be dragons (and simians!)
* USE WITH CAUTION *
No, but seriously, you probably never really want to make changes here.
This module contains methods to monkey-patch [0] the edx-platform.
Patches are to be applied as early as possible in the callstack
(currently lms/startup.py and cms/startup.py). Consequently, changes
made here will affect the entire platform.
That said, if you've decided you really need to monkey-patch the
platform (and you've convinced enough people that this is best
solution), kindly follow these guidelines:
- Reference django_18_upgrade.py for a sample implementation.
- Name your module by replacing periods with underscores for the
module to be patched:
- patching 'django.utils.translation'
becomes 'django_utils_translation'
- patching 'your.module'
becomes 'your_module'
- Implement argumentless function wrappers in
monkey_patch.your_module for the following:
- is_patched
- patch
- unpatch
- Add the following code where needed (typically cms/startup.py and
lms/startup.py):
```
from monkey_patch import your_module
your_module.patch()
```
- Write tests! All code should be tested anyway, but with code that
patches the platform runtime, we must be extra sure there are no
unintended consequences.
[0] http://en.wikipedia.org/wiki/Monkey_patch
"""
# Attribute key under which a reference to the pristine (unpatched) object
# is stashed on the replacement.
__BACKUP_ATTRIBUTE_NAME = '__monkey_patch'


def is_patched(module, attribute_name):
    """Return whether ``module.attribute_name`` carries a monkey-patch backup."""
    return hasattr(getattr(module, attribute_name), __BACKUP_ATTRIBUTE_NAME)
def patch(module, attribute_name, attribute_replacement):
    """Monkey-patch ``module.attribute_name`` with ``attribute_replacement``.

    The original attribute is stashed on the replacement under
    ``__BACKUP_ATTRIBUTE_NAME`` so ``unpatch`` can restore it later.
    Returns True once the patch has been applied.
    """
    original = getattr(module, attribute_name)
    # Keep a handle to the pristine object on the replacement itself.
    setattr(attribute_replacement, __BACKUP_ATTRIBUTE_NAME, original)
    setattr(module, attribute_name, attribute_replacement)
    return is_patched(module, attribute_name)
def unpatch(module, attribute_name):
    """Undo a monkey-patch on ``module.attribute_name``, if one exists.

    Restores the backup stored under ``__BACKUP_ATTRIBUTE_NAME`` and returns
    True; returns False when the attribute was never patched.
    """
    current = getattr(module, attribute_name)
    if not hasattr(current, __BACKUP_ATTRIBUTE_NAME):
        return False
    setattr(module, attribute_name, getattr(current, __BACKUP_ATTRIBUTE_NAME))
    return True
| agpl-3.0 |
pombredanne/pyjs | examples/misc/djangotasks/settings.py | 13 | 2910 | # Django settings for pyjsDemo project.
import os
# Absolute path to the bundled media directory (forward slashes for portability).
STATIC = str(os.path.join(os.path.dirname(__file__), 'media').replace('\\','/'))

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

DATABASE_ENGINE = 'mysql'      # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'todo'         # Or path to database file if using sqlite3.
DATABASE_USER = 'todo'         # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to source control; rotate it before any
# non-demo deployment.
SECRET_KEY = '#*jv)6zbb15!9z8oru*3irida-24@_5+ib$k6$-&k&oy84ww87'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
#     'django.template.loaders.eggs.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)

ROOT_URLCONF = 'djangotasks.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    #'django.contrib.auth',
    #'django.contrib.contenttypes',
    #'django.contrib.sessions',
    #'django.contrib.sites',
    'djangotasks.todo',
)
| apache-2.0 |
lucciano/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/swig.py | 61 | 7122 | """SCons.Tool.swig
Tool-specific initialization for swig.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/swig.py 5134 2010/08/16 23:02:40 bdeegan"
import os.path
import re
import subprocess
import SCons.Action
import SCons.Defaults
import SCons.Scanner
import SCons.Tool
import SCons.Util
SwigAction = SCons.Action.Action('$SWIGCOM', '$SWIGCOMSTR')


def swigSuffixEmitter(env, source):
    """Pick the generated-wrapper suffix: C++ when '-c++' appears in
    $SWIGFLAGS, plain C otherwise."""
    flags = SCons.Util.CLVar(env.subst("$SWIGFLAGS", source=source))
    return '$SWIGCXXFILESUFFIX' if '-c++' in flags else '$SWIGCFILESUFFIX'
# Match '%module test', as well as '%module(directors="1") test'
# Also allow for test to be quoted (SWIG permits double quotes, but not single)
_reModule = re.compile(r'%module(\s*\(.*\))?\s+("?)(.+)\2')


def _find_modules(src):
    """Find all modules referenced by %module lines in `src`, a SWIG .i file.

    Returns a (module_names, directors) pair: the list of module names, and a
    flag that is true when SWIG directors were requested (SWIG generates an
    additional header file in that case).
    """
    directors = 0
    mnames = []
    try:
        # BUG FIX: close the interface file deterministically instead of
        # leaking the handle until garbage collection (was a bare open()).
        with open(src) as module_file:
            matches = _reModule.findall(module_file.read())
    except IOError:
        # If the file's not yet generated, guess the module name from the filename
        matches = []
        mnames.append(os.path.splitext(src)[0])
    for m in matches:
        mnames.append(m[2])
        directors = directors or m[0].find('directors') >= 0
    return mnames, directors
def _add_director_header_targets(target, env):
    """Append a director header node for every C++ wrapper already in ``target``.

    Directors only work with C++ code, not C: each node whose name ends in
    $SWIGCXXFILESUFFIX gains a sibling target whose ending is replaced by
    $SWIGDIRECTORSUFFIX.
    """
    wrapper_suffix = env.subst(env['SWIGCXXFILESUFFIX'])
    director_suffix = env['SWIGDIRECTORSUFFIX']
    # Walk a snapshot of the list because we append to ``target`` as we go.
    for node in list(target):
        name = node.name
        if name[-len(wrapper_suffix):] == wrapper_suffix:
            stem = name[:-len(wrapper_suffix)]
            target.append(node.dir.File(stem + director_suffix))
def _swigEmitter(target, source, env):
    """SCons emitter: add the extra files SWIG generates as build targets.

    For each .i source, appends the proxy .py files (for -python without
    -noproxy) and/or the .java/JNI files (for -java), plus director headers
    when the module requests directors.  Returns the (target, source) pair
    as required by the SCons emitter protocol.
    """
    swigflags = env.subst("$SWIGFLAGS", target=target, source=source)
    flags = SCons.Util.CLVar(swigflags)
    for src in source:
        src = str(src.rfile())
        # Lazily computed per source; shared by the -python and -java branches.
        mnames = None
        if "-python" in flags and "-noproxy" not in flags:
            if mnames is None:
                mnames, directors = _find_modules(src)
            if directors:
                _add_director_header_targets(target, env)
            python_files = [m + ".py" for m in mnames]
            outdir = env.subst('$SWIGOUTDIR', target=target, source=source)
            # .py files should be generated in SWIGOUTDIR if specified,
            # otherwise in the same directory as the target
            if outdir:
                python_files = [env.fs.File(os.path.join(outdir, j)) for j in python_files]
            else:
                python_files = [target[0].dir.File(m) for m in python_files]
            target.extend(python_files)
        if "-java" in flags:
            if mnames is None:
                mnames, directors = _find_modules(src)
            if directors:
                _add_director_header_targets(target, env)
            java_files = [[m + ".java", m + "JNI.java"] for m in mnames]
            java_files = SCons.Util.flatten(java_files)
            outdir = env.subst('$SWIGOUTDIR', target=target, source=source)
            if outdir:
                java_files = [os.path.join(outdir, j) for j in java_files]
            java_files = list(map(env.fs.File, java_files))
            for jf in java_files:
                # Teach each generated Java node to resolve its build
                # directory from the target node.
                t_from_s = lambda t, p, s, x: t.dir
                SCons.Util.AddMethod(jf, t_from_s, 'target_from_source')
            target.extend(java_files)
    return (target, source)
def _get_swig_version(env):
    """Run the SWIG command line tool to get and return the version number.

    Returns None when 'swig -version' cannot be run, exits non-zero, or the
    output does not contain a recognizable version line.
    """
    pipe = SCons.Action._subproc(env, [env['SWIG'], '-version'],
                                 stdin='devnull',
                                 stderr='devnull',
                                 stdout=subprocess.PIPE)
    if pipe.wait() != 0:
        return
    # NOTE(review): pipe.stdout.read() yields bytes on Python 3, which would
    # not match a str pattern — presumably this ran under Python 2; confirm
    # before reusing on a modern interpreter.
    out = pipe.stdout.read()
    match = re.search(r'SWIG Version\s+(\S+)$', out, re.MULTILINE)
    if match:
        return match.group(1)
def generate(env):
    """Add Builders and construction variables for swig to an Environment."""
    c_file, cxx_file = SCons.Tool.createCFileBuilders(env)

    # Route .i files through the suffix emitter so the wrapper gets a C or
    # C++ extension depending on $SWIGFLAGS.
    c_file.suffix['.i'] = swigSuffixEmitter
    cxx_file.suffix['.i'] = swigSuffixEmitter

    c_file.add_action('.i', SwigAction)
    c_file.add_emitter('.i', _swigEmitter)
    cxx_file.add_action('.i', SwigAction)
    cxx_file.add_emitter('.i', _swigEmitter)

    java_file = SCons.Tool.CreateJavaFileBuilder(env)
    java_file.suffix['.i'] = swigSuffixEmitter
    java_file.add_action('.i', SwigAction)
    java_file.add_emitter('.i', _swigEmitter)

    # Default construction variables controlling the SWIG command line.
    env['SWIG'] = 'swig'
    env['SWIGVERSION'] = _get_swig_version(env)
    env['SWIGFLAGS'] = SCons.Util.CLVar('')
    env['SWIGDIRECTORSUFFIX'] = '_wrap.h'
    env['SWIGCFILESUFFIX'] = '_wrap$CFILESUFFIX'
    env['SWIGCXXFILESUFFIX'] = '_wrap$CXXFILESUFFIX'
    env['_SWIGOUTDIR'] = r'${"-outdir \"%s\"" % SWIGOUTDIR}'
    env['SWIGPATH'] = []
    env['SWIGINCPREFIX'] = '-I'
    env['SWIGINCSUFFIX'] = ''
    env['_SWIGINCFLAGS'] = '$( ${_concat(SWIGINCPREFIX, SWIGPATH, SWIGINCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
    env['SWIGCOM'] = '$SWIG -o $TARGET ${_SWIGOUTDIR} ${_SWIGINCFLAGS} $SWIGFLAGS $SOURCES'

    # Scan .i files for %include/%import/%extern dependencies along $SWIGPATH.
    expr = '^[ \t]*%[ \t]*(?:include|import|extern)[ \t]*(<|"?)([^>\s"]+)(?:>|"?)'
    scanner = SCons.Scanner.ClassicCPP("SWIGScan", ".i", "SWIGPATH", expr)
    env.Append(SCANNERS = scanner)
def exists(env):
    """Return a true value when the 'swig' executable can be detected."""
    return env.Detect(['swig'])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
arcturusannamalai/open-tamil | tamiltts/normalize.py | 1 | 2995 | # This Python file uses the following encoding: utf-8
# (C) 2020 Muthiah Annamalai
# This file is part of open-tamil project
import string
from tamil.utf8 import get_letters
from tamil.numeral import num2tamilstr
def normalize_numeral_text(text_tokens):
    """Replace numeric tokens with their spoken Tamil word form.

    E.g. in ["இரு", "நண்பர்கள்", "௹", "100", ...] the token "100" is rewritten
    as its Tamil word form (நூறு); all other tokens pass through unchanged.
    """
    rval = []
    for word in text_tokens:
        # BUG FIX: guard against empty tokens — the original indexed word[0]
        # unconditionally and raised IndexError on "".
        if word and (word[0] in string.digits or word[0] == '-'):
            try:
                rval.append(num2tamilstr(word))
            except Exception:
                # Best effort: tokens that merely look numeric (e.g. "12ab")
                # are kept verbatim rather than aborting normalization.
                rval.append(word)
        else:
            rval.append(word)
    return rval
def normalize_punctuation_text(text_tokens):
    """Spell out punctuation and currency tokens as Tamil words.

    Tokens found in the symbol table below (e.g. "௹" -> "ரூபாய்") are
    replaced by their spoken form; every other token passes through
    unchanged.
    """
    special_char_map = {
        '!': 'ஆச்சரியக்குறி',
        '!=': 'சமன்பாடு இல்லை',
        ',': 'துணைக்குறி',
        '#': 'எண்',
        '$': 'டாலர்',
        '™': 'முத்திரை',
        '©': 'பதிப்புரிமை',
        '௹': 'ரூபாய்',
        '₹': 'ரூபாய்',
        '£': 'பவுண்டு',
        '%': 'சதவிகிதம்',
        '&': 'மற்றும்',
        '*': 'நட்சத்திரக்குறி',
        '(': 'அடைப்புகுக்குறி தொடக்கம்',
        ')': 'அடைப்புகுக்குறி முடிவு',
        '[': 'அடைப்புகுக்குறி தொடக்கம்',
        ']': 'அடைப்புகுக்குறி முடிவு',
        '{': 'அடைப்புகுக்குறி தொடக்கம்',
        '}': 'அடைப்புகுக்குறி முடிவு',
        '+': 'கூட்டல்குறி',
        '-': 'கழித்தல்குறி',
        'x': 'பெருக்கல்குறி',
        '/': 'வகுத்தல்குறி',
        '=': 'சமன்பாடுக்குறி',
        ':': 'புள்ளி',
        '"': 'மேற்கோள்குறி',
        '\'': 'மேற்கோள்குறி',
        ';': 'அரைப்புள்ளி',
        '.': 'முற்றுப்புள்ளி',
        '?': 'கேள்விக்குறி',
    }
    return [special_char_map.get(token, token) for token in text_tokens]
| mit |
TouK/vumi | vumi/transports/api/tests/test_api.py | 4 | 5952 | # -*- encoding: utf-8 -*-
import json
from urllib import urlencode
from twisted.internet.defer import inlineCallbacks
from vumi.utils import http_request, http_request_full
from vumi.tests.helpers import VumiTestCase
from vumi.transports.api import HttpApiTransport
from vumi.transports.tests.helpers import TransportHelper
def config_override(**config):
    """Decorator factory: attach ``config`` to the decorated test method.

    ``setUp`` reads the attribute back via
    ``getattr(test_method, 'config_override', {})`` and merges it into the
    transport config for that test.
    """
    def _attach(fun):
        fun.config_override = config
        return fun
    return _attach
class TestHttpApiTransport(VumiTestCase):
    """Integration tests for HttpApiTransport over a real local HTTP server.

    ``setUp`` merges any per-test ``config_override`` attributes into the
    transport config before starting the transport.
    """

    @inlineCallbacks
    def setUp(self):
        self.config = {
            'web_path': "foo",
            'web_port': 0,
        }
        # Merge per-test overrides attached by the @config_override decorator.
        test_method = getattr(self, self._testMethodName)
        config_override = getattr(test_method, 'config_override', {})
        self.config.update(config_override)
        self.tx_helper = self.add_helper(TransportHelper(HttpApiTransport))
        self.transport = yield self.tx_helper.get_transport(self.config)
        self.transport_url = self.transport.get_transport_url()

    def mkurl(self, content, from_addr=123, to_addr=555, **kw):
        """Build a request URL with default to/from addresses plus extras."""
        params = {
            'to_addr': to_addr,
            'from_addr': from_addr,
            'content': content,
        }
        params.update(kw)
        return self.mkurl_raw(**params)

    def mkurl_raw(self, **params):
        """Build a request URL from exactly the given query parameters."""
        return '%s%s?%s' % (
            self.transport_url,
            self.config['web_path'],
            urlencode(params)
        )

    @inlineCallbacks
    def test_health(self):
        result = yield http_request(
            self.transport_url + "health", "", method='GET')
        self.assertEqual(json.loads(result), {'pending_requests': 0})

    @inlineCallbacks
    def test_inbound(self):
        url = self.mkurl('hello')
        response = yield http_request(url, '', method='GET')
        [msg] = self.tx_helper.get_dispatched_inbound()
        self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
        self.assertEqual(msg['to_addr'], "555")
        self.assertEqual(msg['from_addr'], "123")
        self.assertEqual(msg['content'], "hello")
        self.assertEqual(json.loads(response),
                         {'message_id': msg['message_id']})

    @inlineCallbacks
    def test_handle_non_ascii_input(self):
        url = self.mkurl(u"öæł".encode("utf-8"))
        response = yield http_request(url, '', method='GET')
        [msg] = self.tx_helper.get_dispatched_inbound()
        self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
        self.assertEqual(msg['to_addr'], "555")
        self.assertEqual(msg['from_addr'], "123")
        self.assertEqual(msg['content'], u"öæł")
        self.assertEqual(json.loads(response),
                         {'message_id': msg['message_id']})

    @inlineCallbacks
    @config_override(reply_expected=True)
    def test_inbound_with_reply(self):
        # With reply_expected the HTTP response is deferred until the reply
        # message is dispatched back through the transport.
        d = http_request(self.mkurl('hello'), '', method='GET')
        [msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
        yield self.tx_helper.make_dispatch_reply(msg, "OK")
        response = yield d
        self.assertEqual(response, 'OK')

    @inlineCallbacks
    def test_good_optional_parameter(self):
        url = self.mkurl('hello', group='#channel')
        response = yield http_request(url, '', method='GET')
        [msg] = self.tx_helper.get_dispatched_inbound()
        self.assertEqual(msg['group'], '#channel')
        self.assertEqual(json.loads(response),
                         {'message_id': msg['message_id']})

    @inlineCallbacks
    def test_bad_parameter(self):
        url = self.mkurl('hello', foo='bar')
        response = yield http_request_full(url, '', method='GET')
        self.assertEqual(400, response.code)
        self.assertEqual(json.loads(response.delivered_body),
                         {'unexpected_parameter': ['foo']})

    @inlineCallbacks
    def test_missing_parameters(self):
        url = self.mkurl_raw(content='hello')
        response = yield http_request_full(url, '', method='GET')
        self.assertEqual(400, response.code)
        self.assertEqual(json.loads(response.delivered_body),
                         {'missing_parameter': ['to_addr', 'from_addr']})

    @inlineCallbacks
    @config_override(field_defaults={'to_addr': '555'})
    def test_default_parameters(self):
        url = self.mkurl_raw(content='hello', from_addr='123')
        response = yield http_request(url, '', method='GET')
        [msg] = self.tx_helper.get_dispatched_inbound()
        self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
        self.assertEqual(msg['to_addr'], "555")
        self.assertEqual(msg['from_addr'], "123")
        self.assertEqual(msg['content'], "hello")
        self.assertEqual(json.loads(response),
                         {'message_id': msg['message_id']})

    @inlineCallbacks
    @config_override(field_defaults={'to_addr': '555'},
                     allowed_fields=['content', 'from_addr'])
    def test_disallowed_default_parameters(self):
        # A field default may be applied even when the field itself is not
        # in allowed_fields.
        url = self.mkurl_raw(content='hello', from_addr='123')
        response = yield http_request(url, '', method='GET')
        [msg] = self.tx_helper.get_dispatched_inbound()
        self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
        self.assertEqual(msg['to_addr'], "555")
        self.assertEqual(msg['from_addr'], "123")
        self.assertEqual(msg['content'], "hello")
        self.assertEqual(json.loads(response),
                         {'message_id': msg['message_id']})

    @inlineCallbacks
    @config_override(allowed_fields=['content', 'from_addr'])
    def test_disallowed_parameters(self):
        url = self.mkurl('hello')
        response = yield http_request_full(url, '', method='GET')
        self.assertEqual(400, response.code)
        self.assertEqual(json.loads(response.delivered_body),
                         {'unexpected_parameter': ['to_addr']})
| bsd-3-clause |
jreback/pandas | pandas/tests/arrays/boolean/test_construction.py | 6 | 12857 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import BooleanArray
from pandas.core.arrays.boolean import coerce_to_array
def test_boolean_array_constructor():
    """BooleanArray accepts only 1D boolean numpy arrays for values and mask."""
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")

    result = BooleanArray(values, mask)
    expected = pd.array([True, False, True, None], dtype="boolean")
    tm.assert_extension_array_equal(result, expected)

    with pytest.raises(TypeError, match="values should be boolean numpy array"):
        BooleanArray(values.tolist(), mask)

    with pytest.raises(TypeError, match="mask should be boolean numpy array"):
        BooleanArray(values, mask.tolist())

    with pytest.raises(TypeError, match="values should be boolean numpy array"):
        BooleanArray(values.astype(int), mask)

    with pytest.raises(TypeError, match="mask should be boolean numpy array"):
        BooleanArray(values, None)

    with pytest.raises(ValueError, match="values must be a 1D array"):
        BooleanArray(values.reshape(1, -1), mask)

    with pytest.raises(ValueError, match="mask must be a 1D array"):
        BooleanArray(values, mask.reshape(1, -1))


def test_boolean_array_constructor_copy():
    """copy=False shares the input arrays; copy=True duplicates them."""
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")

    result = BooleanArray(values, mask)
    assert result._data is values
    assert result._mask is mask

    result = BooleanArray(values, mask, copy=True)
    assert result._data is not values
    assert result._mask is not mask
def test_to_boolean_array():
    """pd.array coerces lists and numpy arrays to BooleanArray."""
    expected = BooleanArray(
        np.array([True, False, True]), np.array([False, False, False])
    )

    result = pd.array([True, False, True], dtype="boolean")
    tm.assert_extension_array_equal(result, expected)
    result = pd.array(np.array([True, False, True]), dtype="boolean")
    tm.assert_extension_array_equal(result, expected)
    result = pd.array(np.array([True, False, True], dtype=object), dtype="boolean")
    tm.assert_extension_array_equal(result, expected)

    # with missing values
    expected = BooleanArray(
        np.array([True, False, True]), np.array([False, False, True])
    )

    result = pd.array([True, False, None], dtype="boolean")
    tm.assert_extension_array_equal(result, expected)
    result = pd.array(np.array([True, False, None], dtype=object), dtype="boolean")
    tm.assert_extension_array_equal(result, expected)


def test_to_boolean_array_all_none():
    """An all-missing input yields a fully masked BooleanArray."""
    expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True]))

    result = pd.array([None, None, None], dtype="boolean")
    tm.assert_extension_array_equal(result, expected)
    result = pd.array(np.array([None, None, None], dtype=object), dtype="boolean")
    tm.assert_extension_array_equal(result, expected)


@pytest.mark.parametrize(
    "a, b",
    [
        ([True, False, None, np.nan, pd.NA], [True, False, None, None, None]),
        ([True, np.nan], [True, None]),
        ([True, pd.NA], [True, None]),
        ([np.nan, np.nan], [None, None]),
        (np.array([np.nan, np.nan], dtype=float), [None, None]),
    ],
)
def test_to_boolean_array_missing_indicators(a, b):
    """None, np.nan and pd.NA are all treated as missing on input."""
    result = pd.array(a, dtype="boolean")
    expected = pd.array(b, dtype="boolean")
    tm.assert_extension_array_equal(result, expected)


@pytest.mark.parametrize(
    "values",
    [
        ["foo", "bar"],
        ["1", "2"],
        # "foo",
        [1, 2],
        [1.0, 2.0],
        pd.date_range("20130101", periods=2),
        np.array(["foo"]),
        np.array([1, 2]),
        np.array([1.0, 2.0]),
        [np.nan, {"a": 1}],
    ],
)
def test_to_boolean_array_error(values):
    """Non-bool-like inputs raise TypeError."""
    # error in converting existing arrays to BooleanArray
    msg = "Need to pass bool-like value"
    with pytest.raises(TypeError, match=msg):
        pd.array(values, dtype="boolean")


def test_to_boolean_array_from_integer_array():
    """Integer 0/1 arrays coerce to False/True."""
    result = pd.array(np.array([1, 0, 1, 0]), dtype="boolean")
    expected = pd.array([True, False, True, False], dtype="boolean")
    tm.assert_extension_array_equal(result, expected)

    # with missing values
    result = pd.array(np.array([1, 0, 1, None]), dtype="boolean")
    expected = pd.array([True, False, True, None], dtype="boolean")
    tm.assert_extension_array_equal(result, expected)


def test_to_boolean_array_from_float_array():
    """Float 0.0/1.0 arrays coerce to False/True; NaN becomes missing."""
    result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype="boolean")
    expected = pd.array([True, False, True, False], dtype="boolean")
    tm.assert_extension_array_equal(result, expected)

    # with missing values
    result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype="boolean")
    expected = pd.array([True, False, True, None], dtype="boolean")
    tm.assert_extension_array_equal(result, expected)


def test_to_boolean_array_integer_like():
    """Python int lists of 0/1 also coerce to booleans."""
    # integers of 0's and 1's
    result = pd.array([1, 0, 1, 0], dtype="boolean")
    expected = pd.array([True, False, True, False], dtype="boolean")
    tm.assert_extension_array_equal(result, expected)

    # with missing values
    result = pd.array([1, 0, 1, None], dtype="boolean")
    expected = pd.array([True, False, True, None], dtype="boolean")
    tm.assert_extension_array_equal(result, expected)
def test_coerce_to_array():
    """coerce_to_array returns (values, mask) suitable for BooleanArray."""
    # TODO this is currently not public API
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")
    result = BooleanArray(*coerce_to_array(values, mask=mask))
    expected = BooleanArray(values, mask)
    tm.assert_extension_array_equal(result, expected)
    assert result._data is values
    assert result._mask is mask
    result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True))
    expected = BooleanArray(values, mask)
    tm.assert_extension_array_equal(result, expected)
    assert result._data is not values
    assert result._mask is not mask

    # mixed missing from values and mask
    values = [True, False, None, False]
    mask = np.array([False, False, False, True], dtype="bool")
    result = BooleanArray(*coerce_to_array(values, mask=mask))
    expected = BooleanArray(
        np.array([True, False, True, True]), np.array([False, False, True, True])
    )
    tm.assert_extension_array_equal(result, expected)
    result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask))
    tm.assert_extension_array_equal(result, expected)
    result = BooleanArray(*coerce_to_array(values, mask=mask.tolist()))
    tm.assert_extension_array_equal(result, expected)

    # raise errors for wrong dimension
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")
    with pytest.raises(ValueError, match="values must be a 1D list-like"):
        coerce_to_array(values.reshape(1, -1))
    with pytest.raises(ValueError, match="mask must be a 1D list-like"):
        coerce_to_array(values, mask=mask.reshape(1, -1))


def test_coerce_to_array_from_boolean_array():
    """Passing an existing BooleanArray is zero-copy unless copy=True."""
    # passing BooleanArray to coerce_to_array
    values = np.array([True, False, True, False], dtype="bool")
    mask = np.array([False, False, False, True], dtype="bool")
    arr = BooleanArray(values, mask)
    result = BooleanArray(*coerce_to_array(arr))
    tm.assert_extension_array_equal(result, arr)
    # no copy
    assert result._data is arr._data
    assert result._mask is arr._mask

    result = BooleanArray(*coerce_to_array(arr), copy=True)
    tm.assert_extension_array_equal(result, arr)
    assert result._data is not arr._data
    assert result._mask is not arr._mask

    with pytest.raises(ValueError, match="cannot pass mask for BooleanArray input"):
        coerce_to_array(arr, mask=mask)


def test_coerce_to_numpy_array():
    """np.array(BooleanArray) yields object dtype; bool dtype needs no NAs."""
    # with missing values -> object dtype
    arr = pd.array([True, False, None], dtype="boolean")
    result = np.array(arr)
    expected = np.array([True, False, pd.NA], dtype="object")
    tm.assert_numpy_array_equal(result, expected)

    # also with no missing values -> object dtype
    arr = pd.array([True, False, True], dtype="boolean")
    result = np.array(arr)
    expected = np.array([True, False, True], dtype="object")
    tm.assert_numpy_array_equal(result, expected)

    # force bool dtype
    result = np.array(arr, dtype="bool")
    expected = np.array([True, False, True], dtype="bool")
    tm.assert_numpy_array_equal(result, expected)

    # with missing values will raise error
    arr = pd.array([True, False, None], dtype="boolean")
    msg = (
        "cannot convert to 'bool'-dtype NumPy array with missing values. "
        "Specify an appropriate 'na_value' for this dtype."
    )
    with pytest.raises(ValueError, match=msg):
        np.array(arr, dtype="bool")


def test_to_boolean_array_from_strings():
    """String representations of booleans and 0/1 parse to BooleanArray."""
    result = BooleanArray._from_sequence_of_strings(
        np.array(["True", "False", "1", "1.0", "0", "0.0", np.nan], dtype=object)
    )
    expected = BooleanArray(
        np.array([True, False, True, True, False, False, False]),
        np.array([False, False, False, False, False, False, True]),
    )
    tm.assert_extension_array_equal(result, expected)


def test_to_boolean_array_from_strings_invalid_string():
    """Unparseable strings raise ValueError."""
    with pytest.raises(ValueError, match="cannot be cast"):
        BooleanArray._from_sequence_of_strings(["donkey"])
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy(box):
    """to_numpy dtype/na_value handling for both Series and array wrappers."""
    con = pd.Series if box else pd.array

    # default (with or without missing values) -> object dtype
    arr = con([True, False, True], dtype="boolean")
    result = arr.to_numpy()
    expected = np.array([True, False, True], dtype="object")
    tm.assert_numpy_array_equal(result, expected)

    arr = con([True, False, None], dtype="boolean")
    result = arr.to_numpy()
    expected = np.array([True, False, pd.NA], dtype="object")
    tm.assert_numpy_array_equal(result, expected)

    arr = con([True, False, None], dtype="boolean")
    result = arr.to_numpy(dtype="str")
    expected = np.array([True, False, pd.NA], dtype="<U5")
    tm.assert_numpy_array_equal(result, expected)

    # no missing values -> can convert to bool, otherwise raises
    arr = con([True, False, True], dtype="boolean")
    result = arr.to_numpy(dtype="bool")
    expected = np.array([True, False, True], dtype="bool")
    tm.assert_numpy_array_equal(result, expected)

    arr = con([True, False, None], dtype="boolean")
    with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype"):
        result = arr.to_numpy(dtype="bool")

    # specify dtype and na_value
    arr = con([True, False, None], dtype="boolean")
    result = arr.to_numpy(dtype=object, na_value=None)
    expected = np.array([True, False, None], dtype="object")
    tm.assert_numpy_array_equal(result, expected)

    result = arr.to_numpy(dtype=bool, na_value=False)
    expected = np.array([True, False, False], dtype="bool")
    tm.assert_numpy_array_equal(result, expected)

    result = arr.to_numpy(dtype="int64", na_value=-99)
    expected = np.array([1, 0, -99], dtype="int64")
    tm.assert_numpy_array_equal(result, expected)

    result = arr.to_numpy(dtype="float64", na_value=np.nan)
    expected = np.array([1, 0, np.nan], dtype="float64")
    tm.assert_numpy_array_equal(result, expected)

    # converting to int or float without specifying na_value raises
    with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"):
        arr.to_numpy(dtype="int64")
    with pytest.raises(ValueError, match="cannot convert to 'float64'-dtype"):
        arr.to_numpy(dtype="float64")


def test_to_numpy_copy():
    """to_numpy is zero-copy for bool dtype unless copy=True is passed."""
    # to_numpy can be zero-copy if no missing values
    arr = pd.array([True, False, True], dtype="boolean")
    result = arr.to_numpy(dtype=bool)
    result[0] = False
    tm.assert_extension_array_equal(
        arr, pd.array([False, False, True], dtype="boolean")
    )

    arr = pd.array([True, False, True], dtype="boolean")
    result = arr.to_numpy(dtype=bool, copy=True)
    result[0] = False
    tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype="boolean"))
# FIXME: don't leave commented out
# TODO when BooleanArray coerces to object dtype numpy array, need to do conversion
# manually in the indexing code
# def test_indexing_boolean_mask():
# arr = pd.array([1, 2, 3, 4], dtype="Int64")
# mask = pd.array([True, False, True, False], dtype="boolean")
# result = arr[mask]
# expected = pd.array([1, 3], dtype="Int64")
# tm.assert_extension_array_equal(result, expected)
# # missing values -> error
# mask = pd.array([True, False, True, None], dtype="boolean")
# with pytest.raises(IndexError):
# result = arr[mask]
| bsd-3-clause |
cedral/aws-sdk-cpp | scripts/build_example.py | 1 | 4783 | #
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
import os
import argparse
import os
import shutil
import subprocess
def GetBuildEnvironments():
    """Return per-platform build settings keyed by platform name.

    Each entry carries the build command, the parallelism flag template,
    the config flag template ('??' is a placeholder substituted later),
    and CMake generator parameters per CPU architecture.
    """
    windows_2017 = {
        'global_build_call': 'msbuild ALL_BUILD.vcxproj',
        'parallel_option': ' -m:??',
        'configs': ' -p:Configuration=??',
        'generator_param': {
            'x86': ' -G "Visual Studio 15 2017"',
            'x86_64': ' -G "Visual Studio 15 2017 Win64"',
        },
    }
    ubuntu = {
        'global_build_call': 'make',
        'parallel_option': ' -j??',
        'configs': '',
        'generator_param': {
            'x86': ' -DCMAKE_CXX_FLAGS=-m32',
            'x86_64': '',
        },
    }
    return {'Windows2017': windows_2017, 'Ubuntu': ubuntu}
def ParseArguments():
    """Parse command-line options, returning a dict with defaults applied
    for any option the caller did not supply."""
    parser = argparse.ArgumentParser(
        description="AWSNativeSDK Run all examples from Github aws-doc-sdk-examples repository.")
    for option in ("--platform", "--cpuArchitecture", "--configs",
                   "--parallel", "--sdkInstallDir", "--exampleSourceDir"):
        parser.add_argument(option, action="store")
    parsed = vars(parser.parse_args())
    defaults = {
        "platform": "Ubuntu",
        "configs": "Debug",
        "cpuArchitecture": "x86_64",
        "parallel": "8",
        "sdkInstallDir": "./install",
        "exampleSourceDir": os.path.join("aws-doc-sdk-examples", "cpp", "example_code"),
    }
    return {key: parsed[key] or fallback for key, fallback in defaults.items()}
def BuildExample(platform, cpuArchitecture, buildDir, buildType, sdkInstallDir, sourceDir, parallel):
    """Configure and build one example in a clean build directory.

    Recreates ``buildDir``, runs CMake with the platform's generator flags,
    then invokes the platform's build tool with the configuration and
    parallelism substituted for the '??' placeholders.
    """
    platform_env = GetBuildEnvironments()[platform]
    # Always start from an empty build tree.
    if os.path.exists(buildDir):
        shutil.rmtree(buildDir)
    os.mkdir(buildDir)
    os.chdir(buildDir)
    cmake_call = "".join([
        "cmake",
        platform_env["generator_param"][cpuArchitecture],
        " -DBUILD_SHARED_LIBS=ON",
        " -DCMAKE_BUILD_TYPE=" + buildType,
        " -DCMAKE_PREFIX_PATH=" + sdkInstallDir,
        " -DAWSSDK_ROOT_DIR=" + sdkInstallDir,
        " " + sourceDir,
    ])
    print("cmake call = " + cmake_call)
    subprocess.check_call(cmake_call, shell=True)
    build_call = (platform_env['global_build_call']
                  + platform_env['configs'].replace("??", buildType)
                  + platform_env['parallel_option'].replace("??", parallel))
    print("build call = " + build_call)
    subprocess.check_call(build_call, shell=True)
def Main():
    """Build every example service directory that has a CMakeLists.txt.

    The "redshift" example is explicitly skipped (mirrors historical
    behavior).  The working directory is reset before each build because
    BuildExample chdirs into its build tree.
    """
    arguments = ParseArguments()
    startDirectory = os.getcwd()
    quotedSdkInstallDir = '"' + os.path.abspath(arguments["sdkInstallDir"]) + '"'
    exampleSourceDir = os.path.abspath(arguments["exampleSourceDir"])
    for serviceName in os.listdir(exampleSourceDir):
        serviceDir = os.path.join(exampleSourceDir, serviceName)
        # Guard clauses: only build real directories that contain a CMake
        # project, and never build redshift.
        if not os.path.isdir(serviceDir):
            continue
        if not any(str(entry) == "CMakeLists.txt" for entry in os.listdir(serviceDir)):
            continue
        if serviceName == "redshift":
            continue
        os.chdir(startDirectory)
        BuildExample(platform=arguments["platform"],
                     cpuArchitecture=arguments["cpuArchitecture"],
                     buildDir=os.path.join(startDirectory, serviceName),
                     buildType=arguments["configs"],
                     sdkInstallDir=quotedSdkInstallDir,
                     sourceDir=serviceDir,
                     parallel=arguments["parallel"])
# Run from powershell; make sure msbuild is in PATH environment variable
# NOTE(review): this runs unconditionally at import time; there is no
# ``if __name__ == "__main__"`` guard, so importing this module triggers
# a full build of every example.
Main()
| apache-2.0 |
DCSaunders/tensorflow | tensorflow/contrib/tensor_forest/__init__.py | 14 | 1104 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random forest implementation in tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.tensor_forest.client import *
from tensorflow.contrib.tensor_forest.data import *
from tensorflow.contrib.tensor_forest.python import *
# pylint: enable=unused-import,wildcard-import
| apache-2.0 |
cheukyin699/genset-demo-site | scripts/prediction_fixer.py | 1 | 2018 | #!/usr/bin/env python
'''
Takes the WEKA output files, and for each line, replaces the instance number
with the corresponding protein tag, removes the error column, replaces the
predictions column with the true/false probability columns, and outputs to the
specified file. Here is an example of the headers of an input file:
inst#,actual,predicted,error,prediction
Here is an example of the headers of the output file:
Protein Name,Actual,Predicted,True Probability,False Probability
To calculate the true probability, we take the 'predicted' column, and check if
the string 'TRUE' is a substring of said column. If it is, we copy the
'prediction' column to the 'True Probability' column. If it isn't, we take
`1 - prediction`, and insert the result into the 'True Probability' column. The
false probability is calculated and functions similarly.
'''
import argparse as ag
parser = ag.ArgumentParser(
description='Reformats WEKA buffer results (prettifies)',
epilog='Read pydocs for more.'
)
parser.add_argument('ref', help='original CSV file')
parser.add_argument('inp', help='input file with WEKA buffer result format (CSV)')
parser.add_argument('out', help='output filename')
args = parser.parse_args()
refcsv = args.ref
inpcsv = args.inp
outcsv = args.out
HEADERS = [
'Protein Name',
'Actual',
'Predicted',
'True Probability',
'False Probability'
]
with open(refcsv, 'r') as r, open(inpcsv, 'r') as i, open(outcsv, 'w') as o:
# Skip the first line
r.readline(); i.readline()
# Write the header
o.write(','.join(HEADERS) + '\n')
for i, (l1, l2) in enumerate(zip(r.readlines(), i.readlines())):
name = l1.split(',')[0]
s = l2.rstrip().split(',')
pred = s[2]
actual = s[1]
true_prob = s[4] if 'TRUE' in pred else "%0.3f" % (1 - float(s[4]))
false_prob = s[4] if 'FALSE' in pred else "%0.3f" % (1 - float(s[4]))
line = [name, actual, pred, true_prob, false_prob]
o.write(','.join(line) + '\n')
| gpl-3.0 |
slfl/HUAWEI89_WE_KK_610 | mediatek/build/tools/proguard_native_parser.py | 17 | 2505 | #!/usr/bin/python
import sys, os
import re
from optparse import OptionParser
# JNI call whose second argument names the Java method being looked up.
JNI_GET_METHOD = 'GetMethodID'

def getJavaMethods(filePath):
    """Scan a C++ source file for JNI GetMethodID calls.

    Returns a list of (location, method_name) tuples where location is
    "<filePath>:<lineNo>" (1-based) and method_name is the (unquoted) Java
    method name passed as the second GetMethodID argument.
    """
    javaMethods = list()
    # BUG FIX: the file was opened with the Python-2-only ``file()`` builtin
    # and never closed; use open() in a ``with`` block, and let enumerate
    # track line numbers instead of a manual counter.
    with open(filePath) as inputFile:
        for lineNo, line in enumerate(inputFile, start=1):
            funcStart = line.find(JNI_GET_METHOD)
            funcCall = ""
            if funcStart != -1:
                # Take everything from the call name up to (not including)
                # the statement's final semicolon.
                funcEnd = line.rfind(';')
                funcCall = line[funcStart:funcEnd]
            if funcCall != "":
                # Match "GetMethodID(<args>)" and capture the argument list.
                pattern = r'(\w[\w\d_]*)\((.*)\)$'
                match = re.match(pattern, funcCall)
                if match:
                    grps = list(match.groups())
                    if len(grps) == 2:
                        args = grps[1].split(',')
                        # Only well-formed three-argument calls are recorded.
                        if len(args) == 3:
                            javaFuncName = args[1].strip(' ')
                            if javaFuncName[0] == '\"':
                                javaFuncName = javaFuncName.strip('\"')
                            location = '%s:%d' % (filePath, lineNo)
                            javaMethods.append((location, javaFuncName))
    return javaMethods
def writeProGuard(javaMethods, filePath):
    """Write a ProGuard -keepclassmembers rule for each discovered method.

    javaMethods -- iterable of (location, method_name) pairs, as produced
                   by getJavaMethods.
    filePath    -- output *directory*; the rules are written to
                   <filePath>/proguard_native.
    """
    try:
        # BUG FIX: the output file was opened without ever being closed and
        # errors were swallowed by a bare ``except``; use a ``with`` block
        # and catch only I/O failures, keeping the original message.
        with open(filePath + '/proguard_native', 'w') as outFile:
            for location, method in javaMethods:
                outFile.write('# view ' + location + '\n')
                outFile.write('-keepclassmembers class * {\n')
                outFile.write(' *** ' + method + '(...);\n')
                outFile.write('}\n\n')
    except (IOError, OSError):
        print('open error' + filePath)
def main():
    """Walk a tree of .cpp files and emit ProGuard keep rules for JNI methods.

    Expects two positional arguments: the input directory to scan and the
    output directory for the generated proguard_native file.
    """
    # BUG FIX: the usage string advertised a single argument although two
    # positional arguments are required (checked below).
    parser = OptionParser(usage="usage: %prog input_directory output_directory",
                          version="%prog 1.0")
    (options, args) = parser.parse_args()
    if len(args) != 2:
        parser.print_help()
        sys.exit(1)
    curDir = os.getcwd()
    inDir = curDir + '/' + args[0]
    outDir = curDir + '/' + args[1]
    # BUG FIX: Python-2 print statements converted to the function form
    # (identical output, valid on both Python 2 and 3).
    print('input dir: ' + inDir)
    print('output dir: ' + outDir)
    javaMethods = list()
    for root, dirs, files in os.walk(inDir):
        # Renamed the loop variable from ``file`` (shadowed the builtin).
        for fileName in files:
            if fileName.endswith('.cpp'):
                filePath = root + '/' + fileName
                methods = getJavaMethods(filePath)
                if methods:
                    for location, method in methods:
                        javaMethods.append((location, method))
    writeProGuard(javaMethods, outDir)
| gpl-3.0 |
evhub/coconut | coconut/terminal.py | 1 | 11780 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------------------------------------------------
# INFO:
# -----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: logger utilities.
"""
# -----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
# -----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
from coconut.root import * # NOQA
import sys
import traceback
import logging
import time
from contextlib import contextmanager
from coconut.root import _indent
from coconut._pyparsing import (
lineno,
col,
ParserElement,
)
from coconut.constants import (
info_tabulation,
main_sig,
taberrfmt,
packrat_cache,
)
from coconut.exceptions import (
CoconutWarning,
displayable,
internal_assert,
)
# -----------------------------------------------------------------------------------------------------------------------
# FUNCTIONS:
# -----------------------------------------------------------------------------------------------------------------------
def printerr(*args):
    """Write the given values to standard error, space-separated and newline-terminated."""
    sys.stderr.write(" ".join(str(arg) for arg in args) + "\n")
def format_error(err_type, err_value, err_trace=None):
    """Properly format the given error.

    With a traceback, the full formatted exception is returned; without one,
    only "<ErrName>: <message>" is produced, with any module prefix stripped
    from the error name.
    """
    if err_trace is not None:
        return "".join(traceback.format_exception(err_type, err_value, err_trace)).strip()
    formatted = "".join(traceback.format_exception_only(err_type, err_value)).strip()
    # partition splits at the first ": "; if there is none, err_msg is "".
    err_name, _, err_msg = formatted.partition(": ")
    err_name = err_name.split(".")[-1]
    return err_name + ": " + err_msg
def complain(error):
    """Raise the error in develop mode; warn in release.

    ``error`` may be an exception instance or a zero-argument callable
    producing one.  NOTE: when ``error`` is callable and DEVELOP is off,
    nothing happens at all (mirrors historical behavior).
    """
    if callable(error):
        if DEVELOP:
            raise error()
        return
    if DEVELOP:
        raise error
    logger.warn_err(error)
def get_name(expr):
    """Get the name of an expression for displaying.

    Strings are returned as-is; otherwise the object's ``name`` attribute is
    used, falling back to its displayable representation.
    """
    if isinstance(expr, str):
        return expr
    attr_name = getattr(expr, "name", None)
    if attr_name is not None:
        return attr_name
    return displayable(expr)
def get_clock_time():
    """Get a process-time value to use for performance metrics."""
    # time.clock is the only option on Python 2; process_time otherwise.
    return time.clock() if PY2 else time.process_time()
# -----------------------------------------------------------------------------------------------------------------------
# logger:
# -----------------------------------------------------------------------------------------------------------------------
class Logger(object):
    """Container object for various logger functions and variables."""
    # Class-level defaults; copy_from overwrites these per instance.
    verbose = False
    quiet = False
    path = None
    name = None
    tracing = False
    trace_ind = 0

    def __init__(self, other=None):
        """Create a logger, optionally from another logger."""
        if other is not None:
            self.copy_from(other)
        self.patch_logging()

    def copy_from(self, other):
        """Copy other onto self."""
        self.verbose, self.quiet, self.path, self.name, self.tracing, self.trace_ind = other.verbose, other.quiet, other.path, other.name, other.tracing, other.trace_ind

    def display(self, messages, sig="", debug=False):
        """Prints an iterator of messages."""
        # splitlines(True) keeps line endings so sig can be prefixed onto
        # every line of a multi-line message.
        full_message = "".join(
            sig + line for line in " ".join(
                str(msg) for msg in messages
            ).splitlines(True)
        )
        if not full_message:
            full_message = sig.rstrip()
        if debug:
            printerr(full_message)
        else:
            print(full_message)

    def show(self, *messages):
        """Prints messages if not --quiet."""
        if not self.quiet:
            self.display(messages)

    def show_sig(self, *messages):
        """Prints messages with main signature if not --quiet."""
        if not self.quiet:
            self.display(messages, main_sig)

    def show_error(self, *messages):
        """Prints error messages with main signature if not --quiet."""
        if not self.quiet:
            self.display(messages, main_sig, debug=True)

    def log(self, *messages):
        """Logs debug messages if --verbose."""
        if self.verbose:
            printerr(*messages)

    def log_func(self, func):
        """Calls a function and logs the results if --verbose."""
        # func is only invoked when verbose, so expensive log computations
        # are skipped entirely in normal runs.
        if self.verbose:
            to_log = func()
            if isinstance(to_log, tuple):
                printerr(*to_log)
            else:
                printerr(to_log)

    def log_prefix(self, prefix, *messages):
        """Logs debug messages with the given signature if --verbose."""
        if self.verbose:
            self.display(messages, prefix, debug=True)

    def log_sig(self, *messages):
        """Logs debug messages with the main signature if --verbose."""
        self.log_prefix(main_sig, *messages)

    def log_vars(self, message, variables, rem_vars=("self",)):
        """Logs variables with given message if --verbose."""
        if self.verbose:
            new_vars = dict(variables)
            for v in rem_vars:
                del new_vars[v]
            printerr(message, new_vars)

    def get_error(self):
        """Properly formats the current error."""
        exc_info = sys.exc_info()
        if exc_info[0] is None:
            return None
        else:
            err_type, err_value, err_trace = exc_info[0], exc_info[1], None
            # The traceback is only included in verbose mode.
            if self.verbose and len(exc_info) > 2:
                err_trace = exc_info[2]
            return format_error(err_type, err_value, err_trace)

    @contextmanager
    def in_path(self, new_path, old_path=None):
        """Temporarily enters a path."""
        self.path = new_path
        try:
            yield
        finally:
            self.path = old_path

    def warn(self, *args, **kwargs):
        """Creates and displays a warning."""
        return self.warn_err(CoconutWarning(*args, **kwargs))

    def warn_err(self, warning, force=False):
        """Displays a warning."""
        # Raise and immediately catch the warning so display_exc can pick it
        # up from the active exception context.
        try:
            raise warning
        except Exception:
            if not self.quiet or force:
                self.display_exc()

    def display_exc(self):
        """Properly prints an exception in the exception context."""
        errmsg = self.get_error()
        if errmsg is not None:
            if self.path is not None:
                errmsg_lines = ["in " + self.path + ":"]
                for line in errmsg.splitlines():
                    if line:
                        line = " " * taberrfmt + line
                    errmsg_lines.append(line)
                errmsg = "\n".join(errmsg_lines)
            printerr(errmsg)

    def log_exc(self):
        """Display an exception only if --verbose."""
        if self.verbose:
            self.display_exc()

    def log_cmd(self, args):
        """Logs a console command if --verbose."""
        self.log("> " + " ".join(args))

    def show_tabulated(self, begin, middle, end):
        """Shows a tabulated message."""
        internal_assert(len(begin) < info_tabulation, "info message too long", begin)
        self.show(begin + " " * (info_tabulation - len(begin)) + middle + " " + end)

    @contextmanager
    def indent_tracing(self):
        """Indent wrapped tracing."""
        self.trace_ind += 1
        try:
            yield
        finally:
            self.trace_ind -= 1

    def print_trace(self, *args):
        """Print to stderr with tracing indent."""
        trace = " ".join(str(arg) for arg in args)
        printerr(_indent(trace, self.trace_ind))

    def log_tag(self, tag, code, multiline=False):
        """Logs a tagged message if tracing."""
        if self.tracing:
            # code may be a zero-argument callable, evaluated lazily.
            if callable(code):
                code = code()
            tagstr = "[" + str(tag) + "]"
            if multiline:
                self.print_trace(tagstr + "\n" + displayable(code))
            else:
                self.print_trace(tagstr, ascii(code))

    def log_trace(self, expr, original, loc, tokens=None, extra=None):
        """Formats and displays a trace if tracing."""
        if self.tracing:
            tag = get_name(expr)
            original = displayable(original)
            loc = int(loc)
            # Tags containing "{" are skipped entirely.
            if "{" not in tag:
                out = ["[" + tag + "]"]
                add_line_col = True
                if tokens is not None:
                    if isinstance(tokens, Exception):
                        msg = displayable(str(tokens))
                        # Collapse any brace-delimited middle of the message.
                        if "{" in msg:
                            head, middle = msg.split("{", 1)
                            middle, tail = middle.rsplit("}", 1)
                            msg = head + "{...}" + tail
                        out.append(msg)
                        add_line_col = False
                    elif len(tokens) == 1 and isinstance(tokens[0], str):
                        out.append(ascii(tokens[0]))
                    else:
                        out.append(ascii(tokens))
                if add_line_col:
                    out.append("(line:" + str(lineno(loc, original)) + ", col:" + str(col(loc, original)) + ")")
                if extra is not None:
                    out.append("from " + ascii(extra))
                self.print_trace(*out)

    def _trace_success_action(self, original, start_loc, end_loc, expr, tokens):
        # Installed by trace() as the pyparsing success debug action.
        self.log_trace(expr, original, start_loc, tokens)

    def _trace_exc_action(self, original, loc, expr, exc):
        # Installed by trace() as the pyparsing exception debug action.
        if self.verbose:
            self.log_trace(expr, original, loc, exc)

    def trace(self, item):
        """Traces a parse element (only enabled in develop)."""
        if DEVELOP:
            item.debugActions = (
                None,  # no start action
                self._trace_success_action,
                self._trace_exc_action,
            )
            item.debug = True
        return item

    @contextmanager
    def gather_parsing_stats(self):
        """Times parsing if --verbose."""
        if self.verbose:
            start_time = get_clock_time()
            try:
                yield
            finally:
                elapsed_time = get_clock_time() - start_time
                printerr("Time while parsing:", elapsed_time, "seconds")
                if packrat_cache:
                    hits, misses = ParserElement.packrat_cache_stats
                    printerr("Packrat parsing stats:", hits, "hits;", misses, "misses")
        else:
            yield

    def patch_logging(self):
        """Patches built-in Python logging if necessary."""
        # Only installs a replacement when the logging module lacks getLogger
        # (NOTE(review): presumably for stripped-down environments — confirm).
        if not hasattr(logging, "getLogger"):
            def getLogger(name=None):
                other = Logger(self)
                if name is not None:
                    other.name = name
                return other
            logging.getLogger = getLogger

    def pylog(self, *args, **kwargs):
        """Display all available logging information."""
        printerr(self.name, args, kwargs, traceback.format_exc())
    # Stand-ins for the standard logging.Logger methods.
    debug = info = warning = error = critical = exception = pylog
# -----------------------------------------------------------------------------------------------------------------------
# MAIN:
# -----------------------------------------------------------------------------------------------------------------------
# Singleton logger shared across the compiler; ``trace`` is a convenience
# alias for attaching debug actions to parse elements.
logger = Logger()
trace = logger.trace
| apache-2.0 |
tclose/python-neo | neo/io/neuroexplorerio.py | 6 | 13896 | # -*- coding: utf-8 -*-
"""
Class for reading data from NeuroExplorer (.nex)
Documentation for dev :
http://www.neuroexplorer.com/code.html
Depend on: scipy
Supported : Read
Author: sgarcia,luc estebanez
"""
import os
import struct
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import Segment, AnalogSignal, SpikeTrain, EpochArray, EventArray
class NeuroExplorerIO(BaseIO):
    """
    Class for reading nex file.
    Usage:
        >>> from neo import io
        >>> r = io.NeuroExplorerIO(filename='File_neuroexplorer_1.nex')
        >>> seg = r.read_segment(lazy=False, cascade=True)
        >>> print seg.analogsignals # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<AnalogSignal(array([ 39.0625 , 0. , 0. , ..., -26.85546875, ...
        >>> print seg.spiketrains # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<SpikeTrain(array([ 2.29499992e-02, 6.79249987e-02, 1.13399997e-01, ...
        >>> print seg.eventarrays # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<EventArray: @21.1967754364 s, @21.2993755341 s, @21.350725174 s, @21.5048999786 s, ...
        >>> print seg.epocharrays # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<neo.core.epocharray.EpochArray object at 0x10561ba90>, <neo.core.epocharray.EpochArray object at 0x10561bad0>]
    """
    # Capability flags consumed by the BaseIO machinery.
    is_readable = True
    is_writable = False
    supported_objects = [Segment, AnalogSignal, SpikeTrain, EventArray, EpochArray]
    readable_objects = [Segment]
    writeable_objects = []
    has_header = False
    is_streameable = False
    # This is for GUI stuf : a definition for parameters when reading.
    read_params = {
        Segment: []
    }
    write_params = None
    name = 'NeuroExplorer'
    extensions = ['nex']
    mode = 'file'

    def __init__(self, filename=None):
        """
        This class reads a nex file.
        Arguments:
            filename : the filename to read
        """
        BaseIO.__init__(self)
        self.filename = filename

    def read_segment(self,
                     lazy=False,
                     cascade=True,
                     ):
        """Read the .nex file and return its contents as a single Segment.

        lazy    -- if True, data containers are left empty and annotated with
                   ``lazy_shape`` instead of memory-mapping the arrays.
        cascade -- if False, only the bare annotated Segment is returned.
        """
        fid = open(self.filename, 'rb')
        globalHeader = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        #~ print globalHeader
        #~ print 'version' , globalHeader['version']
        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=globalHeader['version'])
        seg.annotate(comment=globalHeader['comment'])
        if not cascade:
            return seg
        # Entity headers are stored back to back, 208 bytes each, starting at
        # byte offset 544.
        offset = 544
        for i in range(globalHeader['nvar']):
            entityHeader = HeaderReader(fid, EntityHeader).read_f(offset=offset + i * 208)
            entityHeader['name'] = entityHeader['name'].replace('\x00', '')
            #print 'i',i, entityHeader['type']

            if entityHeader['type'] == 0:
                # neuron
                if lazy:
                    spike_times = [] * pq.s
                else:
                    # Raw int32 tick counts, converted to seconds via the
                    # global sampling frequency.
                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entityHeader['n']),
                                            offset=entityHeader['offset'],
                                            )
                    spike_times = spike_times.astype('f8') / globalHeader['freq'] * pq.s
                sptr = SpikeTrain(times=spike_times,
                                  t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                                  t_stop=globalHeader['tend'] / globalHeader['freq'] * pq.s,
                                  name=entityHeader['name'],
                                  )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index=entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entityHeader['n']),
                                            offset=entityHeader['offset'],
                                            )
                    event_times = event_times.astype('f8') / globalHeader['freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = EventArray(times=event_times, labels=labels, channel_name=entityHeader['name'])
                if lazy:
                    evar.lazy_shape = entityHeader['n']
                seg.eventarrays.append(evar)

            if entityHeader['type'] == 2:
                # interval
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entityHeader['n']),
                                            offset=entityHeader['offset'],
                                            )
                    start_times = start_times.astype('f8') / globalHeader['freq'] * pq.s
                    # The stop timestamps follow the n start timestamps
                    # (4 bytes each) in the file.
                    stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                           shape=(entityHeader['n']),
                                           offset=entityHeader['offset'] + entityHeader['n'] * 4,
                                           )
                    stop_times = stop_times.astype('f') / globalHeader['freq'] * pq.s
                epar = EpochArray(times=start_times,
                                  durations=stop_times - start_times,
                                  labels=np.array([''] * start_times.size, dtype='S'),
                                  channel_name=entityHeader['name'])
                if lazy:
                    epar.lazy_shape = entityHeader['n']
                seg.epocharrays.append(epar)

            if entityHeader['type'] == 3:
                # spiketrain and wavefoms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:
                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                            shape=(entityHeader['n']),
                                            offset=entityHeader['offset'],
                                            )
                    spike_times = spike_times.astype('f8') / globalHeader['freq'] * pq.s
                    # One waveform of NPointsWave int16 samples per spike,
                    # stored after the n spike timestamps.
                    waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
                                          shape=(entityHeader['n'], 1, entityHeader['NPointsWave']),
                                          offset=entityHeader['offset'] + entityHeader['n'] * 4,
                                          )
                    waveforms = (waveforms.astype('f') * entityHeader['ADtoMV'] + entityHeader['MVOffset']) * pq.mV
                t_stop = globalHeader['tend'] / globalHeader['freq'] * pq.s
                # Guard against spikes recorded past the nominal end time.
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(times=spike_times,
                                  t_start=globalHeader['tbeg'] / globalHeader['freq'] * pq.s,
                                  #~ t_stop = max(globalHeader['tend']/globalHeader['freq']*pq.s,max(spike_times)),
                                  t_stop=t_stop,
                                  name=entityHeader['name'],
                                  waveforms=waveforms,
                                  sampling_rate=entityHeader['WFrequency'] * pq.Hz,
                                  left_sweep=0 * pq.ms,
                                  )
                if lazy:
                    sptr.lazy_shape = entityHeader['n']
                sptr.annotate(channel_index=entityHeader['WireNumber'])
                seg.spiketrains.append(sptr)

            if entityHeader['type'] == 4:
                # popvectors
                pass

            if entityHeader['type'] == 5:
                # analog
                timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entityHeader['n']),
                                       offset=entityHeader['offset'],
                                       )
                timestamps = timestamps.astype('f8') / globalHeader['freq']
                # NOTE(review): fragmentStarts is read from the *same* offset
                # as timestamps — verify against the nex format spec whether
                # it should instead start at offset + n*4.
                fragmentStarts = np.memmap(self.filename, np.dtype('i4'), 'r',
                                           shape=(entityHeader['n']),
                                           offset=entityHeader['offset'],
                                           )
                fragmentStarts = fragmentStarts.astype('f8') / globalHeader['freq']
                t_start = timestamps[0] - fragmentStarts[0] / float(entityHeader['WFrequency'])
                del timestamps, fragmentStarts
                if lazy:
                    signal = [] * pq.mV
                else:
                    signal = np.memmap(self.filename, np.dtype('i2'), 'r',
                                       shape=(entityHeader['NPointsWave']),
                                       offset=entityHeader['offset'],
                                       )
                    signal = signal.astype('f')
                    # Scale raw AD counts into millivolts.
                    signal *= entityHeader['ADtoMV']
                    signal += entityHeader['MVOffset']
                    signal = signal * pq.mV
                anaSig = AnalogSignal(signal=signal, t_start=t_start * pq.s,
                                      sampling_rate=
                                      entityHeader['WFrequency'] * pq.Hz,
                                      name=entityHeader['name'],
                                      channel_index=entityHeader['WireNumber'])
                if lazy:
                    anaSig.lazy_shape = entityHeader['NPointsWave']
                seg.analogsignals.append(anaSig)

            if entityHeader['type'] == 6:
                # markers : TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                      shape=(entityHeader['n']),
                                      offset=entityHeader['offset'],
                                      )
                    times = times.astype('f8') / globalHeader['freq'] * pq.s
                    # Marker type name: a 64-byte NUL-padded string after the
                    # timestamps, followed by the fixed-width label strings.
                    fid.seek(entityHeader['offset'] + entityHeader['n'] * 4)
                    markertype = fid.read(64).replace('\x00', '')
                    labels = np.memmap(self.filename, np.dtype('S' + str(entityHeader['MarkerLength'])), 'r',
                                       shape=(entityHeader['n']),
                                       offset=entityHeader['offset'] + entityHeader['n'] * 4 + 64
                                       )
                ea = EventArray(times=times,
                                labels=labels.view(np.ndarray),
                                name=entityHeader['name'],
                                channel_index=entityHeader['WireNumber'],
                                marker_type=markertype
                                )
                if lazy:
                    ea.lazy_shape = entityHeader['n']
                seg.eventarrays.append(ea)

        seg.create_many_to_one_relationship()
        return seg
# On-disk layouts of the binary .nex headers.  Each entry is a
# (field name, struct format character) pair consumed by HeaderReader.read_f.
GlobalHeader = [
    ('signature', '4s'),
    ('version', 'i'),
    ('comment', '256s'),
    ('freq', 'd'),
    ('tbeg', 'i'),
    ('tend', 'i'),
    ('nvar', 'i'),
]

EntityHeader = [
    ('type', 'i'),
    ('varVersion', 'i'),
    ('name', '64s'),
    ('offset', 'i'),
    ('n', 'i'),
    ('WireNumber', 'i'),
    ('UnitNumber', 'i'),
    ('Gain', 'i'),
    ('Filter', 'i'),
    ('XPos', 'd'),
    ('YPos', 'd'),
    ('WFrequency', 'd'),
    ('ADtoMV', 'd'),
    ('NPointsWave', 'i'),
    ('NMarkers', 'i'),
    ('MarkerLength', 'i'),
    ('MVOffset', 'd'),
    ('dummy', '60s'),
]

MarkerHeader = [
    ('type', 'i'),
    ('varVersion', 'i'),
    ('name', '64s'),
    ('offset', 'i'),
    ('n', 'i'),
    ('WireNumber', 'i'),
    ('UnitNumber', 'i'),
    ('Gain', 'i'),
    ('Filter', 'i'),
]
class HeaderReader():
    """Unpack a sequence of named binary fields from an open file object."""

    def __init__(self, fid, description):
        # description is a list of (field name, struct format) pairs.
        self.fid = fid
        self.description = description

    def read_f(self, offset=0):
        """Seek to ``offset`` and return a dict of every described field.

        Single-value formats are unwrapped to the bare value; multi-value
        formats are returned as lists.
        """
        self.fid.seek(offset)
        fields = {}
        for key, fmt in self.description:
            raw = self.fid.read(struct.calcsize(fmt))
            values = struct.unpack(fmt, raw)
            fields[key] = values[0] if len(values) == 1 else list(values)
        return fields
| bsd-3-clause |
shoelzer/buildbot | master/buildbot/test/unit/test_mq_connector.py | 10 | 3297 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.mq import base
from buildbot.mq import connector
from buildbot.test.fake import fakemaster
from buildbot.util import service
class FakeMQ(service.ReconfigurableServiceMixin, base.MQBase):
    # Sentinel so tests can detect whether reconfigService ever ran.
    new_config = "not_called"

    def reconfigServiceWithBuildbotConfig(self, new_config):
        # Record the config we were reconfigured with for later assertions.
        self.new_config = new_config
        return defer.succeed(None)

    def produce(self, routingKey, data):
        # No-op stand-in for a real MQ implementation.
        pass

    def startConsuming(self, callback, filter, persistent_name=None):
        return defer.succeed(None)
class MQConnector(unittest.TestCase):

    def setUp(self):
        self.master = fakemaster.make_master()
        self.mqconfig = self.master.config.mq = {}
        self.conn = connector.MQConnector()
        self.conn.setServiceParent(self.master)

    def patchFakeMQ(self, name='fake'):
        # Point the connector's implementation registry at FakeMQ above.
        self.patch(connector.MQConnector, 'classes',
                   {name:
                    {'class': 'buildbot.test.unit.test_mq_connector.FakeMQ'},
                    })

    def test_setup_unknown_type(self):
        self.mqconfig['type'] = 'unknown'
        self.assertRaises(AssertionError, lambda:
                          self.conn.setup())

    def test_setup_simple_type(self):
        self.patchFakeMQ(name='simple')
        self.mqconfig['type'] = 'simple'
        self.conn.setup()
        self.assertIsInstance(self.conn.impl, FakeMQ)
        # setup() re-exports the implementation's entry points on the connector.
        self.assertEqual(self.conn.impl.produce, self.conn.produce)
        self.assertEqual(self.conn.impl.startConsuming,
                         self.conn.startConsuming)

    def test_reconfigServiceWithBuildbotConfig(self):
        self.patchFakeMQ()
        self.mqconfig['type'] = 'fake'
        self.conn.setup()
        new_config = mock.Mock()
        new_config.mq = dict(type='fake')
        d = self.conn.reconfigServiceWithBuildbotConfig(new_config)

        @d.addCallback
        def check(_):
            # FakeMQ records the config it was reconfigured with.
            self.assertIdentical(self.conn.impl.new_config, new_config)
        return d

    @defer.inlineCallbacks
    def test_reconfigService_change_type(self):
        self.patchFakeMQ()
        self.mqconfig['type'] = 'fake'
        self.conn.setup()
        new_config = mock.Mock()
        new_config.mq = dict(type='other')
        # Changing the MQ type at reconfig time is not supported and must
        # raise an AssertionError.
        try:
            yield self.conn.reconfigServiceWithBuildbotConfig(new_config)
        except AssertionError:
            pass  # expected
        else:
            self.fail("should have failed")
Mil0dV/voorpret-gen | tests/voorpret-gen_tests.py | 1 | 2962 | from nose.tools import *
# from voorpretgen import *
import voorpretgen
from voorpretgen import filemanager
from voorpretgen import main
from voorpretgen import spotify
import unittest
import pprint
def setup():
    """Nose module-level setup hook."""
    # BUG FIX: Python-2 print statement converted to the function form
    # (identical output, valid on both Python 2 and 3).
    print("SETUP!")
def teardown():
    """Nose module-level teardown hook."""
    # BUG FIX: Python-2 print statement converted to the function form.
    print("TEAR DOWN!")
def test_basic():
    """Smoke test proving the test module is collected and run."""
    # BUG FIX: Python-2 print statement converted to the function form.
    print("I RAN!")
class TestVoorpretgen(unittest.TestCase):
    # NOTE(review): several of these tests call the live Spotify web API and
    # read local fixture/settings files, so they are integration tests; a few
    # end in vacuous ``assertTrue(True)`` assertions that only verify the
    # call did not raise.

    def test_file_read(self):
        file_path = 'tests/lineup-test.txt'
        result = filemanager.lineup_parser(file_path)
        self.assertTrue(result == ['audiotist', 'larry de kat', 'madonna', 'interr ferenc'])

    def test_parse_argements(self):
        # parser = main.parse_arguments(['lineup.txt', 'lineup.txt'])
        # self.assertTrue(parser)
        # Couldn't get this to work though it should, therefore;
        pass

    def test_read_settings(self):
        file_path = 'tests/voorpretgen-test.ini'
        result = filemanager.read_settings(file_path)
        self.assertTrue(int(result[0]) == 3)

    def test_artist_id_list_gen(self):
        file_path = 'tests/lineup-test.txt'
        lineup = filemanager.lineup_parser(file_path)
        result = spotify.artist_id_list_gen(lineup, 1)
        self.assertTrue(type(result) == list)

    def test_tracklist_gen(self):
        file_path = 'tests/lineup-test.txt'
        lineup = filemanager.lineup_parser(file_path)
        artist_id_list = spotify.artist_id_list_gen(lineup)
        result = spotify.tracklist_gen(artist_id_list, 5)
        self.assertTrue(type(result) == list)

    def test_user_authentication(self):
        file_path = 'voorpretgen/voorpretgen.ini'
        settings = filemanager.read_settings(file_path)
        username = 'milowinterburn'
        result = spotify.user_authentication(username, settings[1:])
        self.assertTrue(result == None)

    def test_initialise(self):
        # settings_file = 'tests/voorpretgen-test.ini'
        # lineup_file = 'tests/voorpretgen-test.ini'
        # print lineup_file
        # result = main.initialise(['milowinterburn', 'bangface.txt'], settings_file)
        # self.assertTrue(int(result[0]) == 3)
        # Couldn't get this to work though it should, therefore;
        pass

    def test_get_token(self):
        username = 'milowinterburn'
        file_path = 'voorpretgen/voorpretgen.ini'
        result = filemanager.read_settings(file_path)
        spotify.get_token(username, result[1], result[2], result[3])
        self.assertTrue(True)

    def test_write_playlist(self):
        file_path = 'tests/lineup-test.txt'
        lineup = filemanager.lineup_parser(file_path)
        artist_id_list = spotify.artist_id_list_gen(lineup)
        track_id_list = spotify.tracklist_gen(artist_id_list, 5)
        playlist_name = 'test'
        username = 'milowinterburn'
        spotify.write_playlist(track_id_list, playlist_name, username)
        self.assertTrue(True)
imsplitbit/nova | nova/tests/compute/test_compute_xen.py | 16 | 2602 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for expectations of behaviour from the Xen driver."""
from oslo.config import cfg
from nova.compute import power_state
from nova import context
from nova.objects import instance as instance_obj
from nova.openstack.common import importutils
from nova.tests import fake_instance
from nova.tests.virt.xenapi import stubs
from nova.virt.xenapi import vm_utils
# Shared oslo.config handle; pull in the option definitions this test
# module references via self.flags()/import_object().
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('compute_driver', 'nova.virt.driver')
class ComputeXenTestCase(stubs.XenAPITestBaseNoDB):

    def setUp(self):
        super(ComputeXenTestCase, self).setUp()
        self.flags(compute_driver='xenapi.XenAPIDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        # Replace the real XenAPI session with the fake VM-test session.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.compute = importutils.import_object(CONF.compute_manager)

    def test_sync_power_states_instance_not_found(self):
        # An instance known to the DB but absent from the hypervisor
        # (vm_utils.lookup returns None) must be reported to
        # _sync_instance_power_state as NOSTATE.
        db_instance = fake_instance.fake_db_instance()
        ctxt = context.get_admin_context()
        instance_list = instance_obj._make_instance_list(ctxt,
            instance_obj.InstanceList(), [db_instance], None)
        instance = instance_list[0]

        self.mox.StubOutWithMock(instance_obj.InstanceList, 'get_by_host')
        self.mox.StubOutWithMock(self.compute.driver, 'get_num_instances')
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')

        instance_obj.InstanceList.get_by_host(ctxt,
            self.compute.host, use_slave=True).AndReturn(instance_list)
        self.compute.driver.get_num_instances().AndReturn(1)
        vm_utils.lookup(self.compute.driver._session, instance['name'],
                        False).AndReturn(None)
        self.compute._sync_instance_power_state(ctxt, instance,
                                                power_state.NOSTATE)

        self.mox.ReplayAll()
        self.compute._sync_power_states(ctxt)
efiring/scipy | scipy/integrate/odepack.py | 10 | 7143 | # Author: Travis Oliphant
from __future__ import division, print_function, absolute_import
__all__ = ['odeint']
from . import _odepack
from copy import copy
# Human-readable messages keyed by the integer status code ("istate")
# returned as the last element of _odepack.odeint()'s output tuple.
# Positive codes are success states; negative codes are failures.
_msgs = {2: "Integration successful.",
         1: "Nothing was done; the integration time was 0.",
         -1: "Excess work done on this call (perhaps wrong Dfun type).",
         -2: "Excess accuracy requested (tolerances too small).",
         -3: "Illegal input detected (internal error).",
         -4: "Repeated error test failures (internal error).",
         -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
         -6: "Error weight became zero during problem.",
         -7: "Internal workspace insufficient to finish (internal error)."
         }
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
           ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
           hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
           mxords=5, printmessg=0):
    """
    Integrate a system of ordinary differential equations.

    Solve a system of ordinary differential equations using lsoda from the
    FORTRAN library odepack.

    Solves the initial value problem for stiff or non-stiff systems
    of first order ode-s::

        dy/dt = func(y,t0,...)

    where y can be a vector.

    Parameters
    ----------
    func : callable(y, t0, ...)
        Computes the derivative of y at t0.
    y0 : array
        Initial condition on y (can be a vector).
    t : array
        A sequence of time points for which to solve for y.  The initial
        value point should be the first element of this sequence.
    args : tuple, optional
        Extra arguments to pass to function.
    Dfun : callable(y, t0, ...)
        Gradient (Jacobian) of `func`.
    col_deriv : bool, optional
        True if `Dfun` defines derivatives down columns (faster),
        otherwise `Dfun` should define derivatives across rows.
    full_output : bool, optional
        True if to return a dictionary of optional outputs as the second output
    printmessg : bool, optional
        Whether to print the convergence message

    Returns
    -------
    y : array, shape (len(t), len(y0))
        Array containing the value of y for each desired time in t,
        with the initial value `y0` in the first row.
    infodict : dict, only returned if full_output == True
        Dictionary containing additional output information

        ======= ============================================================
        key     meaning
        ======= ============================================================
        'hu'    vector of step sizes successfully used for each time step.
        'tcur'  vector with the value of t reached for each time step.
                (will always be at least as large as the input times).
        'tolsf' vector of tolerance scale factors, greater than 1.0,
                computed when a request for too much accuracy was detected.
        'tsw'   value of t at the time of the last method switch
                (given for each time step)
        'nst'   cumulative number of time steps
        'nfe'   cumulative number of function evaluations for each time step
        'nje'   cumulative number of jacobian evaluations for each time step
        'nqu'   a vector of method orders for each successful step.
        'imxer' index of the component of largest magnitude in the
                weighted local error vector (e / ewt) on an error return, -1
                otherwise.
        'lenrw' the length of the double work array required.
        'leniw' the length of integer work array required.
        'mused' a vector of method indicators for each successful time step:
                1: adams (nonstiff), 2: bdf (stiff)
        ======= ============================================================

    Other Parameters
    ----------------
    ml, mu : int, optional
        If either of these are not None or non-negative, then the
        Jacobian is assumed to be banded.  These give the number of
        lower and upper non-zero diagonals in this banded matrix.
        For the banded case, `Dfun` should return a matrix whose
        rows contain the non-zero bands (starting with the lowest diagonal).
        Thus, the return matrix `jac` from `Dfun` should have shape
        ``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
        The data in `jac` must be stored such that ``jac[i - j + mu, j]``
        holds the derivative of the `i`th equation with respect to the `j`th
        state variable.  If `col_deriv` is True, the transpose of this
        `jac` must be returned.
    rtol, atol : float, optional
        The input parameters `rtol` and `atol` determine the error
        control performed by the solver.  The solver will control the
        vector, e, of estimated local errors in y, according to an
        inequality of the form ``max-norm of (e / ewt) <= 1``,
        where ewt is a vector of positive error weights computed as
        ``ewt = rtol * abs(y) + atol``.
        rtol and atol can be either vectors the same length as y or scalars.
        Defaults to 1.49012e-8.
    tcrit : ndarray, optional
        Vector of critical points (e.g. singularities) where integration
        care should be taken.
    h0 : float, (0: solver-determined), optional
        The step size to be attempted on the first step.
    hmax : float, (0: solver-determined), optional
        The maximum absolute step size allowed.
    hmin : float, (0: solver-determined), optional
        The minimum absolute step size allowed.
    ixpr : bool, optional
        Whether to generate extra printing at method switches.
    mxstep : int, (0: solver-determined), optional
        Maximum number of (internally defined) steps allowed for each
        integration point in t.
    mxhnil : int, (0: solver-determined), optional
        Maximum number of messages printed.
    mxordn : int, (0: solver-determined), optional
        Maximum order to be allowed for the non-stiff (Adams) method.
    mxords : int, (0: solver-determined), optional
        Maximum order to be allowed for the stiff (BDF) method.

    See Also
    --------
    ode : a more object-oriented integrator based on VODE.
    quad : for finding the area under a curve.
    """
    # -1 tells the C wrapper "not banded"; it is normalized there.
    if ml is None:
        ml = -1  # changed to zero inside function call
    if mu is None:
        mu = -1  # changed to zero inside function call
    # Copy so the C extension cannot mutate the caller's arrays in place.
    t = copy(t)
    y0 = copy(y0)
    output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
                             full_output, rtol, atol, tcrit, h0, hmax, hmin,
                             ixpr, mxstep, mxhnil, mxordn, mxords)
    # output[-1] is the lsoda "istate" status code (see _msgs above).
    if output[-1] < 0:
        # Failure: report the reason but still fall through and return
        # whatever partial output the solver produced (historical API).
        print(_msgs[output[-1]])
        print("Run with full_output = 1 to get quantitative information.")
    else:
        if printmessg:
            print(_msgs[output[-1]])
    if full_output:
        output[1]['message'] = _msgs[output[-1]]
    # Strip the status code; return (y,) or (y, infodict).
    output = output[:-1]
    if len(output) == 1:
        return output[0]
    else:
        return output
| bsd-3-clause |
hasteur/g13bot_tools_new | pywikibot/comms/eventstreams.py | 1 | 10259 | # -*- coding: utf-8 -*-
"""
Server-Sent Events client.
This file is part of the Pywikibot framework.
This module requires sseclient to be installed:
pip install sseclient
"""
#
# (C) xqt, 2017
# (C) Pywikibot team, 2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import json
import socket
from requests.packages.urllib3.exceptions import ProtocolError
from requests.packages.urllib3.response import httplib
try:
from sseclient import SSEClient as EventSource
except ImportError as e:
EventSource = e
from pywikibot import config, debug, Site, warning
from pywikibot.tools import StringTypes
_logger = 'pywikibot.eventstreams'
class EventStreams(object):

    """Basic EventStreams iterator class for Server-Sent Events (SSE) protocol.

    It provides access to arbitrary streams of data including recent changes.
    It replaces rcstream.py implementation.
    """

    def __init__(self, **kwargs):
        """Constructor.

        @keyword site: a project site object. Used when no url is given
        @type site: APISite
        @keyword stream: event stream type. Used when no url is given.
        @type stream: str
        @keyword timeout: a timeout value indication how long to wait to send
            data before giving up
        @type timeout: int, float or a tuple of two values of int or float
        @keyword url: an url retrieving events from. Will be set up to a
            default url using _site.family settings and streamtype
        @type url: str
        @param kwargs: keyword arguments passed to SSEClient and requests lib
        @type kwargs: dict
        @raises ImportError: sseclient is not installed
        """
        if isinstance(EventSource, Exception):
            raise ImportError('sseclient is required for EventStreams;\n'
                              'install it with "pip install sseclient"\n')
        self.filter = {'all': [], 'any': [], 'none': []}
        self._total = None
        self._site = kwargs.pop('site', Site())
        self._stream = kwargs.pop('stream', None)
        self._url = kwargs.get('url') or self.url
        kwargs.setdefault('url', self._url)
        kwargs.setdefault('timeout', config.socket_timeout)
        self.sse_kwargs = kwargs

    @property
    def url(self):
        """Get the EventStream's url.

        @raises NotImplementedError: streamtype is not specified
        """
        if not hasattr(self, '_url'):
            if self._stream is None:
                raise NotImplementedError(
                    'No stream specified for class {0}'
                    .format(self.__class__.__name__))
            self._url = ('{0}{1}/{2}'.format(self._site.rcstream_host(),
                                             self._site.eventstreams_path(),
                                             self._stream))
        return self._url

    def set_maximum_items(self, value):
        """
        Set the maximum number of items to be retrieved from the stream.

        If not called, most queries will continue as long as there is
        more data to be retrieved from the stream.

        @param value: The value of maximum number of items to be retrieved
            in total to set.
        @type value: int
        """
        if value is not None:
            self._total = int(value)
            debug('{0}: Set limit (maximum_items) to {1}.'
                  .format(self.__class__.__name__, self._total), _logger)

    def register_filter(self, *args, **kwargs):
        """Register a filter.

        Filter types
        ============

        There are 3 types of filter: 'all', 'any' and 'none'.
        The filter type must be given with the keyword argument 'ftype'
        (see below). If no 'ftype' keyword argument is given, 'all' is
        assumed as default.

        You may register multiple filters for each type of filter.
        The behaviour of filter type is as follows::

        - B{'none'}: Skip if the any filter matches. Otherwise check 'all'.
        - B{'all'}: Skip if not all filter matches. Otherwise check 'any':
        - B{'any'}: Skip if no given filter matches. Otherwise pass.

        Filter functions
        ================

        Filter may be specified as external function methods given as
        positional argument like::

            def foo(data):
                return True

            register_filter(foo, ftype='any')

        The data dict from event is passed to the external filter function as
        a parameter and that method must handle it in a proper way and return
        C{True} if the filter matches and C{False} otherwise.

        Filter keys and values
        ======================

        Another method to register a filter is to pass pairs of keys and values
        as keyword arguments to this method. The key must be a key of the event
        data dict and the value must be any value or an iterable of values the
        C{data['key']} may match or be part of it. Samples::

            register_filter(server_name='de.wikipedia.org')  # 1
            register_filter(type=('edit', 'log'))  # 2
            register_filter(ftype='none', bot=True)  # 3

        Explanation for the result of the filter function:
        1. C{return data['server_name'] == 'de.wikipedia.org'}
        2. C{return data['type'] in ('edit', 'log')}
        3. C{return data['bot'] is True}

        @keyword ftype: The filter type, one of 'all', 'any', 'none'.
            Default value is 'all'
        @type ftype: str
        @param args: You may pass your own filter functions here.
            Every function should be able to handle the data dict from events.
        @type args: callable
        @param kwargs: Any key returned by event data with a event data value
            for this given key.
        @type kwargs: str, list, tuple or other sequence
        @raise TypeError: A given args parameter is not a callable.
        """
        ftype = kwargs.pop('ftype', 'all')  # set default ftype value

        # register an external filter function
        for func in args:
            if callable(func):
                self.filter[ftype].append(func)
            else:
                raise TypeError('{0} is not a callable'.format(func))

        # register pairs of keys and items as a filter function
        for key, value in kwargs.items():
            # Bind the current key/value as lambda default arguments.
            # A plain closure over the loop variables would be late-binding:
            # after the loop every registered lambda would test the key/value
            # pair of the *last* iteration only, silently breaking filters
            # registered with more than one keyword argument.
            # append function for singletons
            if value in (True, False, None):
                self.filter[ftype].append(
                    lambda e, key=key, value=value:
                        key in e and e[key] is value)
            # append function for a single value
            elif isinstance(value, StringTypes):
                self.filter[ftype].append(
                    lambda e, key=key, value=value:
                        key in e and e[key] == value)
            # append function for an iterable as value
            else:
                self.filter[ftype].append(
                    lambda e, key=key, value=value:
                        key in e and e[key] in value)

    def streamfilter(self, data):
        """Filter function for eventstreams.

        See the description of register_filter() how it works.

        @param data: event data dict used by filter functions
        @type data: dict
        """
        if any(function(data) for function in self.filter['none']):
            return False
        if not all(function(data) for function in self.filter['all']):
            return False
        if not self.filter['any']:
            return True
        return any(function(data) for function in self.filter['any'])

    def __iter__(self):
        """Iterator."""
        n = 0
        event = None
        while self._total is None or n < self._total:
            if not hasattr(self, 'source'):
                self.source = EventSource(**self.sse_kwargs)
            try:
                event = next(self.source)
            except (ProtocolError, socket.error, httplib.IncompleteRead) as e:
                warning('Connection error: {0}.\n'
                        'Try to re-establish connection.'.format(e))
                del self.source
                if event is not None:
                    # resume the stream where it broke off
                    self.sse_kwargs['last_id'] = event.id
                continue
            if event.event == 'message' and event.data:
                try:
                    element = json.loads(event.data)
                except ValueError as e:
                    warning('Could not load json data from\n{0}\n{1}'
                            .format(event, e))
                else:
                    if self.streamfilter(element):
                        n += 1
                        yield element
            elif event.event == 'message' and not event.data:
                warning('Empty message found.')
            elif event.event == 'error':
                warning('Encountered error: {0}'.format(event.data))
            else:
                warning('Unknown event {0} occurred.'.format(event.event))
        else:
            debug('{0}: Stopped iterating due to '
                  'exceeding item limit.'
                  .format(self.__class__.__name__), _logger)
        del self.source
def site_rc_listener(site, total=None):
    """Yield changes received from EventStream.

    Falls back to the older rcstream implementation when the optional
    sseclient dependency is not installed.

    @param site: the Pywikibot.Site object to yield live recent changes for
    @type site: Pywikibot.BaseSite
    @param total: the maximum number of changes to return
    @type total: int
    @return: pywikibot.comms.eventstream.rc_listener configured for given site
    """
    if isinstance(EventSource, Exception):
        warning('sseclient is required for EventStreams;\n'
                'install it with "pip install sseclient"\n')
        # fallback to old rcstream method
        # NOTE: this will be deprecated soon
        from pywikibot.comms.rcstream import rc_listener
        return rc_listener(
            wikihost=site.hostname(),
            rchost=site.rcstream_host(),
            rcport=site.rcstream_port(),
            rcpath=site.rcstream_path(),
            total=total,
        )
    stream = EventStreams(stream='recentchange', site=site)
    stream.set_maximum_items(total)
    # Only events originating from this wiki pass the filter.
    stream.register_filter(server_name=site.hostname())
    return stream
| mit |
aatjitra/PR25 | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
    """Print command line usage information to stdout."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    """Print str to stdout unless quiet mode (-q) is enabled."""
    # NOTE(review): the parameter shadows the builtin ``str``.
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Evaluate one status value against a test opcode.

    val: status value read from the tester's status file (numeric string).
    top: test opcode triple [field, relation, argument] from test_opcodes.
         field "M" inspects one decimal digit of val (selected by arg),
         field "O" resolves arg through cmd_opcodes, anything else
         compares val against int(arg).  relation is "eq", "lt" or "gt".
    arg: argument column of the test line (string).

    Returns 1 if the comparison holds, 0 otherwise.
    """
    intval = int(val)

    if top[0] == "M":
        # Pick the decimal digit selected by ``arg``.  Floor division keeps
        # the value integral; a plain ``/`` relies on Python 2 truncating
        # division and would yield a float under Python 3 (or with
        # ``from __future__ import division``), making the equality checks
        # below silently fail.
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    # progress("%d %s %d" %(intval, top[1], argval))

    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source: the test file given on the command line,
# otherwise stdin.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin

linenr = 0

# Read the test patterns.  Each line has the form
# "cmd:opcode:threadid:data"; lines starting with '#' are comments.
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
        progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        # "t" checks once and fails the run on mismatch;
        # "w" polls the status file until the condition matches.
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                # Syntax-check mode: only show the file that would be read.
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
markandrewj/pygments | pygments/formatters/rtf.py | 1 | 4536 | # -*- coding: utf-8 -*-
"""
pygments.formatters.rtf
~~~~~~~~~~~~~~~~~~~~~~~
A formatter that generates RTF files.
:copyright: Copyright 2006-2011 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
__all__ = ['RtfFormatter']
class RtfFormatter(Formatter):
    """
    Format tokens as RTF markup. This formatter automatically outputs full RTF
    documents with color information and other useful stuff. Perfect for Copy and
    Paste into Microsoft® Word® documents.

    *New in Pygments 0.6.*

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `fontface`
        The used font family, for example ``Bitstream Vera Sans``. Defaults to
        some generic font which is supposed to have fixed width.
    """
    name = 'RTF'
    aliases = ['rtf']
    filenames = ['*.rtf']
    # RTF output is an 8-bit format; see the encoding handling below.
    unicodeoutput = False

    def __init__(self, **options):
        """
        Additional options accepted:

        ``fontface``
            Name of the font used. Could for example be ``'Courier New'``
            to further specify the default which is ``'\fmodern'``. The RTF
            specification claims that ``\fmodern`` are "Fixed-pitch serif
            and sans serif fonts". Hope every RTF implementation thinks
            the same about modern...
        """
        Formatter.__init__(self, **options)
        self.fontface = options.get('fontface') or ''

    def _escape(self, text):
        # Backslash must be escaped first, otherwise the escapes added for
        # the braces would themselves be doubled.
        return text.replace('\\', '\\\\') \
                   .replace('{', '\\{') \
                   .replace('}', '\\}')

    def _escape_text(self, text):
        # empty strings, should give a small performance improvement
        if not text:
            return ''
        # escape text
        text = self._escape(text)
        # RTF is an 8-bit format; fall back to Latin-9 for the ANSI
        # representation when a Unicode output encoding was requested.
        if self.encoding in ('utf-8', 'utf-16', 'utf-32'):
            encoding = 'iso-8859-15'
        else:
            encoding = self.encoding or 'iso-8859-15'
        buf = []
        for c in text:
            if ord(c) > 128:
                # Non-ASCII: emit a \u escape with an ANSI fallback
                # character for readers without Unicode support.
                ansic = c.encode(encoding, 'ignore') or '?'
                if ord(ansic) > 128:
                    ansic = '\\\'%x' % ord(ansic)
                else:
                    ansic = c
                buf.append(r'\ud{\u%d%s}' % (ord(c), ansic))
            else:
                buf.append(str(c))
        return ''.join(buf).replace('\n', '\\par\n')

    def format_unencoded(self, tokensource, outfile):
        # rtf 1.8 header
        outfile.write(r'{\rtf1\ansi\deff0'
                      r'{\fonttbl{\f0\fmodern\fprq1\fcharset0%s;}}'
                      r'{\colortbl;' % (self.fontface and
                                        ' ' + self._escape(self.fontface) or
                                        ''))
        # convert colors and save them in a mapping to access them later.
        # Index 0 of the color table is reserved, hence the offset of 1.
        color_mapping = {}
        offset = 1
        for _, style in self.style:
            for color in style['color'], style['bgcolor'], style['border']:
                if color and color not in color_mapping:
                    color_mapping[color] = offset
                    outfile.write(r'\red%d\green%d\blue%d;' % (
                        int(color[0:2], 16),
                        int(color[2:4], 16),
                        int(color[4:6], 16)
                    ))
                    offset += 1
        outfile.write(r'}\f0')
        # highlight stream
        for ttype, value in tokensource:
            # Walk up the token hierarchy until a styled token is found.
            while not self.style.styles_token(ttype) and ttype.parent:
                ttype = ttype.parent
            style = self.style.style_for_token(ttype)
            buf = []
            if style['bgcolor']:
                buf.append(r'\cb%d' % color_mapping[style['bgcolor']])
            if style['color']:
                buf.append(r'\cf%d' % color_mapping[style['color']])
            if style['bold']:
                buf.append(r'\b')
            if style['italic']:
                buf.append(r'\i')
            if style['underline']:
                buf.append(r'\ul')
            if style['border']:
                buf.append(r'\chbrdr\chcfpat%d' %
                           color_mapping[style['border']])
            start = ''.join(buf)
            if start:
                outfile.write('{%s ' % start)
            outfile.write(self._escape_text(value))
            if start:
                outfile.write('}')
        outfile.write('}')
| bsd-2-clause |
double12gzh/nova | nova/tests/unit/scheduler/filters/test_disk_filters.py | 58 | 4487 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import disk_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestDiskFilter(test.NoDBTestCase):
    """Tests for the disk capacity scheduler filters."""

    def setUp(self):
        super(TestDiskFilter, self).setUp()

    def test_disk_filter_passes(self):
        # 1 + 1 GB disk plus 512 MB swap fits into the 11 GB reported free.
        self.flags(disk_allocation_ratio=1.0)
        filt_cls = disk_filter.DiskFilter()
        filter_properties = {'instance_type': {'root_gb': 1,
            'ephemeral_gb': 1, 'swap': 512}}
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_disk_filter_fails(self):
        # 10 + 1 GB disk plus 1 GB swap exceeds the 11 GB reported free.
        self.flags(disk_allocation_ratio=1.0)
        filt_cls = disk_filter.DiskFilter()
        filter_properties = {'instance_type': {'root_gb': 10,
            'ephemeral_gb': 1, 'swap': 1024}}
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_disk_filter_oversubscribe(self):
        self.flags(disk_allocation_ratio=10.0)
        filt_cls = disk_filter.DiskFilter()
        filter_properties = {'instance_type': {'root_gb': 100,
            'ephemeral_gb': 18, 'swap': 1024}}
        # 1GB used... so 119GB allowed...
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # The filter must also publish the oversubscription limit it applied.
        self.assertEqual(12 * 10.0, host.limits['disk_gb'])

    def test_disk_filter_oversubscribe_fail(self):
        self.flags(disk_allocation_ratio=10.0)
        filt_cls = disk_filter.DiskFilter()
        filter_properties = {'instance_type': {'root_gb': 100,
            'ephemeral_gb': 19, 'swap': 1024}}
        # 1GB used... so 119GB allowed...
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_disk_filter_value_error(self, agg_mock):
        # A non-numeric aggregate value must not crash the filter; presumably
        # it falls back to the global disk_allocation_ratio (the request fits
        # at ratio 1.0, so the host passes) -- behavior defined in
        # AggregateDiskFilter, not visible here.
        filt_cls = disk_filter.AggregateDiskFilter()
        self.flags(disk_allocation_ratio=1.0)
        filter_properties = {
            'context': mock.sentinel.ctx,
            'instance_type': {'root_gb': 1,
                              'ephemeral_gb': 1,
                              'swap': 1024}}
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 3 * 1024,
                                    'total_usable_disk_gb': 1})
        agg_mock.return_value = set(['XXX'])
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        agg_mock.assert_called_once_with(host, 'disk_allocation_ratio')

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
    def test_aggregate_disk_filter_default_value(self, agg_mock):
        filt_cls = disk_filter.AggregateDiskFilter()
        self.flags(disk_allocation_ratio=1.0)
        filter_properties = {
            'context': mock.sentinel.ctx,
            'instance_type': {'root_gb': 2,
                              'ephemeral_gb': 1,
                              'swap': 1024}}
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 3 * 1024,
                                    'total_usable_disk_gb': 1})
        # Uses global conf.
        agg_mock.return_value = set([])
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
        agg_mock.assert_called_once_with(host, 'disk_allocation_ratio')
        # An aggregate ratio of 2 makes the same request fit.
        agg_mock.return_value = set(['2'])
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
| apache-2.0 |
sumedhasingla/VTK | ThirdParty/Twisted/twisted/web/test/test_cgi.py | 23 | 11227 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.twcgi}.
"""
import sys, os
from twisted.trial import unittest
from twisted.internet import reactor, interfaces, error
from twisted.python import util, failure, log
from twisted.web.http import NOT_FOUND, INTERNAL_SERVER_ERROR
from twisted.web import client, twcgi, server, resource
from twisted.web.test._util import _render
from twisted.web.test.test_web import DummyRequest
DUMMY_CGI = '''\
print "Header: OK"
print
print "cgi output"
'''
DUAL_HEADER_CGI = '''\
print "Header: spam"
print "Header: eggs"
print
print "cgi output"
'''
BROKEN_HEADER_CGI = '''\
print "XYZ"
print
print "cgi output"
'''
SPECIAL_HEADER_CGI = '''\
print "Server: monkeys"
print "Date: last year"
print
print "cgi output"
'''
READINPUT_CGI = '''\
# this is an example of a correctly-written CGI script which reads a body
# from stdin, which only reads env['CONTENT_LENGTH'] bytes.
import os, sys
body_length = int(os.environ.get('CONTENT_LENGTH',0))
indata = sys.stdin.read(body_length)
print "Header: OK"
print
print "readinput ok"
'''
READALLINPUT_CGI = '''\
# this is an example of the typical (incorrect) CGI script which expects
# the server to close stdin when the body of the request is complete.
# A correct CGI should only read env['CONTENT_LENGTH'] bytes.
import sys
indata = sys.stdin.read()
print "Header: OK"
print
print "readallinput ok"
'''
NO_DUPLICATE_CONTENT_TYPE_HEADER_CGI = '''\
print "content-type: text/cgi-duplicate-test"
print
print "cgi output"
'''
class PythonScript(twcgi.FilteredScript):
    # Run the CGI scripts through the current Python interpreter so the
    # generated test scripts execute regardless of shebang or permissions.
    filter = sys.executable
class CGI(unittest.TestCase):
    """
    Tests for L{twcgi.FilteredScript}.
    """
    if not interfaces.IReactorProcess.providedBy(reactor):
        skip = "CGI tests require a functional reactor.spawnProcess()"

    def startServer(self, cgi):
        """Serve the given CGI script at /cgi on an ephemeral TCP port and
        return the port number.
        """
        root = resource.Resource()
        cgipath = util.sibpath(__file__, cgi)
        root.putChild("cgi", PythonScript(cgipath))
        site = server.Site(root)
        self.p = reactor.listenTCP(0, site)
        return self.p.getHost().port

    def tearDown(self):
        # Shut down the listening port if a test started one.
        if getattr(self, 'p', None):
            return self.p.stopListening()

    def writeCGI(self, source):
        """Write the CGI source to a temporary file and return its path."""
        cgiFilename = os.path.abspath(self.mktemp())
        cgiFile = file(cgiFilename, 'wt')
        cgiFile.write(source)
        cgiFile.close()
        return cgiFilename

    def testCGI(self):
        cgiFilename = self.writeCGI(DUMMY_CGI)
        portnum = self.startServer(cgiFilename)
        d = client.getPage("http://localhost:%d/cgi" % portnum)
        d.addCallback(self._testCGI_1)
        return d

    def _testCGI_1(self, res):
        self.assertEqual(res, "cgi output" + os.linesep)

    def test_protectedServerAndDate(self):
        """
        If the CGI script emits a I{Server} or I{Date} header, these are
        ignored.
        """
        cgiFilename = self.writeCGI(SPECIAL_HEADER_CGI)
        portnum = self.startServer(cgiFilename)
        url = "http://localhost:%d/cgi" % (portnum,)
        factory = client.HTTPClientFactory(url)
        reactor.connectTCP('localhost', portnum, factory)
        def checkResponse(ignored):
            self.assertNotIn('monkeys', factory.response_headers['server'])
            self.assertNotIn('last year', factory.response_headers['date'])
        factory.deferred.addCallback(checkResponse)
        return factory.deferred

    def test_noDuplicateContentTypeHeaders(self):
        """
        If the CGI script emits a I{content-type} header, make sure that the
        server doesn't add an additional (duplicate) one, as per ticket 4786.
        """
        cgiFilename = self.writeCGI(NO_DUPLICATE_CONTENT_TYPE_HEADER_CGI)
        portnum = self.startServer(cgiFilename)
        url = "http://localhost:%d/cgi" % (portnum,)
        factory = client.HTTPClientFactory(url)
        reactor.connectTCP('localhost', portnum, factory)
        def checkResponse(ignored):
            self.assertEqual(
                factory.response_headers['content-type'], ['text/cgi-duplicate-test'])
        factory.deferred.addCallback(checkResponse)
        return factory.deferred

    def test_duplicateHeaderCGI(self):
        """
        If a CGI script emits two instances of the same header, both are sent in
        the response.
        """
        cgiFilename = self.writeCGI(DUAL_HEADER_CGI)
        portnum = self.startServer(cgiFilename)
        url = "http://localhost:%d/cgi" % (portnum,)
        factory = client.HTTPClientFactory(url)
        reactor.connectTCP('localhost', portnum, factory)
        def checkResponse(ignored):
            self.assertEqual(
                factory.response_headers['header'], ['spam', 'eggs'])
        factory.deferred.addCallback(checkResponse)
        return factory.deferred

    def test_malformedHeaderCGI(self):
        """
        Check for the error message in the duplicated header
        """
        cgiFilename = self.writeCGI(BROKEN_HEADER_CGI)
        portnum = self.startServer(cgiFilename)
        url = "http://localhost:%d/cgi" % (portnum,)
        factory = client.HTTPClientFactory(url)
        reactor.connectTCP('localhost', portnum, factory)
        # Capture log events so the warning emitted for the bad header
        # can be asserted on.
        loggedMessages = []
        def addMessage(eventDict):
            loggedMessages.append(log.textFromEventDict(eventDict))
        log.addObserver(addMessage)
        self.addCleanup(log.removeObserver, addMessage)
        def checkResponse(ignored):
            self.assertEqual(loggedMessages[0],
                             "ignoring malformed CGI header: 'XYZ'")
        factory.deferred.addCallback(checkResponse)
        return factory.deferred

    def testReadEmptyInput(self):
        cgiFilename = os.path.abspath(self.mktemp())
        cgiFile = file(cgiFilename, 'wt')
        cgiFile.write(READINPUT_CGI)
        cgiFile.close()
        portnum = self.startServer(cgiFilename)
        d = client.getPage("http://localhost:%d/cgi" % portnum)
        d.addCallback(self._testReadEmptyInput_1)
        return d
    testReadEmptyInput.timeout = 5

    def _testReadEmptyInput_1(self, res):
        self.assertEqual(res, "readinput ok%s" % os.linesep)

    def testReadInput(self):
        cgiFilename = os.path.abspath(self.mktemp())
        cgiFile = file(cgiFilename, 'wt')
        cgiFile.write(READINPUT_CGI)
        cgiFile.close()
        portnum = self.startServer(cgiFilename)
        d = client.getPage("http://localhost:%d/cgi" % portnum,
                           method="POST",
                           postdata="Here is your stdin")
        d.addCallback(self._testReadInput_1)
        return d
    testReadInput.timeout = 5

    def _testReadInput_1(self, res):
        self.assertEqual(res, "readinput ok%s" % os.linesep)

    def testReadAllInput(self):
        # READALLINPUT_CGI reads stdin to EOF; this exercises the server's
        # handling of such (incorrectly written) CGI scripts.
        cgiFilename = os.path.abspath(self.mktemp())
        cgiFile = file(cgiFilename, 'wt')
        cgiFile.write(READALLINPUT_CGI)
        cgiFile.close()
        portnum = self.startServer(cgiFilename)
        d = client.getPage("http://localhost:%d/cgi" % portnum,
                           method="POST",
                           postdata="Here is your stdin")
        d.addCallback(self._testReadAllInput_1)
        return d
    testReadAllInput.timeout = 5

    def _testReadAllInput_1(self, res):
        self.assertEqual(res, "readallinput ok%s" % os.linesep)

    def test_useReactorArgument(self):
        """
        L{twcgi.FilteredScript.runProcess} uses the reactor passed as an
        argument to the constructor.
        """
        class FakeReactor:
            """
            A fake reactor recording whether spawnProcess is called.
            """
            called = False
            def spawnProcess(self, *args, **kwargs):
                """
                Set the C{called} flag to C{True} if C{spawnProcess} is called.

                @param args: Positional arguments.
                @param kwargs: Keyword arguments.
                """
                self.called = True

        fakeReactor = FakeReactor()
        request = DummyRequest(['a', 'b'])
        resource = twcgi.FilteredScript("dummy-file", reactor=fakeReactor)
        _render(resource, request)
        self.assertTrue(fakeReactor.called)
class CGIScriptTests(unittest.TestCase):
    """
    Tests for L{twcgi.CGIScript}.
    """

    def test_pathInfo(self):
        """
        L{twcgi.CGIScript.render} sets the process environment I{PATH_INFO}
        from the request path.
        """
        class FakeReactor:
            """
            A fake reactor recording the environment passed to spawnProcess.
            """
            def spawnProcess(self, process, filename, args, env, wdir):
                """
                Store the C{env} L{dict} to an instance attribute.

                @param process: Ignored
                @param filename: Ignored
                @param args: Ignored
                @param env: The environment L{dict} which will be stored
                @param wdir: Ignored
                """
                self.process_env = env

        _reactor = FakeReactor()
        resource = twcgi.CGIScript(self.mktemp(), reactor=_reactor)
        request = DummyRequest(['a', 'b'])
        _render(resource, request)
        self.assertEqual(_reactor.process_env["PATH_INFO"],
                         "/a/b")
class CGIDirectoryTests(unittest.TestCase):
    """
    Tests for L{twcgi.CGIDirectory}.
    """

    def test_render(self):
        """
        L{twcgi.CGIDirectory.render} sets the HTTP response code to I{NOT
        FOUND}.
        """
        resource = twcgi.CGIDirectory(self.mktemp())
        request = DummyRequest([''])
        d = _render(resource, request)
        def cbRendered(ignored):
            self.assertEqual(request.responseCode, NOT_FOUND)
        d.addCallback(cbRendered)
        return d

    def test_notFoundChild(self):
        """
        L{twcgi.CGIDirectory.getChild} returns a resource which renders a
        response with the HTTP I{NOT FOUND} status code if the indicated
        child does not exist as an entry in the directory used to
        initialize the L{twcgi.CGIDirectory}.
        """
        path = self.mktemp()
        os.makedirs(path)
        resource = twcgi.CGIDirectory(path)
        request = DummyRequest(['foo'])
        child = resource.getChild("foo", request)
        d = _render(child, request)
        def cbRendered(ignored):
            self.assertEqual(request.responseCode, NOT_FOUND)
        d.addCallback(cbRendered)
        return d
class CGIProcessProtocolTests(unittest.TestCase):
    """
    Tests for L{twcgi.CGIProcessProtocol}.
    """

    def test_prematureEndOfHeaders(self):
        """
        If the process communicating with L{CGIProcessProtocol} ends before
        finishing writing out headers, the response has I{INTERNAL SERVER
        ERROR} as its status code.
        """
        request = DummyRequest([''])
        protocol = twcgi.CGIProcessProtocol(request)
        protocol.processEnded(failure.Failure(error.ProcessTerminated()))
        self.assertEqual(request.responseCode, INTERNAL_SERVER_ERROR)
| bsd-3-clause |
meizhoubao/three.js | utils/exporters/blender/addons/io_three/dialogs.py | 299 | 2487 | from bpy import context
# Presentation settings for each dialog type, keyed by the integer codes
# passed to _draw(): 0=error, 1=warning, 2=info, 3=question.
CONTEXT = {
    0: {
        'title': "Error Message",
        'icon': 'CANCEL'
    },
    1: {
        'title': "Warning Message",
        'icon': 'ERROR'  # I prefer this icon for warnings
    },
    2: {
        'title': "Message",
        'icon': 'NONE'
    },
    3: {
        'title': "Question",
        'icon': 'QUESTION'
    }
}
def error(message, title="", wrap=40):
    """Creates an error dialog.

    :param message: text of the message body
    :param title: text to prepend to the dialog title
        (Default value = "")
    :param wrap: line width in characters (Default value = 40)
    """
    _draw(message, title, wrap, 0)
def warning(message, title="", wrap=40):
    """Creates a warning dialog.

    :param message: text of the message body
    :param title: text to prepend to the dialog title
        (Default value = "")
    :param wrap: line width in characters (Default value = 40)
    """
    _draw(message, title, wrap, 1)
def info(message, title="", wrap=40):
    """Creates an informational dialog.

    :param message: text of the message body
    :param title: text to prepend to the dialog title
        (Default value = "")
    :param wrap: line width in characters (Default value = 40)
    """
    _draw(message, title, wrap, 2)
def question(message, title="", wrap=40):
    """Creates a question dialog.

    :param message: text of the message body
    :param title: text to prepend to the dialog title
        (Default value = "")
    :param wrap: line width in characters (Default value = 40)
    """
    _draw(message, title, wrap, 3)
# Great idea borrowed from
# http://community.cgcookie.com/t/code-snippet-easy-error-messages/203
def _draw(message, title, wrap, key):
    """Word-wrap *message* and show it in a Blender popup menu.

    :param message: body text of the dialog
    :type message: str
    :param title: extra text prepended to the dialog title (may be empty)
    :type title: str
    :param wrap: maximum line width in characters; <= 0 disables wrapping
    :type wrap: int
    :param key: dialog-type key into CONTEXT (0=error, 1=warning,
        2=info, 3=question)
    :type key: int
    """
    lines = []
    if wrap > 0:
        while len(message) > wrap:
            # Break at the last space within the wrap width; if there is
            # no space at all, hard-break at the width.
            i = message.rfind(' ', 0, wrap)
            if i == -1:
                lines += [message[:wrap]]
                message = message[wrap:]
            else:
                lines += [message[:i]]
                message = message[i+1:]
    if message:
        lines += [message]

    def draw(self, *args):
        """Popup draw callback: emit one label per wrapped line."""
        for line in lines:
            self.layout.label(line)

    # Bug fix: an empty *title* used to yield a popup title starting with
    # a stray ": " ('%s: %s' % ('', base) followed by .strip(), which
    # cannot remove the colon).  Only prepend when a title was supplied.
    base_title = CONTEXT[key]['title']
    title = title.strip()
    full_title = "%s: %s" % (title, base_title) if title else base_title
    context.window_manager.popup_menu(
        draw, title=full_title, icon=CONTEXT[key]['icon'])
| mit |
nalourie/django-macros | build/lib/macros/templatetags/repeatedblocks.py | 4 | 1933 | # the following file was written/built by Nicholas Lourie,
# while working for kozbox, llc. http://kozbox.com
""" repeatedblocks.py, part of django-macros, allows for easy
and explicit repetition of block tags in django templates.
"""
from django import template
from django.template.loader_tags import BlockNode, do_block
from django.conf import settings
register = template.Library()
def set_repeated_blocks(parser):
    """Ensure *parser* carries a ``_repeated_blocks`` dict attribute.

    The dict maps block names to their node objects and is created lazily
    the first time it is needed; an existing mapping is left untouched.
    """
    if not hasattr(parser, '_repeated_blocks'):
        parser._repeated_blocks = {}
@register.tag
def repeated_block(parser, token):
    """Define a template block that can later be re-rendered via
    ``{% repeat %}``.

    Behaves exactly like ``{% block %}`` at the definition site, but also
    records the resulting node on the parser for later reuse.
    """
    try:
        tag_name, block_name = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(
            '{0} tag takes only one argument'.format(
                token.contents.split()[0]))
    # Lazily create the parser-side registry of repeatable blocks.
    set_repeated_blocks(parser)
    # Delegate node construction to Django's own block-tag compiler, then
    # remember the node under its name so {% repeat %} can find it.
    node = do_block(parser, token)
    parser._repeated_blocks[block_name] = node
    # Hand back an ordinary block node so rendering is unchanged.
    return node
@register.tag
def repeat(parser, token):
    """Re-render a block previously declared with ``{% repeated_block %}``.

    Raises ``TemplateSyntaxError`` when the argument count is wrong or when
    no matching ``repeated_block`` appeared earlier in the template.
    """
    try:
        tag_name, block_name = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError(
            '{0} tag takes only one argument'.format(
                token.contents.split()[0]))
    # Look up the node stored by {% repeated_block %}; a missing registry
    # or an unknown name both mean the block was never declared.
    blocks = getattr(parser, '_repeated_blocks', None)
    if blocks is None or block_name not in blocks:
        raise template.TemplateSyntaxError(
            "No repeated block {0} tag was found before the {1} tag".format(
                block_name, tag_name))
    return blocks[block_name]
| mit |
HerroYou/android_kernel_samsung_msm7x27 | tools/perf/scripts/python/sctop.py | 895 | 1936 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import thread
import time
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf trace -s syscall-counts.py [comm] [interval]\n";

# Command-line handling: [comm] limits the display to one command;
# [interval] sets the refresh period in seconds.
for_comm = None
default_interval = 3
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        # A single argument is the interval when numeric, otherwise a
        # command name.
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval
def trace_begin():
    # Called once by perf when tracing starts: run the display refresher on
    # a background thread so events keep being counted while it redraws.
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Handler invoked by perf for every syscall-entry tracepoint.
    if for_comm is not None:
        # Skip events from commands we were not asked to watch.
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First event for this syscall id: initialize the counter.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Runs forever on the thread started by trace_begin(): clears the
    # terminal and redraws the per-syscall counts every `interval` seconds.
    # Python 2 only (print statements, dict.iteritems, tuple-arg lambda).
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),
        # Sort by count (descending via reverse), then by syscall id.
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40d %10d\n" % (id, val),
            except TypeError:
                pass
        # Counts are per-interval, not cumulative.
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
alexandrucoman/vbox-nova-driver | nova/api/openstack/compute/contrib/flavorextradata.py | 79 | 2440 | # Copyright 2012 OpenStack Foundation
# Copyright 2011 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Flavor extra data extension
OpenStack API version 1.1 lists "name", "ram", "disk", "vcpus" as flavor
attributes. This extension adds to that list:
- OS-FLV-EXT-DATA:ephemeral
"""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
authorize = extensions.soft_extension_authorizer('compute', 'flavorextradata')
class FlavorextradataController(wsgi.Controller):
    """Controller extension adding ``OS-FLV-EXT-DATA:ephemeral`` to flavor
    API representations."""

    def _extend_flavors(self, req, flavors):
        # Copy the ephemeral disk size from the cached DB flavor into the
        # response dict under the extension's namespaced key.
        for flavor in flavors:
            db_flavor = req.get_db_flavor(flavor['id'])
            key = "%s:ephemeral" % Flavorextradata.alias
            flavor[key] = db_flavor['ephemeral_gb']

    def _show(self, req, resp_obj):
        # Decorate a single-flavor response, but only for authorized callers.
        if not authorize(req.environ['nova.context']):
            return
        if 'flavor' in resp_obj.obj:
            self._extend_flavors(req, [resp_obj.obj['flavor']])

    @wsgi.extends
    def show(self, req, resp_obj, id):
        return self._show(req, resp_obj)

    @wsgi.extends(action='create')
    def create(self, req, resp_obj, body):
        return self._show(req, resp_obj)

    @wsgi.extends
    def detail(self, req, resp_obj):
        # Decorate every flavor in a list response.
        if not authorize(req.environ['nova.context']):
            return
        self._extend_flavors(req, list(resp_obj.obj['flavors']))
class Flavorextradata(extensions.ExtensionDescriptor):
    """Provide additional data for flavors."""

    name = "FlavorExtraData"
    alias = "OS-FLV-EXT-DATA"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "flavor_extra_data/api/v1.1")
    updated = "2011-09-14T00:00:00Z"

    def get_controller_extensions(self):
        # Hook the controller extension onto the 'flavors' resource.
        controller = FlavorextradataController()
        extension = extensions.ControllerExtension(self, 'flavors', controller)
        return [extension]
| apache-2.0 |
jayhetee/mpld3 | mpld3/mpld3renderer.py | 16 | 10262 | """
mpld3 renderer
==============
This is the renderer class which implements the mplexporter framework for mpld3
"""
__all__ = ["MPLD3Renderer"]
import random
import json
import jinja2
import itertools
import numpy as np
from .mplexporter.utils import color_to_hex
from .mplexporter.exporter import Exporter
from .mplexporter.renderers import Renderer
from .utils import get_id
from .plugins import get_plugins
class MPLD3Renderer(Renderer):
    """Renderer class for mpld3

    This renderer class plugs into the ``mplexporter`` package in order to
    convert matplotlib figures into a JSON-serializable dictionary
    representation which can be read by mpld3.js.
    """
    def __init__(self):
        # JSON dicts of the figure/axes currently being exported.
        self.figure_json = None
        self.axes_json = None
        # (figure, json, extra_css, extra_js) tuples, one per closed figure.
        self.finished_figures = []

    @staticmethod
    def datalabel(i):
        # Key under which dataset number ``i`` is stored in the figure JSON.
        return "data{0:02d}".format(i)

    def add_data(self, data, key="data"):
        """Add a dataset to the current figure

        If the dataset matches any already added data, we use that instead.

        Parameters
        ----------
        data : array_like
            a shape [N,2] array of data
        key : string (optional)
            the key to use for the data

        Returns
        -------
        datadict : dictionary
            datadict has the keys "data", "xindex", "yindex", which will
            be passed to the mpld3 JSON object.
        """
        # Check if any column of the data exists elsewhere
        # If so, we'll use that dataset rather than duplicating it.
        data = np.asarray(data)
        # NOTE(review): with ``and``, only arrays failing *both* checks are
        # rejected (e.g. a 2-D [N, 3] array passes).  Possibly intended to
        # be ``or`` -- confirm before changing.
        if data.ndim != 2 and data.shape[1] != 2:
            raise ValueError("Data is expected to be of size [N, 2]")

        for (i, d) in enumerate(self.datasets):
            if data.shape[0] != d.shape[0]:
                continue
            # matches[j, k] is True when column j of ``data`` equals
            # column k of the stored dataset ``d``.
            matches = np.array([np.all(col == d.T, axis=1) for col in data.T])
            if not np.any(matches):
                continue

            # If we get here, we've found a dataset with a matching column
            # we'll update this data with additional columns if necessary
            new_data = list(self.datasets[i].T)
            indices = []
            for j in range(data.shape[1]):
                whr = np.where(matches[j])[0]
                if len(whr):
                    # Reuse the existing matching column.
                    indices.append(whr[0])
                else:
                    # append a new column to the data
                    new_data.append(data[:, j])
                    indices.append(len(new_data) - 1)

            self.datasets[i] = np.asarray(new_data).T
            datalabel = self.datalabel(i + 1)
            xindex, yindex = map(int, indices)
            break
        else:
            # else here can be thought of as "if no break"
            # if we get here, then there were no matching datasets
            self.datasets.append(data)
            datalabel = self.datalabel(len(self.datasets))
            xindex = 0
            yindex = 1

        self.datalabels.append(datalabel)
        return {key: datalabel, "xindex": xindex, "yindex": yindex}

    def open_figure(self, fig, props):
        # Reset the per-figure dataset cache and start a fresh figure JSON.
        self.datasets = []
        self.datalabels = []
        self.figure_json = dict(width=props['figwidth'] * props['dpi'],
                                height=props['figheight'] * props['dpi'],
                                axes=[],
                                data={},
                                id=get_id(fig))

    def close_figure(self, fig):
        additional_css = []
        additional_js = []

        # Serialize the accumulated (deduplicated) datasets into the JSON.
        for i, dataset in enumerate(self.datasets):
            datalabel = self.datalabel(i + 1)
            self.figure_json['data'][datalabel] = np.asarray(dataset).tolist()

        # Collect plugin dicts plus any CSS/JS the plugins contribute.
        self.figure_json["plugins"] = []
        for plugin in get_plugins(fig):
            self.figure_json["plugins"].append(plugin.get_dict())
            additional_css.append(plugin.css())
            additional_js.append(plugin.javascript())
        self.finished_figures.append((fig, self.figure_json,
                                      "".join(additional_css),
                                      "".join(additional_js)))

    def open_axes(self, ax, props):
        self.axes_json = dict(bbox=props['bounds'],
                              xlim=props['xlim'],
                              ylim=props['ylim'],
                              xdomain=props['xdomain'],
                              ydomain=props['ydomain'],
                              xscale=props['xscale'],
                              yscale=props['yscale'],
                              axes=props['axes'],
                              axesbg=props['axesbg'],
                              axesbgalpha=props['axesbgalpha'],
                              zoomable=bool(props['dynamic']),
                              id=get_id(ax),
                              lines=[],
                              paths=[],
                              markers=[],
                              texts=[],
                              collections=[],
                              images=[])
        self.figure_json['axes'].append(self.axes_json)

        # Get shared axes info
        xsib = ax.get_shared_x_axes().get_siblings(ax)
        ysib = ax.get_shared_y_axes().get_siblings(ax)
        self.axes_json['sharex'] = [get_id(axi) for axi in xsib
                                    if axi is not ax]
        self.axes_json['sharey'] = [get_id(axi) for axi in ysib
                                    if axi is not ax]

    def close_axes(self, ax):
        self.axes_json = None

    # If draw_line() is not implemented, it will be delegated to draw_path
    # Should we get rid of this? There's not really any advantage here
    def draw_line(self, data, coordinates, style, label, mplobj=None):
        line = self.add_data(data)
        line['coordinates'] = coordinates
        line['id'] = get_id(mplobj)
        for key in ['color', 'linewidth', 'dasharray', 'alpha', 'zorder']:
            line[key] = style[key]

        # Some browsers do not accept dasharray="10,0"
        # This should probably be addressed in mplexporter.
        if line['dasharray'] == "10,0":
            line['dasharray'] = "none"

        self.axes_json['lines'].append(line)

    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        path = self.add_data(data)
        path['coordinates'] = coordinates
        path['pathcodes'] = pathcodes
        path['id'] = get_id(mplobj)
        if offset is not None:
            path['offset'] = list(offset)
            path['offsetcoordinates'] = offset_coordinates

        for key in ['dasharray', 'alpha', 'facecolor',
                    'edgecolor', 'edgewidth', 'zorder']:
            path[key] = style[key]

        # Some browsers do not accept dasharray="10,0"
        # This should probably be addressed in mplexporter.
        if path['dasharray'] == "10,0":
            path['dasharray'] = "none"

        self.axes_json['paths'].append(path)

    # If draw_markers is not implemented, it will be delegated to draw_path
    def draw_markers(self, data, coordinates, style, label, mplobj=None):
        markers = self.add_data(data)
        markers["coordinates"] = coordinates
        markers['id'] = get_id(mplobj, 'pts')
        for key in ['facecolor', 'edgecolor', 'edgewidth',
                    'alpha', 'zorder']:
            markers[key] = style[key]
        if style.get('markerpath'):
            vertices, codes = style['markerpath']
            markers['markerpath'] = (vertices.tolist(), codes)
        self.axes_json['markers'].append(markers)

    # If draw_path_collection is not implemented,
    # it will be delegated to draw_path
    def draw_path_collection(self, paths, path_coordinates, path_transforms,
                             offsets, offset_coordinates, offset_order,
                             styles, mplobj=None):
        if len(paths) != 0:
            styles = dict(alphas=[styles['alpha']],
                          edgecolors=[color_to_hex(ec)
                                      for ec in styles['edgecolor']],
                          facecolors=[color_to_hex(fc)
                                      for fc in styles['facecolor']],
                          edgewidths=styles['linewidth'],
                          offsetcoordinates=offset_coordinates,
                          pathcoordinates=path_coordinates,
                          zorder=styles['zorder'])

            pathsdict = self.add_data(offsets, "offsets")
            pathsdict['paths'] = [(v.tolist(), p) for (v, p) in paths]
            # Flatten each transform's first two columns row by row
            # (6 numbers per path).
            pathsdict['pathtransforms'] = [(t[0, :2].tolist()
                                            + t[1, :2].tolist()
                                            + t[2, :2].tolist())
                                           for t in path_transforms]
            pathsdict.update(styles)
            pathsdict['id'] = get_id(mplobj)
            self.axes_json['collections'].append(pathsdict)

    def draw_text(self, text, position, coordinates, style,
                  text_type=None, mplobj=None):
        # Alignment values are translated through the module-level
        # TEXT_HA_DICT / TEXT_VA_DICT mappings.
        text = dict(text=text,
                    position=tuple(position),
                    coordinates=coordinates,
                    h_anchor=TEXT_HA_DICT[style['halign']],
                    v_baseline=TEXT_VA_DICT[style['valign']],
                    rotation=-style['rotation'],
                    fontsize=style['fontsize'],
                    color=style['color'],
                    alpha=style['alpha'],
                    zorder=style['zorder'],
                    id=get_id(mplobj))
        self.axes_json['texts'].append(text)

    def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
        image = dict(data=imdata, extent=extent, coordinates=coordinates)
        image.update(style)
        image['id'] = get_id(mplobj)
        self.axes_json['images'].append(image)
# Map matplotlib vertical/horizontal alignment names to the SVG
# dominant-baseline / text-anchor values consumed by mpld3.js.
TEXT_VA_DICT = {'bottom': 'auto',
                'baseline': 'auto',
                'center': 'central',
                'top': 'hanging'}
TEXT_HA_DICT = {'left': 'start',
                'center': 'middle',
                'right': 'end'}
| bsd-3-clause |
vietch2612/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/applywatchlistlocal.py | 132 | 2288 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool import steps
class ApplyWatchListLocal(AbstractSequencedCommand):
    """Tool command that runs the watchlist against the working copy."""
    name = "apply-watchlist-local"
    help_text = "Applies the watchlist to local changes"
    argument_names = "[BUGID]"
    steps = [
        steps.ApplyWatchList,
    ]
    # Bug fix: the string previously opened with four double quotes, so the
    # rendered help text began with a stray '"'; the grammar ("The results
    # is logged if a bug is no given") is also corrected.
    long_help = """Applies the watchlist to local changes.
The result is logged if no bug is given. This may be used to try out a watchlist change."""

    def _prepare_state(self, options, args, tool):
        """Validate the optional BUGID argument and build the command state.

        Returns an empty state when no bug id is supplied; raises when more
        than one argument is given.
        """
        if len(args) > 1:
            raise Exception("Too many arguments given: %s" % (' '.join(args)))
        if not args:
            return {}
        return {
            "bug_id": args[0],
        }
| bsd-3-clause |
yvaucher/stock-logistics-tracking | __unported__/stock_tracking_add_remove_pack/wizard/add_pack.py | 4 | 2654 | # -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Julius Network Solutions SARL <contact@julius.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class stock_packaging_add(orm.TransientModel):
    """Extend the packaging wizard so closed, parent-less packs can be
    attached as children of the pack being processed."""
    _inherit = "stock.packaging.add"

    _columns = {
        'location_id': fields.many2one('stock.location', 'Location'),
        # Candidate children: only top-level packs that are already closed.
        'pack_ids': fields.many2many('stock.tracking', 'add_pack_child_rel', 'wizard_id', 'pack_id', 'Packs', domain=[('parent_id', '=', False), ('state', '=', 'close')]),
    }

    def _get_location_id(self, cr, uid, context):
        # Default the wizard's location from the pack the wizard was
        # launched on (context['active_id']).
        if context==None:
            context={}
        location_id = False
        active_id = context.get('active_id')
        if active_id:
            tracking_obj = self.pool.get('stock.tracking')
            tracking = tracking_obj.browse(cr, uid, active_id, context=context)
            location_id = tracking.location_id and tracking.location_id.id or False
        return location_id

    _defaults = {
        'location_id': lambda self, cr, uid, context: self._get_location_id(cr, uid, context),
    }

    def add_object(self, cr, uid, ids, context=None):
        # After the standard behaviour, attach the selected child packs
        # whenever the chosen packaging type is a 'pack'.
        if context is None:
            context = {}
        tracking_obj = self.pool.get('stock.tracking')
        res = super(stock_packaging_add, self).add_object(cr, uid, ids, context=context)
        for current in self.browse(cr, uid, ids, context=context):
            code_type = current.type_id.code
            pack_id = current.pack_id.id
            child_ids = [x.id for x in current.pack_ids]
            if code_type == 'pack':
                tracking_obj._add_pack(cr, uid, pack_id, child_ids, context=context)
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
sparklingpandas/sparklingpandas | sparklingpandas/pstatcounter.py | 4 | 5444 | """
This module provides statistics for L{PRDD}s.
Look at the stats() method on PRDD for more info.
"""
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparklingpandas.utils import add_pyspark_path
add_pyspark_path()
from pyspark.statcounter import StatCounter
import scipy.stats as scistats
import numpy as np
class PStatCounter(object):
    """
    A wrapper around StatCounter which collects stats for multiple columns
    """

    def __init__(self, dataframes, columns):
        """
        Creates a stats counter for the provided DataFrames
        computing the stats for all of the columns in columns.

        Parameters
        ----------
        dataframes: list of dataframes, containing the values to compute stats
            on.
        columns: list of strs, list of columns to compute the stats on.
        """
        # ``basestring`` check: this module targets Python 2.
        assert (not isinstance(columns, basestring)), "columns should be a " \
                                                      "list of strs, " \
                                                      "not a str!"
        assert isinstance(columns, list), "columns should be a list!"

        self._columns = columns
        # One pyspark StatCounter per tracked column.
        self._counters = dict((column, StatCounter()) for column in columns)

        for df in dataframes:
            self.merge(df)

    def merge(self, frame):
        """
        Add another DataFrame to the PStatCounter.
        """
        for column, values in frame.iteritems():
            # Temporary hack, fix later: columns that were not requested at
            # construction time are silently skipped.
            counter = self._counters.get(column)
            for value in values:
                if counter is not None:
                    counter.merge(value)

    def merge_pstats(self, other):
        """
        Merge all of the stats counters of the other PStatCounter with our
        counters.
        """
        if not isinstance(other, PStatCounter):
            raise Exception("Can only merge PStatcounters!")

        for column, counter in self._counters.items():
            other_counter = other._counters.get(column)
            self._counters[column] = counter.mergeStats(other_counter)

        return self

    def __str__(self):
        formatted_str = ""
        for column, counter in self._counters.items():
            formatted_str += "(field: %s, counters: %s)" % (column, counter)
        return formatted_str

    def __repr__(self):
        return self.__str__()
class ColumnStatCounters(object):
    """
    A wrapper around StatCounter which collects stats for multiple columns
    """

    def __init__(self, dataframes=None, columns=None):
        """
        Creates a stats counter for the provided data frames
        computing the stats for all of the columns in columns.

        Parameters
        ----------
        dataframes: list of dataframes, containing the values to compute
            stats on.
        columns: list of strs, list of columns to compute the stats on.
        """
        # NOTE(review): despite the ``None`` defaults, both arguments are
        # iterated below, so callers must pass real sequences -- confirm.
        self._column_stats = dict((column_name, StatCounter()) for
                                  column_name in columns)

        for single_df in dataframes:
            self.merge(single_df)

    def merge(self, frame):
        """
        Add another DataFrame to the accumulated stats for each column.

        Parameters
        ----------
        frame: pandas DataFrame we will update our stats counter with.
        """
        for column_name, _ in self._column_stats.items():
            data_arr = frame[[column_name]].values
            # scipy.stats.describe returns (nobs, (min, max), mean,
            # variance, skewness, kurtosis); only the first three fields
            # are used here.
            count, min_max_tup, mean, _, _, _ = \
                scistats.describe(data_arr)
            stats_counter = StatCounter()
            stats_counter.n = count
            stats_counter.mu = mean
            # m2 is the sum of squared deviations from the mean, the
            # running quantity StatCounter maintains internally.
            stats_counter.m2 = np.sum((data_arr - mean) ** 2)
            stats_counter.minValue, stats_counter.maxValue = min_max_tup
            self._column_stats[column_name] = self._column_stats[
                column_name].mergeStats(stats_counter)
        return self

    def merge_stats(self, other_col_counters):
        """
        Merge statistics from a different column stats counter in to this one.

        Parameters
        ----------
        other_col_counters: other ColumnStatCounters to merge in to this one.
        """
        for column_name, _ in self._column_stats.items():
            self._column_stats[column_name] = self._column_stats[column_name] \
                .mergeStats(other_col_counters._column_stats[column_name])
        return self

    def __str__(self):
        formatted_str = ""
        for column, counter in self._column_stats.items():
            formatted_str += "(field: %s, counters: %s)" % (column, counter)
        return formatted_str

    def __repr__(self):
        return self.__str__()
| apache-2.0 |
chafique-delli/OpenUpgrade | addons/base_import_module/controllers/main.py | 20 | 1447 | # -*- coding: utf-8 -*-
import functools
import openerp
from openerp.http import Controller, route, request, Response
def webservice(f):
    """Decorator converting any uncaught exception from the wrapped handler
    into an HTTP 500 response whose body is the exception message."""
    @functools.wraps(f)
    def wrap(*args, **kw):
        try:
            return f(*args, **kw)
        # Python 2 except syntax; deliberately broad so every failure is
        # reported to the remote caller as plain text rather than a crash.
        except Exception, e:
            return Response(response=str(e), status=500)
    return wrap
class ImportModule(Controller):
    """HTTP endpoints used by the external module-import client."""

    def check_user(self, uid=None):
        # Only members of 'base.group_erp_manager' may upload modules.
        if uid is None:
            uid = request.uid
        is_admin = request.registry['res.users'].has_group(request.cr, uid, 'base.group_erp_manager')
        if not is_admin:
            raise openerp.exceptions.AccessError("Only administrators can upload a module")

    @route('/base_import_module/login', type='http', auth='none', methods=['POST'])
    @webservice
    def login(self, login, password, db=None):
        # Authenticate against the current database and verify admin rights.
        if db and db != request.db:
            raise Exception("Could not select database '%s'" % db)
        uid = request.session.authenticate(request.db, login, password)
        if not uid:
            return Response(response="Wrong login/password", status=401)
        self.check_user(uid)
        return "ok"

    @route('/base_import_module/upload', type='http', auth='user', methods=['POST'])
    @webservice
    def upload(self, mod_file=None, **kw):
        # Install the uploaded zip as a module; returns the import summary.
        self.check_user()
        return request.registry['ir.module.module'].import_zipfile(request.cr, request.uid, mod_file, context=request.context)[0]
| agpl-3.0 |
DonBeo/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 11 | 23587 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_loss_grad_hess,
_multinomial_loss_grad_hess
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
    """Check that the model is able to fit the classification data"""
    n_samples = len(y)
    classes = np.unique(y)
    n_classes = classes.shape[0]

    predicted = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, classes)

    assert_equal(predicted.shape, (n_samples,))
    assert_array_equal(predicted, y)

    probabilities = clf.predict_proba(X)
    assert_equal(probabilities.shape, (n_samples, n_classes))
    # Per-sample class probabilities must sum to one.
    assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
    # NOTE: comparing argmax directly to y assumes the labels are exactly
    # 0..n_classes-1 (true for Y1/Y2 used by the callers).
    assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
    # Simple sanity check on a 2 classes dataset
    # Make sure it predicts the correct result on simple datasets.
    check_predictions(LogisticRegression(random_state=0), X, Y1)
    check_predictions(LogisticRegression(random_state=0), X_sp, Y1)

    # Same checks with a larger C and without an intercept.
    check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
    check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)

    check_predictions(LogisticRegression(fit_intercept=False,
                                         random_state=0), X, Y1)
    check_predictions(LogisticRegression(fit_intercept=False,
                                         random_state=0), X_sp, Y1)
def test_error():
    # Test for appropriate exception on errors: C must be positive.
    assert_raises(ValueError, LogisticRegression(C=-1).fit, X, Y1)
def test_predict_3_classes():
    # Sanity check on a 3-class dataset, dense and sparse input.
    check_predictions(LogisticRegression(C=10), X, Y2)
    check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
    # Test logistic regression with the iris dataset
    n_samples, n_features = iris.data.shape

    target = iris.target_names[iris.target]

    # Test that both multinomial and OvR solvers handle
    # multiclass data correctly and give good accuracy
    # score (>0.95) for the training data.
    for clf in [LogisticRegression(C=len(iris.data)),
                LogisticRegression(C=len(iris.data), solver='lbfgs',
                                   multi_class='multinomial'),
                LogisticRegression(C=len(iris.data), solver='newton-cg',
                                   multi_class='multinomial')]:
        clf.fit(iris.data, target)
        assert_array_equal(np.unique(target), clf.classes_)

        pred = clf.predict(iris.data)
        assert_greater(np.mean(pred == target), .95)

        probabilities = clf.predict_proba(iris.data)
        # Probabilities must be normalized per sample.
        assert_array_almost_equal(probabilities.sum(axis=1),
                                  np.ones(n_samples))

        pred = iris.target_names[probabilities.argmax(axis=1)]
        assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
    """Multinomial solvers must also reject a negative C at fit time."""
    sample = [[0, 1], [1, 0]]
    labels = [0, 1]
    for solver in ('lbfgs', 'newton-cg'):
        lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
        assert_raises(ValueError, lr.fit, sample, labels)
def test_multinomial_binary():
    """Test multinomial LR on a binary problem.

    Checks both solvers, with and without an intercept, on a binarized
    version of the iris dataset.
    """
    # Collapse iris into "setosa" vs. everything else.
    target = (iris.target > 0).astype(np.intp)
    target = np.array(["setosa", "not-setosa"])[target]
    for solver in ['lbfgs', 'newton-cg']:
        clf = LogisticRegression(solver=solver, multi_class='multinomial')
        clf.fit(iris.data, target)
        # A binary multinomial model still stores a single coefficient row.
        assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
        assert_equal(clf.intercept_.shape, (1,))
        assert_array_equal(clf.predict(iris.data), target)
        mlr = LogisticRegression(solver=solver, multi_class='multinomial',
                                 fit_intercept=False)
        mlr.fit(iris.data, target)
        # Bug fix: this previously evaluated ``clf`` again, so the
        # intercept-free model ``mlr`` fitted above was never checked.
        pred = mlr.classes_[np.argmax(mlr.predict_log_proba(iris.data),
                                      axis=1)]
        assert_greater(np.mean(pred == target), .9)
def test_sparsify():
    # Test sparsify and densify members.
    # Naming convention below: pred_<coef repr>_<data repr>, where the first
    # letter is the coef_ representation (d=dense, s=sparse) and the second
    # is the input data representation.
    n_samples, n_features = iris.data.shape
    target = iris.target_names[iris.target]
    clf = LogisticRegression(random_state=0).fit(iris.data, target)
    pred_d_d = clf.decision_function(iris.data)
    clf.sparsify()
    assert_true(sp.issparse(clf.coef_))
    pred_s_d = clf.decision_function(iris.data)
    sp_data = sp.coo_matrix(iris.data)
    pred_s_s = clf.decision_function(sp_data)
    clf.densify()
    pred_d_s = clf.decision_function(sp_data)
    # All four coef/data representation combinations must agree.
    assert_array_almost_equal(pred_d_d, pred_s_d)
    assert_array_almost_equal(pred_d_d, pred_s_s)
    assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
    # Test that an exception is raised on inconsistent input
    rng = np.random.RandomState(0)
    X_ = rng.random_sample((5, 10))
    y_ = np.ones(X_.shape[0])
    y_[0] = 0
    clf = LogisticRegression(random_state=0)
    # Wrong dimensions for training data
    y_wrong = y_[:-1]
    # NOTE(review): this deliberately (?) uses the module-level ``X``, not
    # the local ``X_``; the sample counts still mismatch so ValueError is
    # raised either way, but ``X_`` may have been intended — confirm.
    assert_raises(ValueError, clf.fit, X, y_wrong)
    # Wrong dimensions for test data
    assert_raises(ValueError, clf.fit(X_, y_).predict,
                  rng.random_sample((3, 12)))
def test_write_parameters():
    """coef_ and intercept_ must be plain writable arrays after fitting."""
    clf = LogisticRegression(random_state=0)
    clf.fit(X, Y1)
    # Zero out the learned parameters in place ...
    for params in (clf.coef_, clf.intercept_):
        params[:] = 0
    # ... after which the decision function collapses to zero everywhere.
    assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
    # Test proper NaN handling.
    # Regression test for Issue #252: fit used to go into an infinite loop.
    # The @raises decorator asserts that fitting on NaN data raises
    # ValueError instead of hanging.
    Xnan = np.array(X, dtype=np.float64)
    Xnan[0, 1] = np.nan
    LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
    # Test that the path algorithm is consistent
    # i.e. logistic_regression_path at each C agrees with a fresh
    # LogisticRegression fit at that same C.
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = np.logspace(0, 4, 10)
    f = ignore_warnings
    # can't test with fit_intercept=True since LIBLINEAR
    # penalizes the intercept
    for method in ('lbfgs', 'newton-cg', 'liblinear'):
        coefs, Cs = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
        for i, C in enumerate(Cs):
            lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
            lr.fit(X, y)
            lr_coef = lr.coef_.ravel()
            assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
    # test for fit_intercept=True
    for method in ('lbfgs', 'newton-cg', 'liblinear'):
        Cs = [1e3]
        coefs, Cs = f(logistic_regression_path)(
            X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
        # Large intercept_scaling so liblinear's intercept penalty is
        # negligible and the solutions stay comparable.
        lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
                                intercept_scaling=10000)
        lr.fit(X, y)
        lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
        assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_random_state():
    """Two liblinear fits with the same seed give identical coefficients."""
    X, y = make_classification(n_samples=20)
    coefs = []
    for _ in range(2):
        clf = LogisticRegression(random_state=0)
        clf.fit(X, y)
        coefs.append(clf.coef_)
    assert_array_almost_equal(coefs[0], coefs[1])
def test_logistic_loss_and_grad():
    # Check the analytic gradient of _logistic_loss_and_grad against a
    # finite-difference approximation, for dense and sparse inputs.
    X_ref, y = make_classification(n_samples=20)
    n_features = X_ref.shape[1]
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = np.zeros(n_features)
        # First check that our derivation of the grad is correct
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad, approx_grad, decimal=2)
        # Second check that our intercept implementation is good
        # (w of size n_features + 1 means the last entry is the intercept).
        w = np.zeros(n_features + 1)
        loss_interp, grad_interp = _logistic_loss_and_grad(
            w, X, y, alpha=1.
        )
        assert_array_almost_equal(loss, loss_interp)
        approx_grad = optimize.approx_fprime(
            w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
        )
        assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_loss_grad_hess():
    # Check that _logistic_loss_grad_hess agrees with _logistic_loss_and_grad
    # and that its Hessian-vector product matches a numerical estimate.
    rng = np.random.RandomState(0)
    n_samples, n_features = 50, 5
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    X_sp = X_ref.copy()
    X_sp[X_sp < .1] = 0
    X_sp = sp.csr_matrix(X_sp)
    for X in (X_ref, X_sp):
        w = .1 * np.ones(n_features)
        # First check that _logistic_loss_grad_hess is consistent
        # with _logistic_loss_and_grad
        loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
        loss_2, grad_2, hess = _logistic_loss_grad_hess(w, X, y, alpha=1.)
        assert_array_almost_equal(grad, grad_2)
        # Now check our hessian along the second direction of the grad
        vector = np.zeros_like(grad)
        vector[1] = 1
        hess_col = hess(vector)
        # Computation of the Hessian is particularly fragile to numerical
        # errors when doing simple finite differences. Here we compute the
        # grad along a path in the direction of the vector and then use a
        # least-square regression to estimate the slope
        e = 1e-3
        d_x = np.linspace(-e, e, 30)
        d_grad = np.array([
            _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
            for t in d_x
        ])
        d_grad -= d_grad.mean(axis=0)
        approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
        assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
    # Second check that our intercept implementation is good
    # (extra trailing coefficient plays the role of the intercept).
    w = np.zeros(n_features + 1)
    loss_interp, grad_interp = _logistic_loss_and_grad(
        w, X, y, alpha=1.
    )
    loss_interp_2, grad_interp_2, hess = \
        _logistic_loss_grad_hess(w, X, y, alpha=1.)
    assert_array_almost_equal(loss_interp, loss_interp_2)
    assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
    # test for LogisticRegressionCV object
    n_samples, n_features = 50, 5
    rng = np.random.RandomState(0)
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    # With a single candidate C, the CV estimator must match a plain
    # LogisticRegression fitted at that C.
    lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
                                 solver='liblinear')
    lr_cv.fit(X_ref, y)
    lr = LogisticRegression(C=1., fit_intercept=False)
    lr.fit(X_ref, y)
    assert_array_almost_equal(lr.coef_, lr_cv.coef_)
    assert_array_equal(lr_cv.coef_.shape, (1, n_features))
    assert_array_equal(lr_cv.classes_, [-1, 1])
    assert_equal(len(lr_cv.classes_), 2)
    # Shapes: (n_classes_reported, n_folds, n_Cs, n_features) for the paths
    # and (n_classes_reported, n_folds, n_Cs) for the scores.
    coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
    assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
    assert_array_equal(lr_cv.Cs_.shape, (1, ))
    scores = np.asarray(list(lr_cv.scores_.values()))
    assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
    """Dense and sparse representations of the same data give the same CV fit."""
    X, y = make_classification(n_samples=50, n_features=5,
                               random_state=0)
    X[X < 1.0] = 0.0  # zero out most entries so the matrix is genuinely sparse
    clf_dense = LogisticRegressionCV(fit_intercept=True)
    clf_dense.fit(X, y)
    clf_sparse = LogisticRegressionCV(fit_intercept=True)
    clf_sparse.fit(sp.csr_matrix(X), y)
    assert_array_almost_equal(clf_sparse.coef_, clf_dense.coef_)
    assert_array_almost_equal(clf_sparse.intercept_, clf_dense.intercept_)
    assert_equal(clf_sparse.C_, clf_dense.C_)
def test_intercept_logistic_helper():
    # Fitting an intercept must be equivalent to appending a constant
    # all-ones feature column, up to the penalty on that extra coefficient.
    n_samples, n_features = 10, 5
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=0)
    # Fit intercept case.
    alpha = 1.
    w = np.ones(n_features + 1)
    loss_interp, grad_interp, hess_interp = _logistic_loss_grad_hess(
        w, X, y, alpha)
    # Do not fit intercept. This can be considered equivalent to adding
    # a feature vector of ones, i.e column of one vectors.
    X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
    loss, grad, hess = _logistic_loss_grad_hess(w, X_, y, alpha)
    # In the fit_intercept=False case, the feature vector of ones is
    # penalized. This should be taken care of.
    assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
    # Check gradient.
    assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
    assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
    # Check the Hessian-vector products along a random direction; the
    # penalty shows up only in the last (intercept) component.
    rng = np.random.RandomState(0)
    grad = rng.rand(n_features + 1)
    hess_interp = hess_interp(grad)
    hess = hess(grad)
    assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
    assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
    # Test that OvR and multinomial are correct using the iris dataset.
    train, target = iris.data, iris.target
    n_samples, n_features = train.shape
    # Use pre-defined fold as folds generated for different y
    cv = StratifiedKFold(target, 3)
    clf = LogisticRegressionCV(cv=cv)
    clf.fit(train, target)
    # Merging class 0 into class 1 turns the problem binary; the OvR model
    # for class 2 must be unchanged by that relabeling.
    clf1 = LogisticRegressionCV(cv=cv)
    target_copy = target.copy()
    target_copy[target_copy == 0] = 1
    clf1.fit(train, target_copy)
    assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
    assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
    assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
    # Test the shape of various attributes.
    assert_equal(clf.coef_.shape, (3, n_features))
    assert_array_equal(clf.classes_, [0, 1, 2])
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
    assert_equal(clf.Cs_.shape, (10, ))
    scores = np.asarray(list(clf.scores_.values()))
    assert_equal(scores.shape, (3, 3, 10))
    # Test that for the iris data multinomial gives a better accuracy than OvR
    for solver in ['lbfgs', 'newton-cg']:
        clf_multi = LogisticRegressionCV(
            solver=solver, multi_class='multinomial', max_iter=15
        )
        clf_multi.fit(train, target)
        multi_score = clf_multi.score(train, target)
        ovr_score = clf.score(train, target)
        assert_greater(multi_score, ovr_score)
        # Test attributes of LogisticRegressionCV
        assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
        assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
                                                      n_features + 1))
        assert_equal(clf_multi.Cs_.shape, (10, ))
        scores = np.asarray(list(clf_multi.scores_.values()))
        assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
    """All three solvers agree on a binary classification problem."""
    X, y = make_classification(n_features=10, n_informative=5, random_state=0)
    coefs = {}
    # 'liblinear' last and via the default solver, as in the original order.
    for solver, kwargs in (('newton-cg', dict(solver='newton-cg')),
                           ('lbfgs', dict(solver='lbfgs')),
                           ('liblinear', dict())):
        clf = LogisticRegression(fit_intercept=False, **kwargs)
        clf.fit(X, y)
        coefs[solver] = clf.coef_
    assert_array_almost_equal(coefs['newton-cg'], coefs['liblinear'], decimal=3)
    assert_array_almost_equal(coefs['liblinear'], coefs['lbfgs'], decimal=3)
    assert_array_almost_equal(coefs['newton-cg'], coefs['lbfgs'], decimal=3)
def test_logistic_regression_solvers_multiclass():
    """All three solvers agree on a three-class problem (OvR strategy)."""
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    coefs = {}
    for solver, kwargs in (('newton-cg', dict(solver='newton-cg')),
                           ('lbfgs', dict(solver='lbfgs')),
                           ('liblinear', dict())):
        clf = LogisticRegression(fit_intercept=False, **kwargs)
        clf.fit(X, y)
        coefs[solver] = clf.coef_
    assert_array_almost_equal(coefs['newton-cg'], coefs['liblinear'], decimal=4)
    assert_array_almost_equal(coefs['liblinear'], coefs['lbfgs'], decimal=4)
    assert_array_almost_equal(coefs['newton-cg'], coefs['lbfgs'], decimal=4)
def test_logistic_regressioncv_class_weights():
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               n_classes=3, random_state=0)
    # Test the liblinear fails when class_weight of type dict is
    # provided, when it is multiclass. However it can handle
    # binary problems.
    clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
                                   solver='liblinear')
    assert_raises(ValueError, clf_lib.fit, X, y)
    # Merge class 2 into class 1 to make the problem binary; the same
    # estimator must now fit without error.
    y_ = y.copy()
    y_[y == 2] = 1
    clf_lib.fit(X, y_)
    assert_array_equal(clf_lib.classes_, [0, 1])
    # Test for class_weight=auto
    # (lbfgs and liblinear must agree on the auto-weighted binary problem).
    X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
                               random_state=0)
    clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
                                   class_weight='auto')
    clf_lbf.fit(X, y)
    clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
                                   class_weight='auto')
    clf_lib.fit(X, y)
    assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
    """liblinear emits ConvergenceWarning when max_iter is exhausted."""
    X, y = make_classification(n_samples=20, n_features=20)
    clf = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, clf.fit, X, y)
    # n_iter_ reflects that the full (tiny) iteration budget was used.
    assert_equal(clf.n_iter_, 2)
def test_logistic_regression_multinomial():
    # Tests for the multinomial option in logistic regression
    # Some basic attributes of Logistic Regression
    n_samples, n_features, n_classes = 50, 20, 3
    X, y = make_classification(n_samples=n_samples,
                               n_features=n_features,
                               n_informative=10,
                               n_classes=n_classes, random_state=0)
    # lbfgs, with and without intercept: coef_ has one row per class.
    clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
    clf_int.fit(X, y)
    assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
    clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
                                  fit_intercept=False)
    clf_wint.fit(X, y)
    assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
    # Similar tests for newton-cg solver option
    clf_ncg_int = LogisticRegression(solver='newton-cg',
                                     multi_class='multinomial')
    clf_ncg_int.fit(X, y)
    assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
    clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
                                      multi_class='multinomial')
    clf_ncg_wint.fit(X, y)
    assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
    # Compare solutions between lbfgs and newton-cg
    assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
    assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
    assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path give almost the same results. However since in this
    # case we take the average of the coefs after fitting across all the
    # folds, it need not be exactly the same.
    for solver in ['lbfgs', 'newton-cg']:
        clf_path = LogisticRegressionCV(solver=solver,
                                        multi_class='multinomial', Cs=[1.])
        clf_path.fit(X, y)
        assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
        assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_loss_grad_hess():
    # Check the Hessian-vector product of the multinomial loss against a
    # least-squares finite-difference estimate of the Hessian column.
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 100, 5, 3
    X = rng.randn(n_samples, n_features)
    w = rng.rand(n_classes, n_features)
    # One-hot encode the labels induced by the random weight matrix.
    Y = np.zeros((n_samples, n_classes))
    ind = np.argmax(np.dot(X, w.T), axis=1)
    Y[range(0, n_samples), ind] = 1
    w = w.ravel()
    sample_weights = np.ones(X.shape[0])
    _, grad, hessp = _multinomial_loss_grad_hess(w, X, Y, alpha=1.,
                                                 sample_weight=sample_weights)
    # extract first column of hessian matrix
    vec = np.zeros(n_features * n_classes)
    vec[0] = 1
    hess_col = hessp(vec)
    # Estimate hessian using least squares as done in
    # test_logistic_loss_grad_hess
    e = 1e-3
    d_x = np.linspace(-e, e, 30)
    d_grad = np.array([
        _multinomial_loss_grad_hess(w + t * vec, X, Y, alpha=1.,
                                    sample_weight=sample_weights)[1]
        for t in d_x
    ])
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
    """Ties in the decision function must resolve to the negative class.

    liblinear itself predicts the positive class when the decision value is
    exactly zero; scikit-learn deliberately does not follow suit.
    See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
    and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
    """
    X, y = make_classification(n_samples=5, n_features=5)
    clf = LogisticRegression(fit_intercept=False)
    clf.fit(X, y)
    # With no intercept, all-zero inputs make the decision value exactly 0.
    X = np.zeros((5, 5))
    assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
    """LogisticRegressionCV with solver='liblinear' accepts sparse input."""
    X, y = make_classification(n_samples=10, n_features=5)
    clf = LogisticRegressionCV(solver='liblinear')
    # Consistency: use the module's ``sp`` alias for scipy.sparse, like
    # every other sparse-input test in this file (was ``sparse.csr_matrix``).
    clf.fit(sp.csr_matrix(X), y)
def test_logreg_intercept_scaling():
    # Test that the right error message is thrown when intercept_scaling <= 0
    for i in [-1, 0]:
        clf = LogisticRegression(intercept_scaling=i)
        # The message must echo the offending value verbatim.
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % clf.intercept_scaling)
        assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
    """With fit_intercept=False the fitted intercept is pinned at zero."""
    clf = LogisticRegression(fit_intercept=False).fit(X, Y1)
    assert_equal(clf.intercept_, 0.)
| bsd-3-clause |
catkira/seqan | apps/snp_store/tests/run_tests.py | 13 | 7297 | #!/usr/bin/env python
"""Execute the tests for snp_store.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
    """Main entry point of the script.

    source_base / binary_base are the roots of the source tree and the
    build tree; returns a truthy value (non-zero) iff any test failed.
    """
    print 'Executing test for snp_store'
    print '========================='
    print
    ph = app_tests.TestPathHelper(
        source_base, binary_base,
        'apps/snp_store/tests') # tests dir
    # ============================================================
    # Auto-detect the binary path.
    # ============================================================
    path_to_program = app_tests.autolocateBinary(
        binary_base, 'apps/snp_store', 'snp_store')
    # ============================================================
    # Built TestConf list.
    # ============================================================
    # Build list with TestConf objects, analoguely to how the output
    # was generated in generate_outputs.sh.
    conf_list = []
    # We prepare a list of transforms to apply to the output files. This is
    # used to strip the input/output paths from the programs' output to
    # make it more canonical and host independent.
    ph.outFile('-') # To ensure that the out path is set.
    transforms = [
        app_tests.RegexpReplaceTransform("#.*snp_store.exe", "#snp_store"),
        app_tests.RegexpReplaceTransform("#[^ ]+snp_store", "#snp_store"),
        app_tests.ReplaceTransform(ph.inFile(''), ''),
        app_tests.ReplaceTransform(ph.outFile(''), ''),
        ]
    # ============================================================
    # First Section.
    # ============================================================
    # App TestConf objects to conf_list, just like this for each
    # test you want to run.
    # default (GFF input, no extra options)
    conf = app_tests.TestConf(
        program=path_to_program,
        redir_stdout=ph.outFile('snp_store_default.stdout'),
        args=[ph.inFile('human-chr22-inf2.fa'),
              ph.inFile('human-reads2.gff'),
              '-o', ph.outFile('snps_default.vcf'),
              '-id', ph.outFile('indels_default.gff'),],
        to_diff=[(ph.inFile('snp_store_default.stdout'),
                  ph.outFile('snp_store_default.stdout')),
                 (ph.inFile('snps_default.vcf'),
                  ph.outFile('snps_default.vcf'),
                  transforms),
                 (ph.inFile('indels_default.gff'),
                  ph.outFile('indels_default.gff',))])
    conf_list.append(conf)
    # test 2: realignment enabled (-re), SAM input
    conf = app_tests.TestConf(
        program=path_to_program,
        redir_stdout=ph.outFile('snp_store_realign.stdout'),
        args=[ph.inFile('human-chr22-inf2.fa'),
              ph.inFile('human-reads2.sam'),
              '-re',
              '-o', ph.outFile('snps_realign.vcf'),
              '-id', ph.outFile('indels_realign.gff')],
        to_diff=[(ph.inFile('snp_store_realign.stdout'),
                  ph.outFile('snp_store_realign.stdout')),
                 (ph.inFile('snps_realign.vcf'),
                  ph.outFile('snps_realign.vcf'),
                  transforms),
                 (ph.inFile('indels_realign.gff'),
                  ph.outFile('indels_realign.gff'))])
    conf_list.append(conf)
    # test 3: realignment, maq model, orientation-aware, mp=1
    conf = app_tests.TestConf(
        program=path_to_program,
        redir_stdout=ph.outFile('snp_store_realign_m1mp1oa.stdout'),
        args=[ph.inFile('human-chr22-inf2.fa'),
              ph.inFile('human-reads2.sam'),
              '-it', str(1), '-re', '-oa', '-mp', str(1), '-m', 'maq', '-hq',
              '-o', ph.outFile('snps_realign_m1mp1oa.vcf'),
              '-id', ph.outFile('indels_realign_m1mp1oa.gff')],
        to_diff=[(ph.inFile('snp_store_realign_m1mp1oa.stdout'),
                  ph.outFile('snp_store_realign_m1mp1oa.stdout')),
                 (ph.inFile('snps_realign_m1mp1oa.vcf'),
                  ph.outFile('snps_realign_m1mp1oa.vcf'),
                  transforms),
                 (ph.inFile('indels_realign_m1mp1oa.gff'),
                  ph.outFile('indels_realign_m1mp1oa.gff'))])
    conf_list.append(conf)
    # test 4: GFF input, it=2, realignment, orientation-aware
    conf = app_tests.TestConf(
        program=path_to_program,
        redir_stdout=ph.outFile('snp_store_realign_m0mp1oa.stdout'),
        args=[ph.inFile('human-chr22-inf2.fa'),
              ph.inFile('human-reads2.gff'),
              '-it', str(2), '-re', '-oa', '-mp', str(1), '-hq',
              '-o', ph.outFile('snps_realign_m0mp1oa.vcf'),
              '-id', ph.outFile('indels_realign_m0mp1oa.gff')],
        to_diff=[(ph.inFile('snp_store_realign_m0mp1oa.stdout'),
                  ph.outFile('snp_store_realign_m0mp1oa.stdout')),
                 (ph.inFile('snps_realign_m0mp1oa.vcf'),
                  ph.outFile('snps_realign_m0mp1oa.vcf'),
                  transforms),
                 (ph.inFile('indels_realign_m0mp1oa.gff'),
                  ph.outFile('indels_realign_m0mp1oa.gff'))])
    conf_list.append(conf)
    # test 5: indel percentage threshold 0.1, output suboptimal candidates
    conf = app_tests.TestConf(
        program=path_to_program,
        redir_stdout=ph.outFile('snp_store_realign_m0mp1oa_it1ipt01.stdout'),
        args=[ph.inFile('human-chr22-inf2.fa'),
              ph.inFile('human-reads2.sam'),
              '-it', str(1), '-ipt', str(0.1), '-osc', '-re', '-oa', '-hq',
              '-o', ph.outFile('snps_realign_m0mp1oa_it1ipt01.vcf'),
              '-id', ph.outFile('indels_realign_m0mp1oa_it1ipt01.gff')],
        to_diff=[(ph.inFile('snp_store_realign_m0mp1oa_it1ipt01.stdout'),
                  ph.outFile('snp_store_realign_m0mp1oa_it1ipt01.stdout')),
                 (ph.inFile('snps_realign_m0mp1oa_it1ipt01.vcf'),
                  ph.outFile('snps_realign_m0mp1oa_it1ipt01.vcf'),
                  transforms),
                 (ph.inFile('indels_realign_m0mp1oa_it1ipt01.gff'),
                  ph.outFile('indels_realign_m0mp1oa_it1ipt01.gff'))])
    conf_list.append(conf)
    # ============================================================
    # Execute the tests.
    # ============================================================
    failures = 0
    for conf in conf_list:
        res = app_tests.runTest(conf)
        # Output to the user.
        print ' '.join(['snp_store'] + conf.args),
        if res:
            print 'OK'
        else:
            failures += 1
            print 'FAILED'
    # Cleanup.
    ph.deleteTempDir()
    print '=============================='
    print '     total tests: %d' % len(conf_list)
    print '    failed tests: %d' % failures
    print 'successful tests: %d' % (len(conf_list) - failures)
    print '=============================='
    # Compute and return return code.
    return failures != 0
if __name__ == '__main__':
    # Delegate to the shared app_tests harness, which parses the
    # source/binary root arguments and converts the result to an exit code.
    sys.exit(app_tests.main(main))
| bsd-3-clause |
kcsry/django-form-designer | form_designer/uploads.py | 1 | 3604 | import os
from types import SimpleNamespace
from django.core.files.base import File
from django.db.models.fields.files import FieldFile
from django.forms.forms import NON_FIELD_ERRORS
from django.template.defaultfilters import filesizeformat
from django.utils.translation import gettext_lazy as _
from form_designer import settings as app_settings
from form_designer.utils import get_random_hash
def get_storage():
    """Instantiate and return the storage backend configured for form uploads."""
    return app_settings.FILE_STORAGE_CLASS()
def clean_files(form):
    """Validate the uploaded files of ``form``.

    Per-field checks: required presence, allowed file extension, and
    per-file size limit. Additionally the combined size of all uploads is
    checked against MAX_UPLOAD_TOTAL_SIZE; that violation is recorded as a
    non-field error. Errors are written into ``form._errors`` and the
    (unmodified) ``form.cleaned_data`` is returned.
    """
    total_upload_size = 0
    for field in form.file_fields:
        uploaded_file = form.cleaned_data.get(field.name, None)
        msg = None
        if uploaded_file is None:
            if field.required:
                msg = _('This field is required.')
            else:
                continue
        else:
            file_size = uploaded_file.size
            total_upload_size += file_size
            # Extension check: strip the leading dot and compare
            # case-insensitively against the configured whitelist.
            if not os.path.splitext(uploaded_file.name)[1].lstrip('.').lower() in \
                    app_settings.ALLOWED_FILE_TYPES:
                msg = _('This file type is not allowed.')
            elif file_size > app_settings.MAX_UPLOAD_SIZE:
                msg = _('Please keep file size under %(max_size)s. Current size is %(size)s.') % \
                    {'max_size': filesizeformat(app_settings.MAX_UPLOAD_SIZE),
                     'size': filesizeformat(file_size)}
        if msg:
            form._errors[field.name] = form.error_class([msg])
    if total_upload_size > app_settings.MAX_UPLOAD_TOTAL_SIZE:
        msg = _('Please keep total file size under %(max)s. Current total size is %(current)s.') % \
            {"max": filesizeformat(app_settings.MAX_UPLOAD_TOTAL_SIZE), "current": filesizeformat(total_upload_size)}
        # Append to any existing non-field errors rather than clobbering them.
        if NON_FIELD_ERRORS in form._errors:
            form._errors[NON_FIELD_ERRORS].append(msg)
        else:
            form._errors[NON_FIELD_ERRORS] = form.error_class([msg])
    return form.cleaned_data
def handle_uploaded_files(form_definition, form):
    """Persist the form's uploaded files to the configured storage.

    Files are only saved when the form definition opts in via
    ``save_uploaded_files``. Each file is stored under
    FILE_STORAGE_DIR/<form name>/ with a random hash suffix appended to the
    stem (so the stored name is not guessable), and the corresponding
    ``form.cleaned_data`` entry is replaced by a StoredUploadedFile.
    Returns the list of absolute storage paths of the saved files.
    """
    files = []
    if form_definition.save_uploaded_files and len(form.file_fields):
        storage = get_storage()
        # One shared random suffix per submission.
        secret_hash = get_random_hash(10)
        for field in form.file_fields:
            uploaded_file = form.cleaned_data.get(field.name, None)
            if uploaded_file is None:
                continue
            valid_file_name = storage.get_valid_name(uploaded_file.name)
            root, ext = os.path.splitext(valid_file_name)
            filename = storage.get_available_name(
                os.path.join(app_settings.FILE_STORAGE_DIR,
                             form_definition.name,
                             f'{root}_{secret_hash}{ext}'))
            storage.save(filename, uploaded_file)
            form.cleaned_data[field.name] = StoredUploadedFile(name=filename)
            files.append(storage.path(filename))
    return files
class StoredUploadedFile(FieldFile):
    """
    A wrapper for uploaded files that is compatible to the FieldFile class, i.e.
    you can use instances of this class in templates just like you use the value
    of FileFields (e.g. `{{ my_file.url }}`)
    """
    def __init__(self, name):
        # Bypass FieldFile.__init__: there is no model instance or real
        # field behind this file, so fake just the attribute FieldFile
        # actually needs (``field.storage``).
        File.__init__(self, None, name)
        self.field = SimpleNamespace(storage=get_storage())
        self.instance = None
    def save(self, *args, **kwargs):
        # Stored uploads are immutable snapshots of the submission.
        raise NotImplementedError('Static files are read-only') # pragma: no cover
    def delete(self, *args, **kwargs):
        raise NotImplementedError('Static files are read-only') # pragma: no cover
    def __str__(self):
        return self.name
| bsd-3-clause |
sopier/django | django/contrib/contenttypes/admin.py | 191 | 5385 | from __future__ import unicode_literals
from functools import partial
from django.contrib.admin.checks import InlineModelAdminChecks
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.forms import (
BaseGenericInlineFormSet, generic_inlineformset_factory,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.forms import ALL_FIELDS
from django.forms.models import modelform_defines_fields
class GenericInlineModelAdminChecks(InlineModelAdminChecks):
    """System checks for generic inlines.

    Generic inlines have no ForeignKey to the parent model; instead they
    rely on a GenericForeignKey, so the FK-based checks of the base class
    are replaced by checks on ``ct_field``/``ct_fk_field`` (errors
    admin.E301–E304).
    """
    def _check_exclude_of_parent_model(self, obj, parent_model):
        # There's no FK to exclude, so no exclusion checks are required.
        return []
    def _check_relation(self, obj, parent_model):
        # There's no FK, but we do need to confirm that the ct_field and ct_fk_field are valid,
        # and that they are part of a GenericForeignKey.
        gfks = [
            f for f in obj.model._meta.virtual_fields
            if isinstance(f, GenericForeignKey)
        ]
        if len(gfks) == 0:
            return [
                checks.Error(
                    "'%s.%s' has no GenericForeignKey." % (
                        obj.model._meta.app_label, obj.model._meta.object_name
                    ),
                    hint=None,
                    obj=obj.__class__,
                    id='admin.E301'
                )
            ]
        else:
            # Check that the ct_field and ct_fk_fields exist
            try:
                obj.model._meta.get_field(obj.ct_field)
            except FieldDoesNotExist:
                return [
                    checks.Error(
                        "'ct_field' references '%s', which is not a field on '%s.%s'." % (
                            obj.ct_field, obj.model._meta.app_label, obj.model._meta.object_name
                        ),
                        hint=None,
                        obj=obj.__class__,
                        id='admin.E302'
                    )
                ]
            try:
                obj.model._meta.get_field(obj.ct_fk_field)
            except FieldDoesNotExist:
                return [
                    checks.Error(
                        "'ct_fk_field' references '%s', which is not a field on '%s.%s'." % (
                            obj.ct_fk_field, obj.model._meta.app_label, obj.model._meta.object_name
                        ),
                        hint=None,
                        obj=obj.__class__,
                        id='admin.E303'
                    )
                ]
            # There's one or more GenericForeignKeys; make sure that one of them
            # uses the right ct_field and ct_fk_field.
            for gfk in gfks:
                if gfk.ct_field == obj.ct_field and gfk.fk_field == obj.ct_fk_field:
                    return []
            return [
                checks.Error(
                    "'%s.%s' has no GenericForeignKey using content type field '%s' and object ID field '%s'." % (
                        obj.model._meta.app_label, obj.model._meta.object_name, obj.ct_field, obj.ct_fk_field
                    ),
                    hint=None,
                    obj=obj.__class__,
                    id='admin.E304'
                )
            ]
class GenericInlineModelAdmin(InlineModelAdmin):
    """InlineModelAdmin variant for models related via a GenericForeignKey.

    ct_field/ct_fk_field name the content-type and object-id fields of the
    inline model's GenericForeignKey.
    """
    ct_field = "content_type"
    ct_fk_field = "object_id"
    formset = BaseGenericInlineFormSet
    checks_class = GenericInlineModelAdminChecks
    def get_formset(self, request, obj=None, **kwargs):
        # Build the formset class for this inline; mirrors
        # InlineModelAdmin.get_formset but dispatches to
        # generic_inlineformset_factory with the GFK field names.
        if 'fields' in kwargs:
            fields = kwargs.pop('fields')
        else:
            fields = flatten_fieldsets(self.get_fieldsets(request, obj))
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(self.get_readonly_fields(request, obj))
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # GenericInlineModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        exclude = exclude or None
        can_delete = self.can_delete and self.has_delete_permission(request, obj)
        defaults = {
            "ct_field": self.ct_field,
            "fk_field": self.ct_fk_field,
            "form": self.form,
            "formfield_callback": partial(self.formfield_for_dbfield, request=request),
            "formset": self.formset,
            "extra": self.get_extra(request, obj),
            "can_delete": can_delete,
            "can_order": False,
            "fields": fields,
            "min_num": self.get_min_num(request, obj),
            "max_num": self.get_max_num(request, obj),
            "exclude": exclude
        }
        defaults.update(kwargs)
        # If neither the admin nor the form pins down the fields, fall back
        # to "all fields" so the factory does not raise.
        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            defaults['fields'] = ALL_FIELDS
        return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
    """Generic inline rendered with the stacked (vertical) admin template."""
    template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
    """Generic inline rendered with the tabular (one row per object) template."""
    template = 'admin/edit_inline/tabular.html'
| bsd-3-clause |
SparklerPone/burpyhooves | burpyhooves.py | 2 | 12751 | #!/usr/bin/env python2
# This file is part of BurpyHooves.
#
# BurpyHooves is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BurpyHooves is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the# GNU General Public License
# along with BurpyHooves. If not, see <http://www.gnu.org/licenses/>.
import sys
import json
import base64
import logging
import requests
from line import Line
from database import Database
from modules import ModuleManager
from collections import defaultdict
from permissions import Permissions
from connection import IRCConnection
from hooks import HookManager, Hook
class BurpyHooves:
    def __init__(self, config_file):
        """Load configuration from ``config_file`` (JSON) and set up the bot.

        Connects the local database and creates (but does not connect) the
        IRC connection; call run() to actually go online.
        """
        self.config_file = config_file
        self.config = json.load(open(self.config_file))
        # Cached shortcuts into the config; rehash() must keep these fresh.
        self.me = self.config["me"]
        self.net = self.config["network"]
        self.module_manager = ModuleManager(self)
        self.hook_manager = HookManager(self)
        self.perms = Permissions(self)
        self.connection = IRCConnection(self.net["address"], self.net["port"], self.net["ssl"], self.config["proxies"].get(self.net.get("proxy", "none"), None), self.net.get("flood_interval", 0.0))
        self.running = True
        self.state = {} # Dict used to hold stuff like last line received and last message etc...
        self.db = Database("etc/burpyhooves.db")
        self.db.connect()
        # Channel name -> list of nicks seen there.
        self.names = defaultdict(list)
        self._setup_hooks()
        logging.basicConfig(level=getattr(logging, self.config["misc"]["loglevel"]), format='[%(asctime)s] %(levelname)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
        # Shared HTTP session, optionally routed through a configured proxy.
        self.requests_session = requests.session()
        if self.config["misc"].get("http_proxy", "none") != "none":
            proxy = self.config["proxies"].get(self.config["misc"]["http_proxy"], "none")
            if proxy != "none":
                self.requests_session.proxies = {"http": proxy, "https": proxy}
        # IRC verbs subject to flood throttling (lowercased for comparison).
        self.flood_verbs = [x.lower() for x in self.net.get("flood_verbs", [])]
        self.skybot = self.config["misc"]["skybot_nick"]
def run(self):
self.connection.connect()
if self.config["network"]["sasl"]["use"]:
self.raw("CAP REQ :sasl")
self.raw("NICK %s" % self.me["nicks"][0]) # Nicks thing is a temp hack
self.raw("USER %s * * :%s" % (self.me["ident"], self.me["gecos"]))
for module in self.config["modules"]:
self.module_manager.load_module(module)
while self.running:
if not self.loop():
self.stop()
def raw(self, line):
"""
Send a raw IRC line to the server.
@param line: The raw line to send, without a trailing carriage return or newline.
"""
logging.debug("[IRC] <- %s" % line)
ln = Line.parse(line)
force = True # Whether we bypass flood protection or not.
if ln.command.lower() in self.flood_verbs:
force = False
self.connection.write_line(line, force)
def parse_line(self, ln):
logging.debug("[IRC] -> %s" % ln.linestr)
if ln.command == "PING":
self.raw(ln.linestr.replace("PING", "PONG"))
elif ln.command == "376":
for channel in self.net["channels"]:
self.join(channel)
elif ln.command == "CAP":
if ln.params[1] == "ACK" and ln.params[-1] == "sasl" and self.net["sasl"]["use"]:
self.raw("AUTHENTICATE PLAIN")
elif ln.command == "AUTHENTICATE":
magic = base64.b64encode("%s\x00%s\x00%s" % (self.net["sasl"]["username"], self.net["sasl"]["username"], self.net["sasl"]["password"]))
self.raw("AUTHENTICATE %s" % magic)
elif ln.command == "903":
self.raw("CAP END")
elif ln.command == "904":
logging.warning("SASL authentication failed, continuing login anyways...")
self.raw("CAP END")
elif ln.command == "001":
self.raw("MODE %s +B" % self.me["nicks"][0])
def loop(self):
if not self.connection.loop():
return False
for line in self.connection.buffer:
ln = Line.parse(line)
self.state["last_line"] = ln
self.parse_line(ln)
self.hook_manager.run_irc_hooks(ln, self)
return True
def stop(self):
self.raw("QUIT :Bye!")
self.connection.disconnect()
self.running = False
def rehash(self):
"""
Rehash (reread and reparse) the bot's configuration file.
"""
self.config = json.load(open(self.config_file))
# Helper functions
def hook_command(self, cmd, callback):
"""
Register a command hook to the bot.
@param cmd: Command name to hook.
@param callback: Event callback function to call when this command is ran.
@return: ID of the new hook. (Used for removal later)
"""
return self.hook_manager.add_hook(Hook("command_%s" % cmd, callback))
def hook_numeric(self, numeric, callback):
"""
Register a raw numeric hook to the bot.
@param numeric: The raw IRC numeric (or command, such as PRIVMSG) to hook.
@param callback: Event callback function to call when this numeric/command is received from the server.
@return: ID of the new hook. (Used for removal later)
"""
return self.hook_manager.add_hook(Hook("irc_raw_%s" % numeric, callback))
def unhook_something(self, the_id):
"""
Unhook any sort of hook. (Command, numeric, or event.)
@param the_id: The ID of the hook to remove, returned by a hook adding function.
"""
self.hook_manager.remove_hook(the_id)
def is_admin(self, hostmask=None):
"""
Check if a hostmask is a bot admin.
@param hostmask: The hostmask to check.
@return: True if admin, False if not.
"""
if hostmask is None:
hostmask = self.state["last_line"].hostmask
return self.perms.check_permission(hostmask, "admin")
def check_condition(self, condition, false_message="Sorry, you may not do that.", reply_func=None):
"""
Check a condition and return it, calling reply_func with false_message if the condition is False.
@param condition: The condition to check.
@param false_message: The message to be passed to reply_func
@param reply_func: The function to call with false_message as argument if condition is False.
@return:
"""
if reply_func is None:
reply_func = self.reply
if condition:
return True
reply_func(false_message)
return False
def check_permission(self, permission="admin", error_reply="Sorry, you do not have permission to do that!",
reply_func=None):
"""
Check a bot permission against the hostmask of the last line received, and return whether it matches.
Calls reply_func with error_reply as argument if condition is False
@param permission: The permission to check.
@param error_reply: The message to be passed to reply_func
@param reply_func: The function to call with error_reply as argument if condition is False.
@return:
"""
if reply_func is None:
reply_func = self.reply_notice
return self.check_condition(self.perms.check_permission(self.state["last_line"].hostmask, permission),
error_reply, reply_func)
# IRC-related stuff begins here
def _msg_like(self, verb, target, message):
self.raw("%s %s :%s" % (verb, target, message))
def privmsg(self, target, message):
"""
Send a PRIVMSG (channel or user message) to a user/channel.
@param target: The target to send this message to. (Can be nickname or channel.)
@param message: The actual message to send.
"""
self._msg_like("PRIVMSG", target, message)
def act(self, target, action):
"""
Send a CTCP ACTION (/me) to a user/channel.
@param target: The target to send this ACTION to. (Can be nickname or channel.)
@param action: The actual action to send.
"""
self.privmsg(target, "\x01ACTION %s\x01" % action)
def notice(self, target, message):
"""
Send a NOTICE to a user/channel.
@param target: The user or channel to send this notice to.
@param message: The actual notice text.
"""
self._msg_like("NOTICE", target, message)
def join(self, channel):
"""
Send a raw channel JOIN message to the server. (Join a channel)
@param channel: The channel to join. (Key can be passed in the same argument, separated by a space.)
"""
self.raw("JOIN %s" % channel)
def part(self, channel):
"""
Send a raw channel PART to the server. (Leave a channel)
@param channel: The channel to leave.
"""
self.raw("PART %s" % channel)
# IRC-related stuff that involves state.
def reply(self, message):
"""
Send a PRIVMSG (channel or user message) to the last channel or user we received a message in.
@param message: The reply message to send.
"""
ln = self.state["last_line"]
reply_to = ln.hostmask.nick
if ln.params[0][0] == "#":
reply_to = ln.params[0]
self.privmsg(reply_to, message)
def reply_act(self, action):
"""
Send a CTCP ACTION (/me) to the last channel or user we received a message in.
@param action: The action to send.
"""
self.reply("\x01ACTION %s\x01" % action)
def reply_notice(self, message):
"""
Send a NOTICE to the last channel or user we received a message in.
@param message: The notice text to send.
"""
ln = self.state["last_line"]
self.notice(ln.hostmask.nick, message)
# Web stuff.
def http_get(self, url, **kwargs):
"""
Perform an HTTP GET using requests.
@param url: The URL to GET.
@param kwargs: Any arguments to pass to requests.get()
@return: requests.Response object.
"""
return self.requests_session.get(url, **kwargs)
def http_post(self, url, **kwargs):
"""
Perform an HTTP POST using requests.
@param url: The URL to POST to.
@param kwargs: Any arguments to pass to requests.get()
@return: requests.Response object.
"""
return self.requests_session.post(url, **kwargs)
# Internal hooks
def _setup_hooks(self):
hooks = {
"353": self.on_raw_353,
"366": self.on_raw_366,
"PART": self.on_raw_part,
"QUIT": self.on_raw_quit,
"JOIN": self.on_raw_join,
"NICK": self.on_raw_nick
}
for cm, cb in hooks.iteritems():
self.hook_numeric(cm, cb)
def on_raw_353(self, bot, ln):
chan = ln.params[2]
names = ln.params[-1].split(" ")
if self.state.get("names_%s" % chan, False):
self.names[chan].extend(names)
else:
self.state["names_%s" % chan] = True
self.names[chan] = names
def on_raw_366(self, bot, ln):
self.state["names_%s" % ln.params[1]] = False
def on_raw_part(self, bot, ln):
nick = ln.hostmask.nick
chan = ln.params[0]
self.names[chan].remove(nick)
def on_raw_quit(self, bot, ln):
nick = ln.hostmask.nick
for chan, names in self.names.iteritems():
if nick in names:
names.remove(nick)
def on_raw_join(self, bot, ln):
nick = ln.hostmask.nick
chan = ln.params[0]
self.names[chan].append(nick)
def on_raw_nick(self, bot, ln):
old = ln.hostmask.nick
new = ln.params[0]
for chan, names in self.names.iteritems():
if old in names:
names.remove(old)
names.append(new)
# Entry point: argv[1], when given, overrides the default config path.
conf = sys.argv[1] if len(sys.argv) > 1 else "etc/burpyhooves.json"
bh = BurpyHooves(conf)
try:
    bh.run()
except KeyboardInterrupt:
    # Ctrl-C: announce a clean shutdown and disconnect instead of tracebacking.
    logging.info("Interrupted, exiting cleanly!")
    bh.stop()
| gpl-3.0 |
javaos74/neutron | neutron/plugins/ml2/drivers/mech_agent.py | 46 | 8792 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from oslo_log import log
from neutron.extensions import portbindings
from neutron.i18n import _LW
from neutron.plugins.common import constants as p_constants
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class AgentMechanismDriverBase(api.MechanismDriver):
    """Base class for drivers that attach to networks using an L2 agent.

    The AgentMechanismDriverBase provides common code for mechanism
    drivers that integrate the ml2 plugin with L2 agents. Port binding
    with this driver requires the driver's associated agent to be
    running on the port's host, and that agent to have connectivity to
    at least one segment of the port's network.

    MechanismDrivers using this base class must pass the agent type to
    __init__(), and must implement try_to_bind_segment_for_agent().
    """

    def __init__(self, agent_type,
                 supported_vnic_types=(portbindings.VNIC_NORMAL,)):
        """Initialize base class for specific L2 agent type.

        :param agent_type: Constant identifying agent type in agents_db
        :param supported_vnic_types: The binding:vnic_type values we can bind
        """
        self.agent_type = agent_type
        # NOTE: the default is an immutable tuple rather than a list; a list
        # literal here would be created once at import time and shared by
        # every driver instance (mutable-default-argument pitfall). The
        # membership test in bind_port() behaves identically for both.
        self.supported_vnic_types = supported_vnic_types

    def initialize(self):
        # No driver-wide initialization is required by default.
        pass

    def bind_port(self, context):
        """Attempt to bind the port via a live agent on the port's host.

        :param context: PortContext instance describing the port

        Rejects unsupported binding:vnic_type values, then asks each alive
        agent of self.agent_type on the host to bind one of the candidate
        segments; the first successful segment binding wins.
        """
        LOG.debug("Attempting to bind port %(port)s on "
                  "network %(network)s",
                  {'port': context.current['id'],
                   'network': context.network.current['id']})
        vnic_type = context.current.get(portbindings.VNIC_TYPE,
                                        portbindings.VNIC_NORMAL)
        if vnic_type not in self.supported_vnic_types:
            LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
                      vnic_type)
            return
        for agent in context.host_agents(self.agent_type):
            LOG.debug("Checking agent: %s", agent)
            if agent['alive']:
                for segment in context.segments_to_bind:
                    if self.try_to_bind_segment_for_agent(context, segment,
                                                          agent):
                        LOG.debug("Bound using segment: %s", segment)
                        return
            else:
                # A dead agent cannot program the host; log and keep looking.
                LOG.warning(_LW("Attempting to bind with dead agent: %s"),
                            agent)

    @abc.abstractmethod
    def try_to_bind_segment_for_agent(self, context, segment, agent):
        """Try to bind with segment for agent.

        :param context: PortContext instance describing the port
        :param segment: segment dictionary describing segment to bind
        :param agent: agents_db entry describing agent to bind
        :returns: True iff segment has been bound for agent

        Called outside any transaction during bind_port() so that
        derived MechanismDrivers can use agent_db data along with
        built-in knowledge of the corresponding agent's capabilities
        to attempt to bind to the specified network segment for the
        agent.

        If the segment can be bound for the agent, this function must
        call context.set_binding() with appropriate values and then
        return True. Otherwise, it must return False.
        """
@six.add_metaclass(abc.ABCMeta)
class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase):
    """Base class for simple drivers using an L2 agent.

    The SimpleAgentMechanismDriverBase provides common code for
    mechanism drivers that integrate the ml2 plugin with L2 agents,
    where the binding:vif_type and binding:vif_details values are the
    same for all bindings. Port binding with this driver requires the
    driver's associated agent to be running on the port's host, and
    that agent to have connectivity to at least one segment of the
    port's network.

    MechanismDrivers using this base class must pass the agent type
    and the values for binding:vif_type and binding:vif_details to
    __init__(), and must implement check_segment_for_agent().
    """

    def __init__(self, agent_type, vif_type, vif_details,
                 supported_vnic_types=(portbindings.VNIC_NORMAL,)):
        """Initialize base class for specific L2 agent type.

        :param agent_type: Constant identifying agent type in agents_db
        :param vif_type: Value for binding:vif_type when bound
        :param vif_details: Dictionary with details for VIF driver when bound
        :param supported_vnic_types: The binding:vnic_type values we can bind
        """
        # NOTE: the default is an immutable tuple rather than a list so a
        # single mutable object is not shared between driver instances
        # (mutable-default-argument pitfall).
        super(SimpleAgentMechanismDriverBase, self).__init__(
            agent_type, supported_vnic_types)
        self.vif_type = vif_type
        self.vif_details = vif_details

    def try_to_bind_segment_for_agent(self, context, segment, agent):
        """Bind the segment with the driver's fixed vif_type/vif_details.

        Returns True iff check_segment_for_agent() accepts the segment.
        """
        if self.check_segment_for_agent(segment, agent):
            context.set_binding(segment[api.ID],
                                self.vif_type,
                                self.vif_details)
            return True
        else:
            return False

    @abc.abstractmethod
    def get_allowed_network_types(self, agent=None):
        """Return the agent's or driver's allowed network types.

        For example: return ('flat', ...). You can also refer to the
        configuration the given agent exposes.
        """
        pass

    @abc.abstractmethod
    def get_mappings(self, agent):
        """Return the agent's bridge or interface mappings.

        For example: agent['configurations'].get('bridge_mappings', {}).
        """
        pass

    def physnet_in_mappings(self, physnet, mappings):
        """Is the physical network part of the given mappings?"""
        return physnet in mappings

    def check_segment_for_agent(self, segment, agent):
        """Check if segment can be bound for agent.

        :param segment: segment dictionary describing segment to bind
        :param agent: agents_db entry describing agent to bind
        :returns: True iff segment can be bound for agent

        Called outside any transaction during bind_port so that derived
        MechanismDrivers can use agent_db data along with built-in
        knowledge of the corresponding agent's capabilities to
        determine whether or not the specified network segment can be
        bound for the agent.
        """
        mappings = self.get_mappings(agent)
        allowed_network_types = self.get_allowed_network_types(agent)
        LOG.debug("Checking segment: %(segment)s "
                  "for mappings: %(mappings)s "
                  "with network types: %(network_types)s",
                  {'segment': segment, 'mappings': mappings,
                   'network_types': allowed_network_types})
        network_type = segment[api.NETWORK_TYPE]
        if network_type not in allowed_network_types:
            LOG.debug(
                'Network %(network_id)s is of type %(network_type)s '
                'but agent %(agent)s or mechanism driver only '
                'support %(allowed_network_types)s.',
                {'network_id': segment['id'],
                 'network_type': network_type,
                 'agent': agent['host'],
                 'allowed_network_types': allowed_network_types})
            return False
        # Flat and VLAN segments additionally require the agent to have the
        # segment's physical network in its configured mappings.
        if network_type in [p_constants.TYPE_FLAT, p_constants.TYPE_VLAN]:
            physnet = segment[api.PHYSICAL_NETWORK]
            if not self.physnet_in_mappings(physnet, mappings):
                LOG.debug(
                    'Network %(network_id)s is connected to physical '
                    'network %(physnet)s, but agent %(agent)s reported '
                    'physical networks %(mappings)s. '
                    'The physical network must be configured on the '
                    'agent if binding is to succeed.',
                    {'network_id': segment['id'],
                     'physnet': physnet,
                     'agent': agent['host'],
                     'mappings': mappings})
                return False
        return True
| apache-2.0 |
Acidburn0zzz/readthedocs.org | readthedocs/projects/migrations/0030_auto__add_webhook.py | 18 | 12112 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the WebHook and EmailHook notification tables."""
    def forwards(self, orm):
        """Apply: create projects_webhook and projects_emailhook."""
        # Adding model 'WebHook'
        db.create_table('projects_webhook', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='webhook_notifications', to=orm['projects.Project'])),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
        ))
        db.send_create_signal('projects', ['WebHook'])
        # Adding model 'EmailHook'
        db.create_table('projects_emailhook', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='emailhook_notifications', to=orm['projects.Project'])),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
        ))
        db.send_create_signal('projects', ['EmailHook'])
    def backwards(self, orm):
        """Unapply: drop the two tables created by forwards()."""
        # Deleting model 'WebHook'
        db.delete_table('projects_webhook')
        # Deleting model 'EmailHook'
        db.delete_table('projects_emailhook')
    # Frozen ORM definitions recorded by South when this migration was
    # generated; auto-generated -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'projects.file': {
            'Meta': {'ordering': "('denormalized_path',)", 'object_name': 'File'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'denormalized_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'heading': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ordering': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['projects.File']"}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['projects.Project']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'})
        },
        'projects.filerevision': {
            'Meta': {'ordering': "('-revision_number',)", 'object_name': 'FileRevision'},
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'diff': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['projects.File']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_reverted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'revision_number': ('django.db.models.fields.IntegerField', [], {})
        },
        'projects.importedfile': {
            'Meta': {'object_name': 'ImportedFile'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_files'", 'to': "orm['projects.Project']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'projects.project': {
            'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
            'analytics_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'conf_py_file': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'crate_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'default_branch': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'default_version': ('django.db.models.fields.CharField', [], {'default': "'latest'", 'max_length': '255'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'django_packages_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'documentation_type': ('django.db.models.fields.CharField', [], {'default': "'sphinx'", 'max_length': '20'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'related_projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['projects.Project']", 'null': 'True', 'through': "orm['projects.ProjectRelationship']", 'blank': 'True'}),
            'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
            'requirements_file': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
            'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
            'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
            'use_virtualenv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        'projects.projectrelationship': {
            'Meta': {'object_name': 'ProjectRelationship'},
            'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'superprojects'", 'to': "orm['projects.Project']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subprojects'", 'to': "orm['projects.Project']"})
        },
        'projects.webhook': {
            'Meta': {'object_name': 'WebHook'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'webhook_notifications'", 'to': "orm['projects.Project']"}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        }
    }
    complete_apps = ['projects']
| mit |
msebire/intellij-community | python/lib/Lib/site-packages/django/core/files/storage.py | 89 | 9863 | import os
import errno
import urlparse
import itertools
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.core.files import locks, File
from django.core.files.move import file_move_safe
from django.utils.encoding import force_unicode
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.utils.text import get_valid_filename
from django.utils._os import safe_join
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
    """
    A base storage class, providing some default behaviors that all other
    storage systems can inherit or override, as necessary.
    """

    # The following methods represent a public interface to private methods.
    # These shouldn't be overridden by subclasses unless absolutely necessary.

    def open(self, name, mode='rb', mixin=None):
        """
        Retrieves the specified file from storage, using the optional mixin
        class to customize what features are available on the File returned.
        """
        stored_file = self._open(name, mode)
        if mixin:
            # Splice the mixin in as a parent of the returned File's class so
            # callers see the mixin's behavior on top of the backend's File.
            stored_file.__class__ = type(mixin.__name__,
                                         (mixin, stored_file.__class__), {})
        return stored_file

    def save(self, name, content):
        """
        Saves new content to the file specified by name. The content should be
        a proper File object, ready to be read from the beginning.
        """
        if name is None:
            # Fall back to the name carried by the File object itself.
            name = content.name
        saved_name = self._save(self.get_available_name(name), content)
        # Store filenames with forward slashes, even on Windows
        return force_unicode(saved_name.replace('\\', '/'))

    # These methods are part of the public API, with default implementations.

    def get_valid_name(self, name):
        """
        Returns a filename, based on the provided filename, that's suitable
        for use in the target storage system.
        """
        return get_valid_filename(name)

    def get_available_name(self, name):
        """
        Returns a filename that's free on the target storage system, and
        available for new content to be written to.
        """
        directory, filename = os.path.split(name)
        root, ext = os.path.splitext(filename)
        # While the candidate name is taken, append "_1", "_2", ... before the
        # file extension (if any) until an unused name turns up.
        attempt = 0
        while self.exists(name):
            attempt += 1
            name = os.path.join(directory, "%s_%s%s" % (root, attempt, ext))
        return name

    def path(self, name):
        """
        Returns a local filesystem path where the file can be retrieved using
        Python's built-in open() function. Storage systems that can't be
        accessed using open() should *not* implement this method.
        """
        raise NotImplementedError("This backend doesn't support absolute paths.")

    # The following methods form the public API for storage systems, but with
    # no default implementations. Subclasses must implement *all* of these.

    def delete(self, name):
        """Deletes the specified file from the storage system."""
        raise NotImplementedError()

    def exists(self, name):
        """
        Returns True if a file referenced by the given name already exists in
        the storage system, or False if the name is available for a new file.
        """
        raise NotImplementedError()

    def listdir(self, path):
        """
        Lists the contents of the specified path, returning a 2-tuple of
        lists; the first item being directories, the second item being files.
        """
        raise NotImplementedError()

    def size(self, name):
        """Returns the total size, in bytes, of the file specified by name."""
        raise NotImplementedError()

    def url(self, name):
        """
        Returns an absolute URL where the file's contents can be accessed
        directly by a Web browser.
        """
        raise NotImplementedError()

    def accessed_time(self, name):
        """
        Returns the last accessed time (as datetime object) of the file
        specified by name.
        """
        raise NotImplementedError()

    def created_time(self, name):
        """
        Returns the creation time (as datetime object) of the file specified
        by name.
        """
        raise NotImplementedError()

    def modified_time(self, name):
        """
        Returns the last modified time (as datetime object) of the file
        specified by name.
        """
        raise NotImplementedError()
class FileSystemStorage(Storage):
    """
    Standard filesystem storage
    """
    def __init__(self, location=None, base_url=None):
        # Defaults come from settings, so a bare FileSystemStorage()
        # serves files out of MEDIA_ROOT at MEDIA_URL.
        if location is None:
            location = settings.MEDIA_ROOT
        if base_url is None:
            base_url = settings.MEDIA_URL
        self.location = os.path.abspath(location)
        self.base_url = base_url
    def _open(self, name, mode='rb'):
        """Open the named file from disk, wrapped in a django File object."""
        return File(open(self.path(name), mode))
    def _save(self, name, content):
        """Write ``content`` to disk under ``name``, racing safely against
        concurrent savers; returns the name actually used."""
        full_path = self.path(name)
        directory = os.path.dirname(full_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        elif not os.path.isdir(directory):
            raise IOError("%s exists and is not a directory." % directory)
        # There's a potential race condition between get_available_name and
        # saving the file; it's possible that two threads might return the
        # same name, at which point all sorts of fun happens. So we need to
        # try to create the file, but if it already exists we have to go back
        # to get_available_name() and try again.
        while True:
            try:
                # This file has a file path that we can move.
                if hasattr(content, 'temporary_file_path'):
                    file_move_safe(content.temporary_file_path(), full_path)
                    content.close()
                # This is a normal uploadedfile that we can stream.
                else:
                    # This fun binary flag incantation makes os.open throw an
                    # OSError if the file already exists before we open it.
                    fd = os.open(full_path, os.O_WRONLY | os.O_CREAT | os.O_EXCL | getattr(os, 'O_BINARY', 0))
                    try:
                        # Hold an exclusive lock while streaming the chunks out.
                        locks.lock(fd, locks.LOCK_EX)
                        for chunk in content.chunks():
                            os.write(fd, chunk)
                    finally:
                        locks.unlock(fd)
                        os.close(fd)
            except OSError, e:
                if e.errno == errno.EEXIST:
                    # Ooops, the file exists. We need a new file name.
                    name = self.get_available_name(name)
                    full_path = self.path(name)
                else:
                    raise
            else:
                # OK, the file save worked. Break out of the loop.
                break
        if settings.FILE_UPLOAD_PERMISSIONS is not None:
            os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)
        return name
    def delete(self, name):
        """Remove the named file from disk; silently a no-op if it is gone."""
        name = self.path(name)
        # If the file exists, delete it from the filesystem.
        if os.path.exists(name):
            os.remove(name)
    def exists(self, name):
        """True if the named file (or directory) exists on disk."""
        return os.path.exists(self.path(name))
    def listdir(self, path):
        """Return ([directories], [files]) found directly under ``path``."""
        path = self.path(path)
        directories, files = [], []
        for entry in os.listdir(path):
            if os.path.isdir(os.path.join(path, entry)):
                directories.append(entry)
            else:
                files.append(entry)
        return directories, files
    def path(self, name):
        """Absolute filesystem path for ``name``, confined below self.location."""
        try:
            # safe_join refuses paths that escape self.location (e.g. "..").
            path = safe_join(self.location, name)
        except ValueError:
            raise SuspiciousOperation("Attempted access to '%s' denied." % name)
        return os.path.normpath(path)
    def size(self, name):
        """Size of the named file in bytes."""
        return os.path.getsize(self.path(name))
    def url(self, name):
        """Public URL for the named file, joined onto base_url."""
        if self.base_url is None:
            raise ValueError("This file is not accessible via a URL.")
        return urlparse.urljoin(self.base_url, name).replace('\\', '/')
    def accessed_time(self, name):
        """Last-access time of the file as a (naive) datetime."""
        return datetime.fromtimestamp(os.path.getatime(self.path(name)))
    def created_time(self, name):
        """Creation/ctime of the file as a (naive) datetime."""
        return datetime.fromtimestamp(os.path.getctime(self.path(name)))
    def modified_time(self, name):
        """Last-modification time of the file as a (naive) datetime."""
        return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
if import_path is None:
import_path = settings.DEFAULT_FILE_STORAGE
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("%s isn't a storage module." % import_path)
module, classname = import_path[:dot], import_path[dot+1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing storage module %s: "%s"' % (module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Storage module "%s" does not define a "%s" class.' % (module, classname))
class DefaultStorage(LazyObject):
    """
    Lazy proxy for the configured default storage backend.

    The concrete backend (looked up via ``get_storage_class()``, which
    defaults to ``settings.DEFAULT_FILE_STORAGE``) is only instantiated on
    first attribute access.
    """
    def _setup(self):
        # Resolve the configured storage class and instantiate it; LazyObject
        # forwards subsequent attribute access to this wrapped instance.
        self._wrapped = get_storage_class()()

# Shared module-level instance of the default storage backend.
default_storage = DefaultStorage()
| apache-2.0 |
akvo/akvo-sites-zz-template | code/wp-content/themes/akvo-sites/node_modules/node-gyp/gyp/pylib/gyp/common_test.py | 2542 | 1970 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):

  def test_Valid(self):
    """Test that sorting works on a valid graph with one possible order."""
    graph = dict(a=['b', 'c'], b=[], c=['d'], d=['b'])
    def EdgeLookup(node):
      return tuple(graph[node])
    self.assertEqual(
        gyp.common.TopologicallySorted(graph.keys(), EdgeLookup),
        ['a', 'c', 'd', 'b'])

  def test_Cycle(self):
    """Test that an exception is thrown on a cyclic graph."""
    graph = dict(a=['b'], b=['c'], c=['d'], d=['a'])
    def EdgeLookup(node):
      return tuple(graph[node])
    self.assertRaises(
        gyp.common.CycleError, gyp.common.TopologicallySorted,
        graph.keys(), EdgeLookup)
class TestGetFlavor(unittest.TestCase):
  """Test that gyp.common.GetFlavor works as intended"""

  original_platform = ''

  def setUp(self):
    # Remember the real platform so each test can fake sys.platform safely.
    self.original_platform = sys.platform

  def tearDown(self):
    sys.platform = self.original_platform

  def assertFlavor(self, expected, argument, param):
    sys.platform = argument
    self.assertEqual(expected, gyp.common.GetFlavor(param))

  def test_platform_default(self):
    # Each (flavor, fake sys.platform) pair must map without any params.
    cases = [
        ('freebsd', 'freebsd9'),
        ('freebsd', 'freebsd10'),
        ('openbsd', 'openbsd5'),
        ('solaris', 'sunos5'),
        ('solaris', 'sunos'),
        ('linux', 'linux2'),
        ('linux', 'linux3'),
    ]
    for expected, platform in cases:
      self.assertFlavor(expected, platform, {})

  def test_param(self):
    # An explicit 'flavor' param overrides platform detection entirely.
    self.assertFlavor('foobar', 'linux2', {'flavor': 'foobar'})
# Allow the suite to be executed directly: `python common_test.py`.
if __name__ == '__main__':
  unittest.main()
| agpl-3.0 |
bcingle/sigo-kodi-repository | src/plugin.video.guideboxkodi/addontools.py | 2 | 11690 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import urlparse
import urllib
import xbmcgui
import xbmcplugin
import xbmcaddon
import xbmc
import json
import os
import pickle
class AddonHelper(dict):
'''
Set of tools that can be easily used to communicate with the XBMC/Kodi Plugin API. These tools
make it easier to do common plugin tasks, like build URLs to sub folders, add folders, etc.
'''
def __init__(self, args):
self['base_url'] = args[0]
self['xbmcaddon'] = xbmcaddon.Addon()
self["addon_name"] = self["xbmcaddon"].getAddonInfo("name")
self["addon_id"] = self["xbmcaddon"].getAddonInfo("id")
print "Base URL: " + str(self['base_url'])
self['addon_handle'] = int(args[1])
print "Addon Handle: " + str(self['addon_handle'])
if len(args) >= 3:
self['params'] = urlparse.parse_qs(args[2][1:])
else:
self['params'] = {}
print "Parameters: " + str(self['params'])
xbmcplugin.setContent(self['addon_handle'], 'movies')
self['full_path'] = self['base_url'] + '?' + urllib.urlencode(self['params']);
print "Full path of this addon run: " + self["full_path"]
self["user_data_folder"] = xbmc.translatePath("special://profile/addon_data/"+self['addon_id'])
if not os.path.isdir(self["user_data_folder"]):
os.mkdir(self["user_data_folder"])
self["user_data_file"] = os.path.join(self["user_data_folder"], "userdata.p")
def build_url(self, query):
"""
Build a url from a given query. The query should be a tuple in the form ["key1": "value1", "key2": "value2", ...]
"""
url = self['base_url'] + '?' + urllib.urlencode(query)
print "Building addon URL: " + url
return url
def get_param(self, name, default=None):
"""
Get a parameter by name, as passed to the addon
"""
print "Retrieving parameter " + name
if name in self['params']:
return self['params'][name][0]
else:
return default
def add_folder(self, label, path={}, artwork=None, contextMenu=None, mediaType=None, listInfo=None, of=0, overrideContextMenu=False):
"""
Add a subfolder to the current view with the given parameters. A subfolder is a special endpoint that
links back to the same plugin with different parameters
:param label: String, the label of the entry that will be shown
:param path: Dictionary of parameters to pass to this addon
:param artwork: A dictionary, as defined at http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#ListItem-setArt
:param contextMenu: A dictionary, as defined at http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#ListItem-addContextMenuItems
:param mediaType: String, type of media for listInfo, one of ['video', 'music', 'pictures'] - only applicable if listInfo is also defined
:param listInfo: A dictionary with listInfo properties, as defined at http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#ListItem-setInfo
"""
self.add_endpoint(label, url=self.build_url(path), folder=True, artwork=artwork, contextMenu=contextMenu, mediaType=mediaType, listInfo=listInfo, overrideContextMenu=overrideContextMenu)
#print "Added a folder: " + label
print path
def add_endpoint(self, label, url=None, folder=False, artwork=None, contextMenu=None, overrideContextMenu=False, mediaType=None, listInfo=None, of=0):
"""
Add an endpoint. An endpoint is an item in the list of items shown to the user, either a folder or
a link/url recognized by XBMC
:param label: String, the label of the entry that will be shown
:param url: String, the url of the entry, where this item directs to
:param folder: True/False, whether this is a folder (default False)
:param artwork: A dictionary, as defined at http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#ListItem-setArt
:param contextMenu: A dictionary, as defined at http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#ListItem-addContextMenuItems
:param mediaType: String, type of media for listInfo, one of ['video', 'music', 'pictures'] - only applicable if listInfo is also defined
:param listInfo: A dictionary with listInfo properties, as defined at http://mirrors.kodi.tv/docs/python-docs/16.x-jarvis/xbmcgui.html#ListItem-setInfo
"""
li = xbmcgui.ListItem(label=label)
if artwork:
print "Adding artwork " + json.dumps(artwork)
li.setArt(artwork)
if mediaType and listInfo:
print "Adding info " + json.dumps(listInfo)
li.setInfo(mediaType, listInfo)
if contextMenu:
print "Adding context menu items " + json.dumps(contextMenu)
li.addContextMenuItems(contextMenu, overrideContextMenu)
xbmcplugin.addDirectoryItem(handle=self['addon_handle'], url=url, listitem=li, isFolder=folder, totalItems=of)
#print "Added navigation element to menu: " + label + " with path " + url
def end(self, viewMode=None):
"""
Mark the end of adding subfolders and end points
:param viewMode: Optional param to set a view mode for the list
"""
if viewMode:
self.set_view_mode(viewMode)
xbmcplugin.endOfDirectory(self['addon_handle'])
print "Closed navigation"
def is_platform(self, platform):
"""
Returns true is Kodi is running on the given platform
"""
return xbmc.getCondVisibility('System.Platform.' + platform)
def get_string(self, key):
"""
Get a localized string from the strings.xml localization file
:param id: The id of the string, given by a number lik '320001'
"""
return self['xbmcaddon'].getLocalizedString(int(key))
def get_setting(self, key):
"""
Get the string representation of some setting from the user settings for this plugin
:param id: The name of the setting to get
"""
return self['xbmcaddon'].getSetting(key)
def set_setting(self, key, value):
"""
Manually set some user setting
"""
self['xbmcaddon'].setSetting(key, value)
def navigate_now(self, path={}):
"""
Immediately redirect to a new path
"""
path = self.build_url(path);
xbmc.executebuiltin('RunPlugin(' + path + ')')
def refresh_current_path(self):
"""
Refresh the current path to accept changes that might have been made to data on the page
"""
xbmc.executebuiltin('RunPlugin(' + self['full_path'] + ')')
def get_current_path(self):
"""
Provide the full plugin:// path that was used to call this plugin, including parameters
"""
return self["full_path"]
def set_view_mode(self, viewMode):
"""
Sets the view mode for the vurrent list. Must be called after adding all folders and before calling AddonHelper.end().
It is preferable to pass the view mode to the end() function instead of using this, but not always possible.
"""
xbmc.executebuiltin("Container.SetViewMode(" + viewMode + ")")
def get_platform(self):
"""
Returns one of:
linux
win
osx
ios
android
atv2
raspberry_pi
unknown
"""
if self.is_platform('Linux.RaspberryPi'):
return "raspberry_pi"
if self.is_platform("Linux"):
return "linux"
if self.is_platform("Windows"):
return "win"
if self.is_platform("OSX"):
return "osx"
if self.is_platform("IOS"):
return "ios"
if self.is_platform("ATV2"):
return "atv2"
if self.is_platform("Android"):
return "android"
return "unknown"
def notify(self, message, time=5000, sound=True):
"""
Displays a notification in Kodi
:param message: Message of the notification
:param time: Time to display njotification in ms (default 5000)
:param sound: Whether to play sound or not (default True)
"""
print "Showing notification with message [" + message + "], time " + str(time) + " and sound " + str(sound)
xbmcgui.Dialog().notification(self["addon_name"], message, time=time, sound=sound)
def set_user_data(self, key, value):
"""
Set any data or object as the user's data. Writes to a file in the user's "special" folder for this addon.
:param key: The key for the user data, must be a string or convertible to a string
:param value: The value for the user data, any data tupe (old value will be overwritten if key already exists)
"""
if not os.path.isfile(self["user_data_file"]):
userData = {}
else:
with open(self["user_data_file"], "rb") as f:
userData = pickle.load(f)
userData[str(key)] = value
pickle.dump(userData, open(self["user_data_file"], "wb"))
def get_user_data(self, key):
"""
Get full user data stored in the user's special folder
:param key: The key for the data to be retrieved, must be a string or convertible to a string
:returns: The value stored in user data, or None if no value was stored with the given key
"""
if not os.path.isfile(self["user_data_file"]):
return None
with open(self["user_data_file"], "rb") as f:
userData = pickle.load(f)
key = str(key)
if key in userData:
return userData[key]
else:
return None
def get_user_input_alphanum(self, heading="Enter text"):
d = xbmcgui.Dialog()
return d.input(heading, type=xbmcgui.INPUT_ALPHANUM)
def get_user_input_date(self, heading="Enter date"):
d = xbmcgui.Dialog()
return d.input(heading, type=xbmcgui.INPUT_DATE)
def get_user_input_ip(self, heading="Enter IP address"):
d = xbmcgui.Dialog()
return d.input(heading, type=xbmcgui.INPUT_IPADDRESS)
def get_user_input_numeric(self, heading="Enter number"):
d = xbmcgui.Dialog()
return d.input(heading, type=xbmcgui.INPUT_NUMERIC)
def get_user_input_time(self, heading="Enter time"):
d = xbmcgui.Dialog()
return d.input(heading, type=xbmcgui.INPUT_TIME)
def get_user_input_password(self, heading="Enter password"):
d = xbmcgui.Dialog()
return d.input(heading, type=xbmcgui.INPUT_PASSWORD)
def get_user_input_select(self, heading="Select one", options=[]):
if not options:
return None
d = xbmcgui.Dialog()
return d.select(heading, options)
def get_user_input_yesno(self, heading="Choose an option", question="Yes or No?", nolabel = "No", yeslabel="Yes"):
d = xbmcgui.Dialog()
return d.yesno(heading, question, nolabel=nolabel, yeslabel=yeslabel)
def open_addon_settings(self):
self["xbmcaddon"].openSettings()
| gpl-3.0 |
clinton-hall/nzbToMedia | libs/common/setuptools/command/upload_docs.py | 173 | 7311 | # -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import socket
import zipfile
import tempfile
import shutil
import itertools
import functools
from setuptools.extern import six
from setuptools.extern.six.moves import http_client, urllib
from pkg_resources import iter_entry_points
from .upload import upload
def _encode(s):
    """Encode *s* as UTF-8 bytes.

    On Python 3 lone surrogates are tolerated via 'surrogateescape';
    on Python 2 strict encoding is used.
    """
    if six.PY3:
        errors = 'surrogateescape'
    else:
        errors = 'strict'
    return s.encode('utf-8', errors)
class upload_docs(upload):
    """Distutils command that zips a documentation tree and POSTs it to PyPI
    (pythonhosted.org) as a multipart/form-data upload."""

    # override the default repository as upload_docs isn't
    # supported by Warehouse (and won't be).
    DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'

    description = 'Upload documentation to PyPI'

    user_options = [
        ('repository=', 'r',
         "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
        ('show-response', None,
         'display full response text from server'),
        ('upload-dir=', None, 'directory to upload'),
    ]
    boolean_options = upload.boolean_options

    def has_sphinx(self):
        # True when no explicit upload dir was given AND a 'build_sphinx'
        # command is registered (returns None otherwise - falsy either way).
        if self.upload_dir is None:
            for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
                return True

    # Run build_sphinx first when available, so there are docs to upload.
    sub_commands = [('build_sphinx', has_sphinx)]

    def initialize_options(self):
        upload.initialize_options(self)
        self.upload_dir = None
        self.target_dir = None

    def finalize_options(self):
        """Decide which directory to upload: the user-specified one, the
        sphinx build output, or <build_base>/docs as a fallback."""
        upload.finalize_options(self)
        if self.upload_dir is None:
            if self.has_sphinx():
                build_sphinx = self.get_finalized_command('build_sphinx')
                self.target_dir = build_sphinx.builder_target_dir
            else:
                build = self.get_finalized_command('build')
                self.target_dir = os.path.join(build.build_base, 'docs')
        else:
            self.ensure_dirname('upload_dir')
            self.target_dir = self.upload_dir
        if 'pypi.python.org' in self.repository:
            log.warn("Upload_docs command is deprecated. Use RTD instead.")
        self.announce('Using upload directory %s' % self.target_dir)

    def create_zipfile(self, filename):
        """Zip the whole target_dir tree into *filename*, storing entries
        with paths relative to target_dir."""
        zip_file = zipfile.ZipFile(filename, "w")
        try:
            self.mkpath(self.target_dir)  # just in case
            for root, dirs, files in os.walk(self.target_dir):
                if root == self.target_dir and not files:
                    tmpl = "no files found in upload directory '%s'"
                    raise DistutilsOptionError(tmpl % self.target_dir)
                for name in files:
                    full = os.path.join(root, name)
                    relative = root[len(self.target_dir):].lstrip(os.path.sep)
                    dest = os.path.join(relative, name)
                    zip_file.write(full, dest)
        finally:
            zip_file.close()

    def run(self):
        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

        # Build the zip in a temp dir and always clean it up afterwards.
        tmp_dir = tempfile.mkdtemp()
        name = self.distribution.metadata.get_name()
        zip_file = os.path.join(tmp_dir, "%s.zip" % name)
        try:
            self.create_zipfile(zip_file)
            self.upload_file(zip_file)
        finally:
            shutil.rmtree(tmp_dir)

    @staticmethod
    def _build_part(item, sep_boundary):
        # Yields the byte chunks of one multipart/form-data part for a
        # single (key, value(s)) item; file values arrive as
        # (filename, content) tuples.
        key, values = item
        title = '\nContent-Disposition: form-data; name="%s"' % key
        # handle multiple entries for the same name
        if not isinstance(values, list):
            values = [values]
        for value in values:
            if isinstance(value, tuple):
                title += '; filename="%s"' % value[0]
                value = value[1]
            else:
                value = _encode(value)
            yield sep_boundary
            yield _encode(title)
            yield b"\n\n"
            yield value
            if value and value[-1:] == b'\r':
                yield b'\n'  # write an extra newline (lurve Macs)

    @classmethod
    def _build_multipart(cls, data):
        """
        Build up the MIME payload for the POST data
        """
        boundary = b'--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
        sep_boundary = b'\n--' + boundary
        end_boundary = sep_boundary + b'--'
        end_items = end_boundary, b"\n",
        builder = functools.partial(
            cls._build_part,
            sep_boundary=sep_boundary,
        )
        part_groups = map(builder, data.items())
        parts = itertools.chain.from_iterable(part_groups)
        body_items = itertools.chain(parts, end_items)
        content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii')
        # Returns (body bytes, Content-Type header value).
        return b''.join(body_items), content_type

    def upload_file(self, filename):
        """POST the zip at *filename* to the repository with HTTP Basic
        auth, reporting the outcome through self.announce()."""
        with open(filename, 'rb') as f:
            content = f.read()
        meta = self.distribution.metadata
        data = {
            ':action': 'doc_upload',
            'name': meta.get_name(),
            'content': (os.path.basename(filename), content),
        }
        # set up the authentication
        credentials = _encode(self.username + ':' + self.password)
        credentials = standard_b64encode(credentials)
        if six.PY3:
            credentials = credentials.decode('ascii')
        auth = "Basic " + credentials

        body, ct = self._build_multipart(data)

        msg = "Submitting documentation to %s" % (self.repository)
        self.announce(msg, log.INFO)

        # build the Request
        # We can't use urllib2 since we need to send the Basic
        # auth right with the first request
        schema, netloc, url, params, query, fragments = \
            urllib.parse.urlparse(self.repository)
        assert not params and not query and not fragments
        if schema == 'http':
            conn = http_client.HTTPConnection(netloc)
        elif schema == 'https':
            conn = http_client.HTTPSConnection(netloc)
        else:
            raise AssertionError("unsupported schema " + schema)

        data = ''
        try:
            conn.connect()
            conn.putrequest("POST", url)
            content_type = ct
            conn.putheader('Content-type', content_type)
            conn.putheader('Content-length', str(len(body)))
            conn.putheader('Authorization', auth)
            conn.endheaders()
            conn.send(body)
        except socket.error as e:
            self.announce(str(e), log.ERROR)
            return

        r = conn.getresponse()
        if r.status == 200:
            msg = 'Server response (%s): %s' % (r.status, r.reason)
            self.announce(msg, log.INFO)
        elif r.status == 301:
            location = r.getheader('Location')
            if location is None:
                location = 'https://pythonhosted.org/%s/' % meta.get_name()
            msg = 'Upload successful. Visit %s' % location
            self.announce(msg, log.INFO)
        else:
            msg = 'Upload failed (%s): %s' % (r.status, r.reason)
            self.announce(msg, log.ERROR)
        if self.show_response:
            print('-' * 75, r.read(), '-' * 75)
| gpl-3.0 |
sjshank/spotEmployee | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py | 1569 | 23354 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
  """Dispatch *args* to a MacTool instance and exit with its return code.

  A None return from Dispatch means success, in which case the process is
  left to exit normally.
  """
  exit_code = MacTool().Dispatch(args)
  if exit_code is not None:
    sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest, convert_to_binary)
else:
shutil.copy(source, dest)
  def _CopyXIBFile(self, source, dest):
    """Compiles a XIB file with ibtool into a binary plist in the bundle."""

    # ibtool sometimes crashes with relative paths. See crbug.com/314728.
    base = os.path.dirname(os.path.realpath(__file__))
    if os.path.relpath(source):
      source = os.path.join(base, source)
    if os.path.relpath(dest):
      dest = os.path.join(base, dest)

    args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
        '--output-format', 'human-readable-text', '--compile', dest, source]

    # Filter ibtool's output: suppress benign "is clipping its content"
    # notes, but keep the section header line for any note that survives.
    ibtool_section_re = re.compile(r'/\*.*\*/')
    ibtool_re = re.compile(r'.*note:.*is clipping its content')
    ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
    current_section_header = None
    for line in ibtoolout.stdout:
      if ibtool_section_re.match(line):
        current_section_header = line
      elif not ibtool_re.match(line):
        if current_section_header:
          # Emit the header once, only when its section has real output.
          sys.stdout.write(current_section_header)
          current_section_header = None
        sys.stdout.write(line)
    return ibtoolout.returncode
  def _ConvertToBinary(self, dest):
    """Converts the plist at |dest| to Apple binary plist format in place
    using the `plutil` tool (raises CalledProcessError on failure)."""
    subprocess.check_call([
        'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
  def _CopyStringsFile(self, source, dest, convert_to_binary):
    """Copies a .strings file using iconv to reconvert the input into UTF-16."""
    input_code = self._DetectInputEncoding(source) or "UTF-8"

    # Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
    # CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
    #     CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
    #     semicolon in dictionary.
    # on invalid files. Do the same kind of validation.
    # NOTE: CoreFoundation is the pyobjc bridge module, only present on the
    # system Python of macOS; an invalid plist makes this a silent no-op.
    import CoreFoundation
    s = open(source, 'rb').read()
    d = CoreFoundation.CFDataCreate(None, s, len(s))
    _, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
    if error:
      return

    # Transcode from the detected encoding to UTF-16, which is what Xcode
    # bundles expect for .strings resources.
    fp = open(dest, 'wb')
    fp.write(s.decode(input_code).encode('UTF-16'))
    fp.close()

    if convert_to_binary == 'True':
      self._ConvertToBinary(dest)
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
except e:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
  def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
    """Copies the |source| Info.plist to the destination directory |dest|,
    expanding ${VAR}-style environment references and merging any extra
    key/value pairs passed as a JSON string in |keys|."""
    # Read the source Info.plist into memory.
    fd = open(source, 'r')
    lines = fd.read()
    fd.close()

    # Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
    plist = plistlib.readPlistFromString(lines)
    if keys:
      plist = dict(plist.items() + json.loads(keys[0]).items())
    lines = plistlib.writePlistToString(plist)

    # Go through all the environment variables and replace them as variables in
    # the file.
    IDENT_RE = re.compile(r'[/\s]')
    for key in os.environ:
      if key.startswith('_'):
        continue
      evar = '${%s}' % key
      evalue = os.environ[key]
      lines = string.replace(lines, evar, evalue)

      # Xcode supports various suffices on environment variables, which are
      # all undocumented. :rfc1034identifier is used in the standard project
      # template these days, and :identifier was used earlier. They are used to
      # convert non-url characters into things that look like valid urls --
      # except that the replacement character for :identifier, '_' isn't valid
      # in a URL either -- oops, hence :rfc1034identifier was born.
      evar = '${%s:identifier}' % key
      evalue = IDENT_RE.sub('_', os.environ[key])
      lines = string.replace(lines, evar, evalue)

      evar = '${%s:rfc1034identifier}' % key
      evalue = IDENT_RE.sub('-', os.environ[key])
      lines = string.replace(lines, evar, evalue)

    # Remove any keys with values that haven't been replaced.
    lines = lines.split('\n')
    for i in range(len(lines)):
      if lines[i].strip().startswith("<string>${"):
        # Drop both the unresolved <string> value line and the <key> line
        # that precedes it.
        lines[i] = None
        lines[i - 1] = None
    lines = '\n'.join(filter(lambda x: x is not None, lines))

    # Write out the file with variables replaced.
    fd = open(dest, 'w')
    fd.write(lines)
    fd.close()

    # Now write out PkgInfo file now that the Info.plist file has been
    # "compiled".
    self._WritePkgInfo(dest)

    if convert_to_binary == 'True':
      self._ConvertToBinary(dest)
  def _WritePkgInfo(self, info_plist):
    """This writes the PkgInfo file from the data stored in Info.plist."""
    plist = plistlib.readPlist(info_plist)
    if not plist:
      return

    # Only create PkgInfo for executable types.
    package_type = plist['CFBundlePackageType']
    if package_type != 'APPL':
      return

    # The format of PkgInfo is eight characters, representing the bundle type
    # and bundle signature, each four characters. If that is missing, four
    # '?' characters are used instead.
    signature_code = plist.get('CFBundleSignature', '????')
    if len(signature_code) != 4:  # Wrong length resets everything, too.
      signature_code = '?' * 4

    # PkgInfo lives next to Info.plist inside the bundle.
    dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
    fp = open(dest, 'w')
    fp.write('%s%s' % (package_type, signature_code))
    fp.close()
  def ExecFlock(self, lockfile, *cmd_list):
    """Emulates the most basic behavior of Linux's flock(1): take an
    exclusive lock on |lockfile|, then run |cmd_list| while holding it."""
    # Rely on exception handling to report errors.
    # The lock is released implicitly when the fd is closed at process exit.
    fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
    fcntl.flock(fd, fcntl.LOCK_EX)
    return subprocess.call(cmd_list)
  def ExecFilterLibtool(self, *cmd_list):
    """Calls libtool and filters out '/path/to/libtool: file: foo.o has no
    symbols'."""
    libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
    libtool_re5 = re.compile(
        r'^.*libtool: warning for library: ' +
        r'.* the table of contents is empty ' +
        r'\(no object file members in the library define global symbols\)$')
    env = os.environ.copy()
    # Ref:
    # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
    # The problem with this flag is that it resets the file mtime on the file to
    # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
    env['ZERO_AR_DATE'] = '1'
    libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
    _, err = libtoolout.communicate()
    # Forward everything except the known-benign warnings to stderr.
    for line in err.splitlines():
      if not libtool_re.match(line) and not libtool_re5.match(line):
        print >>sys.stderr, line
    # Unconditionally touch the output .a file on the command line if present
    # and the command succeeded. A bit hacky.
    # (Compensates for ZERO_AR_DATE resetting the archive's mtime above.)
    if not libtoolout.returncode:
      for i in range(len(cmd_list) - 1):
        if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
          os.utime(cmd_list[i+1], None)
          break
    return libtoolout.returncode
  def ExecPackageFramework(self, framework, version):
    """Takes a path to Something.framework and the Current version of that and
    sets up all the symlinks."""
    # Find the name of the binary based on the part before the ".framework".
    binary = os.path.basename(framework).split('.')[0]

    CURRENT = 'Current'
    RESOURCES = 'Resources'
    VERSIONS = 'Versions'

    if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
      # Binary-less frameworks don't seem to contain symlinks (see e.g.
      # chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
      return

    # Move into the framework directory to set the symlinks correctly.
    # (the links created below are relative, so the cwd matters)
    pwd = os.getcwd()
    os.chdir(framework)

    # Set up the Current version.
    self._Relink(version, os.path.join(VERSIONS, CURRENT))

    # Set up the root symlinks.
    self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
    self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)

    # Back to where we were before!
    os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
  def ExecCompileXcassets(self, keys, *inputs):
    """Compiles multiple .xcassets files into a single .car file.

    This invokes 'actool' to compile all the inputs .xcassets files. The
    |keys| arguments is a json-encoded dictionary of extra arguments to
    pass to 'actool' when the asset catalogs contains an application icon
    or a launch image.

    Note that 'actool' does not create the Assets.car file if the asset
    catalogs does not contains imageset.
    """
    command_line = [
        'xcrun', 'actool', '--output-format', 'human-readable-text',
        '--compress-pngs', '--notices', '--warnings', '--errors',
    ]
    is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
    if is_iphone_target:
      # Configuration names look like "Debug-iphoneos"; the suffix selects
      # the platform, defaulting to the simulator when unrecognized.
      platform = os.environ['CONFIGURATION'].split('-')[-1]
      if platform not in ('iphoneos', 'iphonesimulator'):
        platform = 'iphonesimulator'
      command_line.extend([
          '--platform', platform, '--target-device', 'iphone',
          '--target-device', 'ipad', '--minimum-deployment-target',
          os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
          os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
      ])
    else:
      command_line.extend([
          '--platform', 'macosx', '--target-device', 'mac',
          '--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
          '--compile',
          os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
      ])
    if keys:
      # Translate the JSON dict into repeated actool flags: booleans become
      # bare flags, lists repeat the flag once per element.
      keys = json.loads(keys)
      for key, value in keys.iteritems():
        arg_name = '--' + key
        if isinstance(value, bool):
          if value:
            command_line.append(arg_name)
        elif isinstance(value, list):
          for v in value:
            command_line.append(arg_name)
            command_line.append(str(v))
        else:
          command_line.append(arg_name)
          command_line.append(str(value))
    # Note: actool crashes if inputs path are relative, so use os.path.abspath
    # to get absolute path name for inputs.
    command_line.extend(map(os.path.abspath, inputs))
    subprocess.check_call(command_line)
  def ExecMergeInfoPlist(self, output, *inputs):
    """Merge multiple .plist files into a single .plist file."""
    # Later inputs take precedence; _MergePlist/_LoadPlistMaybeBinary are
    # helpers defined elsewhere in this class.
    merged_plist = {}
    for path in inputs:
      plist = self._LoadPlistMaybeBinary(path)
      self._MergePlist(merged_plist, plist)
    plistlib.writePlist(merged_plist, output)
  def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
    """Code sign a bundle.

    This function tries to code sign an iOS bundle, following the same
    algorithm as Xcode:
      1. copy ResourceRules.plist from the user or the SDK into the bundle,
      2. pick the provisioning profile that best match the bundle identifier,
         and copy it into the bundle as embedded.mobileprovision,
      3. copy Entitlements.plist from user or SDK next to the bundle,
      4. code sign the bundle.
    """
    resource_rules_path = self._InstallResourceRules(resource_rules)
    substitutions, overrides = self._InstallProvisioningProfile(
        provisioning, self._GetCFBundleIdentifier())
    entitlements_path = self._InstallEntitlements(
        entitlements, substitutions, overrides)
    # --force re-signs even if the bundle already carries a signature.
    subprocess.check_call([
        'codesign', '--force', '--sign', key, '--resource-rules',
        resource_rules_path, '--entitlements', entitlements_path,
        os.path.join(
            os.environ['TARGET_BUILD_DIR'],
            os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
  """Installs embedded.mobileprovision into the bundle.

  Args:
    profile: string, optional, short name of the .mobileprovision file
        to use, if empty or the file is missing, the best file installed
        will be used
    bundle_identifier: string, value of CFBundleIdentifier from Info.plist

  Returns:
    A tuple containing two dictionary: variables substitutions and values
    to overrides when generating the entitlements file.
  """
  source_path, provisioning_data, team_id = self._FindProvisioningProfile(
      profile, bundle_identifier)
  target_path = os.path.join(
      os.environ['BUILT_PRODUCTS_DIR'],
      os.environ['CONTENTS_FOLDER_PATH'],
      'embedded.mobileprovision')
  shutil.copy2(source_path, target_path)
  # The team id prefixes the application identifier in the entitlements
  # (i.e. "TEAMID." + bundle id), hence the trailing dot.
  substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
  return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
  """Finds the .mobileprovision file to use for signing the bundle.

  Checks all the installed provisioning profiles (or if the user specified
  the PROVISIONING_PROFILE variable, only consult it) and select the most
  specific that correspond to the bundle identifier.

  Args:
    profile: string, optional, short name of the .mobileprovision file
        to use, if empty or the file is missing, the best file installed
        will be used
    bundle_identifier: string, value of CFBundleIdentifier from Info.plist

  Returns:
    A tuple of the path to the selected provisioning profile, the data of
    the embedded plist in the provisioning profile and the team identifier
    to use for code signing.

  Raises:
    SystemExit: if no .mobileprovision can be used to sign the bundle.
  """
  profiles_dir = os.path.join(
      os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
  if not os.path.isdir(profiles_dir):
    print >>sys.stderr, (
        'cannot find mobile provisioning for %s' % bundle_identifier)
    sys.exit(1)
  provisioning_profiles = None
  if profile:
    # The user named a specific profile; only consider it if it exists.
    profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
    if os.path.exists(profile_path):
      provisioning_profiles = [profile_path]
  if not provisioning_profiles:
    # Otherwise scan every installed profile.
    provisioning_profiles = glob.glob(
        os.path.join(profiles_dir, '*.mobileprovision'))
  valid_provisioning_profiles = {}
  for profile_path in provisioning_profiles:
    profile_data = self._LoadProvisioningProfile(profile_path)
    app_id_pattern = profile_data.get(
        'Entitlements', {}).get('application-identifier', '')
    for team_identifier in profile_data.get('TeamIdentifier', []):
      # A profile matches if its application-identifier glob pattern
      # (e.g. "TEAM.com.example.*") matches "TEAM.bundle_identifier".
      app_id = '%s.%s' % (team_identifier, bundle_identifier)
      if fnmatch.fnmatch(app_id, app_id_pattern):
        valid_provisioning_profiles[app_id_pattern] = (
            profile_path, profile_data, team_identifier)
  if not valid_provisioning_profiles:
    print >>sys.stderr, (
        'cannot find mobile provisioning for %s' % bundle_identifier)
    sys.exit(1)
  # If the user has multiple provisioning profiles installed that can be
  # used for ${bundle_identifier}, pick the most specific one (ie. the
  # provisioning profile whose pattern is the longest).
  selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
  return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
  """Extracts the plist embedded in a provisioning profile.

  Args:
    profile_path: string, path to the .mobileprovision file

  Returns:
    Content of the plist embedded in the provisioning profile as a dictionary.
  """
  with tempfile.NamedTemporaryFile() as temp:
    # 'security cms -D' strips the CMS signature wrapper from the profile,
    # leaving the raw plist payload in the temporary file (macOS only).
    subprocess.check_call([
        'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
    return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
  """Merge |plist| into |merged_plist|, in place.

  Nested dictionaries are merged recursively; any other value type
  (including lists) from |plist| overwrites the existing entry in
  |merged_plist| wholesale.
  """
  for key, value in plist.iteritems():
    if isinstance(value, dict):
      merged_value = merged_plist.get(key, {})
      if isinstance(merged_value, dict):
        self._MergePlist(merged_value, value)
        merged_plist[key] = merged_value
      else:
        # Type conflict (existing value is not a dict): the new dict wins.
        merged_plist[key] = value
    else:
      merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
  """Loads into a memory a plist possibly encoded in binary format.

  This is a wrapper around plistlib.readPlist that tries to convert the
  plist to the XML format if it can't be parsed (assuming that it is in
  the binary format).

  Args:
    plist_path: string, path to a plist file, in XML or binary format

  Returns:
    Content of the plist as a dictionary.
  """
  try:
    # First, try to read the file using plistlib that only supports XML,
    # and if an exception is raised, convert a temporary copy to XML and
    # load that copy.
    return plistlib.readPlist(plist_path)
  except Exception:
    # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit are
    # not swallowed; any parse error falls through to the binary path.
    pass
  with tempfile.NamedTemporaryFile() as temp:
    shutil.copy2(plist_path, temp.name)
    # plutil converts the copy to XML in place (macOS only).
    subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
    return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
  """Extracts CFBundleIdentifier value from Info.plist in the bundle.

  Returns:
    Value of CFBundleIdentifier in the Info.plist located in the bundle.
  """
  info_plist_path = os.path.join(
      os.environ['TARGET_BUILD_DIR'],
      os.environ['INFOPLIST_PATH'])
  # Info.plist may have been converted to binary format by Xcode processing.
  info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
  return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
  """Generates and install the ${BundleName}.xcent entitlements file.

  Expands variables "$(variable)" pattern in the source entitlements file,
  add extra entitlements defined in the .mobileprovision file and the copy
  the generated plist to "${BundlePath}.xcent".

  Args:
    entitlements: string, optional, path to the Entitlements.plist template
        to use, defaults to "${SDKROOT}/Entitlements.plist"
    substitutions: dictionary, variable substitutions
    overrides: dictionary, values to add to the entitlements

  Returns:
    Path to the generated entitlements file.
  """
  source_path = entitlements
  target_path = os.path.join(
      os.environ['BUILT_PRODUCTS_DIR'],
      os.environ['PRODUCT_NAME'] + '.xcent')
  if not source_path:
    # No user-supplied template: fall back to the SDK's default.
    source_path = os.path.join(
        os.environ['SDKROOT'],
        'Entitlements.plist')
  shutil.copy2(source_path, target_path)
  data = self._LoadPlistMaybeBinary(target_path)
  data = self._ExpandVariables(data, substitutions)
  if overrides:
    # Profile-provided entitlements only fill gaps: explicit template
    # values take precedence over the provisioning profile's defaults.
    for key in overrides:
      if key not in data:
        data[key] = overrides[key]
  plistlib.writePlist(data, target_path)
  return target_path
def _ExpandVariables(self, data, substitutions):
  """Expands variables "$(variable)" in data.

  Args:
    data: object, can be either string, list or dictionary
    substitutions: dictionary, variable substitutions to perform

  Returns:
    Copy of data where each references to "$(variable)" has been replaced
    by the corresponding value found in substitutions, or left intact if
    the key was not found.
  """
  if isinstance(data, str):
    # NOTE(review): plistlib can return unicode strings on Python 2, which
    # would not match isinstance(data, str) and so skip substitution —
    # confirm whether unicode values need handling here.
    for key, value in substitutions.iteritems():
      data = data.replace('$(%s)' % key, value)
    return data
  if isinstance(data, list):
    # Recurse into each element.
    return [self._ExpandVariables(v, substitutions) for v in data]
  if isinstance(data, dict):
    # Recurse into each value (keys are left untouched).
    return {k: self._ExpandVariables(data[k], substitutions) for k in data}
  # Non-container, non-string values (ints, bools, dates) pass through.
  return data
if __name__ == '__main__':
  # Script entry point: delegate to main() (defined above) with the CLI
  # arguments and propagate its return value as the process exit status.
  sys.exit(main(sys.argv[1:]))
| epl-1.0 |
pastephens/pysal | pysal/spreg/ml_error.py | 6 | 19663 | """
ML Estimation of Spatial Error Model
"""
__author__ = "Luc Anselin luc.anselin@asu.edu,\
Serge Rey srey@asu.edu, \
Levi Wolf levi.john.wolf@asu.edu"
import numpy as np
import numpy.linalg as la
from scipy import sparse as sp
from scipy.sparse.linalg import splu as SuperLU
import pysal as ps
from utils import RegressionPropsY, RegressionPropsVM
import diagnostics as DIAG
import user_output as USER
import summary_output as SUMMARY
import regimes as REGI
from w_utils import symmetrize
try:
from scipy.optimize import minimize_scalar
minimize_scalar_available = True
except ImportError:
minimize_scalar_available = False
from .sputils import spdot, spfill_diagonal, spinv
__all__ = ["ML_Error"]
class BaseML_Error(RegressionPropsY, RegressionPropsVM, REGI.Regimes_Frame):

    """
    ML estimation of the spatial error model (note no consistency
    checks, diagnostics or constants added); Anselin (1988) [Anselin1988]_

    Parameters
    ----------
    y           : array
                  nx1 array for dependent variable
    x           : array
                  Two dimensional array with n rows and one column for each
                  independent (exogenous) variable, excluding the constant
    w           : Sparse matrix
                  Spatial weights sparse matrix
    method      : string
                  if 'full', brute force calculation (full matrix expressions)
                  if 'ord', Ord eigenvalue calculation
                  if 'LU', LU decomposition for sparse matrices
    epsilon     : float
                  tolerance criterion in mimimize_scalar function and
                  inverse_product
    regimes_att : dictionary
                  Dictionary containing elements to be used in case of a
                  regimes model, i.e. 'x' before regimes, 'regimes' list and
                  'cols2regi'

    Attributes
    ----------
    betas       : array
                  kx1 array of estimated coefficients
    lam         : float
                  estimate of spatial autoregressive coefficient
    u           : array
                  nx1 array of residuals
    e_filtered  : array
                  spatially filtered residuals
    predy       : array
                  nx1 array of predicted y values
    n           : integer
                  Number of observations
    k           : integer
                  Number of variables for which coefficients are estimated
                  (including the constant, excluding the rho)
    y           : array
                  nx1 array for dependent variable
    x           : array
                  Two dimensional array with n rows and one column for each
                  independent (exogenous) variable, including the constant
    method      : string
                  log Jacobian method
                  if 'full': brute force (full matrix computations)
                  if 'ord' : Ord eigenvalue method
    epsilon     : float
                  tolerance criterion used in minimize_scalar function and
                  inverse_product
    mean_y      : float
                  Mean of dependent variable
    std_y       : float
                  Standard deviation of dependent variable
    vm          : array
                  Variance covariance matrix (k+1 x k+1) - includes lambda
    vm1         : array
                  2x2 array of variance covariance for lambda, sigma
    sig2        : float
                  Sigma squared used in computations
    logll       : float
                  maximized log-likelihood (including constant terms)

    Examples
    --------
    >>> import numpy as np
    >>> import pysal as ps
    >>> np.set_printoptions(suppress=True) #prevent scientific format
    >>> db = ps.open(ps.examples.get_path("south.dbf"),'r')
    >>> y_name = "HR90"
    >>> y = np.array(db.by_col(y_name))
    >>> y.shape = (len(y),1)
    >>> x_names = ["RD90","PS90","UE90","DV90"]
    >>> x = np.array([db.by_col(var) for var in x_names]).T
    >>> x = np.hstack((np.ones((len(y),1)),x))
    >>> ww = ps.open(ps.examples.get_path("south_q.gal"))
    >>> w = ww.read()
    >>> ww.close()
    >>> w.transform = 'r'
    >>> mlerr = BaseML_Error(y,x,w) #doctest: +SKIP
    >>> "{0:.6f}".format(mlerr.lam) #doctest: +SKIP
    '0.299078'
    >>> np.around(mlerr.betas, decimals=4) #doctest: +SKIP
    array([[ 6.1492],
           [ 4.4024],
           [ 1.7784],
           [-0.3781],
           [ 0.4858],
           [ 0.2991]])
    >>> "{0:.6f}".format(mlerr.mean_y) #doctest: +SKIP
    '9.549293'
    >>> "{0:.6f}".format(mlerr.std_y) #doctest: +SKIP
    '7.038851'
    >>> np.diag(mlerr.vm) #doctest: +SKIP
    array([ 1.06476526,  0.05548248,  0.04544514,  0.00614425,  0.01481356,
            0.00143001])
    >>> "{0:.6f}".format(mlerr.sig2[0][0]) #doctest: +SKIP
    '32.406854'
    >>> "{0:.6f}".format(mlerr.logll) #doctest: +SKIP
    '-4471.407067'
    >>> mlerr1 = BaseML_Error(y,x,w,method='ord') #doctest: +SKIP
    >>> "{0:.6f}".format(mlerr1.lam) #doctest: +SKIP
    '0.299078'
    >>> np.around(mlerr1.betas, decimals=4) #doctest: +SKIP
    array([[ 6.1492],
           [ 4.4024],
           [ 1.7784],
           [-0.3781],
           [ 0.4858],
           [ 0.2991]])
    >>> "{0:.6f}".format(mlerr1.mean_y) #doctest: +SKIP
    '9.549293'
    >>> "{0:.6f}".format(mlerr1.std_y) #doctest: +SKIP
    '7.038851'
    >>> np.around(np.diag(mlerr1.vm), decimals=4) #doctest: +SKIP
    array([ 1.0648,  0.0555,  0.0454,  0.0061,  0.0148,  0.0014])
    >>> "{0:.4f}".format(mlerr1.sig2[0][0]) #doctest: +SKIP
    '32.4069'
    >>> "{0:.4f}".format(mlerr1.logll) #doctest: +SKIP
    '-4471.4071'
    """

    def __init__(self, y, x, w, method='full', epsilon=0.0000001, regimes_att=None):
        # set up main regression variables and spatial filters
        self.y = y
        if regimes_att:
            # regimes design matrix arrives sparse; densify for the algebra
            self.x = x.toarray()
        else:
            self.x = x
        self.n, self.k = self.x.shape
        self.method = method
        self.epsilon = epsilon
        #W = w.full()[0] #wait to build pending what is needed
        #Wsp = w.sparse
        ylag = ps.lag_spatial(w, self.y)
        xlag = self.get_x_lag(w, regimes_att)
        # call minimizer using concentrated log-likelihood to get lambda
        methodML = method.upper()
        if methodML in ['FULL', 'LU', 'ORD']:
            if methodML == 'FULL':
                W = w.full()[0]      # need dense here
                res = minimize_scalar(err_c_loglik, 0.0, bounds=(-1.0, 1.0),
                                      args=(self.n, self.y, ylag, self.x,
                                            xlag, W), method='bounded',
                                      tol=epsilon)
            elif methodML == 'LU':
                I = sp.identity(w.n)
                Wsp = w.sparse   # need sparse here
                res = minimize_scalar(err_c_loglik_sp, 0.0, bounds=(-1.0, 1.0),
                                      args=(self.n, self.y, ylag,
                                            self.x, xlag, I, Wsp),
                                      method='bounded', tol=epsilon)
                W = Wsp
            elif methodML == 'ORD':
                # check on symmetry structure
                if w.asymmetry(intrinsic=False) == []:
                    # similar-symmetric weights: use the (faster, real-valued)
                    # symmetric eigenvalue solver
                    ww = symmetrize(w)
                    WW = np.array(ww.todense())
                    evals = la.eigvalsh(WW)
                    W = WW
                else:
                    W = w.full()[0]      # need dense here
                    evals = la.eigvals(W)
                res = minimize_scalar(
                    err_c_loglik_ord, 0.0, bounds=(-1.0, 1.0),
                    args=(self.n, self.y, ylag, self.x,
                          xlag, evals), method='bounded',
                    tol=epsilon)
        else:
            raise Exception("{0} is an unsupported method".format(method))
        self.lam = res.x
        # compute full log-likelihood, including constants
        ln2pi = np.log(2.0 * np.pi)
        llik = -res.fun - self.n / 2.0 * ln2pi - self.n / 2.0
        self.logll = llik
        # b, residuals and predicted values
        # GLS on the spatially filtered variables at the optimal lambda
        ys = self.y - self.lam * ylag
        xs = self.x - self.lam * xlag
        xsxs = np.dot(xs.T, xs)
        xsxsi = np.linalg.inv(xsxs)
        xsys = np.dot(xs.T, ys)
        b = np.dot(xsxsi, xsys)
        self.betas = np.vstack((b, self.lam))
        self.u = y - np.dot(self.x, b)
        self.predy = self.y - self.u
        # residual variance
        self.e_filtered = self.u - self.lam * ps.lag_spatial(w, self.u)
        self.sig2 = np.dot(self.e_filtered.T, self.e_filtered) / self.n
        # variance-covariance matrix betas
        varb = self.sig2 * xsxsi
        # variance-covariance matrix lambda, sigma
        # built from traces of W(I - lam W)^-1 (information matrix terms)
        a = -self.lam * W
        spfill_diagonal(a, 1.0)
        ai = spinv(a)
        wai = spdot(W, ai)
        tr1 = wai.diagonal().sum()
        wai2 = spdot(wai, wai)
        tr2 = wai2.diagonal().sum()
        waiTwai = spdot(wai.T, wai)
        tr3 = waiTwai.diagonal().sum()
        v1 = np.vstack((tr2 + tr3,
                        tr1 / self.sig2))
        v2 = np.vstack((tr1 / self.sig2,
                        self.n / (2.0 * self.sig2 ** 2)))
        v = np.hstack((v1, v2))
        self.vm1 = np.linalg.inv(v)
        # create variance matrix for beta, lambda
        # (beta/lambda covariances are zero by construction)
        vv = np.hstack((varb, np.zeros((self.k, 1))))
        vv1 = np.hstack(
            (np.zeros((1, self.k)), self.vm1[0, 0] * np.ones((1, 1))))
        self.vm = np.vstack((vv, vv1))

    def get_x_lag(self, w, regimes_att):
        # Spatial lag of the design matrix; for regimes models the lag is
        # computed on the pre-regimes x and then re-expanded by regime.
        if regimes_att:
            xlag = ps.lag_spatial(w, regimes_att['x'])
            xlag = REGI.Regimes_Frame.__init__(self, xlag,
                                               regimes_att['regimes'], constant_regi=None, cols2regi=regimes_att['cols2regi'])[0]
            xlag = xlag.toarray()
        else:
            xlag = ps.lag_spatial(w, self.x)
        return xlag
class ML_Error(BaseML_Error):

    """
    ML estimation of the spatial error model with all results and
    diagnostics; Anselin (1988) [Anselin1988]_
    (Note: the original docstring said "spatial lag model", which was
    inconsistent with the class name and the estimator implemented.)

    Parameters
    ----------
    y         : array
                nx1 array for dependent variable
    x         : array
                Two dimensional array with n rows and one column for each
                independent (exogenous) variable, excluding the constant
    w         : Sparse matrix
                Spatial weights sparse matrix
    method    : string
                if 'full', brute force calculation (full matrix expressions)
                if 'ord', Ord eigenvalue method
                if 'LU', LU sparse matrix decomposition
    epsilon   : float
                tolerance criterion in mimimize_scalar function and
                inverse_product
    spat_diag : boolean
                if True, include spatial diagnostics
    vm        : boolean
                if True, include variance-covariance matrix in summary
                results
    name_y    : string
                Name of dependent variable for use in output
    name_x    : list of strings
                Names of independent variables for use in output
    name_w    : string
                Name of weights matrix for use in output
    name_ds   : string
                Name of dataset for use in output

    Attributes
    ----------
    betas       : array
                  (k+1)x1 array of estimated coefficients (rho first)
    lam         : float
                  estimate of spatial autoregressive coefficient
    u           : array
                  nx1 array of residuals
    e_filtered  : array
                  nx1 array of spatially filtered residuals
    predy       : array
                  nx1 array of predicted y values
    n           : integer
                  Number of observations
    k           : integer
                  Number of variables for which coefficients are estimated
                  (including the constant, excluding lambda)
    y           : array
                  nx1 array for dependent variable
    x           : array
                  Two dimensional array with n rows and one column for each
                  independent (exogenous) variable, including the constant
    method      : string
                  log Jacobian method
                  if 'full': brute force (full matrix computations)
    epsilon     : float
                  tolerance criterion used in minimize_scalar function and
                  inverse_product
    mean_y      : float
                  Mean of dependent variable
    std_y       : float
                  Standard deviation of dependent variable
    varb        : array
                  Variance covariance matrix (k+1 x k+1) - includes
                  var(lambda)
    vm1         : array
                  variance covariance matrix for lambda, sigma (2 x 2)
    sig2        : float
                  Sigma squared used in computations
    logll       : float
                  maximized log-likelihood (including constant terms)
    pr2         : float
                  Pseudo R squared (squared correlation between y and ypred)
    utu         : float
                  Sum of squared residuals
    std_err     : array
                  1xk array of standard errors of the betas
    z_stat      : list of tuples
                  z statistic; each tuple contains the pair (statistic,
                  p-value), where each is a float
    name_y      : string
                  Name of dependent variable for use in output
    name_x      : list of strings
                  Names of independent variables for use in output
    name_w      : string
                  Name of weights matrix for use in output
    name_ds     : string
                  Name of dataset for use in output
    title       : string
                  Name of the regression method used

    Examples
    --------
    >>> import numpy as np
    >>> import pysal as ps
    >>> np.set_printoptions(suppress=True) #prevent scientific format
    >>> db = ps.open(ps.examples.get_path("south.dbf"),'r')
    >>> ds_name = "south.dbf"
    >>> y_name = "HR90"
    >>> y = np.array(db.by_col(y_name))
    >>> y.shape = (len(y),1)
    >>> x_names = ["RD90","PS90","UE90","DV90"]
    >>> x = np.array([db.by_col(var) for var in x_names]).T
    >>> ww = ps.open(ps.examples.get_path("south_q.gal"))
    >>> w = ww.read()
    >>> ww.close()
    >>> w_name = "south_q.gal"
    >>> w.transform = 'r'
    >>> mlerr = ML_Error(y,x,w,name_y=y_name,name_x=x_names,\
               name_w=w_name,name_ds=ds_name) #doctest: +SKIP
    >>> np.around(mlerr.betas, decimals=4) #doctest: +SKIP
    array([[ 6.1492],
           [ 4.4024],
           [ 1.7784],
           [-0.3781],
           [ 0.4858],
           [ 0.2991]])
    >>> "{0:.4f}".format(mlerr.lam) #doctest: +SKIP
    '0.2991'
    >>> "{0:.4f}".format(mlerr.mean_y) #doctest: +SKIP
    '9.5493'
    >>> "{0:.4f}".format(mlerr.std_y) #doctest: +SKIP
    '7.0389'
    >>> np.around(np.diag(mlerr.vm), decimals=4) #doctest: +SKIP
    array([ 1.0648,  0.0555,  0.0454,  0.0061,  0.0148,  0.0014])
    >>> np.around(mlerr.sig2, decimals=4) #doctest: +SKIP
    array([[ 32.4069]])
    >>> "{0:.4f}".format(mlerr.logll) #doctest: +SKIP
    '-4471.4071'
    >>> "{0:.4f}".format(mlerr.aic) #doctest: +SKIP
    '8952.8141'
    >>> "{0:.4f}".format(mlerr.schwarz) #doctest: +SKIP
    '8979.0779'
    >>> "{0:.4f}".format(mlerr.pr2) #doctest: +SKIP
    '0.3058'
    >>> "{0:.4f}".format(mlerr.utu) #doctest: +SKIP
    '48534.9148'
    >>> np.around(mlerr.std_err, decimals=4) #doctest: +SKIP
    array([ 1.0319,  0.2355,  0.2132,  0.0784,  0.1217,  0.0378])
    >>> np.around(mlerr.z_stat, decimals=4) #doctest: +SKIP
    array([[  5.9593,   0.    ],
           [ 18.6902,   0.    ],
           [  8.3422,   0.    ],
           [ -4.8233,   0.    ],
           [  3.9913,   0.0001],
           [  7.9089,   0.    ]])
    >>> mlerr.name_y #doctest: +SKIP
    'HR90'
    >>> mlerr.name_x #doctest: +SKIP
    ['CONSTANT', 'RD90', 'PS90', 'UE90', 'DV90', 'lambda']
    >>> mlerr.name_w #doctest: +SKIP
    'south_q.gal'
    >>> mlerr.name_ds #doctest: +SKIP
    'south.dbf'
    >>> mlerr.title #doctest: +SKIP
    'MAXIMUM LIKELIHOOD SPATIAL ERROR (METHOD = FULL)'
    """

    def __init__(self, y, x, w, method='full', epsilon=0.0000001,
                 spat_diag=False, vm=False, name_y=None, name_x=None,
                 name_w=None, name_ds=None):
        # validate user inputs before delegating estimation to the base class
        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        # a constant column is added here; users pass x without one
        x_constant = USER.check_constant(x)
        method = method.upper()
        BaseML_Error.__init__(self, y=y, x=x_constant,
                              w=w, method=method, epsilon=epsilon)
        self.title = "MAXIMUM LIKELIHOOD SPATIAL ERROR" + \
            " (METHOD = " + method + ")"
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_x.append('lambda')
        self.name_w = USER.set_name_w(name_w, w)
        self.aic = DIAG.akaike(reg=self)
        self.schwarz = DIAG.schwarz(reg=self)
        SUMMARY.ML_Error(reg=self, w=w, vm=vm, spat_diag=spat_diag)
def err_c_loglik(lam, n, y, ylag, x, xlag, W):
    """Negative concentrated log-likelihood of the error model (dense).

    No constant terms included; intended for scalar minimization over lam,
    the spatial autoregressive coefficient. Uses brute-force dense algebra
    for the log-Jacobian term log|I - lam*W|.
    """
    # Spatially filter the dependent and independent variables.
    ys = y - lam * ylag
    xs = x - lam * xlag
    # OLS on the filtered data yields the concentrated residual sum of
    # squares: e'e = ys'ys - (xs'ys)' (xs'xs)^-1 (xs'ys).
    xsys = np.dot(xs.T, ys)
    coeffs = np.dot(np.linalg.inv(np.dot(xs.T, xs)), xsys)
    ee = np.dot(ys.T, ys) - np.dot(xsys.T, coeffs)
    sig2 = ee[0][0] / n
    nlsig2 = (n / 2.0) * np.log(sig2)
    # log-Jacobian via the determinant of A = I - lam*W.
    a = -lam * W
    np.fill_diagonal(a, 1.0)
    jacob = np.log(np.linalg.det(a))
    # Negative of the concentrated log-likelihood, for minimization.
    return nlsig2 - jacob
def err_c_loglik_sp(lam, n, y, ylag, x, xlag, I, Wsp):
    """Negative concentrated log-likelihood of the error model (sparse LU).

    Same concentrated likelihood as err_c_loglik, but the log-Jacobian
    log|I - lam*W| is computed from a sparse LU factorization.
    """
    # minimize_scalar can hand back lam wrapped in a (1, 1) array; unwrap.
    if isinstance(lam, np.ndarray):
        if lam.shape == (1, 1):
            lam = lam[0][0]
    # Spatially filter the variables and concentrate out beta and sigma2.
    ys = y - lam * ylag
    xs = x - lam * xlag
    xsys = np.dot(xs.T, ys)
    coeffs = np.dot(np.linalg.inv(np.dot(xs.T, xs)), xsys)
    ee = np.dot(ys.T, ys) - np.dot(xsys.T, coeffs)
    sig2 = ee[0][0] / n
    nlsig2 = (n / 2.0) * np.log(sig2)
    # log|I - lam*W| from the LU factors: sum of log|U_ii|.
    a = I - lam * Wsp
    LU = SuperLU(a.tocsc())
    jacob = np.sum(np.log(np.abs(LU.U.diagonal())))
    # Negative of the concentrated log-likelihood, for minimization.
    return nlsig2 - jacob
def err_c_loglik_ord(lam, n, y, ylag, x, xlag, evals):
    """Negative concentrated log-likelihood of the error model (Ord method).

    Same concentrated likelihood as err_c_loglik, but the log-Jacobian is
    computed from the precomputed eigenvalues of W:
    log|I - lam*W| = sum_i log(1 - lam * eval_i).
    """
    # Spatially filter the variables and concentrate out beta and sigma2.
    ys = y - lam * ylag
    xs = x - lam * xlag
    xsys = np.dot(xs.T, ys)
    coeffs = np.dot(np.linalg.inv(np.dot(xs.T, xs)), xsys)
    ee = np.dot(ys.T, ys) - np.dot(xsys.T, coeffs)
    sig2 = ee[0][0] / n
    nlsig2 = (n / 2.0) * np.log(sig2)
    # Ord eigenvalue expression for the log-Jacobian.
    jacob = np.log(1 - lam * evals).sum()
    if isinstance(jacob, complex):
        # Complex eigenvalues (asymmetric W): keep the real part only.
        jacob = jacob.real
    # Negative of the concentrated log-likelihood, for minimization.
    return nlsig2 - jacob
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
| bsd-3-clause |
philippebeaudoin/mmomie | third_party/Paste/docs/conf.py | 48 | 4078 | # -*- coding: utf-8 -*-
#
# Paste documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 22 22:08:49 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.

# sys is only needed if the sys.path tweak below is uncommented; kept for
# convenience.
import sys

# If your extensions are in another directory, add it here.
#sys.path.append('some/directory')

# General configuration
# ---------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.txt'

# The master toctree document.
master_doc = 'index'

# General substitutions.
project = 'Paste'
copyright = '2008, Ian Bicking'

# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '1.7'
# The full version, including alpha/beta/rc tags.
release = '1.7.5.1'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
unused_docs = ['include/contact.txt', 'include/reference_header.txt']

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'


# Options for HTML output
# -----------------------

# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Content template for the index page.
#html_index = ''

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True

# Output file base name for HTML help builder.
htmlhelp_basename = 'Pastedoc'


# Options for LaTeX output
# ------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
#latex_documents = []

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
yfried/ansible | lib/ansible/modules/remote_management/redfish/redfish_facts.py | 16 | 9140 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2018 Dell EMC Inc.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: redfish_facts
version_added: "2.7"
short_description: Manages Out-Of-Band controllers using Redfish APIs
description:
- Builds Redfish URIs locally and sends them to remote OOB controllers to
get information back.
- Information retrieved is placed in a location specified by the user.
options:
category:
required: false
description:
- List of categories to execute on OOB controller
default: ['Systems']
command:
required: false
description:
- List of commands to execute on OOB controller
baseuri:
required: true
description:
- Base URI of OOB controller
user:
required: true
description:
- User for authentication with OOB controller
password:
required: true
description:
- Password for authentication with OOB controller
author: "Jose Delarosa (github: jose-delarosa)"
'''
EXAMPLES = '''
- name: Get CPU inventory
redfish_facts:
category: Systems
command: GetCpuInventory
baseuri: "{{ baseuri }}"
user: "{{ user }}"
password: "{{ password }}"
- name: Get fan inventory
redfish_facts:
category: Chassis
command: GetFanInventory
baseuri: "{{ baseuri }}"
user: "{{ user }}"
password: "{{ password }}"
- name: Get default inventory information
redfish_facts:
baseuri: "{{ baseuri }}"
user: "{{ user }}"
password: "{{ password }}"
- name: Get several inventories
redfish_facts:
category: Systems
command: GetNicInventory,GetPsuInventory,GetBiosAttributes
baseuri: "{{ baseuri }}"
user: "{{ user }}"
password: "{{ password }}"
- name: Get default system inventory and user information
redfish_facts:
category: Systems,Accounts
baseuri: "{{ baseuri }}"
user: "{{ user }}"
password: "{{ password }}"
- name: Get default system, user and firmware information
redfish_facts:
category: ["Systems", "Accounts", "Update"]
baseuri: "{{ baseuri }}"
user: "{{ user }}"
password: "{{ password }}"
- name: Get all information available in the Manager category
redfish_facts:
category: Manager
command: all
baseuri: "{{ baseuri }}"
user: "{{ user }}"
password: "{{ password }}"
- name: Get all information available in all categories
redfish_facts:
category: all
command: all
baseuri: "{{ baseuri }}"
user: "{{ user }}"
password: "{{ password }}"
'''
RETURN = '''
result:
description: different results depending on task
returned: always
type: dict
sample: List of CPUs on system
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.redfish_utils import RedfishUtils
# Full set of informational commands supported for each Redfish category;
# used both to expand command: all and to validate user-supplied commands.
CATEGORY_COMMANDS_ALL = {
    "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
                "GetNicInventory", "GetStorageControllerInventory",
                "GetDiskInventory", "GetBiosAttributes", "GetBiosBootOrder"],
    "Chassis": ["GetFanInventory"],
    "Accounts": ["ListUsers"],
    "Update": ["GetFirmwareInventory"],
    "Manager": ["GetManagerAttributes", "GetLogs"],
}

# Command executed for a category when the user does not specify one.
# NOTE(review): every key here must also appear in CATEGORY_COMMANDS_ALL,
# and each value must be in that category's command list.
CATEGORY_COMMANDS_DEFAULT = {
    "Systems": "GetSystemInventory",
    "Chassis": "GetFanInventory",
    "Accounts": "ListUsers",
    "Update": "GetFirmwareInventory",
    "Manager": "GetManagerAttributes"
}
def main():
    """Ansible module entry point: gather Redfish facts.

    Validates the requested categories and commands, connects to the
    Redfish service at ``baseuri`` with the supplied credentials, runs
    every requested inventory command, and returns the collected data as
    the ``redfish_facts`` Ansible fact.  Invalid categories/commands and
    failed resource lookups terminate the module via ``fail_json``.
    """
    result = {}
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(type='list', default=['Systems']),
            command=dict(type='list'),
            baseuri=dict(required=True),
            user=dict(required=True),
            password=dict(required=True, no_log=True),
        ),
        supports_check_mode=False
    )

    # admin credentials used for authentication
    creds = {'user': module.params['user'],
             'pswd': module.params['password']}

    # Build root URI of the Redfish service
    root_uri = "https://" + module.params['baseuri']
    rf_uri = "/redfish/v1/"
    rf_utils = RedfishUtils(creds, root_uri)

    # Build category list; "all" expands to every supported category.
    if "all" in module.params['category']:
        category_list = list(CATEGORY_COMMANDS_ALL)
    else:
        # one or more categories specified
        category_list = module.params['category']

    for category in category_list:
        command_list = []
        # Build command list for each category
        if category in CATEGORY_COMMANDS_ALL:
            if not module.params['command']:
                # no command specified --> use the category default
                command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
            elif "all" in module.params['command']:
                # "all" expands to every command the category supports
                command_list = list(CATEGORY_COMMANDS_ALL[category])
            else:
                # one or more commands specified
                command_list = module.params['command']

            # Verify that all commands are valid
            for cmd in command_list:
                # Fail if even one command given is invalid
                if cmd not in CATEGORY_COMMANDS_ALL[category]:
                    module.fail_json(msg="Invalid Command: %s" % cmd)
        else:
            # Fail if even one category given is invalid
            module.fail_json(msg="Invalid Category: %s" % category)

        # Organize by Categories / Commands
        if category == "Systems":
            # execute only if we find a Systems resource
            resource = rf_utils._find_systems_resource(rf_uri)
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetSystemInventory":
                    result["system"] = rf_utils.get_system_inventory()
                elif command == "GetPsuInventory":
                    result["psu"] = rf_utils.get_psu_inventory()
                elif command == "GetCpuInventory":
                    result["cpu"] = rf_utils.get_cpu_inventory()
                elif command == "GetNicInventory":
                    result["nic"] = rf_utils.get_nic_inventory()
                elif command == "GetStorageControllerInventory":
                    result["storage_controller"] = rf_utils.get_storage_controller_inventory()
                elif command == "GetDiskInventory":
                    result["disk"] = rf_utils.get_disk_inventory()
                elif command == "GetBiosAttributes":
                    result["bios_attribute"] = rf_utils.get_bios_attributes()
                elif command == "GetBiosBootOrder":
                    result["bios_boot_order"] = rf_utils.get_bios_boot_order()

        elif category == "Chassis":
            # execute only if we find Chassis resource
            resource = rf_utils._find_chassis_resource(rf_uri)
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetFanInventory":
                    result["fan"] = rf_utils.get_fan_inventory()

        elif category == "Accounts":
            # execute only if we find an Account service resource
            resource = rf_utils._find_accountservice_resource(rf_uri)
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "ListUsers":
                    result["user"] = rf_utils.list_users()

        elif category == "Update":
            # execute only if we find UpdateService resources
            resource = rf_utils._find_updateservice_resource(rf_uri)
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetFirmwareInventory":
                    result["firmware"] = rf_utils.get_firmware_inventory()

        elif category == "Manager":
            # execute only if we find a Manager service resource
            resource = rf_utils._find_managers_resource(rf_uri)
            if resource['ret'] is False:
                module.fail_json(msg=resource['msg'])

            for command in command_list:
                if command == "GetManagerAttributes":
                    result["manager_attributes"] = rf_utils.get_manager_attributes()
                elif command == "GetLogs":
                    result["log"] = rf_utils.get_logs()

    # Return data back
    module.exit_json(ansible_facts=dict(redfish_facts=result))
if __name__ == '__main__':
main()
| gpl-3.0 |
snnn/tensorflow | tensorflow/python/debug/lib/debug_graphs_test.py | 82 | 4301 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfdbg module debug_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ParseNodeOrTensorNameTest(test_util.TensorFlowTestCase):
  """Tests for debug_graphs.parse_node_or_tensor_name."""

  def testParseNodeName(self):
    # A plain node name parses to (name, None): no output slot is present.
    name, output_slot = debug_graphs.parse_node_or_tensor_name(
        "namespace1/node_1")
    self.assertIsNone(output_slot)
    self.assertEqual("namespace1/node_1", name)

  def testParseTensorName(self):
    # A tensor name "node:slot" splits into its node name and integer slot.
    name, output_slot = debug_graphs.parse_node_or_tensor_name(
        "namespace1/node_2:3")
    self.assertEqual("namespace1/node_2", name)
    self.assertEqual(3, output_slot)
class GetNodeNameAndOutputSlotTest(test_util.TensorFlowTestCase):
  """Tests for debug_graphs.get_node_name and get_output_slot."""

  def testParseTensorNameInputWorks(self):
    # "node:slot" inputs yield the node part and the integer slot.
    for tensor_name, expected_node, expected_slot in (("a:0", "a", 0),
                                                      ("_b:1", "_b", 1)):
      self.assertEqual(expected_node, debug_graphs.get_node_name(tensor_name))
      self.assertEqual(expected_slot, debug_graphs.get_output_slot(tensor_name))

  def testParseNodeNameInputWorks(self):
    # A bare node name implies output slot 0.
    self.assertEqual("a", debug_graphs.get_node_name("a"))
    self.assertEqual(0, debug_graphs.get_output_slot("a"))
class NodeNameChecksTest(test_util.TensorFlowTestCase):
  """Tests for the is_copy_node / is_debug_node predicates."""

  def testIsCopyNode(self):
    self.assertTrue(debug_graphs.is_copy_node("__copy_ns1/ns2/node3_0"))
    # Anything lacking the exact "__copy_" prefix is not a Copy node.
    for bad_name in ("copy_ns1/ns2/node3_0",
                     "_copy_ns1/ns2/node3_0",
                     "_copyns1/ns2/node3_0",
                     "__dbg_ns1/ns2/node3_0"):
      self.assertFalse(debug_graphs.is_copy_node(bad_name))

  def testIsDebugNode(self):
    self.assertTrue(
        debug_graphs.is_debug_node("__dbg_ns1/ns2/node3:0_0_DebugIdentity"))
    # Anything lacking the exact "__dbg_" prefix is not a debug node.
    for bad_name in ("dbg_ns1/ns2/node3:0_0_DebugIdentity",
                     "_dbg_ns1/ns2/node3:0_0_DebugIdentity",
                     "_dbgns1/ns2/node3:0_0_DebugIdentity",
                     "__copy_ns1/ns2/node3_0"):
      self.assertFalse(debug_graphs.is_debug_node(bad_name))
class ParseDebugNodeNameTest(test_util.TensorFlowTestCase):
  """Tests for debug_graphs.parse_debug_node_name."""

  def testParseDebugNodeName_valid(self):
    # A well-formed debug node name decomposes into watched node name,
    # watched output slot, debug-op index and debug-op name.
    parsed = debug_graphs.parse_debug_node_name(
        "__dbg_ns_a/ns_b/node_c:1_0_DebugIdentity")
    watched_node, watched_output_slot, debug_op_index, debug_op = parsed
    self.assertEqual("ns_a/ns_b/node_c", watched_node)
    self.assertEqual(1, watched_output_slot)
    self.assertEqual(0, debug_op_index)
    self.assertEqual("DebugIdentity", debug_op)

  def testParseDebugNodeName_invalidPrefix(self):
    # A "__copy_" prefix is not a valid debug-node prefix.
    with self.assertRaisesRegexp(ValueError, "Invalid prefix"):
      debug_graphs.parse_debug_node_name(
          "__copy_ns_a/ns_b/node_c:1_0_DebugIdentity")

  def testParseDebugNodeName_missingDebugOpIndex(self):
    # The debug-op index between the tensor name and op name is mandatory.
    with self.assertRaisesRegexp(ValueError, "Invalid debug node name"):
      debug_graphs.parse_debug_node_name("__dbg_node1:0_DebugIdentity")

  def testParseDebugNodeName_invalidWatchedTensorName(self):
    # The watched tensor portion must contain an output slot ("name:slot").
    with self.assertRaisesRegexp(ValueError,
                                 "Invalid tensor name in debug node name"):
      debug_graphs.parse_debug_node_name("__dbg_node1_0_DebugIdentity")
if __name__ == "__main__":
test.main()
| apache-2.0 |
ShinyROM/android_external_chromium_org | third_party/android_testrunner/run_command.py | 148 | 5751 | #!/usr/bin/python2.4
#
#
# Copyright 2007, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# System imports
import os
import signal
import subprocess
import tempfile
import threading
import time
# local imports
import errors
import logger
_abort_on_error = False
def SetAbortOnError(abort=True):
  """Sets behavior of RunCommand to throw AbortError if command process returns
  a negative error code.

  Args:
    abort: if True, subsequent RunCommand/RunOnce calls raise
      errors.AbortError when the spawned command fails.
  """
  global _abort_on_error
  _abort_on_error = abort
def RunCommand(cmd, timeout_time=None, retry_count=3, return_output=True,
               stdin_input=None):
  """Run a shell command, retrying if it times out.

  Delegates to RunOnce and retries up to retry_count additional times
  whenever the command times out waiting for a response.

  Args:
    cmd: shell command to run
    timeout_time: time in seconds to wait for command to run before aborting.
    retry_count: number of times to retry command
    return_output: if True return output of command as string. Otherwise,
      direct output of command to stdout.
    stdin_input: data to feed to stdin

  Returns:
    output of command

  Raises:
    errors.WaitForResponseTimedOutError: if every attempt timed out.
  """
  attempts_left = retry_count
  while True:
    try:
      # Success: hand the result straight back to the caller.
      return RunOnce(cmd, timeout_time=timeout_time,
                     return_output=return_output, stdin_input=stdin_input)
    except errors.WaitForResponseTimedOutError:
      if not attempts_left:
        raise
      attempts_left -= 1
      logger.Log("No response for %s, retrying" % cmd)
def RunOnce(cmd, timeout_time=None, return_output=True, stdin_input=None):
  """Spawns a subprocess to run the given shell command.

  Args:
    cmd: shell command to run
    timeout_time: time in seconds to wait for command to run before aborting.
    return_output: if True return output of command as string. Otherwise,
      direct output of command to stdout.
    stdin_input: data to feed to stdin

  Returns:
    output of command

  Raises:
    errors.WaitForResponseTimedOutError if command did not complete within
      timeout_time seconds.
    errors.AbortError is command returned error code and SetAbortOnError is on.
  """
  start_time = time.time()  # NOTE(review): never read afterwards; kept as-is.
  so = []  # collected stdout chunks, shared with the worker thread
  global _abort_on_error, error_occurred
  error_occurred = False

  # Capture output in an unbuffered temp file, or pass None so the child
  # inherits our stdout directly.
  if return_output:
    output_dest = tempfile.TemporaryFile(bufsize=0)
  else:
    # None means direct to stdout
    output_dest = None
  if stdin_input:
    stdin_dest = subprocess.PIPE
  else:
    stdin_dest = None
  # Run through bash so shell syntax in cmd works; restore default SIGPIPE
  # handling in the child so a broken pipe terminates it normally.
  pipe = subprocess.Popen(
      cmd,
      executable='/bin/bash',
      stdin=stdin_dest,
      stdout=output_dest,
      stderr=subprocess.STDOUT,
      shell=True, close_fds=True,
      preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL))

  def Run():
    # Worker thread: wait for the child and harvest its output.  Results are
    # communicated via the enclosing 'so' list and the module-global
    # 'error_occurred' flag.
    global error_occurred
    try:
      pipe.communicate(input=stdin_input)
      output = None
      if return_output:
        output_dest.seek(0)
        output = output_dest.read()
        output_dest.close()
      if output is not None and len(output) > 0:
        so.append(output)
    except OSError, e:
      logger.SilentLog("failed to retrieve stdout from: %s" % cmd)
      logger.Log(e)
      so.append("ERROR")
      error_occurred = True
    if pipe.returncode:
      logger.SilentLog("Error: %s returned %d error code" % (cmd,
          pipe.returncode))
      error_occurred = True

  # Run the wait in a thread so timeout_time can be enforced via join().
  t = threading.Thread(target=Run)
  t.start()
  t.join(timeout_time)
  if t.isAlive():
    # Timed out: kill the child (best effort) and always raise.
    try:
      pipe.kill()
    except OSError:
      # Can't kill a dead process.
      pass
    finally:
      logger.SilentLog("about to raise a timeout for: %s" % cmd)
      raise errors.WaitForResponseTimedOutError

  output = "".join(so)
  if _abort_on_error and error_occurred:
    raise errors.AbortError(msg=output)

  return "".join(so)
def RunHostCommand(binary, valgrind=False):
  """Run a command on the host (opt using valgrind).

  Runs the host binary and returns the exit code.
  If successful, the output (stdout and stderr) are discarded,
  but printed in case of error.
  The command can be run under valgrind in which case all the
  output are always discarded.

  Args:
    binary: full path of the file to be run.
    valgrind: If True the command will be run under valgrind.

  Returns:
    The command exit code (int)
  """
  if not valgrind:
    subproc = subprocess.Popen(binary, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    subproc.wait()
    if subproc.returncode != 0:  # In case of error print the output
      print subproc.communicate()[0]
    return subproc.returncode
  else:
    # Need the full path to valgrind to avoid other versions on the system.
    subproc = subprocess.Popen(["/usr/bin/valgrind", "--tool=memcheck",
                                "--leak-check=yes", "-q", binary],
                               stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Cannot rely on the retcode of valgrind. Instead look for an empty output.
    valgrind_out = subproc.communicate()[0].strip()
    if valgrind_out:
      # Any valgrind output means errors/leaks were reported: treat as failure.
      print valgrind_out
      return 1
    else:
      return 0
def HasValgrind():
  """Report whether the system-wide valgrind binary is present.

  The full /usr/bin path is checked deliberately so that 'alternative'
  valgrind installations elsewhere on the PATH are not picked up.

  Returns:
    True if /usr/bin/valgrind exists.
  """
  valgrind_path = "/usr/bin/valgrind"
  return os.path.exists(valgrind_path)
| bsd-3-clause |
markap/TravelMap | boilerplate/external/babel/__init__.py | 71 | 1333 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Integrated collection of utilities that assist in internationalizing and
localizing applications.
This package is basically composed of two major parts:
* tools to build and work with ``gettext`` message catalogs
* a Python interface to the CLDR (Common Locale Data Repository), providing
access to various locale display names, localized number and date
formatting, etc.
:see: http://www.gnu.org/software/gettext/
:see: http://docs.python.org/lib/module-gettext.html
:see: http://www.unicode.org/cldr/
"""
from babel.core import *
__docformat__ = 'restructuredtext en'
# Resolve the installed package version from setuptools metadata.  Both a
# missing pkg_resources module (ImportError) and an unresolvable 'Babel'
# distribution (ResolutionError) leave __version__ as None.
try:
    from pkg_resources import get_distribution, ResolutionError
    try:
        __version__ = get_distribution('Babel').version
    except ResolutionError:
        __version__ = None # unknown
except ImportError:
    __version__ = None # unknown
| lgpl-3.0 |
percyfal/bokeh | examples/app/pivot/main.py | 8 | 27818 | ''' Provide a pivot chart maker example app. Similar to Excel pivot charts,
but with additonal ability to explode into multiple charts.
See README.md for more information.
'''
from __future__ import division
import os
import math
import json
import pandas as pd
import collections
import bokeh.io as bio
import bokeh.layouts as bl
import bokeh.models.widgets as bmw
import bokeh.models.sources as bms
import bokeh.models.tools as bmt
import bokeh.plotting as bp
import datetime
import six.moves.urllib.parse as urlp
#Defaults to configure:
PLOT_WIDTH = 300
PLOT_HEIGHT = 300
PLOT_FONT_SIZE = 10
PLOT_AXIS_LABEL_SIZE = 8
PLOT_LABEL_ORIENTATION = 45
OPACITY = 0.8
X_SCALE = 1
Y_SCALE = 1
CIRCLE_SIZE = 9
BAR_WIDTH = 0.5
LINE_WIDTH = 2
COLORS = ['#5e4fa2', '#3288bd', '#66c2a5', '#abdda4', '#e6f598', '#fee08b', '#fdae61', '#f46d43', '#d53e4f', '#9e0142']*1000
C_NORM = "#31AADE"
CHARTTYPES = ['Dot', 'Line', 'Bar', 'Area']
STACKEDTYPES = ['Bar', 'Area']
AGGREGATIONS = ['None', 'Sum']
def get_data(data_source):
    '''
    Read a csv into a pandas dataframe and classify its columns.

    Columns are classified as discrete (strings) or continuous (numbers), and
    additionally as filterable (usable in filter widgets) or seriesable
    (usable as a series), based on their number of unique values. NA values
    are filled according to the column type.

    Args:
        data_source (string): Path to csv file.

    Returns:
        df_source (pandas dataframe): A dataframe of the csv source, with filled NA values.
        cols (dict): Keys are categories of columns of df_source, and values are a list of columns of that category.
    '''
    df_source = pd.read_csv(data_source)
    all_cols = df_source.columns.values.tolist()
    discrete = [c for c in all_cols if df_source[c].dtype == object]
    continuous = [c for c in all_cols if c not in discrete]
    cols = {
        'all': all_cols,
        'discrete': discrete,
        'continuous': continuous,
        # Continuous columns with few unique values may still be filtered on
        # or used as a series; discrete columns always qualify.
        'filterable': discrete + [c for c in continuous if len(df_source[c].unique()) < 100],
        'seriesable': discrete + [c for c in continuous if len(df_source[c].unique()) < 60],
    }
    df_source[discrete] = df_source[discrete].fillna('{BLANK}')
    df_source[continuous] = df_source[continuous].fillna(0)
    return (df_source, cols)
def build_widgets(df_source, cols, defaults, init_load=False, init_config=None):
    '''
    Use a dataframe and its columns to set widget options. Widget values may
    be set by URL parameters via init_config.

    Args:
        df_source (pandas dataframe): Dataframe of the csv source.
        cols (dict): Keys are categories of columns of df_source, and values are a list of columns of that category.
        defaults (dict): Keys correspond to widgets, and values (str) are the default values of those widgets.
        init_load (boolean, optional): If this is the initial page load, then this will be True, else False.
        init_config (dict, optional): Initial widget configuration passed via URL. Defaults to empty.

    Returns:
        wdg (ordered dict): Dictionary of bokeh.model.widgets.
    '''
    # Avoid a mutable default argument: a shared {} would leak state between calls.
    if init_config is None:
        init_config = {}
    #Add widgets
    wdg = collections.OrderedDict()
    wdg['data'] = bmw.TextInput(title='Data Source (required)', value=defaults['data_source'], css_classes=['wdgkey-data'])
    wdg['x_dropdown'] = bmw.Div(text='X-Axis (required)', css_classes=['x-dropdown'])
    wdg['x'] = bmw.Select(title='X-Axis (required)', value=defaults['x'], options=['None'] + cols['all'], css_classes=['wdgkey-x', 'x-drop'])
    wdg['x_group'] = bmw.Select(title='Group X-Axis By', value=defaults['x_group'], options=['None'] + cols['seriesable'], css_classes=['wdgkey-x_group', 'x-drop'])
    wdg['y_dropdown'] = bmw.Div(text='Y-Axis (required)', css_classes=['y-dropdown'])
    wdg['y'] = bmw.Select(title='Y-Axis (required)', value=defaults['y'], options=['None'] + cols['all'], css_classes=['wdgkey-y', 'y-drop'])
    wdg['y_agg'] = bmw.Select(title='Y-Axis Aggregation', value='Sum', options=AGGREGATIONS, css_classes=['wdgkey-y_agg', 'y-drop'])
    wdg['series_dropdown'] = bmw.Div(text='Series', css_classes=['series-dropdown'])
    wdg['series'] = bmw.Select(title='Separate Series By', value=defaults['series'], options=['None'] + cols['seriesable'],
                               css_classes=['wdgkey-series', 'series-drop'])
    wdg['series_legend'] = bmw.Div(text='', css_classes=['series-drop'])
    wdg['explode_dropdown'] = bmw.Div(text='Explode', css_classes=['explode-dropdown'])
    wdg['explode'] = bmw.Select(title='Explode By', value=defaults['explode'], options=['None'] + cols['seriesable'], css_classes=['wdgkey-explode', 'explode-drop'])
    wdg['explode_group'] = bmw.Select(title='Group Exploded Charts By', value=defaults['explode_group'], options=['None'] + cols['seriesable'],
                                      css_classes=['wdgkey-explode_group', 'explode-drop'])
    # One checkbox group per filterable column, all values checked initially.
    wdg['filters'] = bmw.Div(text='Filters', css_classes=['filters-dropdown'])
    for j, col in enumerate(cols['filterable']):
        val_list = [str(i) for i in sorted(df_source[col].unique().tolist())]
        wdg['heading_filter_'+str(j)] = bmw.Div(text=col, css_classes=['filter-head'])
        wdg['filter_'+str(j)] = bmw.CheckboxGroup(labels=val_list, active=list(range(len(val_list))), css_classes=['wdgkey-filter_'+str(j), 'filter'])
    wdg['update'] = bmw.Button(label='Update Filters', button_type='success', css_classes=['filters-update'])
    wdg['adjustments'] = bmw.Div(text='Plot Adjustments', css_classes=['adjust-dropdown'])
    wdg['chart_type'] = bmw.Select(title='Chart Type', value=defaults['chart_type'], options=CHARTTYPES, css_classes=['wdgkey-chart_type', 'adjust-drop'])
    wdg['plot_width'] = bmw.TextInput(title='Plot Width (px)', value=str(PLOT_WIDTH), css_classes=['wdgkey-plot_width', 'adjust-drop'])
    wdg['plot_height'] = bmw.TextInput(title='Plot Height (px)', value=str(PLOT_HEIGHT), css_classes=['wdgkey-plot_height', 'adjust-drop'])
    wdg['plot_title'] = bmw.TextInput(title='Plot Title', value='', css_classes=['wdgkey-plot_title', 'adjust-drop'])
    wdg['plot_title_size'] = bmw.TextInput(title='Plot Title Font Size', value=str(PLOT_FONT_SIZE), css_classes=['wdgkey-plot_title_size', 'adjust-drop'])
    wdg['opacity'] = bmw.TextInput(title='Opacity (0-1)', value=str(OPACITY), css_classes=['wdgkey-opacity', 'adjust-drop'])
    wdg['x_scale'] = bmw.TextInput(title='X Scale', value=str(X_SCALE), css_classes=['wdgkey-x_scale', 'adjust-drop'])
    wdg['x_min'] = bmw.TextInput(title='X Min', value='', css_classes=['wdgkey-x_min', 'adjust-drop'])
    wdg['x_max'] = bmw.TextInput(title='X Max', value='', css_classes=['wdgkey-x_max', 'adjust-drop'])
    wdg['x_title'] = bmw.TextInput(title='X Title', value='', css_classes=['wdgkey-x_title', 'adjust-drop'])
    wdg['x_title_size'] = bmw.TextInput(title='X Title Font Size', value=str(PLOT_FONT_SIZE), css_classes=['wdgkey-x_title_size', 'adjust-drop'])
    wdg['x_major_label_size'] = bmw.TextInput(title='X Labels Font Size', value=str(PLOT_AXIS_LABEL_SIZE), css_classes=['wdgkey-x_major_label_size', 'adjust-drop'])
    wdg['x_major_label_orientation'] = bmw.TextInput(title='X Labels Degrees', value=str(PLOT_LABEL_ORIENTATION),
                                                     css_classes=['wdgkey-x_major_label_orientation', 'adjust-drop'])
    wdg['y_scale'] = bmw.TextInput(title='Y Scale', value=str(Y_SCALE), css_classes=['wdgkey-y_scale', 'adjust-drop'])
    wdg['y_min'] = bmw.TextInput(title='Y Min', value='', css_classes=['wdgkey-y_min', 'adjust-drop'])
    wdg['y_max'] = bmw.TextInput(title='Y Max', value='', css_classes=['wdgkey-y_max', 'adjust-drop'])
    wdg['y_title'] = bmw.TextInput(title='Y Title', value='', css_classes=['wdgkey-y_title', 'adjust-drop'])
    wdg['y_title_size'] = bmw.TextInput(title='Y Title Font Size', value=str(PLOT_FONT_SIZE), css_classes=['wdgkey-y_title_size', 'adjust-drop'])
    wdg['y_major_label_size'] = bmw.TextInput(title='Y Labels Font Size', value=str(PLOT_AXIS_LABEL_SIZE), css_classes=['wdgkey-y_major_label_size', 'adjust-drop'])
    wdg['circle_size'] = bmw.TextInput(title='Circle Size (Dot Only)', value=str(CIRCLE_SIZE), css_classes=['wdgkey-circle_size', 'adjust-drop'])
    wdg['bar_width'] = bmw.TextInput(title='Bar Width (Bar Only)', value=str(BAR_WIDTH), css_classes=['wdgkey-bar_width', 'adjust-drop'])
    wdg['line_width'] = bmw.TextInput(title='Line Width (Line Only)', value=str(LINE_WIDTH), css_classes=['wdgkey-line_width', 'adjust-drop'])
    wdg['download'] = bmw.Button(label='Download csv', button_type='success')
    wdg['export_config'] = bmw.Div(text='Export Config to URL', css_classes=['export-config', 'bk-bs-btn', 'bk-bs-btn-success'])

    #use init_config (from 'widgets' parameter in URL query string) to configure widgets.
    if init_load:
        for key in init_config:
            if key in wdg:
                if hasattr(wdg[key], 'value'):
                    wdg[key].value = str(init_config[key])
                elif hasattr(wdg[key], 'active'):
                    wdg[key].active = init_config[key]

    #Add update functions for widgets
    wdg['data'].on_change('value', update_data)
    wdg['update'].on_click(update_plots)
    wdg['download'].on_click(download)
    for name in wdg_col:
        wdg[name].on_change('value', update_wdg_col)
    for name in wdg_non_col:
        wdg[name].on_change('value', update_wdg)

    return wdg
def set_df_plots(df_source, cols, wdg):
    '''
    Apply filters, scaling, aggregation, and sorting to the source dataframe.

    Args:
        df_source (pandas dataframe): Dataframe of the csv source.
        cols (dict): Keys are categories of columns of df_source, and values are a list of columns of that category.
        wdg (ordered dict): Dictionary of bokeh model widgets.

    Returns:
        df_plots (pandas dataframe): df_source after having been filtered, scaled, aggregated, and sorted.
    '''
    df_plots = df_source.copy()

    # Keep only the rows whose values are checked in each filter widget.
    for idx, filter_col in enumerate(cols['filterable']):
        filter_wdg = wdg['filter_' + str(idx)]
        checked = [filter_wdg.labels[i] for i in filter_wdg.active]
        if filter_col in cols['continuous']:
            checked = [float(v) for v in checked]
        df_plots = df_plots[df_plots[filter_col].isin(checked)]

    # Scale the x and y axes (continuous columns only).
    for axis in ('x', 'y'):
        axis_col = wdg[axis].value
        scale = wdg[axis + '_scale'].value
        if scale != '' and axis_col in cols['continuous']:
            df_plots[axis_col] = df_plots[axis_col] * float(scale)

    # Grouping/sorting key columns, outermost first; 'None' selections are skipped.
    key_cols = [wdg[key].value
                for key in ('explode_group', 'explode', 'series', 'x_group')
                if wdg[key].value != 'None']
    key_cols.append(wdg['x'].value)

    # Aggregate y values over the key columns if requested.
    y_col = wdg['y'].value
    if wdg['y_agg'].value == 'Sum' and y_col in cols['continuous']:
        df_plots = df_plots.groupby(key_cols, as_index=False, sort=False)[y_col].sum()

    # Sort rows by the key columns.
    df_plots = df_plots.sort_values(key_cols).reset_index(drop=True)

    # Rearrange column order for csv download: key columns first, y last.
    other_cols = [c for c in df_plots.columns if c not in key_cols + [y_col]]
    df_plots = df_plots[key_cols + other_cols + [y_col]]
    return df_plots
def create_figures(df_plots, wdg, cols):
    '''
    Create figures based on the plot data and widget configuration, returned as a list.

    The explode widget determines how many figures are built: with no explode
    column selected a single figure is returned; otherwise one figure is built
    per unique explode value, optionally further partitioned by the
    explode_group column.

    Args:
        df_plots (pandas dataframe): Dataframe of csv source after being filtered, scaled, aggregated, and sorted.
        wdg (ordered dict): Dictionary of bokeh model widgets.
        cols (dict): Keys are categories of columns of df_source, and values are a list of columns of that category.

    Returns:
        figures (list): List of bokeh.model.figures.
    '''
    figures = []
    df_cp = df_plots.copy()
    explode_col = wdg['explode'].value
    group_col = wdg['explode_group'].value
    if explode_col == 'None':
        # No exploding: a single figure covers all the data.
        figures.append(create_figure(df_cp, df_plots, wdg, cols))
    elif group_col == 'None':
        # One figure per unique value of the explode column.
        for exp_val in df_cp[explode_col].unique().tolist():
            df_exp = df_cp[df_cp[explode_col].isin([exp_val])]
            figures.append(create_figure(df_exp, df_plots, wdg, cols, exp_val))
    else:
        # One figure per (explode_group, explode value) pair.
        for grp_val in df_cp[group_col].unique().tolist():
            df_grp = df_cp[df_cp[group_col].isin([grp_val])]
            for exp_val in df_grp[explode_col].unique().tolist():
                df_exp = df_grp[df_grp[explode_col].isin([exp_val])]
                figures.append(create_figure(df_exp, df_plots, wdg, cols, exp_val, grp_val))
    return figures
def create_figure(df_exploded, df_plots, wdg, cols, explode_val=None, explode_group=None):
    '''
    Create and return a figure based on the data in a dataframe and widget configuration.

    Args:
        df_exploded (pandas dataframe): Dataframe of just the data that will be plotted in this figure.
        df_plots (pandas dataframe): Dataframe of all plots data, used only for maintaining consistent series colors.
        wdg (ordered dict): Dictionary of bokeh model widgets.
        cols (dict): Keys are categories of columns of df_source, and values are a list of columns of that category.
        explode_val (string, optional): The value in the column designated by wdg['explode'] that applies to this figure.
        explode_group (string, optional): The value in the wdg['explode_group'] column that applies to this figure.

    Returns:
        p (bokeh.model.figure): A figure, with all glyphs added by the add_glyph() function.
    '''
    # If x_group has a value, create a combined column in the dataframe for x and x_group
    x_col = wdg['x'].value
    if wdg['x_group'].value != 'None':
        x_col = str(wdg['x_group'].value) + '_' + str(wdg['x'].value)
        df_exploded[x_col] = df_exploded[wdg['x_group'].value].map(str) + ' ' + df_exploded[wdg['x'].value].map(str)

    #Build x and y ranges and figure title
    kw = dict()

    #Set x and y ranges. When x is grouped, there is added complication of separating the groups
    xs = df_exploded[x_col].values.tolist()
    ys = df_exploded[wdg['y'].value].values.tolist()
    if wdg['x_group'].value != 'None':
        kw['x_range'] = []
        unique_groups = df_exploded[wdg['x_group'].value].unique().tolist()
        unique_xs = df_exploded[wdg['x'].value].unique().tolist()
        for i, ugr in enumerate(unique_groups):
            for uxs in unique_xs:
                kw['x_range'].append(str(ugr) + ' ' + str(uxs))
            #Between groups, add entries that consist of spaces. Increase number of spaces from
            #one break to the next so that each entry is unique
            kw['x_range'].append(' ' * (i + 1))
    elif wdg['x'].value in cols['discrete']:
        # Discrete (ungrouped) x: categorical range of the sorted unique values.
        kw['x_range'] = sorted(set(xs))
    if wdg['y'].value in cols['discrete']:
        kw['y_range'] = sorted(set(ys))

    #Set figure title, appending explode_group and explode values when present
    kw['title'] = wdg['plot_title'].value
    seperator = '' if kw['title'] == '' else ', '
    if explode_val is not None:
        if explode_group is not None:
            kw['title'] = kw['title'] + seperator + "%s = %s" % (wdg['explode_group'].value, str(explode_group))
            seperator = '' if kw['title'] == '' else ', '
        kw['title'] = kw['title'] + seperator + "%s = %s" % (wdg['explode'].value, str(explode_val))

    #Add figure tools. The hover tool reads the per-glyph legend columns
    #set up by add_glyph().
    hover = bmt.HoverTool(
        tooltips=[
            ("ser", "@ser_legend"),
            ("x", "@x_legend"),
            ("y", "@y_legend"),
        ]
    )
    TOOLS = [bmt.BoxZoomTool(), bmt.PanTool(), hover, bmt.ResetTool(), bmt.SaveTool()]

    #Create figure with the ranges, titles, and tools, and adjust formatting and labels
    p = bp.figure(plot_height=int(wdg['plot_height'].value), plot_width=int(wdg['plot_width'].value), tools=TOOLS, **kw)
    p.toolbar.active_drag = TOOLS[0]
    p.title.text_font_size = wdg['plot_title_size'].value + 'pt'
    p.xaxis.axis_label = wdg['x_title'].value
    p.yaxis.axis_label = wdg['y_title'].value
    p.xaxis.axis_label_text_font_size = wdg['x_title_size'].value + 'pt'
    p.yaxis.axis_label_text_font_size = wdg['y_title_size'].value + 'pt'
    p.xaxis.major_label_text_font_size = wdg['x_major_label_size'].value + 'pt'
    p.yaxis.major_label_text_font_size = wdg['y_major_label_size'].value + 'pt'
    p.xaxis.major_label_orientation = 'horizontal' if wdg['x_major_label_orientation'].value == '0' else math.radians(float(wdg['x_major_label_orientation'].value))
    # Explicit axis bounds only make sense for continuous axes.
    if wdg['x'].value in cols['continuous']:
        if wdg['x_min'].value != '': p.x_range.start = float(wdg['x_min'].value)
        if wdg['x_max'].value != '': p.x_range.end = float(wdg['x_max'].value)
    if wdg['y'].value in cols['continuous']:
        if wdg['y_min'].value != '': p.y_range.start = float(wdg['y_min'].value)
        if wdg['y_max'].value != '': p.y_range.end = float(wdg['y_max'].value)

    #Add glyphs to figure
    c = C_NORM
    if wdg['series'].value == 'None':
        if wdg['y_agg'].value != 'None' and wdg['y'].value in cols['continuous']:
            xs = df_exploded[x_col].values.tolist()
            ys = df_exploded[wdg['y'].value].values.tolist()
        add_glyph(wdg, p, xs, ys, c)
    else:
        full_series = df_plots[wdg['series'].value].unique().tolist() #for colors only
        if wdg['chart_type'].value in STACKEDTYPES: #We are stacking the series
            #Stacked series share one x vector; track cumulative positive and
            #negative bases separately so values stack away from zero.
            xs_full = sorted(df_exploded[x_col].unique().tolist())
            y_bases_pos = [0]*len(xs_full)
            y_bases_neg = [0]*len(xs_full)
        for i, ser in enumerate(df_exploded[wdg['series'].value].unique().tolist()):
            c = COLORS[full_series.index(ser)]
            df_series = df_exploded[df_exploded[wdg['series'].value].isin([ser])]
            xs_ser = df_series[x_col].values.tolist()
            ys_ser = df_series[wdg['y'].value].values.tolist()
            if wdg['chart_type'].value not in STACKEDTYPES: #The series will not be stacked
                add_glyph(wdg, p, xs_ser, ys_ser, c, series=ser)
            else: #We are stacking the series
                ys_pos = [ys_ser[xs_ser.index(x)] if x in xs_ser and ys_ser[xs_ser.index(x)] > 0 else 0 for i, x in enumerate(xs_full)]
                ys_neg = [ys_ser[xs_ser.index(x)] if x in xs_ser and ys_ser[xs_ser.index(x)] < 0 else 0 for i, x in enumerate(xs_full)]
                ys_stacked_pos = [ys_pos[i] + y_bases_pos[i] for i in range(len(xs_full))]
                ys_stacked_neg = [ys_neg[i] + y_bases_neg[i] for i in range(len(xs_full))]
                add_glyph(wdg, p, xs_full, ys_stacked_pos, c, y_bases=y_bases_pos, series=ser)
                add_glyph(wdg, p, xs_full, ys_stacked_neg, c, y_bases=y_bases_neg, series=ser)
                y_bases_pos = ys_stacked_pos
                y_bases_neg = ys_stacked_neg
    return p
def add_glyph(wdg, p, xs, ys, c, y_bases=None, series=None):
    '''
    Add a glyph to a Bokeh figure, depending on the chosen chart type
    ('Dot', 'Line', 'Bar', or 'Area').

    Args:
        wdg (ordered dict): Dictionary of bokeh model widgets.
        p (bokeh.model.figure): Bokeh figure.
        xs (list): List of x-values. These could be numeric or strings.
        ys (list): List of y-values. These could be numeric or strings. If series data is stacked, these values include stacking.
        c (string): Color to use for this series.
        y_bases (list, optional): Only used when stacking series. This is the previous cumulative stacking level.
        series (string): Name of current series for this glyph.

    Returns:
        Nothing.
    '''
    alpha = float(wdg['opacity'].value)
    # Un-stacked y values are what the hover tool/legend should display.
    y_unstacked = list(ys) if y_bases is None else [ys[i] - y_bases[i] for i in range(len(ys))]
    ser = ['None']*len(xs) if series is None else [series]*len(xs)
    if wdg['chart_type'].value == 'Dot':
        source = bms.ColumnDataSource({'x': xs, 'y': ys, 'x_legend': xs, 'y_legend': y_unstacked, 'ser_legend': ser})
        p.circle('x', 'y', source=source, color=c, size=int(wdg['circle_size'].value), fill_alpha=alpha, line_color=None, line_width=None)
    elif wdg['chart_type'].value == 'Line':
        source = bms.ColumnDataSource({'x': xs, 'y': ys, 'x_legend': xs, 'y_legend': y_unstacked, 'ser_legend': ser})
        p.line('x', 'y', source=source, color=c, alpha=alpha, line_width=float(wdg['line_width'].value))
    elif wdg['chart_type'].value == 'Bar':
        if y_bases is None: y_bases = [0]*len(ys)
        # Bars are drawn as rects centered between the base and the stacked top.
        centers = [(ys[i] + y_bases[i])/2 for i in range(len(ys))]
        heights = [abs(ys[i] - y_bases[i]) for i in range(len(ys))]
        source = bms.ColumnDataSource({'x': xs, 'y': centers, 'x_legend': xs, 'y_legend': y_unstacked, 'h': heights, 'ser_legend': ser})
        p.rect('x', 'y', source=source, height='h', color=c, fill_alpha=alpha, width=float(wdg['bar_width'].value), line_color=None, line_width=None)
    elif wdg['chart_type'].value == 'Area':
        if y_bases is None: y_bases = [0]*len(ys)
        # Trace out the polygon boundary: forward along the top, back along the base.
        xs_around = xs + xs[::-1]
        ys_around = y_bases + ys[::-1]
        source = bms.ColumnDataSource({'x': xs_around, 'y': ys_around})
        p.patch('x', 'y', source=source, alpha=alpha, fill_color=c, line_color=None, line_width=None)
def build_series_legend(df_plots, series_val):
    '''
    Build the html for the series legend from the unique values of the column
    chosen as series, colored via the global COLORS palette.

    Args:
        df_plots (pandas dataframe): Dataframe of all plots data.
        series_val (string): Header for column chosen as series.

    Returns:
        (string): html to be used as legend.
    '''
    parts = ['<div class="legend-header">Series Legend</div><div class="legend-body">']
    if series_val != 'None':
        entries = df_plots[series_val].unique().tolist()
        # Render entries in reverse so the legend order matches the stacking order.
        for idx, label in reversed(list(enumerate(entries))):
            parts.append('<div class="legend-entry"><span class="legend-color" style="background-color:' + str(COLORS[idx]) + ';"></span>')
            parts.append('<span class="legend-text">' + str(label) +'</span></div>')
    parts.append('</div>')
    return ''.join(parts)
def update_data(attr, old, new):
    '''
    Bokeh on-change callback: when the data source widget is updated,
    rebuild widgets and plots from the newly selected csv.
    '''
    defaults['data_source'] = gl['widgets']['data'].value
    # Reset every column-picker widget, since the previously selected
    # columns may not exist in the new data source.
    for w in wdg_col:
        defaults[w] = 'None'
    defaults['chart_type'] = 'Dot'
    gl['df_source'], gl['columns'] = get_data(defaults['data_source'])
    gl['widgets'] = build_widgets(gl['df_source'], gl['columns'], defaults)
    gl['controls'].children = list(gl['widgets'].values())
    # Clear plots; they will be rebuilt once x and y are chosen again.
    gl['plots'].children = []
def update_wdg(attr, old, new):
    '''
    Bokeh on-change callback for general (non-column) widgets:
    widgets not in wdg_col only affect presentation, so just redraw the plots.
    '''
    update_plots()
def update_wdg_col(attr, old, new):
    '''
    Bokeh on-change callback for column-picker widgets (wdg_col):
    re-limit the options of all wdg_col widgets (so columns aren't selected
    twice), then redraw the plots.
    '''
    set_wdg_col_options()
    update_plots()
def set_wdg_col_options():
    '''
    Limit the options of each column-picker widget (wdg_col) based on the
    values already selected, so users cannot choose the same column in two
    different wdg_col widgets.
    '''
    cols = gl['columns']
    widgets = gl['widgets']
    # Columns already taken by some widget (ignoring the 'None' placeholder).
    taken = set()
    for name in wdg_col:
        value = str(widgets[name].value)
        if value != 'None':
            taken.add(value)
    remaining_all = [col for col in cols['all'] if col not in taken]
    remaining_ser = [col for col in cols['seriesable'] if col not in taken]
    for name in wdg_col:
        value = str(widgets[name].value)
        # x/y may use any column; the other pickers only seriesable columns.
        options = remaining_all if name in wdg_col_all else remaining_ser
        # Current value stays first; offer 'None' last unless already selected.
        trailer = [] if value == 'None' else ['None']
        widgets[name].options = [value] + options + trailer
def update_plots():
    '''
    Redraw all plots from the current widget state. If either the x or y
    column is unset, clear the plot area instead.
    '''
    if gl['widgets']['x'].value == 'None' or gl['widgets']['y'].value == 'None':
        gl['plots'].children = []
        return
    # Filter/aggregate the source data, refresh the legend, then build figures.
    gl['df_plots'] = set_df_plots(gl['df_source'], gl['columns'], gl['widgets'])
    gl['widgets']['series_legend'].text = build_series_legend(gl['df_plots'], gl['widgets']['series'].value)
    gl['plots'].children = create_figures(gl['df_plots'], gl['widgets'], gl['columns'])
def download():
    '''
    Write a csv file of the currently viewed data to the downloads/ directory,
    named with the current timestamp.

    Returns:
        Nothing.
    '''
    # Timestamp (with microseconds) keeps repeated downloads from colliding.
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S-%f")
    # os.path.join instead of '/'-concatenation so the path is OS-portable.
    out_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'downloads', 'out ' + stamp + '.csv')
    gl['df_plots'].to_csv(out_path, index=False)
# Initialize the globals dict shared by all callbacks: the source dataframe,
# the filtered plot dataframe, column metadata, and the bokeh widget/layout models.
gl = {'df_source':None, 'df_plots':None, 'columns':None, 'widgets':None, 'controls': None, 'plots':None}
# List of widgets that use columns as their selectors
wdg_col_all = ['x', 'y'] #all columns available for these widgets
wdg_col_ser = ['x_group', 'series', 'explode', 'explode_group'] #seriesable columns available for these widgets
wdg_col = wdg_col_all + wdg_col_ser
# List of widgets that don't use columns as selector and share general widget update function
wdg_non_col = ['chart_type', 'y_agg', 'plot_title', 'plot_title_size',
    'plot_width', 'plot_height', 'opacity', 'x_min', 'x_max', 'x_scale', 'x_title',
    'x_title_size', 'x_major_label_size', 'x_major_label_orientation',
    'y_min', 'y_max', 'y_scale', 'y_title', 'y_title_size', 'y_major_label_size',
    'circle_size', 'bar_width', 'line_width']
# Specify default widget values (shipped example dataset and an area chart).
defaults = {}
defaults['data_source'] = os.path.dirname(os.path.realpath(__file__)) + '/csv/US_electric_power_generation.csv'
for w in wdg_col:
    defaults[w] = 'None'
defaults['x'] = 'Year'
defaults['y'] = 'Electricity Generation (TWh)'
defaults['series'] = 'Technology'
defaults['explode'] = 'Case'
defaults['chart_type'] = 'Area'
# On initial load, read 'widgets' parameter from URL query string and use to set data source (data_source)
# and widget configuration object (wdg_config). This allows fully-configured charts to be shared by URL.
wdg_config = {}
args = bio.curdoc().session_context.request.arguments
wdg_arr = args.get('widgets')
if wdg_arr is not None:
    wdg_config = json.loads(urlp.unquote(wdg_arr[0].decode('utf-8')))
    if 'data' in wdg_config:
        defaults['data_source'] = str(wdg_config['data'])
        # Column defaults are reset; the URL config will re-select columns.
        for w in wdg_col:
            defaults[w] = 'None'
# Build widgets and plots, then assemble the document layout.
gl['df_source'], gl['columns'] = get_data(defaults['data_source'])
gl['widgets'] = build_widgets(gl['df_source'], gl['columns'], defaults, init_load=True, init_config=wdg_config)
set_wdg_col_options()
gl['controls'] = bl.widgetbox(list(gl['widgets'].values()), id='widgets_section')
gl['plots'] = bl.column([], id='plots_section')
update_plots()
layout = bl.row(gl['controls'], gl['plots'], id='layout')
bio.curdoc().add_root(layout)
bio.curdoc().title = "Exploding Pivot Chart Maker"
| bsd-3-clause |
Alweezy/cp2-bucketlist-api | tests/test_user.py | 1 | 3772 | import unittest
import json
from app import create_app, db
class UserTest(unittest.TestCase):
    """Integration tests for the /auth/register/ and /auth/login/ endpoints."""
    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app(config_name="testing")
        self.client = self.app.test_client
        # binds the app to the current context
        with self.app.app_context():
            # create all tables
            db.create_all()
        # Default registration payload reused across tests.
        self.user = {"username": "nerd",
                     "password": "nerdy",
                     "email": "nerd@gmail.com"
                     }
    def test_registration_successful(self):
        """Test successful user registration."""
        result = self.client().post("/auth/register/", data=json.dumps(self.user),
                                    content_type="application/json")
        results = json.loads(result.data.decode())
        self.assertEqual(results["message"],
                         'User registration successful.')
        self.assertEqual(result.status_code, 201)
    def test_duplicate_user_registration(self):
        """Test registered user registration: second attempt must 409."""
        res = self.client().post("/auth/register/", data=json.dumps(self.user),
                                 content_type="application/json")
        self.assertEqual(res.status_code, 201)
        # Test double registration
        res_2 = self.client().post("/auth/register/",
                                   data=json.dumps(self.user),
                                   content_type="application/json")
        self.assertEqual(res_2.status_code, 409)
        final_result = json.loads(res_2.data.decode())
        self.assertEqual(final_result["message"],
                         "User already exists. Please login")
    def test_login_successful(self):
        """Test successful user login returns a token."""
        res = self.client().post("/auth/register/", data=json.dumps(self.user),
                                 content_type="application/json")
        self.assertEqual(res.status_code, 201)
        login_res = self.client().post("/auth/login/", data=json.dumps(self.user),
                                       content_type="application/json")
        results = json.loads(login_res.data.decode())
        self.assertEqual(results["message"], "You logged in successfully.")
        # Confirm the status code and access token
        self.assertEqual(login_res.status_code, 200)
        self.assertTrue(results["token"])
    def test_unauthorised_login_attempt(self):
        """Test unauthorised login attempt (user was never registered)."""
        res = self.client().post('/auth/login/',
                                 data=json.dumps(self.user),
                                 content_type="application/json")
        self.assertEqual(res.status_code, 401)
        result = json.loads(res.data)
        self.assertEqual(result['message'],
                         'Invalid email or password. Please try again.')
    def test_incomplete_login_credentials(self):
        """Test partial issue of login credentials (blank username rejected)."""
        new_user = {"username": "",
                    "password": "new_password"
                    }
        res = self.client().post("/auth/register/", data=json.dumps(new_user),
                                 content_type="application/json")
        final_result = json.loads(res.data.decode())
        self.assertEqual(res.status_code, 400)
        self.assertEqual(final_result["message"],
                         "Error. The username or password cannot be empty")
    def tearDown(self):
        """teardown all initialized variables."""
        with self.app.app_context():
            # drop all tables
            db.session.remove()
            db.drop_all()
# Allow running this test module directly with the stdlib unittest runner.
if __name__ == '__main__':
    unittest.main()
| mit |
thiphariel/navitia | source/tyr/tyr/api.py | 1 | 2459 | #!/usr/bin/env python
#coding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from tyr import resources
from tyr import app, api
import flask_restful
# we always want pretty json
flask_restful.representations.json.settings = {'indent': 4}
# Treat /foo and /foo/ as the same endpoint.
api.app.url_map.strict_slashes = False
# Register all v0 REST resources with their URL routes.
api.add_resource(resources.Instance, '/v0/instances/', '/v0/instances/<int:id>/', '/v0/instances/<string:name>/')
api.add_resource(resources.Api, '/v0/api/')
api.add_resource(resources.User, '/v0/users/',
                 '/v0/users/<int:user_id>/')
api.add_resource(resources.Key, '/v0/users/<int:user_id>/keys/',
                 '/v0/users/<int:user_id>/keys/<int:key_id>/')
api.add_resource(resources.Authorization,
                 '/v0/users/<int:user_id>/authorizations/')
api.add_resource(resources.Index, '/')
api.add_resource(resources.Job, '/v0/jobs/', '/v0/jobs/<string:instance_name>/', endpoint='jobs')
api.add_resource(resources.EndPoint, '/v0/end_points/', '/v0/end_points/<int:id>/', endpoint='end_points')
api.add_resource(resources.TravelerProfile,
                 '/v0/instances/<string:name>/traveler_profiles/',
                 '/v0/instances/<string:name>/traveler_profiles/<string:traveler_type>')
@app.errorhandler(Exception)
def error_handler(exception):
    """
    Catch-all Flask error handler: log the current exception (with traceback)
    so unexpected failures are recorded in the application log.
    """
    app.logger.exception('')
| agpl-3.0 |
gicsi/aap | src/machine_learning/nltk-trainer-master/analyze_tagger_coverage.py | 5 | 5537 | #!/usr/bin/env python
import argparse, collections, math, os.path
import nltk.corpus, nltk.corpus.reader, nltk.data, nltk.tag, nltk.metrics
from nltk.corpus.util import LazyCorpusLoader
from nltk_trainer import load_corpus_reader, load_model, simplify_wsj_tag
from nltk_trainer.tagging import taggers
########################################
## command options & argument parsing ##
########################################
parser = argparse.ArgumentParser(description='Analyze a part-of-speech tagger on a tagged corpus',
    formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('corpus',
    help='''The name of a tagged corpus included with NLTK, such as treebank,
brown, cess_esp, floresta, or the root path to a corpus directory,
which can be either an absolute path or relative to a nltk_data directory.''')
parser.add_argument('--tagger', default=nltk.tag._POS_TAGGER,
    help='''pickled tagger filename/path relative to an nltk_data directory
default is NLTK's default tagger''')
parser.add_argument('--trace', default=1, type=int,
    help='How much trace output you want, defaults to 1. 0 is no trace output.')
parser.add_argument('--metrics', action='store_true', default=False,
    help='Use tagged sentences to determine tagger accuracy and tag precision & recall')
corpus_group = parser.add_argument_group('Corpus Reader Options')
corpus_group.add_argument('--reader', default=None,
    help='''Full module path to a corpus reader class, such as
nltk.corpus.reader.tagged.TaggedCorpusReader''')
corpus_group.add_argument('--fileids', default=None,
    help='Specify fileids to load from corpus')
corpus_group.add_argument('--fraction', default=1.0, type=float,
    help='''The fraction of the corpus to use for testing coverage''')
# Which tag-mapping option exists depends on the NLTK version: older NLTK
# exposes simplify_wsj_tag (--simplify_tags), newer NLTK uses tagsets (--tagset).
if simplify_wsj_tag:
    corpus_group.add_argument('--simplify_tags', action='store_true', default=False,
        help='Use simplified tags')
else:
    corpus_group.add_argument('--tagset', default=None,
        help='Map tags to a given tagset, such as "universal"')
args = parser.parse_args()
###################
## corpus reader ##
###################
corpus = load_corpus_reader(args.corpus, reader=args.reader, fileids=args.fileids)
# kwargs are forwarded to corpus.tagged_sents()/sents() below.
kwargs = {'fileids': args.fileids}
# Tag mapping (simplify_tags on old NLTK, tagset on new NLTK) only makes sense
# when comparing against reference tags, i.e. with --metrics.
if simplify_wsj_tag and args.simplify_tags and not args.metrics:
    raise ValueError('simplify_tags can only be used with the --metrics option')
elif simplify_wsj_tag and args.simplify_tags and args.corpus not in ['conll2000', 'switchboard']:
    kwargs['simplify_tags'] = True
elif not simplify_wsj_tag and args.tagset and not args.metrics:
    raise ValueError('tagset can only be used with the --metrics option')
elif not simplify_wsj_tag and args.tagset:
    kwargs['tagset'] = args.tagset
# TODO: support corpora with alternatives to tagged_sents that work just as well
if args.metrics and not hasattr(corpus, 'tagged_sents'):
    raise ValueError('%s does not support metrics' % args.corpus)
############
## tagger ##
############
if args.trace:
    print('loading tagger %s' % args.tagger)
# 'pattern' selects the pattern-library tagger wrapper; anything else is
# treated as a pickled tagger path/name loadable via nltk_data.
if args.tagger == 'pattern':
    tagger = taggers.PatternTagger()
else:
    tagger = load_model(args.tagger)
#######################
## coverage analysis ##
#######################
if args.trace:
    print('analyzing tag coverage of %s with %s\n' % (args.corpus, tagger.__class__.__name__))
# tags_found counts tags the tagger produced; unknown_words collects words
# the tagger could not tag (tagged '-NONE-').
tags_found = collections.defaultdict(int)
unknown_words = set()
if args.metrics:
    # With --metrics, also collect reference tags so we can report accuracy
    # plus per-tag precision and recall.
    tags_actual = collections.defaultdict(int)
    tag_refs = []
    tag_test = []
    tag_word_refs = collections.defaultdict(set)
    tag_word_test = collections.defaultdict(set)
    tagged_sents = corpus.tagged_sents(**kwargs)
    # taglen tracks the widest tag seen, for column alignment (minimum 7).
    taglen = 7
    if args.fraction != 1.0:
        cutoff = int(math.ceil(len(tagged_sents) * args.fraction))
        tagged_sents = tagged_sents[:cutoff]
    for tagged_sent in tagged_sents:
        for word, tag in tagged_sent:
            tags_actual[tag] += 1
            tag_refs.append(tag)
            tag_word_refs[tag].add(word)
            if len(tag) > taglen:
                taglen = len(tag)
        # Re-tag the untagged sentence and record what the tagger produced.
        for word, tag in tagger.tag(nltk.tag.untag(tagged_sent)):
            tags_found[tag] += 1
            tag_test.append(tag)
            tag_word_test[tag].add(word)
            if tag == '-NONE-':
                unknown_words.add(word)
    print('Accuracy: %f' % nltk.metrics.accuracy(tag_refs, tag_test))
    print('Unknown words: %d' % len(unknown_words))
    if args.trace and unknown_words:
        print(', '.join(sorted(unknown_words)))
    print('')
    # Print a per-tag table of found/actual counts with precision & recall.
    print(' '.join(['Tag'.center(taglen), 'Found'.center(9), 'Actual'.center(10),
        'Precision'.center(13), 'Recall'.center(13)]))
    print(' '.join(['='*taglen, '='*9, '='*10, '='*13, '='*13]))
    for tag in sorted(set(tags_found.keys()) | set(tags_actual.keys())):
        found = tags_found[tag]
        actual = tags_actual[tag]
        precision = nltk.metrics.precision(tag_word_refs[tag], tag_word_test[tag])
        recall = nltk.metrics.recall(tag_word_refs[tag], tag_word_test[tag])
        print(' '.join([tag.ljust(taglen), str(found).rjust(9), str(actual).rjust(10),
            str(precision).ljust(13)[:13], str(recall).ljust(13)[:13]]))
    print(' '.join(['='*taglen, '='*9, '='*10, '='*13, '='*13]))
else:
    # Without --metrics, just tag raw sentences and report tag counts.
    sents = corpus.sents(**kwargs)
    taglen = 7
    if args.fraction != 1.0:
        cutoff = int(math.ceil(len(sents) * args.fraction))
        sents = sents[:cutoff]
    for sent in sents:
        for word, tag in tagger.tag(sent):
            tags_found[tag] += 1
            if len(tag) > taglen:
                taglen = len(tag)
    print(' '.join(['Tag'.center(taglen), 'Count'.center(9)]))
    print(' '.join(['='*taglen, '='*9]))
    for tag in sorted(tags_found.keys()):
        print(' '.join([tag.ljust(taglen), str(tags_found[tag]).rjust(9)]))
    print(' '.join(['='*taglen, '='*9]))
jean/pybossa | pybossa/view/home.py | 3 | 2697 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
"""Home view for PyBossa."""
from flask import current_app
from flask.ext.login import current_user
from pybossa.model.category import Category
from flask import Blueprint
from flask import render_template
from pybossa.cache import projects as cached_projects
from pybossa.cache import users as cached_users
from pybossa.cache import categories as cached_cat
from pybossa.util import rank
blueprint = Blueprint('home', __name__)


@blueprint.route('/')
def home():
    """Render home page with the cached projects and users."""
    page = 1
    per_page = current_app.config.get('APPS_PER_PAGE')
    if per_page is None: # pragma: no cover
        per_page = 5
    # Template context: top projects always; top users only per privacy rules.
    d = {'top_projects': cached_projects.get_top(),
         'top_users': None}
    # Get all the categories with projects
    categories = cached_cat.get_used()
    d['categories'] = categories
    d['categories_projects'] = {}
    for c in categories:
        tmp_projects = cached_projects.get(c['short_name'], page, per_page)
        d['categories_projects'][c['short_name']] = rank(tmp_projects)
    # Add featured as a synthetic first category when featured projects exist.
    tmp_projects = cached_projects.get_featured('featured', page, per_page)
    if len(tmp_projects) > 0:
        featured = Category(name='Featured', short_name='featured')
        d['categories'].insert(0, featured)
        d['categories_projects']['featured'] = rank(tmp_projects)
    # With privacy enforced, only authenticated admins see the top users list.
    if (current_app.config['ENFORCE_PRIVACY']
            and current_user.is_authenticated()):
        if current_user.admin:
            d['top_users'] = cached_users.get_top()
    if not current_app.config['ENFORCE_PRIVACY']:
        d['top_users'] = cached_users.get_top()
    return render_template('/home/index.html', **d)
@blueprint.route("about")
def about():
    """Render the about template."""
    return render_template("/home/about.html")
@blueprint.route("search")
def search():
    """Render search results page."""
    return render_template("/home/search.html")
| agpl-3.0 |
praekelt/molo.commenting | molo/commenting/tests/test_views.py | 1 | 25207 | from bs4 import BeautifulSoup
from django.conf.urls import url, include
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.test import TestCase, Client, override_settings
from django.contrib.auth.models import Group
from django.utils import timezone
from molo.commenting.models import MoloComment
from molo.commenting.forms import MoloCommentForm
from molo.core.models import SiteLanguageRelation, Languages, Main
from molo.core.tests.base import MoloTestCaseMixin
from notifications.models import Notification
# URLconf installed via @override_settings on the test classes below:
# molo commenting routes plus django_comments, molo core and wagtail core.
urlpatterns = [
    url(r'^commenting/',
        include('molo.commenting.urls', namespace='molo.commenting')),
    url(r'', include('django_comments.urls')),
    url(r'', include('molo.core.urls')),
    url(r'', include('wagtail.core.urls')),
]
@override_settings(ROOT_URLCONF='molo.commenting.tests.test_views')
class ViewsTest(TestCase, MoloTestCaseMixin):
    """Tests for comment reporting and posting views."""
    def setUp(self):
        # Creates main page
        self.mk_main()
        self.main = Main.objects.all().first()
        self.language_setting = Languages.objects.create(
            site_id=self.main.get_site().pk)
        self.english = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='en',
            is_active=True)
        self.user = User.objects.create_user(
            'test', 'test@example.org', 'test')
        self.content_type = ContentType.objects.get_for_model(self.user)
        self.client = Client()
        self.client.login(username='test', password='test')
        self.yourmind = self.mk_section(
            self.section_index, title='Your mind')
        self.article1 = self.mk_article(
            title='article 1', slug='article-1', parent=self.yourmind)
    def mk_comment(self, comment):
        # Helper: create a comment by self.user attached to self.user itself.
        return MoloComment.objects.create(
            content_type=self.content_type,
            object_pk=self.user.pk,
            content_object=self.user,
            site=Site.objects.get_current(),
            user=self.user,
            comment=comment,
            submit_date=timezone.now())
    def test_reporting_without_removal(self):
        """Reporting a comment flags it but does not remove it by default."""
        comment = self.mk_comment('the comment')
        response = self.client.get(
            reverse('molo.commenting:molo-comments-report',
                    args=(comment.pk,)))
        self.assertEqual(response.status_code, 302)
        [flag] = comment.flags.all()
        self.assertEqual(flag.comment, comment)
        self.assertEqual(flag.user, self.user)
        self.assertFalse(MoloComment.objects.get(pk=comment.pk).is_removed)
        self.assertTrue('The comment has been reported.'
                        in response.cookies['messages'].value)
    def test_reporting_with_removal(self):
        """With a flag threshold of 1, a single report removes the comment."""
        comment = self.mk_comment('the comment')
        with self.settings(COMMENTS_FLAG_THRESHHOLD=1):
            response = self.client.get(
                reverse('molo.commenting:molo-comments-report',
                        args=(comment.pk,)))
        self.assertEqual(response.status_code, 302)
        [flag] = comment.flags.all()
        self.assertEqual(flag.comment, comment)
        self.assertEqual(flag.user, self.user)
        self.assertTrue(MoloComment.objects.get(pk=comment.pk).is_removed)
        self.assertTrue('The comment has been reported.'
                        in response.cookies['messages'].value)
    def test_molo_post_comment(self):
        """Posting a comment stores the supplied name."""
        data = MoloCommentForm(self.user, {}).generate_security_data()
        data.update({
            'name': 'the supplied name',
            'comment': 'Foo',
        })
        self.client.post(
            reverse('molo.commenting:molo-comments-post'), data)
        [comment] = MoloComment.objects.filter(user=self.user)
        self.assertEqual(comment.comment, 'Foo')
        self.assertEqual(comment.user_name, 'the supplied name')
    def test_molo_post_comment_anonymous(self):
        """Posting anonymously masks the name but keeps the user's email."""
        data = MoloCommentForm(self.user, {}).generate_security_data()
        data.update({
            'name': 'the supplied name',
            'comment': 'Foo',
            'submit_anonymously': '1',
        })
        self.client.post(
            reverse('molo.commenting:molo-comments-post'), data)
        [comment] = MoloComment.objects.filter(user=self.user)
        self.assertEqual(comment.comment, 'Foo')
        self.assertEqual(comment.user_name, 'Anonymous')
        self.assertEqual(comment.user_email, self.user.email)
    def test_molo_post_comment_without_email_address(self):
        """A user without an email gets a placeholder email on the comment."""
        self.user.email = ''
        self.user.save()
        data = MoloCommentForm(self.user, {}).generate_security_data()
        data.update({
            'name': 'the supplied name',
            'comment': 'Foo',
        })
        self.client.post(
            reverse('molo.commenting:molo-comments-post'), data)
        [comment] = MoloComment.objects.filter(user=self.user)
        self.assertEqual(comment.comment, 'Foo')
        self.assertEqual(comment.user_name, 'the supplied name')
        self.assertEqual(comment.user_email, 'blank@email.com')
    def test_report_response(self):
        """The report-response view confirms the comment was reported."""
        article = self.article1
        comment = MoloComment.objects.create(
            content_object=article, object_pk=article.id,
            content_type=ContentType.objects.get_for_model(article),
            site=Site.objects.get_current(), user=self.user,
            comment='comment 1', submit_date=timezone.now())
        response = self.client.get(reverse('molo.commenting:report_response',
                                           args=(comment.id,)))
        self.assertContains(
            response,
            "This comment has been reported."
        )
    def test_commenting_closed(self):
        article = self.article1
        article.save()
        initial = {
            'object_pk': article.id,
            'content_type': "core.articlepage"
        }
        data = MoloCommentForm(article, {},
                               initial=initial).generate_security_data()
        data.update({
            'comment': "This is another comment"
        })
        response = self.client.post(
            reverse('molo.commenting:molo-comments-post'), data)
        self.assertEqual(response.status_code, 302)
    def test_commenting_open(self):
        article = self.article1
        initial = {
            'object_pk': article.id,
            'content_type': "core.articlepage"
        }
        data = MoloCommentForm(article, {},
                               initial=initial).generate_security_data()
        data.update({
            'comment': "This is a second comment",
        })
        response = self.client.post(
            reverse('molo.commenting:molo-comments-post'), data)
        self.assertEqual(response.status_code, 302)
@override_settings(ROOT_URLCONF='molo.commenting.tests.test_views')
class ViewMoreCommentsTest(TestCase, MoloTestCaseMixin):
    """Tests for the paginated 'more comments' listing view."""
    def setUp(self):
        # Creates main page
        self.mk_main()
        self.user = User.objects.create_user(
            'test', 'test@example.org', 'test')
        self.yourmind = self.mk_section(
            self.section_index, title='Your mind')
        self.article = self.mk_article(
            title='article 1', slug='article-1', parent=self.yourmind)
        self.client = Client()
    def create_comment(self, comment, parent=None, user=None):
        # Helper: create a (possibly nested) comment on self.article.
        commenter = user or self.user
        return MoloComment.objects.create(
            content_type=ContentType.objects.get_for_model(self.article),
            object_pk=self.article.pk,
            content_object=self.article,
            site=Site.objects.get_current(),
            user=commenter,
            comment=comment,
            parent=parent,
            submit_date=timezone.now())
    def test_view_more_comments(self):
        """50 comments paginate over 3 pages with correct nav arrows."""
        for i in range(50):
            self.create_comment('comment %d' % i)
        response = self.client.get(
            reverse('molo.commenting:more-comments',
                    args=[self.article.pk, ],))
        self.assertContains(response, 'Page 1 of 3')
        self.assertContains(response, '→')
        self.assertNotContains(response, '←')
        response = self.client.get('%s?p=2' % (reverse(
            'molo.commenting:more-comments', args=[self.article.pk, ],),))
        self.assertContains(response, 'Page 2 of 3')
        self.assertContains(response, '→')
        self.assertContains(response, '←')
        response = self.client.get('%s?p=3' % (reverse(
            'molo.commenting:more-comments', args=[self.article.pk, ],),))
        self.assertContains(response, 'Page 3 of 3')
        self.assertNotContains(response, '→')
        self.assertContains(response, '←')
    def test_view_page_not_integer(self):
        '''If the requested page number is not an integer, the first page
        should be returned.'''
        response = self.client.get('%s?p=foo' % reverse(
            'molo.commenting:more-comments', args=(self.article.pk,)))
        self.assertContains(response, 'Page 1 of 1')
    def test_view_empty_page(self):
        '''If the requested page number is too large, it should show the
        last page.'''
        for i in range(40):
            self.create_comment('comment %d' % i)
        response = self.client.get('%s?p=3' % reverse(
            'molo.commenting:more-comments', args=(self.article.pk,)))
        self.assertContains(response, 'Page 2 of 2')
    def test_view_nested_comments(self):
        """Replies render directly under their parent, newest parents first."""
        comment1 = self.create_comment('test comment1 text')
        comment2 = self.create_comment('test comment2 text')
        comment3 = self.create_comment('test comment3 text')
        reply1 = self.create_comment('test reply1 text', parent=comment2)
        reply2 = self.create_comment('test reply2 text', parent=comment2)
        response = self.client.get(
            reverse('molo.commenting:more-comments', args=(self.article.pk,)))
        html = BeautifulSoup(response.content, 'html.parser')
        [c3row, c2row, reply1row, reply2row, c1row] = html.find_all(
            class_='comment-list__item')
        self.assertTrue(comment3.comment in c3row.prettify())
        self.assertTrue(comment2.comment in c2row.prettify())
        self.assertTrue(reply1.comment in reply1row.prettify())
        self.assertTrue(reply2.comment in reply2row.prettify())
        self.assertTrue(comment1.comment in c1row.prettify())
    def test_view_replies_report(self):
        '''If a comment is a reply, there should only be a report button
        if the reply is not made by an admin'''
        comment = self.create_comment('test comment1 text')
        reply = self.create_comment('test reply text', parent=comment)
        response = self.client.get(
            reverse('molo.commenting:more-comments', args=(self.article.pk,)))
        html = BeautifulSoup(response.content, 'html.parser')
        [crow, replyrow] = html.find_all(class_='comment-list__item')
        self.assertTrue(comment.comment in crow.prettify())
        self.assertTrue('report' in crow.prettify())
        self.assertTrue(reply.comment in replyrow.prettify())
        self.assertTrue('report' in replyrow.prettify())
        comment2 = self.create_comment('test comment2 text')
        superuser = User.objects.create_superuser(
            username='superuser',
            email='superuser@email.com',
            password='password'
        )
        reply2 = self.create_comment('test reply2 text',
                                     parent=comment2,
                                     user=superuser)
        response = self.client.get(
            reverse('molo.commenting:more-comments', args=(self.article.pk,)))
        html = BeautifulSoup(response.content, 'html.parser')
        [crow2, replyrow2, crow, replyrow] = html.find_all(
            class_='comment-list__item')
        self.assertTrue(comment2.comment in crow2.prettify())
        self.assertTrue('report' in crow2.prettify())
        self.assertTrue(reply2.comment in replyrow2.prettify())
        self.assertFalse('report' in replyrow2.prettify())
class TestFrontEndCommentReplies(TestCase, MoloTestCaseMixin):
    """Tests for which kinds of users may reply to comments on the front end."""
    def create_comment(self, article, comment, user, parent=None):
        # Helper: create a (possibly nested) MoloComment on ``article``.
        return MoloComment.objects.create(
            content_type=ContentType.objects.get_for_model(article),
            object_pk=article.pk,
            content_object=article,
            site=Site.objects.get_current(),
            user=user,
            comment=comment,
            parent=parent,
            submit_date=timezone.now())
    def setUp(self):
        self.mk_main()
        self.main = Main.objects.all().first()
        self.language_setting = Languages.objects.create(
            site_id=self.main.get_site().pk)
        self.english = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='en',
            is_active=True)
        self.client = Client()
        self.superuser = User.objects.create_superuser(
            username='superuser',
            email='superuser@email.com',
            password='password'
        )
        # One user per privileged group, plus an ordinary user, so each
        # test can exercise the reply permission for that role.
        self.moderator_group, _created = Group.objects.get_or_create(
            name='Moderator')
        self.comment_moderator_group, _created = Group.objects.get_or_create(
            name='Comment Moderator')
        self.expert_group, _created = Group.objects.get_or_create(
            name='Expert')
        self.moderator = User.objects.create_user(
            username='moderator',
            email='moderator@example.com',
            password='password',
        )
        self.moderator.groups.set([self.moderator_group])
        self.comment_moderator = User.objects.create_user(
            username='comment_moderator',
            email='comment_moderator@example.com',
            password='password',
        )
        self.comment_moderator.groups.set([self.comment_moderator_group])
        self.expert = User.objects.create_user(
            username='expert',
            email='expert@example.com',
            password='password',
        )
        self.expert.groups.set([self.expert_group])
        # create ordinary user
        self.bob = User.objects.create_user(
            username='bob',
            email='bob@example.com',
            password='password',
        )
        self.section = self.mk_section(
            self.section_index, title='section')
        self.article = self.mk_article(self.section, title='article 1',
                                       subtitle='article 1 subtitle',
                                       slug='article-1')
        self.comment = self.create_comment(
            article=self.article,
            comment="this_is_comment_content",
            user=self.bob
        )
    def check_reply_exists(self, client):
        """Assert the comment is listed with a working Reply link for ``client``."""
        response = client.get(
            reverse('molo.commenting:more-comments',
                    args=[self.article.pk, ],))
        # BUG FIX: was assertTrue(response.status_code, 200), which always
        # passed because the 200 was treated as the failure message.
        self.assertEqual(response.status_code, 200)
        html = BeautifulSoup(response.content, 'html.parser')
        [comment] = html.find_all(class_='comment-list__item')
        self.assertTrue(comment.find('p', string='this_is_comment_content'))
        self.assertTrue(comment.find('a', string='Reply'))
        comment_reply_url = comment.find('a', string='Reply')['href']
        response = self.client.get(comment_reply_url)
        # BUG FIX: same assertTrue-with-message mistake as above.
        self.assertEqual(response.status_code, 200)
    def test_expert_can_reply_to_comments_on_front_end(self):
        client = Client()
        client.login(
            username=self.expert.username, password='password')
        self.check_reply_exists(client)
    def test_moderator_can_reply_to_comments_on_front_end(self):
        client = Client()
        client.login(
            username=self.moderator.username, password='password')
        self.check_reply_exists(client)
    def test_comment_moderator_can_reply_to_comments_on_front_end(self):
        client = Client()
        client.login(
            username=self.comment_moderator.username, password='password')
        self.check_reply_exists(client)
    def test_superuser_can_reply_to_comments_on_front_end(self):
        client = Client()
        client.login(
            username=self.superuser.username, password='password')
        self.check_reply_exists(client)
    def test_ordinary_user_can_reply_to_comments_on_front_end(self):
        client = Client()
        client.login(
            username=self.bob.username, password='password')
        self.check_reply_exists(client)
    def test_user_cannot_reply_to_comments_when_logged_out(self):
        """Anonymous visitors see the comment but no Reply link."""
        response = self.client.get(
            reverse('molo.commenting:more-comments',
                    args=[self.article.pk, ],))
        # BUG FIX: was assertTrue(response.status_code, 200); see above.
        self.assertEqual(response.status_code, 200)
        html = BeautifulSoup(response.content, 'html.parser')
        [comment] = html.find_all(class_='comment-list__item')
        self.assertTrue(comment.find('p', string='this_is_comment_content'))
        self.assertFalse(comment.find('a', string='Reply'))
class TestThreadedComments(TestCase, MoloTestCaseMixin):
    """Exercise threaded (nested) comment display: per-article comment and
    reply limits, reply truncation, the dedicated reply view and its
    pagination."""

    def setUp(self):
        # Creates main page
        self.mk_main()
        self.main = Main.objects.all().first()
        self.language_setting = Languages.objects.create(
            site_id=self.main.get_site().pk)
        self.english = SiteLanguageRelation.objects.create(
            language_setting=self.language_setting,
            locale='en',
            is_active=True)
        self.user = User.objects.create_user(
            'test', 'test@example.org', 'test')
        self.section = self.mk_section(
            self.section_index, title='section')
        self.article = self.mk_article(self.section, title='article 1',
                                       subtitle='article 1 subtitle',
                                       slug='article-1')
        self.client = Client()

    def create_comment(self, comment, parent=None, user=None):
        """Attach a comment to the test article.

        *parent* makes it a threaded reply; *user* defaults to the test
        user created in setUp.
        """
        commenter = user or self.user
        return MoloComment.objects.create(
            content_type=ContentType.objects.get_for_model(self.article),
            object_pk=self.article.pk,
            content_object=self.article,
            site=Site.objects.get_current(),
            user=commenter,
            comment=comment,
            parent=parent,
            submit_date=timezone.now())

    def test_restrict_article_comment_count(self):
        # Only the two most recent top-level comments appear on the
        # article page; the oldest ('comment 0') is dropped.
        for i in range(3):
            self.create_comment('comment %d' % i)
        response = self.client.get(self.article.url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "comment 2")
        self.assertContains(response, "comment 1")
        self.assertNotContains(response, "comment 0")

    def test_restrict_article_comment_reply_count(self):
        # Replies are limited the same way: only the two most recent
        # replies to a comment are rendered on the article page.
        comment = self.create_comment('Original Comment')
        for i in range(3):
            self.create_comment('reply %d' % i, parent=comment)
        response = self.client.get(self.article.url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Original Comment")
        self.assertContains(response, "reply 2")
        self.assertContains(response, "reply 1")
        self.assertNotContains(response, "reply 0")

    def test_restrict_article_comment_reply_truncation(self):
        # A long reply is truncated on the article page: its opening words
        # appear, its tail does not.
        comment = self.create_comment('Original Comment')
        # NOTE: the backslash continuations below embed the following
        # lines' leading whitespace in the string; this is part of the
        # fixture text and must not be re-indented.
        comment_text = "Lorem ipsum dolor sit amet, consectetur adipisicing \
        tempor incididunt ut labore et dolore magna aliqua. Ut enim ad \
        quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea \
        consequat. Duis aute irure dolor in reprehenderit in voluptate \
        cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat \
        proident, sunt in culpa qui officia deserunt mollit anim id est"
        self.create_comment(comment_text, parent=comment)
        response = self.client.get(self.article.url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Original Comment")
        truncated_text = "Lorem ipsum dolor sit amet, consectetur adipisicing"
        self.assertContains(response, truncated_text)
        self.assertNotContains(response, "officia deserunt mollit anim id est")

    def test_comment_reply_list(self):
        # The dedicated reply view shows the parent comment and every reply.
        comment = self.create_comment('Original Comment')
        for i in range(3):
            self.create_comment('Reply %d' % i, parent=comment)
        response = self.client.get(
            reverse('molo.commenting:molo-comments-reply',
                    args=(comment.pk, )))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Original Comment')
        self.assertContains(response, 'Reply 0')
        self.assertContains(response, 'Reply 1')
        self.assertContains(response, 'Reply 2')

    def test_reply_pagination(self):
        # 15 replies paginate into 3 pages; next/previous arrows are shown
        # only where a neighbouring page exists.
        comment = self.create_comment('Original Comment')
        for i in range(15):
            self.create_comment('Reply %d' % i, parent=comment)
        response = self.client.get(
            reverse('molo.commenting:molo-comments-reply',
                    args=[comment.pk, ],))
        self.assertContains(response, 'Original Comment')
        self.assertContains(response, 'Page 1 of 3')
        self.assertContains(response, '→')
        self.assertNotContains(response, '←')
        response = self.client.get('%s?p=2' % (reverse(
            'molo.commenting:molo-comments-reply',
            args=[comment.pk, ],),))
        self.assertContains(response, 'Page 2 of 3')
        self.assertContains(response, '→')
        self.assertContains(response, '←')
        response = self.client.get('%s?p=3' % (reverse(
            'molo.commenting:molo-comments-reply',
            args=[comment.pk, ],),))
        self.assertContains(response, 'Page 3 of 3')
        self.assertNotContains(response, '→')
        self.assertContains(response, '←')
@override_settings(ROOT_URLCONF='molo.commenting.tests.test_views')
class ViewNotificationsRepliesOnCommentsTest(TestCase, MoloTestCaseMixin):
    """Check that a reply to a user's comment produces an on-site
    notification, and that reading it clears the unread indicator."""

    def setUp(self):
        # Creates main page
        self.mk_main()
        self.user = User.objects.create_user(
            'test', 'test@example.org', 'test')
        self.section = self.mk_section(
            self.section_index, title='section')
        self.article = self.mk_article(self.section, title='article 1',
                                       subtitle='article 1 subtitle',
                                       slug='article-1')
        self.client = Client()
        self.client.login(username='test', password='test')

    def test_notification_reply_list(self):
        # Post an initial comment as the logged-in user.
        data = MoloCommentForm(self.article, {}).generate_security_data()
        data.update({
            'name': 'the supplied name',
            'comment': 'Foo',
        })
        self.client.post(
            reverse('molo.commenting:molo-comments-post'), data)
        [comment] = MoloComment.objects.filter(user=self.user)
        self.assertEqual(comment.comment, 'Foo')
        self.assertEqual(comment.user_name, 'the supplied name')
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, 'Unread replies: 0')
        # Reply to that comment; this should raise a notification for its
        # author.
        data = MoloCommentForm(self.article, {}).generate_security_data()
        data.update({
            'name': 'the supplied name',
            'comment': 'Foo reply',
            'parent': comment.pk
        })
        self.client.post(
            reverse('molo.commenting:molo-comments-post'), data)
        self.assertEqual(Notification.objects.unread().count(), 1)
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        html = BeautifulSoup(response.content, 'html.parser')
        [ntfy] = html.find_all("div", class_='reply-notification')
        self.assertTrue(
            ntfy.find("p").get_text().strip() in [
                'You have 1 unread reply',
                'You have 2 unread replies'
            ])
        # Unread notifications
        response = self.client.get(
            reverse('molo.commenting:reply_list'))
        # BUG FIX: the original assertTrue(response, [...]) only asserted
        # that the response object is truthy (always true); assert the
        # page really reports the unread reply instead.
        self.assertEqual(response.status_code, 200)
        content = response.content.decode('utf-8')
        self.assertTrue(
            'You have 1 unread reply' in content or
            'You have 2 unread replies' in content)
        n = Notification.objects.filter(recipient=self.user).first()
        n.mark_as_read()
        self.assertEqual(Notification.objects.unread().count(), 0)
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, 'Unread replies: 0')
        # Read notifications
        response = self.client.get(
            reverse('molo.commenting:reply_list'))
        self.assertEqual(Notification.objects.read().count(), 1)
        self.assertNotContains(response, 'You have 0 unread replies')
        self.assertContains(response, 'Read')
| bsd-2-clause |
netscaler/horizon | openstack_dashboard/dashboards/admin/routers/tables.py | 6 | 2581 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.template.defaultfilters import title # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers import tables as r_tables
LOG = logging.getLogger(__name__)
class DeleteRouter(r_tables.DeleteRouter):
    """Admin-dashboard variant of the router delete action.

    Detaches every internal interface first, then delegates the actual
    deletion to the project-dashboard implementation.
    """
    redirect_url = "horizon:admin:routers:index"

    def delete(self, request, obj_id):
        # Router-interface ports must be detached before the router itself
        # can be removed.
        router_ports = api.neutron.port_list(
            request,
            device_owner='network:router_interface',
            device_id=obj_id)
        for router_port in router_ports:
            api.neutron.router_remove_interface(
                request, obj_id, port_id=router_port.id)
        super(DeleteRouter, self).delete(request, obj_id)

    def allowed(self, request, router=None):
        # Admins may delete any router, regardless of its state.
        return True
class UpdateRow(tables.Row):
    """Table row that refreshes itself via AJAX polling."""
    ajax = True

    def get_data(self, request, router_id):
        """Return the current state of the router backing this row."""
        return api.neutron.router_get(request, router_id)
class RoutersTable(tables.DataTable):
    """Admin-dashboard table listing Neutron routers across all projects."""
    # Column declaration order defines the on-screen column order.
    tenant = tables.Column("tenant_name", verbose_name=_("Project"))
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:admin:routers:detail")
    # 'status=True' lets UpdateRow poll this column until it settles.
    status = tables.Column("status",
                           filters=(title,),
                           verbose_name=_("Status"),
                           status=True)
    ext_net = tables.Column(r_tables.get_external_network,
                            verbose_name=_("External Network"))

    def get_object_display(self, obj):
        # Used in confirmation dialogs and action messages.
        return obj.name

    class Meta:
        # NOTE(review): horizon table slugs are conventionally lowercase;
        # confirm the capitalised "Routers" is intentional.
        name = "Routers"
        verbose_name = _("Routers")
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (DeleteRouter,)
        row_actions = (DeleteRouter,)
| apache-2.0 |
glneo/gnuradio-davisaf | gr-digital/python/ofdm.py | 10 | 29462 | #!/usr/bin/env python
#
# Copyright 2006,2007,2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
from gnuradio import gr
import digital_swig
import ofdm_packet_utils
from ofdm_receiver import ofdm_receiver
import gnuradio.gr.gr_threading as _threading
import psk, qam
# /////////////////////////////////////////////////////////////////////////////
# mod/demod with packets as i/o
# /////////////////////////////////////////////////////////////////////////////
class ofdm_mod(gr.hier_block2):
    """
    Modulates an OFDM stream. Based on the options fft_length, occupied_tones, and
    cp_length, this block creates OFDM symbols using a specified modulation option.

    Send packets by calling send_pkt
    """
    def __init__(self, options, msgq_limit=2, pad_for_usrp=True):
        """
        Hierarchical block for sending packets

        Packets to be sent are enqueued by calling send_pkt.
        The output is the complex modulated signal at baseband.

        @param options: pass modulation options from higher layers (fft length, occupied tones, etc.)
        @param msgq_limit: maximum number of messages in message queue
        @type msgq_limit: int
        @param pad_for_usrp: If true, packets are padded such that they end up a multiple of 128 samples
        """

	gr.hier_block2.__init__(self, "ofdm_mod",
				gr.io_signature(0, 0, 0),       # Input signature
				gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature

        self._pad_for_usrp = pad_for_usrp
        self._modulation = options.modulation
        self._fft_length = options.fft_length
        self._occupied_tones = options.occupied_tones
        self._cp_length = options.cp_length

        # Empty window list means the FFT block uses no windowing.
        win = [] #[1 for i in range(self._fft_length)]

        # Use freq domain to get doubled-up known symbol for correlation in time domain
        zeros_on_left = int(math.ceil((self._fft_length - self._occupied_tones)/2.0))
        ksfreq = known_symbols_4512_3[0:self._occupied_tones]
        # Zero every other occupied carrier; NOTE(review): presumably this
        # gives the time-domain preamble the repeated half-symbol structure
        # the receiver's sync correlator expects -- confirm against
        # ofdm_receiver.
        for i in range(len(ksfreq)):
            if((zeros_on_left + i) & 1):
                ksfreq[i] = 0

        # hard-coded known symbols
        preambles = (ksfreq,)

        # Pad each preamble out to the full FFT size with the occupied
        # tones centered in the band.
        padded_preambles = list()
        for pre in preambles:
            padded = self._fft_length*[0,]
            padded[zeros_on_left : zeros_on_left + self._occupied_tones] = pre
            padded_preambles.append(padded)

        symbol_length = options.fft_length + options.cp_length

        # Modulation name -> constellation size (arity).
        mods = {"bpsk": 2, "qpsk": 4, "8psk": 8, "qam8": 8, "qam16": 16, "qam64": 64, "qam256": 256}
        arity = mods[self._modulation]

        # Rotate QPSK by 45 degrees (0.707+0.707j) so points sit on the axes' diagonals.
        rot = 1
        if self._modulation == "qpsk":
            rot = (0.707+0.707j)

        # FIXME: pass the constellation objects instead of just the points
        # NOTE: an unrecognised modulation string leaves rotated_const
        # unbound and raises NameError below.
        if(self._modulation.find("psk") >= 0):
            constel = psk.psk_constellation(arity)
            rotated_const = map(lambda pt: pt * rot, constel.points())
        elif(self._modulation.find("qam") >= 0):
            constel = qam.qam_constellation(arity)
            rotated_const = map(lambda pt: pt * rot, constel.points())
        #print rotated_const

        # Flowgraph: bit->symbol mapper -> preamble inserter -> IFFT
        # -> cyclic-prefix adder -> amplitude scaling -> this block's output.
        self._pkt_input = digital_swig.ofdm_mapper_bcv(rotated_const,
                                                       msgq_limit,
                                                       options.occupied_tones,
                                                       options.fft_length)

        self.preambles = digital_swig.ofdm_insert_preamble(self._fft_length,
                                                           padded_preambles)
        self.ifft = gr.fft_vcc(self._fft_length, False, win, True)
        self.cp_adder = digital_swig.ofdm_cyclic_prefixer(self._fft_length,
                                                          symbol_length)
        # Normalize so the IFFT does not change the signal power.
        self.scale = gr.multiply_const_cc(1.0 / math.sqrt(self._fft_length))

        self.connect((self._pkt_input, 0), (self.preambles, 0))
        self.connect((self._pkt_input, 1), (self.preambles, 1))
        self.connect(self.preambles, self.ifft, self.cp_adder, self.scale, self)

        if options.verbose:
            self._print_verbage()

        # Optionally tap each stage to disk for offline debugging.
        if options.log:
            self.connect(self._pkt_input, gr.file_sink(gr.sizeof_gr_complex*options.fft_length,
                                                       "ofdm_mapper_c.dat"))
            self.connect(self.preambles, gr.file_sink(gr.sizeof_gr_complex*options.fft_length,
                                                      "ofdm_preambles.dat"))
            self.connect(self.ifft, gr.file_sink(gr.sizeof_gr_complex*options.fft_length,
                                                 "ofdm_ifft_c.dat"))
            self.connect(self.cp_adder, gr.file_sink(gr.sizeof_gr_complex,
                                                     "ofdm_cp_adder_c.dat"))

    def send_pkt(self, payload='', eof=False):
        """
        Send the payload.

        @param payload: data to send
        @type payload: string
        """
        if eof:
            msg = gr.message(1) # tell self._pkt_input we're not sending any more packets
        else:
            # print "original_payload =", string_to_hex_list(payload)
            # Frame the payload (CRC etc.) and whiten it before queueing.
            pkt = ofdm_packet_utils.make_packet(payload, 1, 1,
                                                self._pad_for_usrp,
                                                whitening=True)

            #print "pkt =", string_to_hex_list(pkt)
            msg = gr.message_from_string(pkt)
        self._pkt_input.msgq().insert_tail(msg)

    def add_options(normal, expert):
        """
        Adds OFDM-specific options to the Options Parser
        """
        normal.add_option("-m", "--modulation", type="string", default="bpsk",
                          help="set modulation type (bpsk, qpsk, 8psk, qam{16,64}) [default=%default]")
        expert.add_option("", "--fft-length", type="intx", default=512,
                          help="set the number of FFT bins [default=%default]")
        expert.add_option("", "--occupied-tones", type="intx", default=200,
                          help="set the number of occupied FFT bins [default=%default]")
        expert.add_option("", "--cp-length", type="intx", default=128,
                          help="set the number of bits in the cyclic prefix [default=%default]")
    # Make a static method to call before instantiation
    add_options = staticmethod(add_options)

    def _print_verbage(self):
        """
        Prints information about the OFDM modulator
        """
        # NOTE(review): method name 'verbage' is a long-standing typo kept
        # for compatibility (also used by ofdm_demod).
        print "\nOFDM Modulator:"
        print "Modulation Type: %s"    % (self._modulation)
        print "FFT length:      %3d"   % (self._fft_length)
        print "Occupied Tones:  %3d"   % (self._occupied_tones)
        print "CP length:       %3d"   % (self._cp_length)
class ofdm_demod(gr.hier_block2):
    """
    Demodulates a received OFDM stream. Based on the options fft_length, occupied_tones, and
    cp_length, this block performs synchronization, FFT, and demodulation of incoming OFDM
    symbols and passes packets up the a higher layer.

    The input is complex baseband.  When packets are demodulated, they are passed to the
    app via the callback.
    """

    def __init__(self, options, callback=None):
        """
        Hierarchical block for demodulating and deframing packets.

        The input is the complex modulated signal at baseband.
        Demodulated packets are sent to the handler.

        @param options: pass modulation options from higher layers (fft length, occupied tones, etc.)
        @param callback:  function of two args: ok, payload
        @type callback: ok: bool; payload: string
        """
	gr.hier_block2.__init__(self, "ofdm_demod",
				gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
				gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature


        self._rcvd_pktq = gr.msg_queue()          # holds packets from the PHY

        self._modulation = options.modulation
        self._fft_length = options.fft_length
        self._occupied_tones = options.occupied_tones
        self._cp_length = options.cp_length
        self._snr = options.snr

        # Use freq domain to get doubled-up known symbol for correlation in time domain
        zeros_on_left = int(math.ceil((self._fft_length - self._occupied_tones)/2.0))
        ksfreq = known_symbols_4512_3[0:self._occupied_tones]
        # Zero every other occupied carrier -- must match the preamble
        # construction in ofdm_mod so the sync correlation works.
        for i in range(len(ksfreq)):
            if((zeros_on_left + i) & 1):
                ksfreq[i] = 0

        # hard-coded known symbols
        preambles = (ksfreq,)

        symbol_length = self._fft_length + self._cp_length
        self.ofdm_recv = ofdm_receiver(self._fft_length,
                                       self._cp_length,
                                       self._occupied_tones,
                                       self._snr, preambles,
                                       options.log)

        # Modulation name -> constellation size (arity); mirrors ofdm_mod.
        mods = {"bpsk": 2, "qpsk": 4, "8psk": 8, "qam8": 8, "qam16": 16, "qam64": 64, "qam256": 256}
        arity = mods[self._modulation]

        # Same 45-degree QPSK rotation as the modulator.
        rot = 1
        if self._modulation == "qpsk":
            rot = (0.707+0.707j)

        # FIXME: pass the constellation objects instead of just the points
        # NOTE: an unrecognised modulation string leaves rotated_const
        # unbound and raises NameError below.
        if(self._modulation.find("psk") >= 0):
            constel = psk.psk_constellation(arity)
            rotated_const = map(lambda pt: pt * rot, constel.points())
        elif(self._modulation.find("qam") >= 0):
            constel = qam.qam_constellation(arity)
            rotated_const = map(lambda pt: pt * rot, constel.points())
        #print rotated_const

        # Phase/frequency tracking loop gains for the frame sink
        # (second-order loop: freq gain derived from phase gain).
        phgain = 0.25
        frgain = phgain*phgain / 4.0
        self.ofdm_demod = digital_swig.ofdm_frame_sink(rotated_const, range(arity),
                                                       self._rcvd_pktq,
                                                       self._occupied_tones,
                                                       phgain, frgain)

        self.connect(self, self.ofdm_recv)
        self.connect((self.ofdm_recv, 0), (self.ofdm_demod, 0))
        self.connect((self.ofdm_recv, 1), (self.ofdm_demod, 1))

        # added output signature to work around bug, though it might not be a bad
        # thing to export, anyway
        self.connect(self.ofdm_recv.chan_filt, self)

        if options.log:
            self.connect(self.ofdm_demod,
                         gr.file_sink(gr.sizeof_gr_complex*self._occupied_tones,
                                      "ofdm_frame_sink_c.dat"))
        else:
            self.connect(self.ofdm_demod,
                         gr.null_sink(gr.sizeof_gr_complex*self._occupied_tones))

        if options.verbose:
            self._print_verbage()

        # Background thread that pops decoded packets off the queue and
        # forwards them to the application callback.
        self._watcher = _queue_watcher_thread(self._rcvd_pktq, callback)

    def add_options(normal, expert):
        """
        Adds OFDM-specific options to the Options Parser
        """
        normal.add_option("-m", "--modulation", type="string", default="bpsk",
                          help="set modulation type (bpsk or qpsk) [default=%default]")
        expert.add_option("", "--fft-length", type="intx", default=512,
                          help="set the number of FFT bins [default=%default]")
        expert.add_option("", "--occupied-tones", type="intx", default=200,
                          help="set the number of occupied FFT bins [default=%default]")
        expert.add_option("", "--cp-length", type="intx", default=128,
                          help="set the number of bits in the cyclic prefix [default=%default]")
        expert.add_option("", "--snr", type="float", default=30.0,
                          help="SNR estimate [default=%default]")
    # Make a static method to call before instantiation
    add_options = staticmethod(add_options)

    def _print_verbage(self):
        """
        Prints information about the OFDM demodulator
        """
        print "\nOFDM Demodulator:"
        print "Modulation Type: %s"    % (self._modulation)
        print "FFT length:      %3d"   % (self._fft_length)
        print "Occupied Tones:  %3d"   % (self._occupied_tones)
        print "CP length:       %3d"   % (self._cp_length)
class _queue_watcher_thread(_threading.Thread):
    """Daemon thread that drains the PHY packet queue.

    Each message popped off the queue is deframed with
    ofdm_packet_utils.unmake_packet and handed to the application
    callback as (ok, payload).
    """
    def __init__(self, rcvd_pktq, callback):
        _threading.Thread.__init__(self)
        self.rcvd_pktq = rcvd_pktq
        self.callback = callback
        self.keep_running = True
        # Daemon: do not block interpreter shutdown while waiting on the queue.
        self.setDaemon(1)
        self.start()

    def run(self):
        while self.keep_running:
            # delete_head() blocks until a packet arrives.
            message = self.rcvd_pktq.delete_head()
            ok, payload = ofdm_packet_utils.unmake_packet(message.to_string())
            if self.callback:
                self.callback(ok, payload)
# Generating known symbols with:
# i = [2*random.randint(0,1)-1 for i in range(4512)]
known_symbols_4512_3 = [-1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 
-1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, 
-1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 
-1, -1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 
-1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 
-1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 
-1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, 1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 1, -1]
| gpl-3.0 |
hottwaj/django | django/core/mail/message.py | 310 | 17138 | from __future__ import unicode_literals
import mimetypes
import os
import random
import time
from email import (
charset as Charset, encoders as Encoders, generator, message_from_string,
)
from email.header import Header
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr, formatdate, getaddresses, parseaddr
from io import BytesIO
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils import six
from django.utils.encoding import force_text
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
# A Charset with body_encoding=None leaves the payload as raw (quoted-printable
# free) 8-bit UTF-8 instead of Python's BASE64 default.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None  # Python defaults to BASE64

# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
    """Raised when a header value contains a newline (header-injection guard)."""
    pass
# Copied from Python 3.2+ standard library, with the following modifications:
# * Used cached hostname for performance.
# TODO: replace with email.utils.make_msgid(.., domain=DNS_NAME) when dropping
# Python 2 (Python 2's version doesn't have domain parameter) (#23905).
def make_msgid(idstring=None, domain=None):
    """Build an RFC 2822 compliant Message-ID string, e.g:

    <20020201195627.33539.96671@nightshade.la.mastaler.com>

    Optional ``idstring``, if given, is appended to strengthen the
    uniqueness of the message id.  Optional ``domain``, if given, provides
    the portion of the message id after the '@'; it defaults to the
    locally defined hostname.
    """
    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(time.time()))
    suffix = '' if idstring is None else '.' + idstring
    if domain is None:
        # stdlib uses socket.getfqdn() here instead; the cached DNS_NAME
        # avoids repeated hostname lookups.
        domain = DNS_NAME
    return '<%s.%s.%s%s@%s>' % (
        utcdate, os.getpid(), random.randrange(100000), suffix, domain)
# Header names that contain structured address data (RFC #5322).
# Values for these headers are split with getaddresses() and each address
# is sanitized individually instead of being encoded as one opaque blob.
ADDRESS_HEADERS = {
    'from',
    'sender',
    'reply-to',
    'to',
    'cc',
    'bcc',
    'resent-from',
    'resent-sender',
    'resent-to',
    'resent-cc',
    'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
    """Reject header values containing newlines, to prevent header injection.

    Returns a (name, value) pair with the value encoded for safe use in an
    email header; raises BadHeaderError on embedded CR/LF.
    """
    encoding = encoding or settings.DEFAULT_CHARSET
    val = force_text(val)
    if ('\n' in val) or ('\r' in val):
        raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
    is_ascii = True
    try:
        val.encode('ascii')
    except UnicodeEncodeError:
        is_ascii = False
    if not is_ascii:
        if name.lower() in ADDRESS_HEADERS:
            # Encode each address individually so display names survive.
            sanitized = [sanitize_address(addr, encoding)
                         for addr in getaddresses((val,))]
            val = ', '.join(sanitized)
        else:
            val = Header(val, encoding).encode()
    elif name.lower() == 'subject':
        val = Header(val).encode()
    return str(name), val
def sanitize_address(addr, encoding):
    """Format a (name, address) pair -- or a raw address string -- as a
    header-safe, ASCII-encoded RFC 2822 address."""
    if not isinstance(addr, tuple):
        addr = parseaddr(force_text(addr))
    display_name, email_addr = addr
    display_name = Header(display_name, encoding).encode()
    try:
        email_addr.encode('ascii')
    except UnicodeEncodeError:  # IDN
        if '@' in email_addr:
            # Encode the local part with the requested charset and the
            # domain with IDNA, per internationalized-address rules.
            localpart, domain = email_addr.split('@', 1)
            localpart = str(Header(localpart, encoding))
            domain = domain.encode('idna').decode('ascii')
            email_addr = localpart + '@' + domain
        else:
            email_addr = Header(email_addr, encoding).encode()
    return formataddr((display_name, email_addr))
class MIMEMixin():
    # Mixin providing as_string()/as_bytes() that do not mangle body lines
    # beginning with 'From ' (Django bug #13433).

    def as_string(self, unixfrom=False, linesep='\n'):
        """Return the entire formatted message as a string.
        Optional `unixfrom' when True, means include the Unix From_ envelope
        header.

        This overrides the default as_string() implementation to not mangle
        lines that begin with 'From '. See bug #13433 for details.
        """
        fp = six.StringIO()
        g = generator.Generator(fp, mangle_from_=False)
        if six.PY2:
            # Python 2's Generator.flatten() does not accept linesep.
            g.flatten(self, unixfrom=unixfrom)
        else:
            g.flatten(self, unixfrom=unixfrom, linesep=linesep)
        return fp.getvalue()

    if six.PY2:
        # On Python 2, str is bytes, so as_string() already returns bytes.
        as_bytes = as_string
    else:
        def as_bytes(self, unixfrom=False, linesep='\n'):
            """Return the entire formatted message as bytes.
            Optional `unixfrom' when True, means include the Unix From_ envelope
            header.

            This overrides the default as_bytes() implementation to not mangle
            lines that begin with 'From '. See bug #13433 for details.
            """
            fp = BytesIO()
            g = generator.BytesGenerator(fp, mangle_from_=False)
            g.flatten(self, unixfrom=unixfrom, linesep=linesep)
            return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
    # MIMEMessage with injection-safe header assignment.

    def __setitem__(self, name, val):
        # message/rfc822 attachments must be ASCII
        name, val = forbid_multi_line_headers(name, val, 'ascii')
        MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
    # MIMEText with injection-safe headers and a UTF-8 workaround that
    # avoids BASE64 body encoding (see utf8_charset above).

    def __init__(self, _text, _subtype='plain', _charset=None):
        self.encoding = _charset
        if _charset == 'utf-8':
            # Unfortunately, Python < 3.5 doesn't support setting a Charset instance
            # as MIMEText init parameter (http://bugs.python.org/issue16324).
            # We do it manually and trigger re-encoding of the payload.
            MIMEText.__init__(self, _text, _subtype, None)
            del self['Content-Transfer-Encoding']
            self.set_payload(_text, utf8_charset)
            self.replace_header('Content-Type', 'text/%s; charset="%s"' % (_subtype, _charset))
        elif _charset is None:
            # the default value of '_charset' is 'us-ascii' on Python 2
            MIMEText.__init__(self, _text, _subtype)
        else:
            MIMEText.__init__(self, _text, _subtype, _charset)

    def __setitem__(self, name, val):
        # Guard every header assignment against header injection.
        name, val = forbid_multi_line_headers(name, val, self.encoding)
        MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
    # MIMEMultipart with injection-safe header assignment.

    def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
        self.encoding = encoding
        MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)

    def __setitem__(self, name, val):
        name, val = forbid_multi_line_headers(name, val, self.encoding)
        MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
    """
    A container for email information.
    """
    content_subtype = 'plain'
    mixed_subtype = 'mixed'
    encoding = None  # None => use settings default

    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None, cc=None,
                 reply_to=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).

        All strings used to create the message can be unicode strings
        (or UTF-8 bytestrings). The SafeMIMEText class will handle any
        necessary encoding conversions.
        """
        # Recipient arguments must be sequences, not bare strings; a string
        # would silently iterate character by character.
        if to:
            if isinstance(to, six.string_types):
                raise TypeError('"to" argument must be a list or tuple')
            self.to = list(to)
        else:
            self.to = []
        if cc:
            if isinstance(cc, six.string_types):
                raise TypeError('"cc" argument must be a list or tuple')
            self.cc = list(cc)
        else:
            self.cc = []
        if bcc:
            if isinstance(bcc, six.string_types):
                raise TypeError('"bcc" argument must be a list or tuple')
            self.bcc = list(bcc)
        else:
            self.bcc = []
        if reply_to:
            if isinstance(reply_to, six.string_types):
                raise TypeError('"reply_to" argument must be a list or tuple')
            self.reply_to = list(reply_to)
        else:
            self.reply_to = []
        self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
        self.subject = subject
        self.body = body
        self.attachments = attachments or []
        self.extra_headers = headers or {}
        self.connection = connection

    def get_connection(self, fail_silently=False):
        # Lazily create (and cache) the backend connection.
        from django.core.mail import get_connection
        if not self.connection:
            self.connection = get_connection(fail_silently=fail_silently)
        return self.connection

    def message(self):
        """Build and return the underlying MIME message object."""
        encoding = self.encoding or settings.DEFAULT_CHARSET
        msg = SafeMIMEText(self.body, self.content_subtype, encoding)
        msg = self._create_message(msg)
        msg['Subject'] = self.subject
        msg['From'] = self.extra_headers.get('From', self.from_email)
        msg['To'] = self.extra_headers.get('To', ', '.join(map(force_text, self.to)))
        if self.cc:
            msg['Cc'] = ', '.join(map(force_text, self.cc))
        if self.reply_to:
            msg['Reply-To'] = self.extra_headers.get('Reply-To', ', '.join(map(force_text, self.reply_to)))

        # Email header names are case-insensitive (RFC 2045), so we have to
        # accommodate that when doing comparisons.
        header_names = [key.lower() for key in self.extra_headers]
        if 'date' not in header_names:
            msg['Date'] = formatdate()
        if 'message-id' not in header_names:
            # Use cached DNS_NAME for performance
            msg['Message-ID'] = make_msgid(domain=DNS_NAME)
        for name, value in self.extra_headers.items():
            if name.lower() in ('from', 'to'):  # From and To are already handled
                continue
            msg[name] = value
        return msg

    def recipients(self):
        """
        Returns a list of all recipients of the email (includes direct
        addressees as well as Cc and Bcc entries).
        """
        return self.to + self.cc + self.bcc

    def send(self, fail_silently=False):
        """Sends the email message."""
        if not self.recipients():
            # Don't bother creating the network connection if there's nobody to
            # send to.
            return 0
        return self.get_connection(fail_silently).send_messages([self])

    def attach(self, filename=None, content=None, mimetype=None):
        """
        Attaches a file with the given filename and content. The filename can
        be omitted and the mimetype is guessed, if not provided.

        If the first parameter is a MIMEBase subclass it is inserted directly
        into the resulting message attachments.
        """
        if isinstance(filename, MIMEBase):
            assert content is None
            assert mimetype is None
            self.attachments.append(filename)
        else:
            assert content is not None
            self.attachments.append((filename, content, mimetype))

    def attach_file(self, path, mimetype=None):
        """
        Attaches a file from the filesystem.

        The mimetype will be set to the DEFAULT_ATTACHMENT_MIME_TYPE if it is
        not specified and cannot be guessed or (PY3 only) if it suggests
        text/* for a binary file.
        """
        filename = os.path.basename(path)
        if not mimetype:
            mimetype, _ = mimetypes.guess_type(filename)
            if not mimetype:
                mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
        basetype, subtype = mimetype.split('/', 1)
        read_mode = 'r' if basetype == 'text' else 'rb'
        content = None

        with open(path, read_mode) as f:
            try:
                content = f.read()
            except UnicodeDecodeError:
                # If mimetype suggests the file is text but it's actually
                # binary, read() will raise a UnicodeDecodeError on Python 3.
                pass

        # If the previous read in text mode failed, try binary mode.
        if content is None:
            with open(path, 'rb') as f:
                content = f.read()
                mimetype = DEFAULT_ATTACHMENT_MIME_TYPE

        self.attach(filename, content, mimetype)

    def _create_message(self, msg):
        # Hook point: subclasses (e.g. EmailMultiAlternatives) extend this.
        return self._create_attachments(msg)

    def _create_attachments(self, msg):
        # Wrap the body in a multipart/mixed container when attachments exist.
        if self.attachments:
            encoding = self.encoding or settings.DEFAULT_CHARSET
            body_msg = msg
            msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
            if self.body:
                msg.attach(body_msg)
            for attachment in self.attachments:
                if isinstance(attachment, MIMEBase):
                    msg.attach(attachment)
                else:
                    msg.attach(self._create_attachment(*attachment))
        return msg

    def _create_mime_attachment(self, content, mimetype):
        """
        Converts the content, mimetype pair into a MIME attachment object.

        If the mimetype is message/rfc822, content may be an
        email.Message or EmailMessage object, as well as a str.
        """
        basetype, subtype = mimetype.split('/', 1)
        if basetype == 'text':
            encoding = self.encoding or settings.DEFAULT_CHARSET
            attachment = SafeMIMEText(content, subtype, encoding)
        elif basetype == 'message' and subtype == 'rfc822':
            # Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
            # must not be base64 encoded.
            if isinstance(content, EmailMessage):
                # convert content into an email.Message first
                content = content.message()
            elif not isinstance(content, Message):
                # For compatibility with existing code, parse the message
                # into an email.Message object if it is not one already.
                content = message_from_string(content)

            attachment = SafeMIMEMessage(content, subtype)
        else:
            # Encode non-text attachments with base64.
            attachment = MIMEBase(basetype, subtype)
            attachment.set_payload(content)
            Encoders.encode_base64(attachment)
        return attachment

    def _create_attachment(self, filename, content, mimetype=None):
        """
        Converts the filename, content, mimetype triple into a MIME attachment
        object.
        """
        if mimetype is None:
            mimetype, _ = mimetypes.guess_type(filename)
            if mimetype is None:
                mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
        attachment = self._create_mime_attachment(content, mimetype)
        if filename:
            try:
                filename.encode('ascii')
            except UnicodeEncodeError:
                # Non-ASCII filenames use the RFC 2231 (charset, lang, value)
                # triple form.
                if six.PY2:
                    filename = filename.encode('utf-8')
                filename = ('utf-8', '', filename)
            attachment.add_header('Content-Disposition', 'attachment',
                                  filename=filename)
        return attachment
class EmailMultiAlternatives(EmailMessage):
    """
    A version of EmailMessage that makes it easy to send multipart/alternative
    messages. For example, including text and HTML versions of the text is
    made easier.
    """
    alternative_subtype = 'alternative'

    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None, alternatives=None,
                 cc=None, reply_to=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).

        All strings used to create the message can be unicode strings (or UTF-8
        bytestrings). The SafeMIMEText class will handle any necessary encoding
        conversions.
        """
        super(EmailMultiAlternatives, self).__init__(
            subject, body, from_email, to, bcc, connection, attachments,
            headers, cc, reply_to,
        )
        # List of (content, mimetype) pairs attached as alternative parts.
        self.alternatives = alternatives or []

    def attach_alternative(self, content, mimetype):
        """Attach an alternative content representation."""
        assert content is not None
        assert mimetype is not None
        self.alternatives.append((content, mimetype))

    def _create_message(self, msg):
        # Alternatives nest inside the attachments container.
        return self._create_attachments(self._create_alternatives(msg))

    def _create_alternatives(self, msg):
        # Wrap the body in a multipart/alternative container when needed.
        encoding = self.encoding or settings.DEFAULT_CHARSET
        if self.alternatives:
            body_msg = msg
            msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
            if self.body:
                msg.attach(body_msg)
            for alternative in self.alternatives:
                msg.attach(self._create_mime_attachment(*alternative))
        return msg
| bsd-3-clause |
maxosprojects/cozmo-blockly | closure-library/closure/bin/build/closurebuilder.py | 25 | 9934 | #!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for Closure Library dependency calculation.
ClosureBuilder scans source files to build dependency info. From the
dependencies, the script can produce a manifest in dependency order,
a concatenated script, or compiled output from the Closure Compiler.
Paths to files can be expressed as individual arguments to the tool (intended
for use with find and xargs). As a convenience, --root can be used to specify
all JS files below a directory.
usage: %prog [options] [file1.js file2.js ...]
"""
__author__ = 'nnaze@google.com (Nathan Naze)'
import io
import logging
import optparse
import os
import sys
import depstree
import jscompiler
import source
import treescan
def _GetOptionsParser():
    """Get the options parser.

    Returns an optparse.OptionParser configured with all closurebuilder
    command-line flags; the module docstring is used as the usage text.
    """
    parser = optparse.OptionParser(__doc__)
    parser.add_option('-i',
                      '--input',
                      dest='inputs',
                      action='append',
                      default=[],
                      help='One or more input files to calculate dependencies '
                      'for. The namespaces in this file will be combined with '
                      'those given with the -n flag to form the set of '
                      'namespaces to find dependencies for.')
    parser.add_option('-n',
                      '--namespace',
                      dest='namespaces',
                      action='append',
                      default=[],
                      help='One or more namespaces to calculate dependencies '
                      'for. These namespaces will be combined with those given '
                      'with the -i flag to form the set of namespaces to find '
                      'dependencies for. A Closure namespace is a '
                      'dot-delimited path expression declared with a call to '
                      'goog.provide() (e.g. "goog.array" or "foo.bar").')
    parser.add_option('--root',
                      dest='roots',
                      action='append',
                      default=[],
                      help='The paths that should be traversed to build the '
                      'dependencies.')
    parser.add_option('-o',
                      '--output_mode',
                      dest='output_mode',
                      type='choice',
                      action='store',
                      choices=['list', 'script', 'compiled'],
                      default='list',
                      help='The type of output to generate from this script. '
                      'Options are "list" for a list of filenames, "script" '
                      'for a single script containing the contents of all the '
                      'files, or "compiled" to produce compiled output with '
                      'the Closure Compiler. Default is "list".')
    parser.add_option('-c',
                      '--compiler_jar',
                      dest='compiler_jar',
                      action='store',
                      help='The location of the Closure compiler .jar file.')
    parser.add_option('-f',
                      '--compiler_flags',
                      dest='compiler_flags',
                      default=[],
                      action='append',
                      help='Additional flags to pass to the Closure compiler. '
                      'To pass multiple flags, --compiler_flags has to be '
                      'specified multiple times.')
    parser.add_option('-j',
                      '--jvm_flags',
                      dest='jvm_flags',
                      default=[],
                      action='append',
                      help='Additional flags to pass to the JVM compiler. '
                      'To pass multiple flags, --jvm_flags has to be '
                      'specified multiple times.')
    parser.add_option('--output_file',
                      dest='output_file',
                      action='store',
                      help=('If specified, write output to this path instead of '
                            'writing to standard output.'))

    return parser
def _GetInputByPath(path, sources):
    """Get the source identified by a path.

    Args:
      path: str, A path to a file that identifies a source.
      sources: An iterable collection of source objects.

    Returns:
      The source from sources identified by path, if found. Converts to
      real paths for comparison.
    """
    real_path = os.path.realpath(path)
    for candidate in sources:
        # Compare canonicalized paths so symlinks/relative paths match.
        if real_path == os.path.realpath(candidate.GetPath()):
            return candidate
def _GetClosureBaseFile(sources):
    """Given a set of sources, returns the one base.js file.

    Note that if zero or two or more base.js files are found, an error message
    will be written and the program will be exited.

    Args:
      sources: An iterable of _PathSource objects.

    Returns:
      The _PathSource representing the base Closure file.
    """
    base_files = [candidate for candidate in sources
                  if _IsClosureBaseFile(candidate)]

    if len(base_files) != 1:
        if not base_files:
            logging.error('No Closure base.js file found.')
        else:
            logging.error('More than one Closure base.js files found at these paths:')
            for base_file in base_files:
                logging.error(base_file.GetPath())
        sys.exit(1)
    return base_files[0]
def _IsClosureBaseFile(js_source):
    """Returns true if the given _PathSource is the Closure base.js source."""
    is_base_name = os.path.basename(js_source.GetPath()) == 'base.js'
    # base.js is the only file that provides exactly the 'goog' namespace.
    return is_base_name and js_source.provides == set(['goog'])
class _PathSource(source.Source):
    """Source file subclass that remembers its file path."""

    def __init__(self, path):
        """Initialize a source.

        Args:
          path: str, Path to a JavaScript file. The source string will be read
            from this file.
        """
        contents = source.GetFileContents(path)
        super(_PathSource, self).__init__(contents)
        self._path = path

    def __str__(self):
        return 'PathSource {0}'.format(self._path)

    def GetPath(self):
        """Returns the path."""
        return self._path
def _WrapGoogModuleSource(src):
    """Wrap a goog.module source file in a goog.loadModule() closure."""
    prefix = 'goog.loadModule(function(exports) {"use strict";'
    # The leading newline terminates any trailing single-line comment in src.
    suffix = '\n;return exports});\n'
    return prefix + src + suffix
def main():
    """Entry point: scan sources, order dependencies, emit requested output."""
    logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
                        level=logging.INFO)
    options, args = _GetOptionsParser().parse_args()

    # Make our output pipe.
    if options.output_file:
        out = io.open(options.output_file, 'wb')
    else:
        version = sys.version_info[:2]
        if version >= (3, 0):
            # Write bytes to stdout
            out = sys.stdout.buffer
        else:
            out = sys.stdout

    sources = set()

    logging.info('Scanning paths...')
    for path in options.roots:
        for js_path in treescan.ScanTreeForJsFiles(path):
            sources.add(_PathSource(js_path))

    # Add scripts specified on the command line.
    for js_path in args:
        sources.add(_PathSource(js_path))

    logging.info('%s sources scanned.', len(sources))

    # Though deps output doesn't need to query the tree, we still build it
    # to validate dependencies.
    logging.info('Building dependency tree..')
    tree = depstree.DepsTree(sources)

    # Union of namespaces provided by -i files and those named with -n.
    input_namespaces = set()
    inputs = options.inputs or []
    for input_path in inputs:
        js_input = _GetInputByPath(input_path, sources)
        if not js_input:
            logging.error('No source matched input %s', input_path)
            sys.exit(1)
        input_namespaces.update(js_input.provides)

    input_namespaces.update(options.namespaces)

    if not input_namespaces:
        logging.error('No namespaces found. At least one namespace must be '
                      'specified with the --namespace or --input flags.')
        sys.exit(2)

    # The Closure Library base file must go first.
    base = _GetClosureBaseFile(sources)
    deps = [base] + tree.GetDependencies(input_namespaces)

    output_mode = options.output_mode
    if output_mode == 'list':
        out.writelines([js_source.GetPath() + '\n' for js_source in deps])
    elif output_mode == 'script':
        for js_source in deps:
            src = js_source.GetSource()
            if js_source.is_goog_module:
                src = _WrapGoogModuleSource(src)
            out.write(src.encode('utf-8') + b'\n')
    elif output_mode == 'compiled':
        logging.warning("""\
Closure Compiler now natively understands and orders Closure dependencies and
is prefererred over using this script for performing JavaScript compilation.
Please migrate your codebase.
See:
https://github.com/google/closure-compiler/wiki/Managing-Dependencies
""")

        # Make sure a .jar is specified.
        if not options.compiler_jar:
            logging.error('--compiler_jar flag must be specified if --output is '
                          '"compiled"')
            sys.exit(2)

        # Will throw an error if the compilation fails.
        compiled_source = jscompiler.Compile(options.compiler_jar,
                                             [js_source.GetPath()
                                              for js_source in deps],
                                             jvm_flags=options.jvm_flags,
                                             compiler_flags=options.compiler_flags)

        logging.info('JavaScript compilation succeeded.')
        out.write(compiled_source.encode('utf-8'))
    else:
        logging.error('Invalid value for --output flag.')
        sys.exit(2)


# Script entry point.
if __name__ == '__main__':
    main()
| mit |
guillermo-carrasco/bcbio-nextgen-vm | bcbiovm/docker/defaults.py | 3 | 3632 | """Save and retrieve default locations associated with a bcbio-nextgen installation.
"""
from __future__ import print_function
import os
import sys
import yaml
from bcbio.distributed import objectstore
from bcbiovm.aws import config as awsconfig
TOSAVE_DEFAULTS = {"datadir": None}
def update_check_args(args, command_info, need_datadir=True):
    """Apply saved defaults and remote handling, then validate datadir.

    Exits with an error message (including command_info) when no data
    directory can be determined.
    """
    args = add_defaults(args)
    args = _handle_remotes(args)
    if not args.datadir:
        default_datadir = _find_default_datadir(need_datadir)
        if not default_datadir:
            print("Must specify a `--datadir` or save the default location with `saveconfig`.\n" + command_info)
            sys.exit(1)
        args.datadir = default_datadir
    return args
def _handle_remotes(args):
    """Retrieve supported remote inputs specified on the command line."""
    if hasattr(args, "sample_config"):
        config = args.sample_config
        if objectstore.is_remote(config):
            if config.startswith("s3://"):
                # Download the sample configuration from S3.
                args.sample_config = awsconfig.load_s3(config)
            else:
                raise NotImplementedError("Do not recognize remote input %s" % config)
    return args
def _find_default_datadir(must_exist=True):
    """Locate the standard data directory relative to the interpreter.

    Returns the directory when the expected config files are present, the
    candidate path when must_exist is False, and None otherwise.
    """
    exe_dir = os.path.dirname(sys.executable)
    datadir = os.path.realpath(os.path.normpath(
        os.path.join(exe_dir, os.pardir, os.pardir, "data")))
    has_install = os.path.exists(os.path.join(datadir, "config", "install-params.yaml"))
    has_system = os.path.exists(os.path.join(datadir, "galaxy", "bcbio_system.yaml"))
    if has_install and has_system:
        return datadir
    if not must_exist:
        return datadir
    return None
def save(args):
    """Save user specific defaults to a yaml configuration file."""
    out = get_defaults()
    for key in TOSAVE_DEFAULTS:
        value = getattr(args, key, None)
        # Only persist values the user actually changed from the default.
        if value and value != TOSAVE_DEFAULTS[key]:
            out[key] = value
    if out:
        config_path = _get_config_file(just_filename=True)
        with open(config_path, "w") as out_handle:
            yaml.dump(out, out_handle, default_flow_style=False, allow_unicode=False)
def add_defaults(args):
    """Add user configured defaults to supplied command line arguments."""
    config_defaults = get_defaults()
    for key in TOSAVE_DEFAULTS:
        current = getattr(args, key, None)
        # Fill in the saved value only when the user left the default.
        if (not current or current == TOSAVE_DEFAULTS[key]) and key in config_defaults:
            setattr(args, key, config_defaults[key])
    return args
def get_datadir():
    """Retrieve the default data directory for this installation."""
    datadir = get_defaults().get("datadir")
    # Fall back to probing the standard installation layout.
    return datadir if datadir is not None else _find_default_datadir()
def get_defaults():
    """Retrieve saved default configurations.

    Returns a dict of saved settings; an empty dict when no configuration
    file exists or the file is empty.
    """
    config_file = _get_config_file()
    if config_file:
        with open(config_file) as in_handle:
            # safe_load avoids executing arbitrary Python object tags from
            # the config file (yaml.load without an explicit Loader is
            # unsafe and deprecated since PyYAML 5.1); an empty file parses
            # to None, so fall back to {} to keep callers' dict API intact.
            return yaml.safe_load(in_handle) or {}
    else:
        return {}
def _get_config_file(just_filename=False):
    """Retrieve standard user configuration file.

    Uses location from appdirs (https://github.com/ActiveState/appdirs). Could
    pull this in as dependency for more broad platform support.
    """
    xdg_base = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
    config_dir = os.path.join(xdg_base, "bcbio-nextgen")
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)
    config_file = os.path.join(config_dir, "bcbio-docker-config.yaml")
    # Without just_filename, only report files that actually exist.
    if just_filename or os.path.exists(config_file):
        return config_file
    return None
| mit |
srmagura/potential | ps/polarfd.py | 1 | 6731 | import numpy as np
import itertools as it
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve
class PolarFD:
def __init__(self, N, a, nu, R0, R1):
self.N = N
self.a = a
self.nu = nu
self.R0 = R0
self.R1 = R1
self.calc_N_var()
@classmethod
def my_print(cls, x):
pass
#print('(polarfd) ' + str(x))
def get_r(self, m):
return self.R0 + m*self.hr
def get_th(self, l):
return self.a + l*self.hth
def get_index(self, m, l):
return m*(self.N+1) + l
def calc_N_var(self):
""" Calculate variables that depend on N """
self.hr = (self.R1-self.R0)/self.N
self.hth = (2*np.pi - self.a) / self.N
def new_system(self):
""" Create variables for making a new sparse system """
self.rhs = []
self.row = 0
self.data = []
self.row_ind = []
self.col_ind = []
def set_bc_origin(self):
"""
Set BC near origin
"""
N = self.N
for l in range(1, N):
i = self.get_index(0, l)
th = self.get_th(l)
self.data.append(1)
self.row_ind.append(self.row)
self.col_ind.append(i)
self.rhs.append(self.eval_phi_initial(th))
self.row += 1
def set_bc_wedge(self):
"""
Set BC's on wedge
Be careful not to have any overlap when setting
boundary and initial conditions
"""
N = self.N
for m in range(N+1):
r = self.get_r(m)
i = self.get_index(m, 0)
self.data.append(1)
self.row_ind.append(self.row)
self.col_ind.append(i)
self.rhs.append(self.eval_phi2(r))
self.row += 1
i = self.get_index(m, N)
self.data.append(1)
self.row_ind.append(self.row)
self.col_ind.append(i)
self.rhs.append(self.eval_phi1(r))
self.row += 1
def set_bc_arc(self):
# Set BC at arc
N = self.N
for l in range(1, N):
self.data.append(1)
self.row_ind.append(self.row)
self.col_ind.append(self.get_index(N, l))
th = self.get_th(l)
self.rhs.append(self.eval_phi0(th))
self.row += 1
def set_fd4(self, m, l):
"""
Create equation for fourth order finite difference scheme
at the node (m, l)
"""
r_2 = self.get_r(m-1)
r_1 = self.get_r(m-1/2)
r = self.get_r(m)
r1 = self.get_r(m+1/2)
r2 = self.get_r(m+1)
hr = self.hr
hth = self.hth
k = self.k
local = np.zeros([3, 3])
# 4th order
local[-1, -1] += (
-hr/(24*hth**2*r*r_2**2) + 1/(12*hth**2*r_2**2)
+ r_1/(12*hr**2*r)
)
local[0, -1] += (
hr**2/(12*hth**2*r**4)
+ k**2/12 + 5/(6*hth**2*r**2) - r1/(12*hr**2*r)
- r_1/(12*hr**2*r)
)
local[1, -1] += (
hr/(24*hth**2*r*r2**2) + 1/(12*hth**2*r2**2)
+ r1/(12*hr**2*r)
)
local[-1, 0] += (
-hr*k**2/(24*r) - hr/(12*r**3)
+ hr/(12*hth**2*r*r_2**2) + k**2/12 - 1/(6*hth**2*r_2**2)
+ 5*r_1/(6*hr**2*r)
)
local[0, 0] += (hr**2*k**2/(12*r**2)
- hr**2/(6*hth**2*r**4) + 2*k**2/3
- 5/(3*hth**2*r**2) - 5*r1/(6*hr**2*r)
- 5*r_1/(6*hr**2*r)
)
local[1, 0] += (
hr*k**2/(24*r) + hr/(12*r**3)
- hr/(12*hth**2*r*r2**2) + k**2/12 - 1/(6*hth**2*r2**2)
+ 5*r1/(6*hr**2*r)
)
local[-1, 1] += (
-hr/(24*hth**2*r*r_2**2)
+ 1/(12*hth**2*r_2**2) + r_1/(12*hr**2*r)
)
local[0, 1] += (hr**2/(12*hth**2*r**4)
+ k**2/12 + 5/(6*hth**2*r**2) - r1/(12*hr**2*r)
- r_1/(12*hr**2*r)
)
local[1, 1] += (
hr/(24*hth**2*r*r2**2)
+ 1/(12*hth**2*r2**2) + r1/(12*hr**2*r)
)
th = self.get_th(l)
f = self.eval_f(r, th)
d_f_r = self.eval_d_f_r(r, th)
d2_f_r = self.eval_d2_f_r(r, th)
d2_f_th = self.eval_d2_f_th(r, th)
# 4th order
self.rhs.append(d2_f_r*hr**2/12
+ d2_f_th*hth**2/12
+ d_f_r*hr**2/(12*r)
+ f*hr**2/(12*r**2)
+ f
)
self.copy_local(m, l, local)
self.row += 1
def copy_local(self, m, l, local):
    """Append the 3x3 stencil ``local``, centered on node (m, l), to the
    sparse-matrix triplet lists for the current equation row."""
    for dm in (-1, 0, 1):
        for dl in (-1, 0, 1):
            # Negative dm/dl address the stencil via numpy's index
            # wrap-around, matching how set_fd4 fills ``local``.
            self.data.append(local[dm, dl])
            self.row_ind.append(self.row)
            self.col_ind.append(self.get_index(m + dm, l + dl))
def solve(self, k, eval_phi_initial, eval_phi0, eval_phi1, eval_phi2,
        eval_f, eval_d_f_r, eval_d2_f_r, eval_d2_f_th):
    """Assemble and solve the finite-difference system for wavenumber k.

    The eval_* callables supply the boundary data (phi0 on the arc,
    phi1/phi2 on the wedge sides), the source term f, and its radial and
    angular derivatives needed by the 4th-order right-hand side.
    Returns the solution vector produced by scipy's sparse spsolve.
    """
    self.k = k
    self.eval_phi_initial = eval_phi_initial
    self.eval_phi0 = eval_phi0
    self.eval_phi1 = eval_phi1
    self.eval_phi2 = eval_phi2
    self.eval_f = eval_f
    self.eval_d_f_r = eval_d_f_r
    self.eval_d2_f_r = eval_d2_f_r
    self.eval_d2_f_th = eval_d2_f_th
    N = self.N
    self.my_print('N = {}'.format(N))
    self.my_print('R0 = {}'.format(self.R0))
    self.my_print('R1 = {}'.format(self.R1))
    # Boundary conditions first, then one interior equation per node.
    self.new_system()
    self.set_bc_origin()
    self.set_bc_wedge()
    self.set_bc_arc()
    # Finite difference scheme
    for m in range(1, N):
        for l in range(1, N):
            self.set_fd4(m, l)
    shape = (max(self.row_ind)+1, max(self.col_ind)+1)
    self.my_print('System shape: {} x {}'.format(*shape))
    # Build the sparse matrix from the accumulated triplets and solve.
    M = csc_matrix((self.data, (self.row_ind, self.col_ind)))
    self.rhs = np.array(self.rhs)
    return spsolve(M, self.rhs)
def calc_rel_convergence(self, u0, u1, u2):
    """
    Calculate the relative convergence of the sequence (u0, u1, u2)
    where the u's are numpy arrays.

    u0 is the finest grid, u2 the coarsest; node (m, l) on u0 maps to
    (m//2, l//2) on u1 and (m//4, l//4) on u2, so only nodes shared by
    the grids being compared enter each difference.
    """
    n = self.N
    nodes = [(m, l) for m in range(1, n) for l in range(1, n)]
    # Differences between the two coarser grids (nodes shared mod 4).
    diff12 = [abs(u1[m // 2, l // 2] - u2[m // 4, l // 4])
              for (m, l) in nodes if m % 4 == 0 and l % 4 == 0]
    # Differences between the two finer grids (nodes shared mod 2).
    diff01 = [abs(u0[m, l] - u1[m // 2, l // 2])
              for (m, l) in nodes if m % 2 == 0 and l % 2 == 0]
    return np.log2(max(diff12) / max(diff01))
| gpl-3.0 |
fxfitz/ansible | lib/ansible/galaxy/role.py | 34 | 15345 | ########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = os.path.join('meta', 'main.yml')
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
    def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
        """
        :param galaxy: Galaxy context object; supplies CLI options
            (ignore_certs, etc.) and the configured roles search paths
        :param name: role name, also used as the on-disk directory name
        :param src: source (archive path, URL, SCM repo, or galaxy name);
            defaults to ``name`` when not given
        :param version: requested role version (tag/branch), if any
        :param scm: SCM type ('git'/'hg') when installing from a repository
        :param path: explicit install path; the role name is appended when
            not already part of the path
        """
        self._metadata = None
        self._install_info = None
        self._validate_certs = not galaxy.options.ignore_certs
        display.debug('Validate TLS certificates: %s' % self._validate_certs)
        self.options = galaxy.options
        self.galaxy = galaxy
        self.name = name
        self.version = version
        self.src = src or name
        self.scm = scm
        if path is not None:
            if self.name not in path:
                # ensure the role ends up in <path>/<name>, not <path> itself
                path = os.path.join(path, self.name)
            self.path = path
        else:
            # use the first path by default
            self.path = os.path.join(galaxy.roles_paths[0], self.name)
        # create list of possible paths
        self.paths = [x for x in galaxy.roles_paths]
        self.paths = [os.path.join(x, self.name) for x in self.paths]
def __repr__(self):
"""
Returns "rolename (version)" if version is not null
Returns "rolename" otherwise
"""
if self.version:
return "%s (%s)" % (self.name, self.version)
else:
return self.name
def __eq__(self, other):
return self.name == other.name
@property
def metadata(self):
"""
Returns role metadata
"""
if self._metadata is None:
meta_path = os.path.join(self.path, self.META_MAIN)
if os.path.isfile(meta_path):
try:
f = open(meta_path, 'r')
self._metadata = yaml.safe_load(f)
except:
display.vvvvv("Unable to load metadata for %s" % self.name)
return False
finally:
f.close()
return self._metadata
@property
def install_info(self):
"""
Returns role install info
"""
if self._install_info is None:
info_path = os.path.join(self.path, self.META_INSTALL)
if os.path.isfile(info_path):
try:
f = open(info_path, 'r')
self._install_info = yaml.safe_load(f)
except:
display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
return False
finally:
f.close()
return self._install_info
    def _write_galaxy_install_info(self):
        """
        Writes a YAML-formatted file to the role's meta/ directory
        (named .galaxy_install_info) which contains some information
        we can use later for commands like 'list' and 'info'.

        Returns True on success, False when the dump fails.
        """
        info = dict(
            version=self.version,
            install_date=datetime.datetime.utcnow().strftime("%c"),
        )
        if not os.path.exists(os.path.join(self.path, 'meta')):
            os.makedirs(os.path.join(self.path, 'meta'))
        info_path = os.path.join(self.path, self.META_INSTALL)
        with open(info_path, 'w+') as f:
            try:
                # NOTE(review): yaml.safe_dump(data, stream) returns None,
                # so this assignment resets the cached _install_info rather
                # than storing `info`; the install_info property will
                # transparently re-read the file, but caching `info`
                # directly was probably the intent -- confirm.
                self._install_info = yaml.safe_dump(info, f)
            except:
                return False

        return True
    def remove(self):
        """
        Removes the specified role from the roles path.
        There is a sanity check to make sure there's a meta/main.yml file at this
        path so the user doesn't blow away random directories.

        Returns True when the directory was removed, False otherwise.
        """
        # self.metadata is truthy only when meta/main.yml exists and parses,
        # which is the sanity check described above.
        if self.metadata:
            try:
                rmtree(self.path)
                return True
            except:
                # NOTE(review): bare except silently swallows all failures
                # (permissions, races, ...); callers only see False.
                pass

        return False
    def fetch(self, role_data):
        """
        Downloads the archived role to a temp location based on role data

        :param role_data: dict of role info from the Galaxy API, or a
            URL/path string when installing directly from a source
        :return: path of the downloaded temp file, or False on failure
        """
        if role_data:
            # first grab the file and save it to a temp location
            if "github_user" in role_data and "github_repo" in role_data:
                # Galaxy-hosted roles resolve to a GitHub tag archive.
                archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
            else:
                archive_url = self.src
            display.display("- downloading role from %s" % archive_url)
            try:
                url_file = open_url(archive_url, validate_certs=self._validate_certs)
                # delete=False: the caller extracts from (and removes) the
                # temp file after this method returns.
                temp_file = tempfile.NamedTemporaryFile(delete=False)
                data = url_file.read()
                while data:
                    temp_file.write(data)
                    data = url_file.read()
                temp_file.close()
                return temp_file.name
            except Exception as e:
                display.error("failed to download the file: %s" % str(e))

        return False
def install(self):
if self.scm:
# create tar file from scm url
tmp_file = RoleRequirement.scm_archive_role(keep_scm_meta=self.options.keep_scm_meta, **self.spec)
elif self.src:
if os.path.isfile(self.src):
tmp_file = self.src
elif '://' in self.src:
role_data = self.src
tmp_file = self.fetch(role_data)
else:
api = GalaxyAPI(self.galaxy)
role_data = api.lookup_role_by_name(self.src)
if not role_data:
raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))
if role_data.get('role_type') == 'APP':
# Container Role
display.warning("%s is a Container App role, and should only be installed using Ansible "
"Container" % self.name)
role_versions = api.fetch_role_related('versions', role_data['id'])
if not self.version:
# convert the version names to LooseVersion objects
# and sort them to get the latest version. If there
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
try:
loose_versions.sort()
except TypeError:
raise AnsibleError(
'Unable to compare role versions (%s) to determine the most recent version due to incompatible version formats. '
'Please contact the role author to resolve versioning conflicts, or specify an explicit role version to '
'install.' % ', '.join([v.vstring for v in loose_versions])
)
self.version = str(loose_versions[-1])
elif role_data.get('github_branch', None):
self.version = role_data['github_branch']
else:
self.version = 'master'
elif self.version != 'master':
if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]:
raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
self.name,
role_versions))
# check if there's a source link for our role_version
for role_version in role_versions:
if role_version['name'] == self.version and 'source' in role_version:
self.src = role_version['source']
tmp_file = self.fetch(role_data)
else:
raise AnsibleError("No valid role data found")
if tmp_file:
display.debug("installing from %s" % tmp_file)
if not tarfile.is_tarfile(tmp_file):
raise AnsibleError("the file downloaded was not a tar.gz")
else:
if tmp_file.endswith('.gz'):
role_tar_file = tarfile.open(tmp_file, "r:gz")
else:
role_tar_file = tarfile.open(tmp_file, "r")
# verify the role's meta file
meta_file = None
members = role_tar_file.getmembers()
# next find the metadata file
for member in members:
if self.META_MAIN in member.name:
# Look for parent of meta/main.yml
# Due to possibility of sub roles each containing meta/main.yml
# look for shortest length parent
meta_parent_dir = os.path.dirname(os.path.dirname(member.name))
if not meta_file:
archive_parent_dir = meta_parent_dir
meta_file = member
else:
if len(meta_parent_dir) < len(archive_parent_dir):
archive_parent_dir = meta_parent_dir
meta_file = member
if not meta_file:
raise AnsibleError("this role does not appear to have a meta/main.yml file.")
else:
try:
self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
except:
raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
# we strip off any higher-level directories for all of the files contained within
# the tar file here. The default is 'github_repo-target'. Gerrit instances, on the other
# hand, does not have a parent directory at all.
installed = False
while not installed:
display.display("- extracting %s to %s" % (self.name, self.path))
try:
if os.path.exists(self.path):
if not os.path.isdir(self.path):
raise AnsibleError("the specified roles path exists and is not a directory.")
elif not getattr(self.options, "force", False):
raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
else:
# using --force, remove the old path
if not self.remove():
raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really "
"want to put the role here." % self.path)
else:
os.makedirs(self.path)
# now we do the actual extraction to the path
for member in members:
# we only extract files, and remove any relative path
# bits that might be in the file for security purposes
# and drop any containing directory, as mentioned above
if member.isreg() or member.issym():
parts = member.name.replace(archive_parent_dir, "", 1).split(os.sep)
final_parts = []
for part in parts:
if part != '..' and '~' not in part and '$' not in part:
final_parts.append(part)
member.name = os.path.join(*final_parts)
role_tar_file.extract(member, self.path)
# write out the install info file for later use
self._write_galaxy_install_info()
installed = True
except OSError as e:
error = True
if e.errno == errno.EACCES and len(self.paths) > 1:
current = self.paths.index(self.path)
if len(self.paths) > current:
self.path = self.paths[current + 1]
error = False
if error:
raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e)))
# return the parsed yaml metadata
display.display("- %s was installed successfully" % str(self))
if not (self.src and os.path.isfile(self.src)):
try:
os.unlink(tmp_file)
except (OSError, IOError) as e:
display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e)))
return True
return False
@property
def spec(self):
"""
Returns role spec info
{
'scm': 'git',
'src': 'http://git.example.com/repos/repo.git',
'version': 'v1.0',
'name': 'repo'
}
"""
return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
| gpl-3.0 |
petry/kanboard | apps/issues/models.py | 1 | 1209 | from datetime import timedelta
from django.db import models
# Create your models here.
from django.db.models.query import QuerySet
from apps.core.managers import QuerySetManager
class IssueQuerySet(QuerySet):
    # Custom queryset so duration statistics can be chained off Issue.objects.
    def duration_avg(self):
        """Average duration over issues in this queryset that have a
        computable (truthy) duration.

        NOTE(review): raises ZeroDivisionError when no issue has a
        duration -- callers should guard, or this should return None;
        confirm the intended behavior.
        """
        durations = [i.get_duration() for i in self if i.get_duration()]
        return sum(durations, timedelta()) / len(durations)
class Issue(models.Model):
    """A tracked issue whose lifecycle is recorded as workflow transitions."""
    # Display name of the issue.
    name = models.CharField(max_length=255)
    description = models.TextField(blank=True, null=True)

    # Custom manager exposing IssueQuerySet methods (e.g. duration_avg).
    objects = QuerySetManager(IssueQuerySet)

    def __unicode__(self):
        return self.name

    def get_first_transition(self):
        """Return the transition into the workflow's initial step."""
        return self.transition_set.get(step__initial=True)

    def get_last_transition(self):
        """Return the transition into the final step (step with no next),
        or None when the issue has not reached it yet."""
        try:
            transition = self.transition_set.get(step__next=None)
        except self.transition_set.model.DoesNotExist:
            transition = None
        return transition

    def get_duration(self):
        """Time from first to last transition, or None when unfinished."""
        # NOTE(review): get_last_transition() is queried twice here; the
        # result could be cached in a local to save a DB round-trip.
        if not self.get_last_transition():
            return None
        return self.get_last_transition().date - self.get_first_transition().date

    def get_expected_date(self, time_delta):
        """Expected completion date: first transition date + time_delta."""
        return self.get_first_transition().date + time_delta
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/sympy/functions/elementary/piecewise.py | 69 | 23292 | from __future__ import print_function, division
from sympy.core import Basic, S, Function, diff, Tuple
from sympy.core.relational import Equality, Relational
from sympy.functions.elementary.miscellaneous import Max, Min
from sympy.logic.boolalg import (And, Boolean, distribute_and_over_or, Not, Or,
true, false)
from sympy.core.compatibility import default_sort_key, range
class ExprCondPair(Tuple):
    """Represents an expression, condition pair."""

    def __new__(cls, expr, cond):
        # Normalize Python bools to SymPy's singletons true/false so that
        # later identity checks on conditions behave consistently.
        if cond == True:
            return Tuple.__new__(cls, expr, true)
        elif cond == False:
            return Tuple.__new__(cls, expr, false)
        return Tuple.__new__(cls, expr, cond)

    @property
    def expr(self):
        """
        Returns the expression of this pair.
        """
        return self.args[0]

    @property
    def cond(self):
        """
        Returns the condition of this pair.
        """
        return self.args[1]

    @property
    def free_symbols(self):
        """
        Return the free symbols of this pair.
        """
        # Overload Basic.free_symbols because self.args[1] may contain non-Basic
        result = self.expr.free_symbols
        if hasattr(self.cond, 'free_symbols'):
            result |= self.cond.free_symbols
        return result

    @property
    def is_commutative(self):
        # Commutativity is determined solely by the expression half.
        return self.expr.is_commutative

    def __iter__(self):
        # Unpacks as (expr, cond), e.g. ``for e, c in piecewise.args``.
        yield self.expr
        yield self.cond
class Piecewise(Function):
"""
Represents a piecewise function.
Usage:
Piecewise( (expr,cond), (expr,cond), ... )
- Each argument is a 2-tuple defining an expression and condition
- The conds are evaluated in turn returning the first that is True.
If any of the evaluated conds are not determined explicitly False,
e.g. x < 1, the function is returned in symbolic form.
- If the function is evaluated at a place where all conditions are False,
a ValueError exception will be raised.
- Pairs where the cond is explicitly False, will be removed.
Examples
========
>>> from sympy import Piecewise, log
>>> from sympy.abc import x
>>> f = x**2
>>> g = log(x)
>>> p = Piecewise( (0, x<-1), (f, x<=1), (g, True))
>>> p.subs(x,1)
1
>>> p.subs(x,5)
log(5)
See Also
========
piecewise_fold
"""
nargs = None
is_Piecewise = True
def __new__(cls, *args, **options):
# (Try to) sympify args first
newargs = []
for ec in args:
# ec could be a ExprCondPair or a tuple
pair = ExprCondPair(*getattr(ec, 'args', ec))
cond = pair.cond
if cond == false:
continue
if not isinstance(cond, (bool, Relational, Boolean)):
raise TypeError(
"Cond %s is of type %s, but must be a Relational,"
" Boolean, or a built-in bool." % (cond, type(cond)))
newargs.append(pair)
if cond == True:
break
if options.pop('evaluate', True):
r = cls.eval(*newargs)
else:
r = None
if r is None:
return Basic.__new__(cls, *newargs, **options)
else:
return r
@classmethod
def eval(cls, *args):
# Check for situations where we can evaluate the Piecewise object.
# 1) Hit an unevaluable cond (e.g. x<1) -> keep object
# 2) Hit a true condition -> return that expr
# 3) Remove false conditions, if no conditions left -> raise ValueError
all_conds_evaled = True # Do all conds eval to a bool?
piecewise_again = False # Should we pass args to Piecewise again?
non_false_ecpairs = []
or1 = Or(*[cond for (_, cond) in args if cond != true])
for expr, cond in args:
# Check here if expr is a Piecewise and collapse if one of
# the conds in expr matches cond. This allows the collapsing
# of Piecewise((Piecewise(x,x<0),x<0)) to Piecewise((x,x<0)).
# This is important when using piecewise_fold to simplify
# multiple Piecewise instances having the same conds.
# Eventually, this code should be able to collapse Piecewise's
# having different intervals, but this will probably require
# using the new assumptions.
if isinstance(expr, Piecewise):
or2 = Or(*[c for (_, c) in expr.args if c != true])
for e, c in expr.args:
# Don't collapse if cond is "True" as this leads to
# incorrect simplifications with nested Piecewises.
if c == cond and (or1 == or2 or cond != true):
expr = e
piecewise_again = True
cond_eval = cls.__eval_cond(cond)
if cond_eval is None:
all_conds_evaled = False
elif cond_eval:
if all_conds_evaled:
return expr
if len(non_false_ecpairs) != 0:
if non_false_ecpairs[-1].cond == cond:
continue
elif non_false_ecpairs[-1].expr == expr:
newcond = Or(cond, non_false_ecpairs[-1].cond)
if isinstance(newcond, (And, Or)):
newcond = distribute_and_over_or(newcond)
non_false_ecpairs[-1] = ExprCondPair(expr, newcond)
continue
non_false_ecpairs.append(ExprCondPair(expr, cond))
if len(non_false_ecpairs) != len(args) or piecewise_again:
return cls(*non_false_ecpairs)
return None
def doit(self, **hints):
"""
Evaluate this piecewise function.
"""
newargs = []
for e, c in self.args:
if hints.get('deep', True):
if isinstance(e, Basic):
e = e.doit(**hints)
if isinstance(c, Basic):
c = c.doit(**hints)
newargs.append((e, c))
return self.func(*newargs)
def _eval_as_leading_term(self, x):
for e, c in self.args:
if c == True or c.subs(x, 0) == True:
return e.as_leading_term(x)
def _eval_adjoint(self):
return self.func(*[(e.adjoint(), c) for e, c in self.args])
def _eval_conjugate(self):
return self.func(*[(e.conjugate(), c) for e, c in self.args])
def _eval_derivative(self, x):
return self.func(*[(diff(e, x), c) for e, c in self.args])
def _eval_evalf(self, prec):
return self.func(*[(e.evalf(prec), c) for e, c in self.args])
def _eval_integral(self, x):
from sympy.integrals import integrate
return self.func(*[(integrate(e, x), c) for e, c in self.args])
def _eval_interval(self, sym, a, b):
"""Evaluates the function along the sym in a given interval ab"""
# FIXME: Currently complex intervals are not supported. A possible
# replacement algorithm, discussed in issue 5227, can be found in the
# following papers;
# http://portal.acm.org/citation.cfm?id=281649
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.70.4127&rep=rep1&type=pdf
if a is None or b is None:
# In this case, it is just simple substitution
return piecewise_fold(
super(Piecewise, self)._eval_interval(sym, a, b))
mul = 1
if (a == b) == True:
return S.Zero
elif (a > b) == True:
a, b, mul = b, a, -1
elif (a <= b) != True:
newargs = []
for e, c in self.args:
intervals = self._sort_expr_cond(
sym, S.NegativeInfinity, S.Infinity, c)
values = []
for lower, upper, expr in intervals:
if (a < lower) == True:
mid = lower
rep = b
val = e._eval_interval(sym, mid, b)
val += self._eval_interval(sym, a, mid)
elif (a > upper) == True:
mid = upper
rep = b
val = e._eval_interval(sym, mid, b)
val += self._eval_interval(sym, a, mid)
elif (a >= lower) == True and (a <= upper) == True:
rep = b
val = e._eval_interval(sym, a, b)
elif (b < lower) == True:
mid = lower
rep = a
val = e._eval_interval(sym, a, mid)
val += self._eval_interval(sym, mid, b)
elif (b > upper) == True:
mid = upper
rep = a
val = e._eval_interval(sym, a, mid)
val += self._eval_interval(sym, mid, b)
elif ((b >= lower) == True) and ((b <= upper) == True):
rep = a
val = e._eval_interval(sym, a, b)
else:
raise NotImplementedError(
"""The evaluation of a Piecewise interval when both the lower
and the upper limit are symbolic is not yet implemented.""")
values.append(val)
if len(set(values)) == 1:
try:
c = c.subs(sym, rep)
except AttributeError:
pass
e = values[0]
newargs.append((e, c))
else:
for i in range(len(values)):
newargs.append((values[i], (c == True and i == len(values) - 1) or
And(rep >= intervals[i][0], rep <= intervals[i][1])))
return self.func(*newargs)
# Determine what intervals the expr,cond pairs affect.
int_expr = self._sort_expr_cond(sym, a, b)
# Finally run through the intervals and sum the evaluation.
ret_fun = 0
for int_a, int_b, expr in int_expr:
if isinstance(expr, Piecewise):
# If we still have a Piecewise by now, _sort_expr_cond would
# already have determined that its conditions are independent
# of the integration variable, thus we just use substitution.
ret_fun += piecewise_fold(
super(Piecewise, expr)._eval_interval(sym, Max(a, int_a), Min(b, int_b)))
else:
ret_fun += expr._eval_interval(sym, Max(a, int_a), Min(b, int_b))
return mul * ret_fun
def _sort_expr_cond(self, sym, a, b, targetcond=None):
"""Determine what intervals the expr, cond pairs affect.
1) If cond is True, then log it as default
1.1) Currently if cond can't be evaluated, throw NotImplementedError.
2) For each inequality, if previous cond defines part of the interval
update the new conds interval.
- eg x < 1, x < 3 -> [oo,1],[1,3] instead of [oo,1],[oo,3]
3) Sort the intervals to make it easier to find correct exprs
Under normal use, we return the expr,cond pairs in increasing order
along the real axis corresponding to the symbol sym. If targetcond
is given, we return a list of (lowerbound, upperbound) pairs for
this condition."""
from sympy.solvers.inequalities import _solve_inequality
default = None
int_expr = []
expr_cond = []
or_cond = False
or_intervals = []
independent_expr_cond = []
for expr, cond in self.args:
if isinstance(cond, Or):
for cond2 in sorted(cond.args, key=default_sort_key):
expr_cond.append((expr, cond2))
else:
expr_cond.append((expr, cond))
if cond == True:
break
for expr, cond in expr_cond:
if cond == True:
independent_expr_cond.append((expr, cond))
default = self.func(*independent_expr_cond)
break
orig_cond = cond
if sym not in cond.free_symbols:
independent_expr_cond.append((expr, cond))
continue
elif isinstance(cond, Equality):
continue
elif isinstance(cond, And):
lower = S.NegativeInfinity
upper = S.Infinity
for cond2 in cond.args:
if sym not in [cond2.lts, cond2.gts]:
cond2 = _solve_inequality(cond2, sym)
if cond2.lts == sym:
upper = Min(cond2.gts, upper)
elif cond2.gts == sym:
lower = Max(cond2.lts, lower)
else:
raise NotImplementedError(
"Unable to handle interval evaluation of expression.")
else:
if sym not in [cond.lts, cond.gts]:
cond = _solve_inequality(cond, sym)
lower, upper = cond.lts, cond.gts # part 1: initialize with givens
if cond.lts == sym: # part 1a: expand the side ...
lower = S.NegativeInfinity # e.g. x <= 0 ---> -oo <= 0
elif cond.gts == sym: # part 1a: ... that can be expanded
upper = S.Infinity # e.g. x >= 0 ---> oo >= 0
else:
raise NotImplementedError(
"Unable to handle interval evaluation of expression.")
# part 1b: Reduce (-)infinity to what was passed in.
lower, upper = Max(a, lower), Min(b, upper)
for n in range(len(int_expr)):
# Part 2: remove any interval overlap. For any conflicts, the
# iterval already there wins, and the incoming interval updates
# its bounds accordingly.
if self.__eval_cond(lower < int_expr[n][1]) and \
self.__eval_cond(lower >= int_expr[n][0]):
lower = int_expr[n][1]
elif len(int_expr[n][1].free_symbols) and \
self.__eval_cond(lower >= int_expr[n][0]):
if self.__eval_cond(lower == int_expr[n][0]):
lower = int_expr[n][1]
else:
int_expr[n][1] = Min(lower, int_expr[n][1])
elif len(int_expr[n][0].free_symbols) and \
self.__eval_cond(upper == int_expr[n][1]):
upper = Min(upper, int_expr[n][0])
elif len(int_expr[n][1].free_symbols) and \
(lower >= int_expr[n][0]) != True and \
(int_expr[n][1] == Min(lower, upper)) != True:
upper = Min(upper, int_expr[n][0])
elif self.__eval_cond(upper > int_expr[n][0]) and \
self.__eval_cond(upper <= int_expr[n][1]):
upper = int_expr[n][0]
elif len(int_expr[n][0].free_symbols) and \
self.__eval_cond(upper < int_expr[n][1]):
int_expr[n][0] = Max(upper, int_expr[n][0])
if self.__eval_cond(lower >= upper) != True: # Is it still an interval?
int_expr.append([lower, upper, expr])
if orig_cond == targetcond:
return [(lower, upper, None)]
elif isinstance(targetcond, Or) and cond in targetcond.args:
or_cond = Or(or_cond, cond)
or_intervals.append((lower, upper, None))
if or_cond == targetcond:
or_intervals.sort(key=lambda x: x[0])
return or_intervals
int_expr.sort(key=lambda x: x[1].sort_key(
) if x[1].is_number else S.NegativeInfinity.sort_key())
int_expr.sort(key=lambda x: x[0].sort_key(
) if x[0].is_number else S.Infinity.sort_key())
for n in range(len(int_expr)):
if len(int_expr[n][0].free_symbols) or len(int_expr[n][1].free_symbols):
if isinstance(int_expr[n][1], Min) or int_expr[n][1] == b:
newval = Min(*int_expr[n][:-1])
if n > 0 and int_expr[n][0] == int_expr[n - 1][1]:
int_expr[n - 1][1] = newval
int_expr[n][0] = newval
else:
newval = Max(*int_expr[n][:-1])
if n < len(int_expr) - 1 and int_expr[n][1] == int_expr[n + 1][0]:
int_expr[n + 1][0] = newval
int_expr[n][1] = newval
# Add holes to list of intervals if there is a default value,
# otherwise raise a ValueError.
holes = []
curr_low = a
for int_a, int_b, expr in int_expr:
if (curr_low < int_a) == True:
holes.append([curr_low, Min(b, int_a), default])
elif (curr_low >= int_a) != True:
holes.append([curr_low, Min(b, int_a), default])
curr_low = Min(b, int_b)
if (curr_low < b) == True:
holes.append([Min(b, curr_low), b, default])
elif (curr_low >= b) != True:
holes.append([Min(b, curr_low), b, default])
if holes and default is not None:
int_expr.extend(holes)
if targetcond == True:
return [(h[0], h[1], None) for h in holes]
elif holes and default is None:
raise ValueError("Called interval evaluation over piecewise "
"function on undefined intervals %s" %
", ".join([str((h[0], h[1])) for h in holes]))
return int_expr
def _eval_nseries(self, x, n, logx):
args = [(ec.expr._eval_nseries(x, n, logx), ec.cond) for ec in self.args]
return self.func(*args)
def _eval_power(self, s):
return self.func(*[(e**s, c) for e, c in self.args])
    def _eval_subs(self, old, new):
        """
        Piecewise conditions may contain bool which are not of Basic type.

        Substitution stops at the first condition that becomes True after
        substitution: later branches can never be selected, so their
        expressions are left untouched.
        """
        args = list(self.args)
        for i, (e, c) in enumerate(args):
            if isinstance(c, bool):
                # plain Python bool: nothing to substitute into
                pass
            elif isinstance(c, Basic):
                c = c._subs(old, new)
            if c != False:
                # only substitute into expressions of branches that can
                # still be reached
                e = e._subs(old, new)
            args[i] = e, c
            if c == True:
                return self.func(*args)
        return self.func(*args)
def _eval_transpose(self):
return self.func(*[(e.transpose(), c) for e, c in self.args])
    def _eval_template_is_attr(self, is_attr, when_multiple=None):
        """Fold an ``is_*`` assumption query over all branch expressions.

        Returns the shared truth value when every branch agrees, None as
        soon as any branch is undetermined, and ``when_multiple`` when
        branches disagree.
        """
        b = None
        for expr, _ in self.args:
            a = getattr(expr, is_attr)
            if a is None:
                return None
            if b is None:
                b = a
            elif b is not a:
                # branches disagree on the property
                return when_multiple
        return b
_eval_is_finite = lambda self: self._eval_template_is_attr(
'is_finite', when_multiple=False)
_eval_is_complex = lambda self: self._eval_template_is_attr('is_complex')
_eval_is_even = lambda self: self._eval_template_is_attr('is_even')
_eval_is_imaginary = lambda self: self._eval_template_is_attr(
'is_imaginary')
_eval_is_integer = lambda self: self._eval_template_is_attr('is_integer')
_eval_is_irrational = lambda self: self._eval_template_is_attr(
'is_irrational')
_eval_is_negative = lambda self: self._eval_template_is_attr('is_negative')
_eval_is_nonnegative = lambda self: self._eval_template_is_attr(
'is_nonnegative')
_eval_is_nonpositive = lambda self: self._eval_template_is_attr(
'is_nonpositive')
_eval_is_nonzero = lambda self: self._eval_template_is_attr(
'is_nonzero', when_multiple=True)
_eval_is_odd = lambda self: self._eval_template_is_attr('is_odd')
_eval_is_polar = lambda self: self._eval_template_is_attr('is_polar')
_eval_is_positive = lambda self: self._eval_template_is_attr('is_positive')
_eval_is_real = lambda self: self._eval_template_is_attr('is_real')
_eval_is_zero = lambda self: self._eval_template_is_attr(
'is_zero', when_multiple=False)
    @classmethod
    def __eval_cond(cls, cond):
        """Return the truth value of the condition.

        True when the condition is definitely satisfied, False/None
        propagated from the zero-test of an Equality's difference, and
        None when the truth cannot be decided.
        """
        # local import to avoid a circular dependency with the solvers
        from sympy.solvers.solvers import checksol
        if cond == True:
            return True
        if isinstance(cond, Equality):
            if checksol(cond, {}, minimal=True):
                # the equality is trivially solved
                return True
            diff = cond.lhs - cond.rhs
            if diff.is_commutative:
                return diff.is_zero
        return None
    def as_expr_set_pairs(self):
        """Return [(expr, set), ...] where each set is the subset of the
        reals on which that branch is selected.  Earlier branches take
        precedence, so each condition's set is intersected with whatever
        remains of R after the previous branches claimed their parts."""
        exp_sets = []
        U = S.Reals
        for expr, cond in self.args:
            cond_int = U.intersect(cond.as_set())
            # remove the claimed region so later branches cannot reuse it
            U = U - cond_int
            exp_sets.append((expr, cond_int))
        return exp_sets
def piecewise_fold(expr):
    """
    Takes an expression containing a piecewise function and returns the
    expression in piecewise form.

    Examples
    ========

    >>> from sympy import Piecewise, piecewise_fold, sympify as S
    >>> from sympy.abc import x
    >>> p = Piecewise((x, x < 1), (1, S(1) <= x))
    >>> piecewise_fold(x*p)
    Piecewise((x**2, x < 1), (x, 1 <= x))

    See Also
    ========

    Piecewise
    """
    if not isinstance(expr, Basic) or not expr.has(Piecewise):
        return expr
    # fold bottom-up: children first, then lift the first Piecewise child
    # found through this node's operation
    new_args = list(map(piecewise_fold, expr.args))
    if expr.func is ExprCondPair:
        return ExprCondPair(*new_args)
    piecewise_args = []
    for n, arg in enumerate(new_args):
        if isinstance(arg, Piecewise):
            piecewise_args.append(n)
    if len(piecewise_args) > 0:
        n = piecewise_args[0]
        # distribute the surrounding operation over the branches of the
        # first Piecewise argument; remaining Piecewise args are handled
        # by the recursive calls below
        new_args = [(expr.func(*(new_args[:n] + [e] + new_args[n + 1:])), c)
                    for e, c in new_args[n].args]
        if isinstance(expr, Boolean):
            # If expr is Boolean, we must return some kind of PiecewiseBoolean.
            # This is constructed by means of Or, And and Not.
            # piecewise_fold(0 < Piecewise( (sin(x), x<0), (cos(x), True)))
            # can't return Piecewise((0 < sin(x), x < 0), (0 < cos(x), True))
            # but instead Or(And(x < 0, 0 < sin(x)), And(0 < cos(x), Not(x<0)))
            other = True
            rtn = False
            for e, c in new_args:
                rtn = Or(rtn, And(other, c, e))
                other = And(other, Not(c))
            if len(piecewise_args) > 1:
                return piecewise_fold(rtn)
            return rtn
        if len(piecewise_args) > 1:
            return piecewise_fold(Piecewise(*new_args))
        return Piecewise(*new_args)
    else:
        return expr.func(*new_args)
| mit |
axelkennedal/dissen | dissenEnv/lib/python3.5/site-packages/django/conf/locale/en_GB/formats.py | 504 | 2117 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j M Y'              # '25 Oct 2006'
TIME_FORMAT = 'P'                  # '2:30 p.m.'
DATETIME_FORMAT = 'j M Y, P'       # '25 Oct 2006, 2:30 p.m.'
YEAR_MONTH_FORMAT = 'F Y'          # 'October 2006'
MONTH_DAY_FORMAT = 'j F'           # '25 October'
SHORT_DATE_FORMAT = 'd/m/Y'        # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P'  # '25/10/2006 2:30 p.m.'
FIRST_DAY_OF_WEEK = 1              # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d/%m/%Y', '%d/%m/%y',             # '25/10/2006', '25/10/06'
    # Month-name formats are kept commented out (not enabled for en_GB);
    # shown here for reference only.
    # '%b %d %Y', '%b %d, %Y',          # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y',          # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y',          # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y',          # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
    '%d/%m/%y %H:%M:%S',     # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M:%S.%f',  # '25/10/06 14:30:59.000200'
    '%d/%m/%y %H:%M',        # '25/10/06 14:30'
    '%d/%m/%y',              # '25/10/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| mit |
openstack/glance | glance/tests/unit/api/test_common.py | 1 | 5882 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from unittest import mock
import webob
import glance.api.common
from glance.common import exception
class SimpleIterator(object):
    """Iterate over a file-like object in fixed-size chunks.

    Bug fix: the original ``__iter__`` read ``self.fobj``, but ``__init__``
    stores the stream as ``self.file_object``, so iteration always raised
    AttributeError on the first chunk.
    """
    def __init__(self, file_object, chunk_size):
        self.file_object = file_object
        self.chunk_size = chunk_size
    def __iter__(self):
        """Yield successive reads of ``chunk_size`` bytes until EOF."""
        while True:
            chunk = self.file_object.read(self.chunk_size)
            if not chunk:
                # EOF: an empty read terminates iteration.
                return
            yield chunk
class TestSizeCheckedIter(testtools.TestCase):
    """Tests for glance.api.common.size_checked_iter.
    size_checked_iter re-yields the chunks of an image iterator while
    counting bytes; once the source iterator is exhausted, a mismatch
    between the byte count and the expected size raises GlanceException.
    """
    def _get_image_metadata(self):
        # Minimal image metadata; only the image id is consulted (for logging).
        return {'id': 'e31cb99c-fe89-49fb-9cc5-f5104fffa636'}
    def _get_webob_response(self):
        # Bare webob response bound to a blank request, as expected by
        # size_checked_iter.
        request = webob.Request.blank('/')
        response = webob.Response()
        response.request = request
        return response
    def test_uniform_chunk_size(self):
        # Two 2-byte chunks add up exactly to the expected size of 4.
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(
            resp, meta, 4, ['AB', 'CD'], None)
        self.assertEqual('AB', next(checked_image))
        self.assertEqual('CD', next(checked_image))
        self.assertRaises(StopIteration, next, checked_image)
    def test_small_last_chunk(self):
        # A short final chunk is fine as long as the total matches.
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(
            resp, meta, 3, ['AB', 'C'], None)
        self.assertEqual('AB', next(checked_image))
        self.assertEqual('C', next(checked_image))
        self.assertRaises(StopIteration, next, checked_image)
    def test_variable_chunk_size(self):
        # Chunk sizes may vary (including empty chunks); only the sum counts.
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(
            resp, meta, 6, ['AB', '', 'CDE', 'F'], None)
        self.assertEqual('AB', next(checked_image))
        self.assertEqual('', next(checked_image))
        self.assertEqual('CDE', next(checked_image))
        self.assertEqual('F', next(checked_image))
        self.assertRaises(StopIteration, next, checked_image)
    def test_too_many_chunks(self):
        """An image should be streamed regardless of expected_size"""
        # All chunks are yielded first; the size mismatch is only raised
        # after the source iterator is exhausted.
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(
            resp, meta, 4, ['AB', 'CD', 'EF'], None)
        self.assertEqual('AB', next(checked_image))
        self.assertEqual('CD', next(checked_image))
        self.assertEqual('EF', next(checked_image))
        self.assertRaises(exception.GlanceException, next, checked_image)
    def test_too_few_chunks(self):
        # Fewer bytes than expected -> GlanceException once exhausted.
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(resp, meta, 6,
                                                            ['AB', 'CD'],
                                                            None)
        self.assertEqual('AB', next(checked_image))
        self.assertEqual('CD', next(checked_image))
        self.assertRaises(exception.GlanceException, next, checked_image)
    def test_too_much_data(self):
        # More bytes than expected -> GlanceException once exhausted.
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(resp, meta, 3,
                                                            ['AB', 'CD'],
                                                            None)
        self.assertEqual('AB', next(checked_image))
        self.assertEqual('CD', next(checked_image))
        self.assertRaises(exception.GlanceException, next, checked_image)
    def test_too_little_data(self):
        # Off-by-one short stream is still an error.
        resp = self._get_webob_response()
        meta = self._get_image_metadata()
        checked_image = glance.api.common.size_checked_iter(resp, meta, 6,
                                                            ['AB', 'CD', 'E'],
                                                            None)
        self.assertEqual('AB', next(checked_image))
        self.assertEqual('CD', next(checked_image))
        self.assertEqual('E', next(checked_image))
        self.assertRaises(exception.GlanceException, next, checked_image)
class TestThreadPool(testtools.TestCase):
    """Tests for glance.api.common.get_thread_pool caching and logging."""
    @mock.patch('glance.async_.get_threadpool_model')
    def test_get_thread_pool(self, mock_gtm):
        """Pools are cached by name; each distinct name builds one pool."""
        get_thread_pool = glance.api.common.get_thread_pool
        pool1 = get_thread_pool('pool1', size=123)
        get_thread_pool('pool2', size=456)
        pool1a = get_thread_pool('pool1')
        # Two calls for the same pool should return the exact same thing
        self.assertEqual(pool1, pool1a)
        # Only two calls to get new threadpools should have been made
        mock_gtm.return_value.assert_has_calls(
            [mock.call(123), mock.call(456)])
    @mock.patch('glance.async_.get_threadpool_model')
    def test_get_thread_pool_log(self, mock_gtm):
        """Creating a new named pool emits a debug log with the pool name."""
        with mock.patch.object(glance.api.common, 'LOG') as mock_log:
            glance.api.common.get_thread_pool('test-pool')
            mock_log.debug.assert_called_once_with(
                'Initializing named threadpool %r', 'test-pool')
| apache-2.0 |
MyAOSP/external_chromium_org | tools/telemetry/telemetry/page/page_test_results.py | 23 | 2163 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import sys
import traceback
import unittest
class PageTestResults(unittest.TestResult):
  """unittest.TestResult that records results keyed by page display_name.
  Tracks successes, skips, and the sets of page objects that produced
  errors or failures so callers can inspect or retry problem pages.
  NOTE(review): this file targets Python 2 (see zip(...)[0] usage below).
  """
  def __init__(self):
    super(PageTestResults, self).__init__()
    # Sets of page objects (not display names) with recorded problems.
    self.pages_that_had_errors = set()
    self.pages_that_had_failures = set()
    self.successes = []
    self.skipped = []
  @property
  def pages_that_had_errors_or_failures(self):
    # Union of both problem sets.
    return self.pages_that_had_errors.union(
        self.pages_that_had_failures)
  def _exc_info_to_string(self, err, test):
    # Base class assumes `test` is a TestCase; here it is usually a page
    # display_name string, in which case format the traceback directly.
    if isinstance(test, unittest.TestCase):
      return super(PageTestResults, self)._exc_info_to_string(err, test)
    else:
      return ''.join(traceback.format_exception(*err))
  def addSuccess(self, test):
    self.successes.append(test)
  def addSkip(self, test, reason):  # Python 2.7 has this in unittest.TestResult
    self.skipped.append((test, reason))
  def StartTest(self, page):
    # Capitalized wrappers translate page objects into display names
    # before delegating to the lower-case unittest.TestResult API.
    self.startTest(page.display_name)
  def StopTest(self, page):
    self.stopTest(page.display_name)
  def AddError(self, page, err):
    self.pages_that_had_errors.add(page)
    self.addError(page.display_name, err)
  def AddFailure(self, page, err):
    self.pages_that_had_failures.add(page)
    self.addFailure(page.display_name, err)
  def AddSuccess(self, page):
    self.addSuccess(page.display_name)
  def AddSkip(self, page, reason):
    self.addSkip(page.display_name, reason)
  def AddFailureMessage(self, page, message):
    # Raise-and-catch so a real exc_info/traceback is recorded for the
    # plain message.
    try:
      raise Exception(message)
    except Exception:
      self.AddFailure(page, sys.exc_info())
  def AddErrorMessage(self, page, message):
    try:
      raise Exception(message)
    except Exception:
      self.AddError(page, sys.exc_info())
  def PrintSummary(self):
    # zip(*pairs)[0] extracts the display names; indexing a zip result
    # requires Python 2 (zip returns a list there).
    if self.failures:
      logging.error('Failed pages:\n%s', '\n'.join(zip(*self.failures)[0]))
    if self.errors:
      logging.error('Errored pages:\n%s', '\n'.join(zip(*self.errors)[0]))
    if self.skipped:
      logging.warning('Skipped pages:\n%s', '\n'.join(zip(*self.skipped)[0]))
| bsd-3-clause |
nsat/gnuradio | gr-filter/python/filter/qa_fractional_interpolator.py | 47 | 3147 | #!/usr/bin/env python
#
# Copyright 2007,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, filter, blocks
import math
def sig_source_f(samp_rate, freq, amp, N):
    """Return N samples of a real sine tone: amp * sin(2*pi*freq*t).

    Args:
        samp_rate: sampling rate in samples/second.
        freq: tone frequency in Hz.
        amp: peak amplitude. Fix: the original silently ignored this
            parameter (all in-file callers pass 1, so their output is
            unchanged).
        N: number of samples to generate.

    Returns:
        List of N floats.
    """
    # range() + list comprehension works on both Python 2 and 3; the
    # original relied on Python-2-only xrange/map-returns-list.
    return [amp * math.sin(2. * math.pi * freq * (float(n) / samp_rate))
            for n in range(N)]
def sig_source_c(samp_rate, freq, amp, N):
    """Return N samples of a complex tone: amp * exp(j*2*pi*freq*t).

    Args:
        samp_rate: sampling rate in samples/second.
        freq: tone frequency in Hz.
        amp: peak amplitude. Fix: the original silently ignored this
            parameter (all in-file callers pass 1, so their output is
            unchanged).
        N: number of samples to generate.

    Returns:
        List of N complex values cos(w) + 1j*sin(w).
    """
    out = []
    # range() works on both Python 2 and 3 (original used xrange/map).
    for n in range(N):
        w = 2. * math.pi * freq * (float(n) / samp_rate)
        out.append(amp * (math.cos(w) + 1j * math.sin(w)))
    return out
class test_fractional_resampler(gr_unittest.TestCase):
    """QA for filter.fractional_interpolator_{ff,cc}.
    Feeds a pure tone through the fractional interpolator and compares
    the tail of the output against an ideal tone at the resampled rate,
    offset by the interpolator's known phase shift.
    """
    def setUp(self):
        self.tb = gr.top_block()
    def tearDown(self):
        self.tb = None
    def test_001_ff(self):
        """Float-in/float-out path."""
        N = 10000 # number of samples to use
        fs = 1000 # baseband sampling rate
        rrate = 1.123 # resampling rate
        freq = 10
        data = sig_source_f(fs, freq, 1, N)
        signal = blocks.vector_source_f(data)
        op = filter.fractional_interpolator_ff(0, rrate)
        snk = blocks.vector_sink_f()
        self.tb.connect(signal, op, snk)
        self.tb.run()
        # Compare only the last Ntest samples so the filter's startup
        # transient is excluded from the comparison.
        Ntest = 5000
        L = len(snk.data())
        t = map(lambda x: float(x)/(fs/rrate), xrange(L))
        # phase: fixed phase offset introduced by the interpolator, used
        # to build the reference tone.
        phase = 0.1884
        expected_data = map(lambda x: math.sin(2.*math.pi*freq*x+phase), t)
        dst_data = snk.data()
        self.assertFloatTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 3)
    def test_002_cc(self):
        """Complex-in/complex-out path."""
        N = 10000 # number of samples to use
        fs = 1000 # baseband sampling rate
        rrate = 1.123 # resampling rate
        freq = 10
        data = sig_source_c(fs, freq, 1, N)
        signal = blocks.vector_source_c(data)
        op = filter.fractional_interpolator_cc(0.0, rrate)
        snk = blocks.vector_sink_c()
        self.tb.connect(signal, op, snk)
        self.tb.run()
        # Skip the transient; compare only the steady-state tail.
        Ntest = 5000
        L = len(snk.data())
        t = map(lambda x: float(x)/(fs/rrate), xrange(L))
        phase = 0.1884
        expected_data = map(lambda x: math.cos(2.*math.pi*freq*x+phase) + \
                            1j*math.sin(2.*math.pi*freq*x+phase), t)
        dst_data = snk.data()
        self.assertComplexTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 3)
if __name__ == '__main__':
gr_unittest.run(test_fractional_resampler, "test_fractional_resampler.xml")
| gpl-3.0 |
merelcoin/merelcoin | test/lint/lint-format-strings.py | 13 | 12612 | #!/usr/bin/env python3
#
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Lint format strings: This program checks that the number of arguments passed
# to a variadic format string function matches the number of format specifiers
# in the format string.
import argparse
import re
import sys
FALSE_POSITIVES = [
("src/dbwrapper.cpp", "vsnprintf(p, limit - p, format, backup_ap)"),
("src/index/base.cpp", "FatalError(const char* fmt, const Args&... args)"),
("src/netbase.cpp", "LogConnectFailure(bool manual_connection, const char* fmt, const Args&... args)"),
("src/util.cpp", "strprintf(_(COPYRIGHT_HOLDERS), _(COPYRIGHT_HOLDERS_SUBSTITUTION))"),
("src/util.cpp", "strprintf(COPYRIGHT_HOLDERS, COPYRIGHT_HOLDERS_SUBSTITUTION)"),
("src/wallet/wallet.h", "WalletLogPrintf(std::string fmt, Params... parameters)"),
("src/wallet/wallet.h", "LogPrintf((\"%s \" + fmt).c_str(), GetDisplayName(), parameters...)"),
]
def parse_function_calls(function_name, source_code):
    """Return an array with all calls to function function_name in string source_code.
    Preprocessor directives and C++ style comments ("//") in source_code are removed.
    >>> len(parse_function_calls("foo", "foo();bar();foo();bar();"))
    2
    >>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[0].startswith("foo(1);")
    True
    >>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[1].startswith("foo(2);")
    True
    >>> len(parse_function_calls("foo", "foo();bar();// foo();bar();"))
    1
    >>> len(parse_function_calls("foo", "#define FOO foo();"))
    0
    """
    assert(type(function_name) is str and type(source_code) is str and function_name)
    # Strip "//" line comments and drop preprocessor lines entirely
    # before scanning for calls.
    lines = [re.sub("// .*", " ", line).strip()
             for line in source_code.split("\n")
             if not line.strip().startswith("#")]
    # Lookahead capture: match "<name>(" wherever it is not preceded by an
    # identifier character (so "barfoo(" does not match "foo"), returning
    # the remainder of the flattened source from each call site onward.
    return re.findall(r"[^a-zA-Z_](?=({}\(.*).*)".format(function_name), " " + " ".join(lines))
def normalize(s):
    """Return a normalized version of string s with newlines, tabs and C style comments ("/* ... */")
    replaced with spaces. Multiple spaces are replaced with a single space.
    >>> normalize("  /* nothing */ foo\tfoo  /* bar */  foo     ")
    'foo foo foo'
    """
    assert(type(s) is str)
    s = s.replace("\n", " ")
    s = s.replace("\t", " ")
    # Raw string fixes the invalid "\*" escape sequence in the original
    # pattern literal (DeprecationWarning / SyntaxWarning on modern CPython).
    s = re.sub(r"/\*.*?\*/", " ", s)
    s = re.sub(" {2,}", " ", s)
    return s.strip()
# Mapping from backslash escape sequences to unambiguous placeholder
# tokens, so string parsing does not have to reason about backslashes.
ESCAPE_MAP = {
    r"\n": "[escaped-newline]",
    r"\t": "[escaped-tab]",
    r'\"': "[escaped-quote]",
}
def escape(s):
    """Return s with the escape sequences \\", \\n and \\t replaced by
    the placeholder tokens from ESCAPE_MAP. Inverse of unescape(s).
    """
    assert(type(s) is str)
    result = s
    for sequence, placeholder in ESCAPE_MAP.items():
        result = result.replace(sequence, placeholder)
    return result
def unescape(s):
    """Return s with the ESCAPE_MAP placeholder tokens replaced by their
    original escape sequences. Inverse of escape(s).
    """
    assert(type(s) is str)
    restored = s
    for sequence, placeholder in ESCAPE_MAP.items():
        restored = restored.replace(placeholder, sequence)
    return restored
def parse_function_call_and_arguments(function_name, function_call):
    """Split string function_call into an array of strings consisting of:
    * the string function_call followed by "("
    * the function call argument #1
    * ...
    * the function call argument #n
    * a trailing ");"
    The strings returned are in escaped form. See escape(...).
    >>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");')
    ['foo(', '"%s",', ' "foo"', ')']
    >>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");')
    ['foo(', '"%s",', ' "foo"', ')']
    >>> parse_function_call_and_arguments("foo", 'foo("%s %s", "foo", "bar");')
    ['foo(', '"%s %s",', ' "foo",', ' "bar"', ')']
    >>> parse_function_call_and_arguments("fooprintf", 'fooprintf("%050d", i);')
    ['fooprintf(', '"%050d",', ' i', ')']
    >>> parse_function_call_and_arguments("foo", 'foo(bar(foobar(barfoo("foo"))), foobar); barfoo')
    ['foo(', 'bar(foobar(barfoo("foo"))),', ' foobar', ')']
    >>> parse_function_call_and_arguments("foo", "foo()")
    ['foo(', '', ')']
    >>> parse_function_call_and_arguments("foo", "foo(123)")
    ['foo(', '123', ')']
    >>> parse_function_call_and_arguments("foo", 'foo("foo")')
    ['foo(', '"foo"', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf), err);')
    ['strprintf(', '"%s (%d)",', ' std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf),', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo<wchar_t>().to_bytes(buf), err);')
    ['strprintf(', '"%s (%d)",', ' foo<wchar_t>().to_bytes(buf),', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo().to_bytes(buf), err);')
    ['strprintf(', '"%s (%d)",', ' foo().to_bytes(buf),', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo << 1, err);')
    ['strprintf(', '"%s (%d)",', ' foo << 1,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo<bar>() >> 1, err);')
    ['strprintf(', '"%s (%d)",', ' foo<bar>() >> 1,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo < 1 ? bar : foobar, err);')
    ['strprintf(', '"%s (%d)",', ' foo < 1 ? bar : foobar,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo < 1, err);')
    ['strprintf(', '"%s (%d)",', ' foo < 1,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo > 1 ? bar : foobar, err);')
    ['strprintf(', '"%s (%d)",', ' foo > 1 ? bar : foobar,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo > 1, err);')
    ['strprintf(', '"%s (%d)",', ' foo > 1,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo <= 1, err);')
    ['strprintf(', '"%s (%d)",', ' foo <= 1,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo <= bar<1, 2>(1, 2), err);')
    ['strprintf(', '"%s (%d)",', ' foo <= bar<1, 2>(1, 2), err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo>foo<1,2>(1,2)?bar:foobar,err)');
    ['strprintf(', '"%s (%d)",', ' foo>foo<1,2>(1,2)?bar:foobar,', 'err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo>foo<1,2>(1,2),err)');
    ['strprintf(', '"%s (%d)",', ' foo>foo<1,2>(1,2),', 'err', ')']
    """
    assert(type(function_name) is str and type(function_call) is str and function_name)
    # Work on the escaped, whitespace-normalized form so quotes and
    # backslashes do not need special-casing below.
    remaining = normalize(escape(function_call))
    expected_function_call = "{}(".format(function_name)
    assert(remaining.startswith(expected_function_call))
    parts = [expected_function_call]
    remaining = remaining[len(expected_function_call):]
    # Character-level state machine: track nesting of parentheses, C++
    # template angle brackets, and whether we are inside a string literal,
    # splitting on commas only at the top level.
    open_parentheses = 1
    open_template_arguments = 0
    in_string = False
    parts.append("")
    for i, char in enumerate(remaining):
        # Accumulate the current character onto the argument being built.
        parts.append(parts.pop() + char)
        if char == "\"":
            in_string = not in_string
            continue
        if in_string:
            continue
        if char == "(":
            open_parentheses += 1
            continue
        if char == ")":
            open_parentheses -= 1
        if open_parentheses > 1:
            # Inside a nested call: commas there do not split arguments.
            continue
        if open_parentheses == 0:
            # Closing paren of the outer call: strip it from the last
            # argument and emit it as its own trailing part.
            parts.append(parts.pop()[:-1])
            parts.append(char)
            break
        prev_char = remaining[i - 1] if i - 1 >= 0 else None
        next_char = remaining[i + 1] if i + 1 <= len(remaining) - 1 else None
        # Heuristic: "<"/">" count as template brackets only when not part
        # of <<, >>, <=, >= or a spaced comparison operator.
        if char == "<" and next_char not in [" ", "<", "="] and prev_char not in [" ", "<"]:
            open_template_arguments += 1
            continue
        if char == ">" and next_char not in [" ", ">", "="] and prev_char not in [" ", ">"] and open_template_arguments > 0:
            open_template_arguments -= 1
        if open_template_arguments > 0:
            continue
        if char == ",":
            # Top-level comma: start accumulating the next argument.
            parts.append("")
    return parts
def parse_string_content(argument):
    """Return the concatenated text of all double-quoted pieces in argument.
    Characters outside quotes are discarded; adjacent quoted pieces are
    joined together. An argument with no quotes yields the empty string.
    >>> parse_string_content('1 "foo %d bar" 2')
    'foo %d bar'
    >>> parse_string_content('1 "foo" 2 "bar" 3')
    'foobar'
    >>> parse_string_content('')
    ''
    """
    assert(type(argument) is str)
    pieces = []
    inside_quotes = False
    # Escaped quotes were already turned into placeholder tokens by
    # escape(), so every '"' seen here toggles string context.
    for ch in normalize(escape(argument)):
        if ch == "\"":
            inside_quotes = not inside_quotes
            continue
        if inside_quotes:
            pieces.append(ch)
    return "".join(pieces)
def count_format_specifiers(format_string):
    """Return the number of format specifiers in string format_string.
    Literal "%%" does not count; a "*" width/precision inside a specifier
    counts as an extra consumed argument.
    >>> count_format_specifiers("foo bar foo")
    0
    >>> count_format_specifiers("foo %d bar %i foo")
    2
    >>> count_format_specifiers("foo %d bar %i foo %% foo %d foo")
    3
    >>> count_format_specifiers("foo %d bar %i foo %% foo %*d foo")
    4
    """
    assert(type(format_string) is str)
    count = 0
    inside_specifier = False
    for pos, ch in enumerate(format_string):
        # Characters that belong to a literal "%%" pair (looking one
        # character back or forward) are skipped entirely.
        part_of_percent_literal = (
            format_string[pos - 1:pos + 1] == "%%"
            or format_string[pos:pos + 2] == "%%"
        )
        if part_of_percent_literal:
            continue
        if ch == "%":
            inside_specifier = True
            count += 1
        elif ch in "aAcdeEfFgGinopsuxX":
            # A conversion character terminates the current specifier.
            inside_specifier = False
        elif inside_specifier and ch == "*":
            # "*" consumes an additional argument (dynamic width/precision).
            count += 1
    return count
def main():
    """Lint each input file and exit non-zero on any mismatch.
    For every call to `function_name` found in the given files, compares
    the number of format specifiers in the format-string argument with
    the number of arguments actually passed to the call.
    """
    parser = argparse.ArgumentParser(description="This program checks that the number of arguments passed "
                                                 "to a variadic format string function matches the number of format "
                                                 "specifiers in the format string.")
    parser.add_argument("--skip-arguments", type=int, help="number of arguments before the format string "
                                                           "argument (e.g. 1 in the case of fprintf)", default=0)
    parser.add_argument("function_name", help="function name (e.g. fprintf)", default=None)
    parser.add_argument("file", type=argparse.FileType("r", encoding="utf-8"), nargs="*", help="C++ source code file (e.g. foo.cpp)")
    args = parser.parse_args()
    exit_code = 0
    for f in args.file:
        for function_call_str in parse_function_calls(args.function_name, f.read()):
            parts = parse_function_call_and_arguments(args.function_name, function_call_str)
            # Truncated, unescaped form used both for reporting and for
            # matching entries in FALSE_POSITIVES.
            relevant_function_call_str = unescape("".join(parts))[:512]
            if (f.name, relevant_function_call_str) in FALSE_POSITIVES:
                continue
            # parts is ["name(", format string, args..., ")"], so a parse
            # shorter than 3 + skipped arguments is unusable.
            if len(parts) < 3 + args.skip_arguments:
                exit_code = 1
                print("{}: Could not parse function call string \"{}(...)\": {}".format(f.name, args.function_name, relevant_function_call_str))
                continue
            argument_count = len(parts) - 3 - args.skip_arguments
            format_str = parse_string_content(parts[1 + args.skip_arguments])
            format_specifier_count = count_format_specifiers(format_str)
            if format_specifier_count != argument_count:
                exit_code = 1
                print("{}: Expected {} argument(s) after format string but found {} argument(s): {}".format(f.name, format_specifier_count, argument_count, relevant_function_call_str))
                continue
    sys.exit(exit_code)
if __name__ == "__main__":
main()
| mit |
jimmy-ren/RPN2T | external/_caffe/python/draw_net.py | 19 | 1934 | #!/usr/bin/env python
"""
Draw a graph of the net architecture.
"""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from google.protobuf import text_format
import caffe
import caffe.draw
from caffe.proto import caffe_pb2
def parse_args():
    """Parse command line arguments.
    Returns:
        argparse.Namespace with input_net_proto_file, output_image_file,
        rankdir and phase attributes.
    """
    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('input_net_proto_file',
                        help='Input network prototxt file')
    parser.add_argument('output_image_file',
                        help='Output image file')
    parser.add_argument('--rankdir',
                        help=('One of TB (top-bottom, i.e., vertical), '
                              'RL (right-left, i.e., horizontal), or another '
                              'valid dot option; see '
                              'http://www.graphviz.org/doc/info/'
                              'attrs.html#k:rankdir'),
                        default='LR')
    parser.add_argument('--phase',
                        help=('Which network phase to draw: can be TRAIN, '
                              'TEST, or ALL. If ALL, then all layers are drawn '
                              'regardless of phase.'),
                        default="ALL")
    args = parser.parse_args()
    return args
def main():
    """Load the prototxt, resolve the requested phase, and render the net.

    Raises:
        ValueError: if --phase is not one of TRAIN, TEST or ALL.
    """
    args = parse_args()
    net = caffe_pb2.NetParameter()
    # Use a context manager so the prototxt file handle is closed
    # (the original `open(...).read()` leaked the handle).
    with open(args.input_net_proto_file) as proto_file:
        text_format.Merge(proto_file.read(), net)
    print('Drawing net to %s' % args.output_image_file)
    phase = None  # None (phase "ALL") draws every layer regardless of phase.
    if args.phase == "TRAIN":
        phase = caffe.TRAIN
    elif args.phase == "TEST":
        phase = caffe.TEST
    elif args.phase != "ALL":
        raise ValueError("Unknown phase: " + args.phase)
    caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir,
                                phase)
if __name__ == '__main__':
main()
| mit |
ctsit/vivo-pump | uf_examples/sponsors/merge_filter.py | 3 | 1440 | #!/usr/bin/env/python
"""
merge_filter.py -- find the sponsors in VIVO, and match them to the sponsors in the source. They
must match on sponsorid
There are two inputs:
1. sponsors in VIVO. Keyed by sponsorid
2. UF sponsors in the source. Keyed the same.
There are three cases
1. sponsor in VIVO and in Source => Update VIVO from source
1. sponsor in VIVO, not in source => nothing to do
1. sponsor not in VIVO, is in source => Add to VIVO
See CHANGELOG.md for history
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2015 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.02"
import sys
from pump.vivopump import read_csv_fp, write_csv_fp, get_vivo_sponsorid, get_parms
# NOTE: Python 2 script (print-chevron syntax); diagnostics go to stderr
# so the filtered CSV on stdout stays clean for the next pipeline stage.
parms = get_parms()
if parms['verbose']:
    print >>sys.stderr, parms
# Source sponsors arrive as CSV on stdin, keyed by row number.
data_in = read_csv_fp(sys.stdin)
print >>sys.stderr, len(data_in)
data_out = {}
vivo_sponsors = get_vivo_sponsorid(parms)  # dict of sponsor uri keyed by sponsorid
print >>sys.stderr, 'VIVO sponsors', len(vivo_sponsors)
for row, data in data_in.items():
    new_data = dict(data)  # copy so the input row is not mutated
    if data['sponsorid'] in vivo_sponsors:  # sponsorid is in vivo and source => update VIVO from source
        new_data['uri'] = vivo_sponsors[data['sponsorid']]
    else:  # key is in source, not in vivo => empty uri marks it for addition
        new_data['uri'] = ''
    data_out[row] = new_data
print >>sys.stderr, 'data out', len(data_out)
write_csv_fp(sys.stdout, data_out)
| bsd-2-clause |
tchernomax/ansible | lib/ansible/modules/network/aci/aci_domain_to_vlan_pool.py | 10 | 10592 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_domain_to_vlan_pool
short_description: Bind Domain to VLAN Pools (infra:RsVlanNs)
description:
- Bind Domain to VLAN Pools on Cisco ACI fabrics.
notes:
- The C(domain) and C(vlan_pool) parameters should exist before using this module.
The M(aci_domain) and M(aci_vlan_pool) can be used for these.
- More information about the internal APIC class B(infra:RsVlanNs) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.5'
options:
domain:
description:
- Name of the domain being associated with the VLAN Pool.
aliases: [ domain_name, domain_profile ]
domain_type:
description:
- Determines if the Domain is physical (phys) or virtual (vmm).
choices: [ fc, l2dom, l3dom, phys, vmm ]
pool:
description:
- The name of the pool.
aliases: [ pool_name, vlan_pool ]
pool_allocation_mode:
description:
- The method used for allocating VLANs to resources.
choices: [ dynamic, static]
required: yes
aliases: [ allocation_mode, mode ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
vm_provider:
description:
- The VM platform for VMM Domains.
- Support for Kubernetes was added in ACI v3.0.
- Support for CloudFoundry, OpenShift and Red Hat was added in ACI v3.1.
choices: [ cloudfoundry, kubernetes, microsoft, openshift, openstack, redhat, vmware ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Bind a VMM domain to VLAN pool
aci_domain_to_vlan_pool:
host: apic
username: admin
password: SomeSecretPassword
domain: vmw_dom
domain_type: vmm
pool: vmw_pool
pool_allocation_mode: dynamic
vm_provider: vmware
state: present
delegate_to: localhost
- name: Remove a VMM domain to VLAN pool binding
aci_domain_to_vlan_pool:
host: apic
username: admin
password: SomeSecretPassword
domain: vmw_dom
domain_type: vmm
pool: vmw_pool
pool_allocation_mode: dynamic
vm_provider: vmware
state: absent
delegate_to: localhost
- name: Bind a physical domain to VLAN pool
aci_domain_to_vlan_pool:
host: apic
username: admin
password: SomeSecretPassword
domain: phys_dom
domain_type: phys
pool: phys_pool
pool_allocation_mode: static
state: present
delegate_to: localhost
- name: Bind a physical domain to VLAN pool
aci_domain_to_vlan_pool:
host: apic
username: admin
password: SomeSecretPassword
domain: phys_dom
domain_type: phys
pool: phys_pool
pool_allocation_mode: static
state: absent
delegate_to: localhost
- name: Query an domain to VLAN pool binding
aci_domain_to_vlan_pool:
host: apic
username: admin
password: SomeSecretPassword
domain: phys_dom
domain_type: phys
pool: phys_pool
pool_allocation_mode: static
state: query
delegate_to: localhost
register: query_result
- name: Query all domain to VLAN pool bindings
aci_domain_to_vlan_pool:
host: apic
username: admin
password: SomeSecretPassword
domain_type: phys
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
VM_PROVIDER_MAPPING = dict(
cloudfoundry='CloudFoundry',
kubernetes='Kubernetes',
microsoft='Microsoft',
openshift='OpenShift',
openstack='OpenStack',
redhat='Redhat',
vmware='VMware',
)
def main():
    """Ansible module entry point.
    Builds the APIC URL for the requested domain type, then creates,
    deletes or queries the domain-to-VLAN-pool binding (infra:RsVlanNs)
    according to `state`.
    """
    argument_spec = aci_argument_spec()
    argument_spec.update(
        domain=dict(type='str', aliases=['domain_name', 'domain_profile']),  # Not required for querying all objects
        domain_type=dict(type='str', required=True, choices=['fc', 'l2dom', 'l3dom', 'phys', 'vmm']),  # Not required for querying all objects
        pool=dict(type='str', aliases=['pool_name', 'vlan_pool']),  # Not required for querying all objects
        pool_allocation_mode=dict(type='str', required=True, aliases=['allocation_mode', 'mode'], choices=['dynamic', 'static']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        vm_provider=dict(type='str', choices=['cloudfoundry', 'kubernetes', 'microsoft', 'openshift', 'openstack', 'redhat', 'vmware']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['domain_type', 'vmm', ['vm_provider']],
            ['state', 'absent', ['domain', 'domain_type', 'pool']],
            ['state', 'present', ['domain', 'domain_type', 'pool']],
        ],
    )
    domain = module.params['domain']
    domain_type = module.params['domain_type']
    pool = module.params['pool']
    pool_allocation_mode = module.params['pool_allocation_mode']
    vm_provider = module.params['vm_provider']
    state = module.params['state']
    # Report when vm_provider is set when type is not virtual
    if domain_type != 'vmm' and vm_provider is not None:
        module.fail_json(msg="Domain type '{0}' cannot have a 'vm_provider'".format(domain_type))
    # ACI Pool URL requires the allocation mode for vlan and vsan pools (ex: uni/infra/vlanns-[poolname]-static)
    pool_name = pool
    if pool is not None:
        pool_name = '[{0}]-{1}'.format(pool, pool_allocation_mode)
    # Compile the full domain for URL building: class name, managed-object
    # DN and relative name differ per domain type.
    if domain_type == 'fc':
        domain_class = 'fcDomP'
        domain_mo = 'uni/fc-{0}'.format(domain)
        domain_rn = 'fc-{0}'.format(domain)
    elif domain_type == 'l2dom':
        domain_class = 'l2extDomP'
        domain_mo = 'uni/l2dom-{0}'.format(domain)
        domain_rn = 'l2dom-{0}'.format(domain)
    elif domain_type == 'l3dom':
        domain_class = 'l3extDomP'
        domain_mo = 'uni/l3dom-{0}'.format(domain)
        domain_rn = 'l3dom-{0}'.format(domain)
    elif domain_type == 'phys':
        domain_class = 'physDomP'
        domain_mo = 'uni/phys-{0}'.format(domain)
        domain_rn = 'phys-{0}'.format(domain)
    elif domain_type == 'vmm':
        domain_class = 'vmmDomP'
        domain_mo = 'uni/vmmp-{0}/dom-{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain)
        domain_rn = 'vmmp-{0}/dom-{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain)
    # Ensure that querying all objects works when only domain_type is provided
    if domain is None:
        domain_mo = None
    # Target DN of the VLAN pool the domain binds to.
    aci_mo = 'uni/infra/vlanns-{0}'.format(pool_name)
    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class=domain_class,
            aci_rn=domain_rn,
            module_object=domain_mo,
            target_filter={'name': domain},
        ),
        child_classes=['infraRsVlanNs'],
    )
    aci.get_existing()
    if state == 'present':
        # The binding itself is a child (infraRsVlanNs) of the domain object.
        aci.payload(
            aci_class=domain_class,
            class_config=dict(name=domain),
            child_configs=[
                {'infraRsVlanNs': {'attributes': {'tDn': aci_mo}}},
            ]
        )
        aci.get_diff(aci_class=domain_class)
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()
    # 'query' falls through: exit_json reports whatever get_existing found.
    aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
aldian/tensorflow | tensorflow/contrib/kfac/examples/convnet.py | 15 | 16970 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Train a ConvNet on MNIST using K-FAC.
This library fits a 5-layer ConvNet on MNIST using K-FAC. The model has the
following structure,
- Conv Layer: 5x5 kernel, 16 output channels.
- Max Pool: 3x3 kernel, stride 2.
- Conv Layer: 5x5 kernel, 16 output channels.
- Max Pool: 3x3 kernel, stride 2.
- Linear: 10 output dims.
After 3k~6k steps, this should reach perfect accuracy on the training set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.kfac.examples import mlp
from tensorflow.contrib.kfac.examples import mnist
lc = tf.contrib.kfac.layer_collection
oq = tf.contrib.kfac.op_queue
opt = tf.contrib.kfac.optimizer
__all__ = [
"conv_layer",
"max_pool_layer",
"linear_layer",
"build_model",
"minimize_loss_single_machine",
"minimize_loss_distributed",
"train_mnist_single_machine",
"train_mnist_distributed",
]
def conv_layer(layer_id, inputs, kernel_size, out_channels):
  """Construct a square-kernel 2-D convolution followed by a ReLU.

  Args:
    layer_id: int. Integer ID used to name this layer's variables.
    inputs: Tensor of shape [num_examples, width, height, in_channels]. Each
      row corresponds to a single example.
    kernel_size: int. Side length of the (square) convolution kernel.
    out_channels: int. Number of output features per pixel.

  Returns:
    preactivations: Tensor of shape [num_examples, width, height,
      out_channels]. Layer values immediately before the activation function.
    activations: Tensor of shape [num_examples, width, height, out_channels].
      Layer values immediately after the activation function.
    params: Tuple of (kernel, bias), parameters for this layer.
  """
  # TODO(b/67004004): Delete this function and rely on tf.layers exclusively.
  conv = tf.layers.Conv2D(
      out_channels,
      kernel_size=[kernel_size, kernel_size],
      kernel_initializer=tf.random_normal_initializer(stddev=0.01),
      padding="SAME",
      name="conv_%d" % layer_id)
  pre = conv(inputs)
  post = tf.nn.relu(pre)
  # Return the parameters as a (hashable) tuple rather than the list that
  # layer.weights would give us.
  return pre, post, (conv.kernel, conv.bias)
def max_pool_layer(layer_id, inputs, kernel_size, stride):
  """Apply square max-pooling to 'inputs'.

  Args:
    layer_id: int. Integer ID used to name this layer's variable scope.
    inputs: Tensor of shape [num_examples, width, height, in_channels]. Each
      row corresponds to a single example.
    kernel_size: int. Width and height to pool over per input channel. The
      kernel is assumed to be square.
    stride: int. Step size between pooling operations.

  Returns:
    Tensor of shape [num_examples, width/stride, height/stride, out_channels].
    Result of applying max pooling to 'inputs'.
  """
  # TODO(b/67004004): Delete this function and rely on tf.layers exclusively.
  with tf.variable_scope("pool_%d" % layer_id):
    window = [1, kernel_size, kernel_size, 1]
    strides = [1, stride, stride, 1]
    return tf.nn.max_pool(
        inputs, window, strides, padding="SAME", name="pool")
def linear_layer(layer_id, inputs, output_size):
  """Build the final linear layer for an MNIST classification problem.

  Args:
    layer_id: int. Integer ID for this layer's variables.
    inputs: Tensor of shape [num_examples, width, height, in_channels]. Each
      row corresponds to a single example.
    output_size: int. Number of output dims per example.

  Returns:
    activations: Tensor of shape [num_examples, output_size]. Values of the
      layer immediately before the activation function (logits).
    params: Tuple of (weights, bias), parameters for this layer.
  """
  # TODO(b/67004004): Delete this function and rely on tf.layers exclusively.
  # mlp.fc_layer also returns post-activation values, which we discard here.
  preactivations, _, layer_params = mlp.fc_layer(layer_id, inputs, output_size)
  return preactivations, layer_params
def build_model(examples, labels, num_labels, layer_collection):
  """Builds a ConvNet classification model.

  Architecture: conv(5x5,16) -> maxpool(3x3,/2) -> conv(5x5,16) ->
  maxpool(3x3,/2) -> linear(num_labels).

  Args:
    examples: Tensor of shape [num_examples, num_features]. Represents inputs of
      model.
    labels: Tensor of shape [num_examples]. Contains integer IDs to be predicted
      by softmax for each example.
    num_labels: int. Number of distinct values 'labels' can take on.
    layer_collection: LayerCollection instance. Layers will be registered here.

  Returns:
    loss: 0-D Tensor representing loss to be minimized.
    accuracy: 0-D Tensor representing model's accuracy.
  """
  # Build a ConvNet. For each layer with parameters, we'll keep track of the
  # preactivations, activations, weights, and bias.
  tf.logging.info("Building model.")
  pre0, act0, params0 = conv_layer(
      layer_id=0, inputs=examples, kernel_size=5, out_channels=16)
  act1 = max_pool_layer(layer_id=1, inputs=act0, kernel_size=3, stride=2)
  pre2, act2, params2 = conv_layer(
      layer_id=2, inputs=act1, kernel_size=5, out_channels=16)
  act3 = max_pool_layer(layer_id=3, inputs=act2, kernel_size=3, stride=2)
  # Flatten [batch, w, h, c] to [batch, w*h*c] for the final linear layer.
  flat_act3 = tf.reshape(act3, shape=[-1, int(np.prod(act3.shape[1:4]))])
  logits, params4 = linear_layer(
      layer_id=4, inputs=flat_act3, output_size=num_labels)
  loss = tf.reduce_mean(
      tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits))
  accuracy = tf.reduce_mean(
      tf.cast(tf.equal(labels, tf.argmax(logits, axis=1)), dtype=tf.float32))
  tf.summary.scalar("loss", loss)
  tf.summary.scalar("accuracy", accuracy)
  # Register parameters. K-FAC needs to know about the inputs, outputs, and
  # parameters of each conv/fully connected layer and the logits powering the
  # posterior probability over classes.
  # NOTE: the (1, 1, 1, 1) strides passed here must match the conv layers
  # built above (tf.layers.Conv2D default stride of 1).
  tf.logging.info("Building LayerCollection.")
  layer_collection.register_conv2d(params0, (1, 1, 1, 1), "SAME", examples,
                                   pre0)
  layer_collection.register_conv2d(params2, (1, 1, 1, 1), "SAME", act1, pre2)
  layer_collection.register_fully_connected(params4, flat_act3, logits)
  layer_collection.register_categorical_predictive_distribution(
      logits, name="logits")
  return loss, accuracy
def minimize_loss_single_machine(loss,
                                 accuracy,
                                 layer_collection,
                                 session_config=None):
  """Minimize loss with K-FAC on a single machine.

  A single Session is responsible for running all of K-FAC's ops.

  Args:
    loss: 0-D Tensor. Loss to be minimized.
    accuracy: 0-D Tensor. Accuracy of classifier on current minibatch.
    layer_collection: LayerCollection instance describing model architecture.
      Used by K-FAC to construct preconditioner.
    session_config: None or tf.ConfigProto. Configuration for tf.Session().

  Returns:
    final value for 'accuracy', or None if the session stopped before any
    training step was run.
  """
  # Train with K-FAC.
  global_step = tf.train.get_or_create_global_step()
  optimizer = opt.KfacOptimizer(
      learning_rate=0.0001,
      cov_ema_decay=0.95,
      damping=0.001,
      layer_collection=layer_collection,
      momentum=0.9)
  train_op = optimizer.minimize(loss, global_step=global_step)

  tf.logging.info("Starting training.")
  # Fix: the original returned an unbound local if the loop body never ran.
  accuracy_ = None
  with tf.train.MonitoredTrainingSession(config=session_config) as sess:
    while not sess.should_stop():
      global_step_, loss_, accuracy_, _, _ = sess.run(
          [global_step, loss, accuracy, train_op, optimizer.cov_update_op])

      # Fix: the original tested `global_step_ % 100 == 0` in two consecutive
      # identical `if` statements; both periodic actions (inverse update and
      # progress logging) are now grouped under a single check.
      if global_step_ % 100 == 0:
        sess.run(optimizer.inv_update_op)
        tf.logging.info("global_step: %d | loss: %f | accuracy: %s",
                        global_step_, loss_, accuracy_)

  return accuracy_
def _is_gradient_task(task_id, num_tasks):
"""Returns True if this task should update the weights."""
if num_tasks < 3:
return True
return 0 <= task_id < 0.6 * num_tasks
def _is_cov_update_task(task_id, num_tasks):
"""Returns True if this task should update K-FAC's covariance matrices."""
if num_tasks < 3:
return False
return 0.6 * num_tasks <= task_id < num_tasks - 1
def _is_inv_update_task(task_id, num_tasks):
"""Returns True if this task should update K-FAC's preconditioner."""
if num_tasks < 3:
return False
return task_id == num_tasks - 1
def _num_gradient_tasks(num_tasks):
"""Number of tasks that will update weights."""
if num_tasks < 3:
return num_tasks
return int(np.ceil(0.6 * num_tasks))
def minimize_loss_distributed(task_id, num_worker_tasks, num_ps_tasks, master,
                              checkpoint_dir, loss, accuracy, layer_collection):
  """Minimize loss with a synchronous implementation of K-FAC.

  Different tasks are responsible for different parts of K-FAC's Ops. The first
  60% of tasks update weights; the next 20% accumulate covariance statistics;
  the last 20% invert the matrices used to precondition gradients.

  Args:
    task_id: int. Integer in [0, num_worker_tasks). ID for this worker.
    num_worker_tasks: int. Number of workers in this distributed training setup.
    num_ps_tasks: int. Number of parameter servers holding variables. If 0,
      parameter servers are not used.
    master: string. IP and port of TensorFlow runtime process. Set to empty
      string to run locally.
    checkpoint_dir: string or None. Path to store checkpoints under.
    loss: 0-D Tensor. Loss to be minimized.
    accuracy: dict mapping strings to 0-D Tensors. Additional accuracy to
      run with each step.
    layer_collection: LayerCollection instance describing model architecture.
      Used by K-FAC to construct preconditioner.

  Returns:
    final value for 'accuracy'.

  Raises:
    ValueError: if task_id >= num_worker_tasks.
  """
  # Place variables on parameter servers (or locally when num_ps_tasks == 0).
  with tf.device(tf.train.replica_device_setter(num_ps_tasks)):
    global_step = tf.train.get_or_create_global_step()
    optimizer = opt.KfacOptimizer(
        learning_rate=0.0001,
        cov_ema_decay=0.95,
        damping=0.001,
        layer_collection=layer_collection,
        momentum=0.9)
    # Queue of inverse-update ops; inverse-update tasks pop one op per step.
    inv_update_queue = oq.OpQueue(optimizer.inv_updates_dict.values())
    # Gradients are aggregated synchronously across the gradient workers only.
    sync_optimizer = tf.train.SyncReplicasOptimizer(
        opt=optimizer,
        replicas_to_aggregate=_num_gradient_tasks(num_worker_tasks))
    train_op = sync_optimizer.minimize(loss, global_step=global_step)

  tf.logging.info("Starting training.")
  is_chief = (task_id == 0)
  hooks = [sync_optimizer.make_session_run_hook(is_chief)]
  with tf.train.MonitoredTrainingSession(
      master=master,
      is_chief=is_chief,
      checkpoint_dir=checkpoint_dir,
      hooks=hooks,
      stop_grace_period_secs=0) as sess:
    while not sess.should_stop():
      # Choose which op this task is responsible for running.
      if _is_gradient_task(task_id, num_worker_tasks):
        learning_op = train_op
      elif _is_cov_update_task(task_id, num_worker_tasks):
        learning_op = optimizer.cov_update_op
      elif _is_inv_update_task(task_id, num_worker_tasks):
        # TODO(duckworthd): Running this op before cov_update_op has been run a
        # few times can result in "InvalidArgumentError: Cholesky decomposition
        # was not successful." Delay running this op until cov_update_op has
        # been run a few times.
        learning_op = inv_update_queue.next_op(sess)
      else:
        raise ValueError("Which op should task %d do?" % task_id)

      global_step_, loss_, accuracy_, _ = sess.run(
          [global_step, loss, accuracy, learning_op])
      tf.logging.info("global_step: %d | loss: %f | accuracy: %s", global_step_,
                      loss_, accuracy_)
  # NOTE(review): like the single-machine variant, accuracy_ is unbound if
  # the loop body never runs — confirm whether that can happen in practice.
  return accuracy_
def train_mnist_single_machine(data_dir, num_epochs, use_fake_data=False):
  """Train a ConvNet on MNIST on a single machine.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=128,
      use_fake_data=use_fake_data,
      flatten_images=False)

  # Build a ConvNet, registering its layers with K-FAC as we go.
  layers = lc.LayerCollection()
  loss, accuracy = build_model(
      examples, labels, num_labels=10, layer_collection=layers)

  # Fit model.
  return minimize_loss_single_machine(loss, accuracy, layers)
def train_mnist_multitower(data_dir, num_epochs, num_towers,
                           use_fake_data=True):
  """Train a ConvNet on MNIST, splitting each minibatch across CPU towers.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    num_towers: int. Number of CPUs to split inference across.
    use_fake_data: bool. If True, generate a synthetic dataset.
      NOTE(review): defaults to True here but False in the sibling train_*
      functions — confirm this asymmetry is intentional.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  tower_batch_size = 128
  # The global batch is the concatenation of all tower batches.
  batch_size = tower_batch_size * num_towers
  tf.logging.info(
      ("Loading MNIST into memory. Using batch_size = %d = %d towers * %d "
       "tower batch size.") % (batch_size, num_towers, tower_batch_size))
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=batch_size,
      use_fake_data=use_fake_data,
      flatten_images=False)

  # Split minibatch across towers.
  examples = tf.split(examples, num_towers)
  labels = tf.split(labels, num_towers)

  # Build an MLP. Each tower's layers will be added to the LayerCollection.
  layer_collection = lc.LayerCollection()
  tower_results = []
  for tower_id in range(num_towers):
    with tf.device("/cpu:%d" % tower_id):
      with tf.name_scope("tower%d" % tower_id):
        # reuse=(tower_id > 0) makes all towers share one set of variables:
        # tower 0 creates them, later towers reuse them.
        with tf.variable_scope(tf.get_variable_scope(), reuse=(tower_id > 0)):
          tf.logging.info("Building tower %d." % tower_id)
          tower_results.append(
              build_model(examples[tower_id], labels[tower_id], 10,
                          layer_collection))
  losses, accuracies = zip(*tower_results)

  # Average across towers.
  loss = tf.reduce_mean(losses)
  accuracy = tf.reduce_mean(accuracies)

  # Fit model. Pinning one "CPU" device per tower requires the device_count
  # override; soft placement is disabled so misplacement fails loudly.
  session_config = tf.ConfigProto(
      allow_soft_placement=False, device_count={
          "CPU": num_towers
      })
  return minimize_loss_single_machine(
      loss, accuracy, layer_collection, session_config=session_config)
def train_mnist_distributed(task_id,
                            num_worker_tasks,
                            num_ps_tasks,
                            master,
                            data_dir,
                            num_epochs,
                            use_fake_data=False):
  """Train a ConvNet on MNIST across a cluster of workers.

  Args:
    task_id: int. Integer in [0, num_worker_tasks). ID for this worker.
    num_worker_tasks: int. Number of workers in this distributed training setup.
    num_ps_tasks: int. Number of parameter servers holding variables.
    master: string. IP and port of TensorFlow runtime process.
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=128,
      use_fake_data=use_fake_data,
      flatten_images=False)

  # Build a ConvNet with its variables placed via the replica device setter.
  layer_collection = lc.LayerCollection()
  with tf.device(tf.train.replica_device_setter(num_ps_tasks)):
    loss, accuracy = build_model(
        examples, labels, num_labels=10, layer_collection=layer_collection)

  # Fit model. Checkpoints go under <data_dir>/kfac when a data dir is given.
  if data_dir is None:
    checkpoint_dir = None
  else:
    checkpoint_dir = os.path.join(data_dir, "kfac")
  return minimize_loss_distributed(task_id, num_worker_tasks, num_ps_tasks,
                                   master, checkpoint_dir, loss, accuracy,
                                   layer_collection)
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
brunoabud/ic | gui/playback.py | 1 | 5184 | # coding: utf-8
# Copyright (C) 2016 Bruno Abude Cardoso
#
# Imagem Cinemática is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Imagem Cinemática is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from PyQt4.QtCore import QTimer
from PyQt4.QtGui import QIcon, QPixmap, QImage, QApplication
import cv2
from application import get_app
from ic.queue import Empty
from ic import engine
from ic import messages
# Create the logger object for this module
LOG = logging.getLogger(__name__)
class Playback(object):
    """Class that controls the Playback GUI and manages the FrameStream states.

    Bridges the engine's frame stream / preview queue and the main window's
    playback widgets (play button, position scroll bar, position labels).
    """

    def __init__(self, main_window):
        # Reference to the main window whose playback widgets we drive.
        self._main = main_window
        # Register this object to the messages system
        messages.register(self)
        # The timer responsible for getting frames from the preview queue
        # and showing them to the user
        self.preview_timer = QTimer()
        self.preview_timer.setSingleShot(True)
        self.preview_timer.timeout.connect(self.update_preview)
        # True while a seek is pending and the preview must keep polling.
        self.wait_to_seek = False

    def slider_moved(self, value):
        """Update the current-position label while the user drags the slider."""
        self._main.lbl_current.setText(self._format_pos(value))

    def start_seeking(self):
        """Flag that a seek is about to happen (slider pressed)."""
        self.wait_to_seek = True

    def seek(self):
        """Seek the frame stream to the slider position and resume polling."""
        fs = engine.frame_stream
        fs.seek(self._main.scb_pos.sliderPosition())
        self.wait_to_seek = True
        self.preview_timer.start()

    def message_received(self, mtype, mdata, sender):
        """Dispatch engine/frame-stream messages to the matching GUI update.

        mdata for ENGINE_source_opened is a "seekable;length" string where
        either field may be the literal "None"/"True"/"False".
        """
        if mtype == "ENGINE_source_opened":
            seekable, length = mdata.split(";")
            if length != "None":
                self.source_length = int(length)
            else:
                self.source_length = 0
            # Only seekable sources get an enabled position scroll bar.
            if seekable == "True":
                self._main.scb_pos.setEnabled(True)
            else:
                self._main.scb_pos.setEnabled(False)
            self._update_gui()
        elif mtype == "ENGINE_source_closed":
            self._reset_gui()
        elif mtype == "FS_stream_started":
            self._play()
        elif mtype == "FS_stream_stopped":
            self._pause()

    def _format_pos(self, pos):
        """Zero-pad `pos` to the width of the largest frame index."""
        str_size = len(str(self.source_length))
        format_str = "{:0"+str(str_size)+"d}"
        return format_str.format(pos)

    def _reset_gui(self):
        """Return the playback widgets to their no-source state."""
        mw = self._main
        mw.frm_playback.setEnabled(False)
        mw.lbl_current.setText(self._format_pos(0))
        mw.lbl_total.setText(self._format_pos(0))
        mw.scb_pos.setRange(0, 0)
        mw.pb_play.setChecked(False)
        mw.pb_play.setIcon(QIcon(":icon/play"))
        # NOTE(review): starting the stream during a GUI reset looks odd —
        # presumably fs.start(True) flushes/one-shots the stream; confirm.
        fs = engine.frame_stream
        fs.start(True)
        self.wait_to_seek = True
        self.preview_timer.start()

    def _update_gui(self, pos = 0):
        """Enable playback widgets and size the slider to the source length."""
        # NOTE(review): the `pos` parameter is currently unused.
        mw = self._main
        mw.frm_playback.setEnabled(True)
        mw.scb_pos.setRange(0, max(self.source_length - 1, 0))
        mw.lbl_total.setText(self._format_pos(mw.scb_pos.maximum()))

    def _play(self):
        """Switch the play button to its 'playing' state and start polling."""
        self._main.pb_play.setChecked(True)
        self._main.pb_play.setIcon(QIcon(":icon/pause"))
        self.preview_timer.start()

    def _pause(self):
        """Switch the play button back to its 'paused' state."""
        self._main.pb_play.setChecked(False)
        self._main.pb_play.setIcon(QIcon(":icon/play"))

    def _set_pos(self, pos):
        """Reflect the current frame position in the slider and label."""
        mw = self._main
        mw.scb_pos.setValue(pos)
        mw.lbl_current.setText(self._format_pos(pos))

    def update_preview(self):
        """Pop one frame buffer off the preview queue and paint the canvases.

        Re-arms the single-shot timer while playing or while a seek is
        pending; an empty queue retries immediately (interval 0).
        """
        interval = 20
        try:
            mw = self._main
            fs = engine.frame_stream
            preview_queue = engine.preview_queue
            # Don't get frames from the queue if the user is moving the time
            # scroll bar
            if not mw.scb_pos.isSliderDown():
                # Non-blocking get; raises Empty when no frame is ready.
                targets_buffer = preview_queue.get(False)
                pos = targets_buffer["pos"]
                timestamp = targets_buffer["timestamp"]
                self._set_pos(pos)
                # NOTE(review): this also iterates the "pos"/"timestamp" keys;
                # presumably no canvas targets those names — confirm.
                for target in targets_buffer:
                    for canvas in [c for c in self._main._canvas_list if c._target == target]:
                        f = targets_buffer[target]
                        canvas.update_preview(f)
                QApplication.instance().processEvents()
                self.wait_to_seek = False
        except Empty:
            interval = 0
        except:
            # Bare except: swallow and log any painting error so the preview
            # loop keeps running.
            LOG.debug("", exc_info=True)
        if self._main.pb_play.isChecked() or self.wait_to_seek:
            self.preview_timer.start(interval)

    def pb_play_clicked(self, checked):
        """Play/pause toggle handler wired to the play button."""
        app = get_app()
        fs = engine.frame_stream
        if checked:
            fs.start()
            self._play()
        else:
            fs.stop()
            self._pause()
| gpl-3.0 |
RichardKnop/digitalmarketplace-api | scripts/process-g6-into-elastic-search.py | 2 | 15901 | #!/usr/bin/python
'''Process G6 JSON files into elasticsearch
This version reads JSON from disk or DM API and transforms this into the format
expected by the DM search.
Usage:
process-g6-into-elastic-search.py <es_endpoint> <dir_or_endpoint> [<token>]
Arguments:
es_endpoint Full ES index URL
dir_or_endpoint Directory path to import or an API URL if token is given
token Digital Marketplace API token
'''
import os
import sys
import json
import urllib2
# Maps G6 human-readable service category names to the numeric G5 category
# ids used by the search index (see category_name_to_id / g6_to_g5).
CATEGORY_MAPPINGS = {
    'Accounting and finance': '110',
    'Business intelligence and analytics': '111',
    'Collaboration': '112',
    'Telecoms': '113',
    'Customer relationship management (CRM)': '114',
    'Creative and design': '115',
    'Data management': '116',
    'Sales': '117',
    'Software development tools': '118',
    'Electronic document and records management (EDRM)': '119',
    'Human resources and employee management': '120',
    'IT management': '121',
    'Marketing': '122',
    'Operations management': '123',
    'Project management and planning': '124',
    'Security': '125',
    'Libraries': '126',
    'Schools and education': '127',
    'Energy and environment': '128',
    'Healthcare': '129',
    'Legal': '130',
    'Transport and logistics': '131',
    'Unlisted': '132',
    'Compute': '133',
    'Storage': '134',
    'Other': '135',
    'Platform as a service': '136',
    'Planning': '137',
    'Implementation': '138',
    'Testing': '139',
    'Training': '140',
    'Ongoing support': '141',
    'Specialist Cloud Services': '142'
}
def category_name_to_id(name):
    """Look up the numeric G5 category id for a G6 category name.

    Raises KeyError for names missing from CATEGORY_MAPPINGS.
    """
    return CATEGORY_MAPPINGS[name]
def attributes(data):
    """Build the G5 ``attributes`` list for a G6 service document.

    Each attribute is a dict of the form ``{"name": <g5_id>, <g5_id>: value}``.

    Bug fix: the original implementation appended the whole pricing /
    technical-information / service-management section twice (copy-paste
    duplication) and appended "supportForThirdParties" twice even within one
    copy, producing duplicate attribute entries in the search index. Every
    mapping is now emitted at most once, in the original (first-copy) order.

    Args:
        data: dict. A single G6 service document.

    Returns:
        list of attribute dicts; never contains None entries.
    """
    attrs = []

    def add_boolean(g6_field, g5_field):
        # Map a G6 boolean field to a lowercase-string G5 attribute, if set.
        if g6_field in data:
            attrs.append({"name": g5_field,
                          g5_field: str(data[g6_field]).lower()})

    def add_memberships(g6_field, g5_field, mapping):
        # Emit one G5 attribute per mapped G6 value present in the data.
        values = data.get(g6_field, [])
        for g6_value, g5_value in mapping:
            if g6_value in values:
                attrs.append({"name": g5_field, g5_field: g5_value})

    # Pricing
    add_boolean("freeOption", "q45")
    add_boolean("trialOption", "q46")
    add_boolean("educationPricing", "has_education_pricing")
    add_boolean("terminationCost", "q47")
    # Value on G5 has a prefixed "/"
    if "minimumContractPeriod" in data:
        attrs.append(
            {"name": "q44", "q44": "/" + data["minimumContractPeriod"]})

    # Technical information.
    # G5 cloud values: public | private | hybrid | publicprivate |
    # publichybrid | publicprivatehybrid
    add_memberships("cloudDeploymentModel", "q18", [
        ("Public Cloud", "public"),
        ("Private Cloud", "private"),
        ("Community Cloud", "publicprivatehybrid"),
        ("Hybrid Cloud", "hybrid"),
    ])
    # G5 network values: internet | psn | gsi | pnn | n3 | janet | other
    add_memberships("networksConnected", "q19", [
        ("Internet", "internet"),
        ("Public Services Network (PSN)", "psn"),
        ("Government Secure intranet (GSi)", "gsi"),
        ("Police National Network (PNN)", "pnn"),
        ("New NHS Network (N3)", "n3"),
        ("Joint Academic Network (JANET)", "janet"),
        ("Other", "other"),
    ])
    add_boolean("apiAccess", "q20")
    add_boolean("openStandardsSupported", "q21")
    add_boolean("openSource", "q22")

    # Service management.
    # Support types is an array in G6 but a boolean in G5 - any G6 value
    # sets G5 to true.
    if "supportTypes" in data and len(data["supportTypes"]) > 0:
        attrs.append({"name": "q25", "q25": "true"})
    add_boolean("serviceOnboarding", "q26")
    add_boolean("serviceOffboarding", "q27")
    add_boolean("dataExtractionRemoval", "q28")
    add_boolean("datacentresEUCode", "q31")
    add_boolean("dataBackupRecovery", "q36")
    add_boolean("selfServiceProvisioning", "q39")
    add_boolean("supportForThirdParties", "q41")
    if "datacentreTier" in data:
        # Keep the raw G6 tier value (legacy behaviour) as well as the
        # mapped G5 tier codes below.
        attrs.append({"name": "q32", "q32": data["datacentreTier"]})
        add_memberships("datacentreTier", "q32", [
            ("TIA-942 Tier 1", "tier1tia942"),
            ("Uptime Institute Tier 1", "tier1uptimeinstitute"),
            ("TIA-942 Tier 2", "tier2tia942"),
            ("Uptime Institute Tier 2", "tier2uptimeinstitute"),
            ("TIA-942 Tier 3", "tier3tia942"),
            ("Uptime Institute Tier 3", "tier3uptimeinstitute"),
            ("TIA-942 Tier 4", "tier4tia942"),
            ("Uptime Institute Tier 4", "tier4uptimeinstitute"),
        ])
    # Provisioning time has concrete values in G6 (e.g. "1 Day") but the G5
    # question is "documented?", so any value provokes true.
    if "provisioningTime" in data:
        attrs.append({"name": "q40", "q40": "true"})

    # Lot-specific questions: IaaS and PaaS share the same G6 fields but map
    # to different G5 question ids (lot1* vs lot2*).
    lot_prefix = {"IaaS": "lot1", "PaaS": "lot2"}.get(data.get("lot"))
    if lot_prefix:
        q3 = lot_prefix + "q3"
        q4 = lot_prefix + "q4"
        if "guaranteedResources" in data:
            value = "guaranteed" if data["guaranteedResources"] \
                else "nonguaranteed"
            attrs.append({"name": q3, q3: value})
        if "persistentStorage" in data:
            value = "persistent" if data["persistentStorage"] \
                else "nonpersistent"
            attrs.append({"name": q4, q4: value})
        add_boolean("elasticCloud", lot_prefix + "q2")

    return attrs
def boolean_attribute(g6_field_name, g5_field_name, data):
    """Return a G5 attribute dict for a boolean G6 field, or None if unset.

    The boolean is rendered as the lowercase string "true"/"false", which is
    the representation the G5 search index expects.
    """
    if g6_field_name not in data:
        return None
    value = str(data[g6_field_name]).lower()
    return {"name": g5_field_name, g5_field_name: value}
def g6_to_g5(data):
    """Convert a G6 service document into the G5 listing format.

    Field mappings:
        description == serviceSummary
        name == serviceName
        listingId == id
        uniqueName == id
        tags == lot
        state == "published", expired == False
    """
    category_ids = [
        category_name_to_id(category)
        for category in data.get('serviceTypes', [])
    ]
    details = {
        'supplierId': data['supplierId'],
        'lot': data['lot'],
        'categories': category_ids,
        'features': data['serviceFeatures'],
        'benefits': data['serviceBenefits'],
        'attributes': attributes(data),
    }
    return {
        'uniqueName': data['id'],
        'tags': data['lot'],
        'name': data['serviceName'],
        'listingId': str(data['id']),
        'description': data['serviceSummary'],
        'expired': False,
        'state': 'published',
        'details': details,
    }
def post_to_es(es_endpoint, data):
    """Convert a G6 service to G5 format and PUT it into elasticsearch.

    The document is sent to ``<es_endpoint>/<listingId>``. HTTP errors are
    printed and swallowed so one bad document does not abort the import.
    (Python 2 urllib2 code.)
    """
    handler = urllib2.HTTPHandler()
    opener = urllib2.build_opener(handler)
    json_data = g6_to_g5(data)
    if not es_endpoint.endswith('/'):
        es_endpoint += '/'
    # NOTE(review): no request method is set, so urllib2 issues a POST when a
    # body is present — presumably the index accepts POST-as-upsert; confirm.
    request = urllib2.Request(es_endpoint + json_data['listingId'],
                              data=json.dumps(json_data))
    request.add_header("Content-Type", 'application/json')
    try:
        opener.open(request)
    except urllib2.HTTPError, error:
        print error
def request_services(endpoint, token):
    """Yield every service from the paginated Digital Marketplace API.

    Follows the ``rel == 'next'`` link in each response until no further
    page is advertised. ``token`` is sent as a Bearer authorization header.
    (Python 2 urllib2 code.)
    """
    handler = urllib2.HTTPBasicAuthHandler()
    opener = urllib2.build_opener(handler)

    page_url = endpoint
    while page_url:
        print "processing page: {}".format(page_url)
        request = urllib2.Request(page_url)
        request.add_header("Authorization", "Bearer {}".format(token))
        response = opener.open(request).read()
        data = json.loads(response)
        for service in data["services"]:
            yield service

        # Advance to the next page, if the API advertised one.
        page_url = None
        for link in data['links']:
            if link['rel'] == 'next':
                page_url = link['href']
def process_json_files_in_directory(dirname):
for filename in os.listdir(dirname):
with open(os.path.join(dirname, filename)) as f:
data = json.loads(f.read())
print "doing " + filename
yield data
def main():
    # Two modes, chosen by argument count:
    #   3 args (es_endpoint, endpoint, token): index services fetched
    #       from the paginated HTTP API.
    #   2 args (es_endpoint, listing_dir): index services read from
    #       JSON files in a local directory.
    if len(sys.argv) == 4:
        es_endpoint, endpoint, token = sys.argv[1:]
        for data in request_services(endpoint, token):
            post_to_es(es_endpoint, data)
    elif len(sys.argv) == 3:
        es_endpoint, listing_dir = sys.argv[1:]
        for data in process_json_files_in_directory(listing_dir):
            post_to_es(es_endpoint, data)
    else:
        # Wrong usage: print the module docstring as help text.
        print __doc__


if __name__ == '__main__':
    main()
| mit |
pytroll/satpy | satpy/modifiers/geometry.py | 1 | 7020 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Modifier classes for corrections based on sun and other angles."""
import logging
import time
from weakref import WeakValueDictionary
import numpy as np
import xarray as xr
from satpy.modifiers import ModifierBase
from satpy.utils import sunzen_corr_cos, atmospheric_path_length_correction
logger = logging.getLogger(__name__)
class SunZenithCorrectorBase(ModifierBase):
    """Base class for sun zenith correction modifiers."""

    # Cache of cos(SZA) arrays keyed by (start_time, hash(area)).  The
    # WeakValueDictionary is a class attribute, so it is shared by all
    # instances and subclasses; entries vanish once nothing else holds a
    # reference to the cached array.
    coszen = WeakValueDictionary()

    def __init__(self, max_sza=95.0, **kwargs):
        """Collect custom configuration values.

        Args:
            max_sza (float): Maximum solar zenith angle in degrees that is
                considered valid and correctable. Default 95.0.

        """
        self.max_sza = max_sza
        # Pre-compute cos(max_sza) once; None disables the validity cut-off.
        self.max_sza_cos = np.cos(np.deg2rad(max_sza)) if max_sza is not None else None
        super(SunZenithCorrectorBase, self).__init__(**kwargs)

    def __call__(self, projectables, **info):
        """Generate the sun-zenith-corrected dataset."""
        projectables = self.match_data_arrays(list(projectables) + list(info.get('optional_datasets', [])))
        vis = projectables[0]
        if vis.attrs.get("sunz_corrected"):
            # Avoid double-correcting data that was already adjusted.
            logger.debug("Sun zen correction already applied")
            return vis

        area_name = hash(vis.attrs['area'])
        key = (vis.attrs["start_time"], area_name)
        tic = time.time()
        logger.debug("Applying sun zen correction")
        coszen = self.coszen.get(key)
        if coszen is None and not info.get('optional_datasets'):
            # we were not given SZA, generate SZA then calculate cos(SZA)
            from pyorbital.astronomy import cos_zen
            logger.debug("Computing sun zenith angles.")
            lons, lats = vis.attrs["area"].get_lonlats(chunks=vis.data.chunks)
            coords = {}
            if 'y' in vis.coords and 'x' in vis.coords:
                coords['y'] = vis['y']
                coords['x'] = vis['x']
            coszen = xr.DataArray(cos_zen(vis.attrs["start_time"], lons, lats),
                                  dims=['y', 'x'], coords=coords)
            if self.max_sza is not None:
                # cos decreases with angle, so "valid" pixels are those with
                # coszen >= cos(max_sza); the rest become NaN.
                coszen = coszen.where(coszen >= self.max_sza_cos)
            self.coszen[key] = coszen
        elif coszen is None:
            # we were given the SZA, calculate the cos(SZA)
            coszen = np.cos(np.deg2rad(projectables[1]))
            self.coszen[key] = coszen
        proj = self._apply_correction(vis, coszen)
        proj.attrs = vis.attrs.copy()
        self.apply_modifier_info(vis, proj)
        logger.debug("Sun-zenith correction applied. Computation time: %5.1f (sec)", time.time() - tic)
        return proj

    def _apply_correction(self, proj, coszen):
        # Subclasses supply the actual correction formula.
        raise NotImplementedError("Correction method shall be defined!")
class SunZenithCorrector(SunZenithCorrectorBase):
    """Standard sun zenith correction using ``1 / cos(sunz)``.

    Reflectances are divided by the cosine of the solar zenith angle.
    Beyond ``correction_limit`` degrees the correction factor is held
    constant, and every reflectance past ``max_sza`` is forced to 0.
    Setting ``max_sza`` to ``None`` keeps the constant correction for all
    angles beyond ``correction_limit``.

    To set ``max_sza`` to ``None`` in a YAML configuration file use:

    .. code-block:: yaml

      sunz_corrected:
        compositor: !!python/name:satpy.composites.SunZenithCorrector
        max_sza: !!null
        optional_prerequisites:
        - solar_zenith_angle

    """

    def __init__(self, correction_limit=88., **kwargs):
        """Collect custom configuration values.

        Args:
            correction_limit (float): Solar zenith angle in degrees up to
                which the full correction is applied; pixels beyond it
                receive a constant correction. Default 88.
            max_sza (float): Maximum solar zenith angle in degrees that is
                considered valid and correctable. Default 95.0.

        """
        self.correction_limit = correction_limit
        super(SunZenithCorrector, self).__init__(**kwargs)

    def _apply_correction(self, proj, coszen):
        logger.debug("Apply the standard sun-zenith correction [1/cos(sunz)]")
        return sunzen_corr_cos(proj, coszen, limit=self.correction_limit,
                               max_sza=self.max_sza)
class EffectiveSolarPathLengthCorrector(SunZenithCorrectorBase):
    """Sun zenith correction using the Li and Shibata (2006) method.

    https://doi.org/10.1175/JAS3682.1

    Reflectances are corrected by the effective solar atmospheric path
    length instead of the plain ``1 / cos(sunz)`` factor.  Pixels beyond
    a solar zenith angle of ``max_sza`` are forced to 0 to reduce noise,
    and the amount of correction is gradually reduced between
    ``correction_limit`` and ``max_sza``.  When ``max_sza`` is ``None``
    a constant correction is applied past ``correction_limit``.

    To set ``max_sza`` to ``None`` in a YAML configuration file use:

    .. code-block:: yaml

      effective_solar_pathlength_corrected:
        compositor: !!python/name:satpy.composites.EffectiveSolarPathLengthCorrector
        max_sza: !!null
        optional_prerequisites:
        - solar_zenith_angle

    """

    def __init__(self, correction_limit=88., **kwargs):
        """Collect custom configuration values.

        Args:
            correction_limit (float): Solar zenith angle in degrees up to
                which the full correction is applied; pixels beyond it
                receive a constant correction. Default 88.
            max_sza (float): Maximum solar zenith angle in degrees that is
                considered valid and correctable. Default 95.0.

        """
        self.correction_limit = correction_limit
        super(EffectiveSolarPathLengthCorrector, self).__init__(**kwargs)

    def _apply_correction(self, proj, coszen):
        logger.debug("Apply the effective solar atmospheric path length correction method by Li and Shibata")
        return atmospheric_path_length_correction(
            proj, coszen, limit=self.correction_limit, max_sza=self.max_sza)
| gpl-3.0 |
anryko/ansible | lib/ansible/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py | 16 | 2748 | #!/usr/bin/python
# Copyright: (c) 2018, Stephan Schwarz <stearz@gmx.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: utm_ca_host_key_cert_info
author:
- Stephan Schwarz (@stearz)
short_description: Get info for a ca host_key_cert entry in Sophos UTM
description:
- Get info for a ca host_key_cert entry in SOPHOS UTM.
version_added: "2.8"
options:
name:
description:
- The name of the object. Will be used to identify the entry
required: true
extends_documentation_fragment:
- utm
"""
EXAMPLES = """
- name: utm ca_host_key_cert_info
utm_ca_host_key_cert_info:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestHostKeyCertEntry
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: str
_locked:
description: Whether or not the object is currently locked
type: bool
_type:
description: The type of the object
type: str
name:
description: The name of the object
type: str
ca:
description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
type: str
meta:
description: A reference to an existing utm_ca_meta_x509 object.
type: str
certificate:
description: The certificate in PEM format
type: str
comment:
description: Comment string (may be empty string)
type: str
encrypted:
description: If encryption is enabled
type: bool
key:
description: Private key in PEM format (may be empty string)
type: str
"""
from ansible.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils._text import to_native
def main():
    """Entry point: look up a ca/host_key_cert object on the UTM by name."""
    endpoint = "ca/host_key_cert"
    # This is an info-only module, so no fields are compared for changes;
    # the list stays empty.
    key_to_check_for_changes = []
    module = UTMModule(
        argument_spec=dict(
            name=dict(type='str', required=True)
        )
    )
    try:
        # info_only=True requests a read-only lookup instead of a
        # create/update cycle.
        UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
    except Exception as e:
        module.fail_json(msg=to_native(e))


if __name__ == '__main__':
    main()
| gpl-3.0 |
bop/bauhaus | lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/command/install_egg_info.py | 65 | 3732 | from setuptools import Command
from setuptools.archive_util import unpack_archive
from distutils import log, dir_util
import os, shutil, pkg_resources
class install_egg_info(Command):
    """Install an .egg-info directory for the package"""

    description = "Install an .egg-info directory for the package"

    user_options = [
        ('install-dir=', 'd', "directory to install to"),
    ]

    def initialize_options(self):
        # Filled in from the 'install_lib' command in finalize_options().
        self.install_dir = None

    def finalize_options(self):
        self.set_undefined_options('install_lib',('install_dir','install_dir'))
        ei_cmd = self.get_finalized_command("egg_info")
        # Compute the canonical '<name>-<version>.egg-info' directory name.
        basename = pkg_resources.Distribution(
            None, None, ei_cmd.egg_name, ei_cmd.egg_version
        ).egg_name()+'.egg-info'
        self.source = ei_cmd.egg_info
        self.target = os.path.join(self.install_dir, basename)
        # Every path written is recorded here for get_outputs().
        self.outputs = [self.target]

    def run(self):
        self.run_command('egg_info')
        target = self.target
        # Remove any pre-existing egg-info (dir or file) at the destination.
        if os.path.isdir(self.target) and not os.path.islink(self.target):
            dir_util.remove_tree(self.target, dry_run=self.dry_run)
        elif os.path.exists(self.target):
            self.execute(os.unlink,(self.target,),"Removing "+self.target)
        if not self.dry_run:
            pkg_resources.ensure_directory(self.target)
        self.execute(self.copytree, (),
            "Copying %s to %s" % (self.source, self.target)
        )
        self.install_namespaces()

    def get_outputs(self):
        return self.outputs

    def copytree(self):
        # Copy the .egg-info tree to site-packages
        def skimmer(src,dst):
            # filter out source-control directories; note that 'src' is always
            # a '/'-separated path, regardless of platform. 'dst' is a
            # platform-specific path.
            for skip in '.svn/','CVS/':
                if src.startswith(skip) or '/'+skip in src:
                    return None
            self.outputs.append(dst)
            log.debug("Copying %s to %s", src, dst)
            return dst
        unpack_archive(self.source, self.target, skimmer)

    def install_namespaces(self):
        # Write a '<egg>-nspkg.pth' file so namespace packages stay
        # importable even though no __init__ code is executed at startup.
        nsp = self._get_all_ns_packages()
        if not nsp: return
        filename,ext = os.path.splitext(self.target)
        filename += '-nspkg.pth'; self.outputs.append(filename)
        log.info("Installing %s",filename)
        if not self.dry_run:
            # NOTE(review): plain open/close without try/finally -- the file
            # handle can leak if a write fails; a 'with' block would be safer.
            f = open(filename,'wt')
            for pkg in nsp:
                pth = tuple(pkg.split('.'))
                trailer = '\n'
                if '.' in pkg:
                    # For nested packages, also bind the child module as an
                    # attribute of its parent module.
                    trailer = (
                        "; m and setattr(sys.modules[%r], %r, m)\n"
                        % ('.'.join(pth[:-1]), pth[-1])
                    )
                f.write(
                    "import sys,types,os; "
                    "p = os.path.join(sys._getframe(1).f_locals['sitedir'], "
                    "*%(pth)r); "
                    "ie = os.path.exists(os.path.join(p,'__init__.py')); "
                    "m = not ie and "
                    "sys.modules.setdefault(%(pkg)r,types.ModuleType(%(pkg)r)); "
                    "mp = (m or []) and m.__dict__.setdefault('__path__',[]); "
                    "(p not in mp) and mp.append(p)%(trailer)s"
                    % locals()
                )
            f.close()

    def _get_all_ns_packages(self):
        # Expand 'a.b.c' into {'a', 'a.b', 'a.b.c'} so every ancestor
        # namespace also gets a .pth entry.
        nsp = {}
        for pkg in self.distribution.namespace_packages or []:
            pkg = pkg.split('.')
            while pkg:
                nsp['.'.join(pkg)] = 1
                pkg.pop()
        nsp=list(nsp)
        nsp.sort() # set up shorter names first
        return nsp
| mit |
dangtrinhnt/django-saml2 | djangosaml2/backends.py | 2 | 7701 | # Copyright (C) 2010-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2009 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User, SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from djangosaml2.signals import pre_user_save
logger = logging.getLogger('djangosaml2')
class Saml2Backend(ModelBackend):
    """Django authentication backend that authenticates SAML2 assertions.

    The user is looked up (and optionally created) by a "main attribute"
    (``username`` unless SAML_DJANGO_USER_MAIN_ATTRIBUTE says otherwise),
    and the remaining SAML attributes are copied onto the user/profile via
    ``update_user``.
    """

    def authenticate(self, session_info=None, attribute_mapping=None,
                     create_unknown_user=True):
        """Return a Django user for the given SAML session info, or None.

        ``attribute_mapping`` maps SAML attribute names to iterables of
        Django field names; ``create_unknown_user`` controls whether a
        missing user is created on first login.
        """
        if session_info is None or attribute_mapping is None:
            logger.error('Session info or attribute mapping are None')
            return None

        if not 'ava' in session_info:
            logger.error('"ava" key not found in session_info')
            return None

        attributes = session_info['ava']
        if not attributes:
            # NOTE(review): this only logs; execution falls through and the
            # saml_user lookup below returns None a few lines later.  An
            # explicit 'return None' here would be clearer -- confirm before
            # changing behavior.
            logger.error('The attributes dictionary is empty')

        django_user_main_attribute = getattr(
            settings, 'SAML_DJANGO_USER_MAIN_ATTRIBUTE', 'username')

        logger.debug('attributes: %s' % attributes)
        logger.debug('attribute_mapping: %s' % attribute_mapping)
        # Find the SAML attribute that maps onto the main Django attribute.
        saml_user = None
        for saml_attr, django_fields in attribute_mapping.items():
            if (django_user_main_attribute in django_fields
                and saml_attr in attributes):
                saml_user = attributes[saml_attr][0]

        if saml_user is None:
            logger.error('Could not find saml_user value')
            return None

        if not self.is_authorized(attributes, attribute_mapping):
            return None

        user = None
        main_attribute = self.clean_user_main_attribute(saml_user)
        user_query_args = {django_user_main_attribute: main_attribute}

        # Note that this could be accomplished in one try-except clause, but
        # instead we use get_or_create when creating unknown users since it has
        # built-in safeguards for multiple threads.
        if create_unknown_user:
            logger.debug('Check if the user "%s" exists or create otherwise'
                         % main_attribute)
            try:
                user, created = User.objects.get_or_create(**user_query_args)
            except MultipleObjectsReturned:
                logger.error("There are more than one user with %s = %s" %
                             (django_user_main_attribute, main_attribute))
                return None

            if created:
                logger.debug('New user created')
                user = self.configure_user(user, attributes, attribute_mapping)
            else:
                logger.debug('User updated')
                user = self.update_user(user, attributes, attribute_mapping)
        else:
            logger.debug('Retrieving existing user "%s"' % main_attribute)
            try:
                user = User.objects.get(**user_query_args)
                user = self.update_user(user, attributes, attribute_mapping)
            except User.DoesNotExist:
                logger.error('The user "%s" does not exist' % main_attribute)
                return None
            except MultipleObjectsReturned:
                logger.error("There are more than one user with %s = %s" %
                             (django_user_main_attribute, main_attribute))
                return None

        return user

    def is_authorized(self, attributes, attribute_mapping):
        """Hook to allow custom authorization policies based on
        SAML attributes.
        """
        return True

    def clean_user_main_attribute(self, main_attribute):
        """Performs any cleaning on the user main attribute (which
        usually is "username") prior to using it to get or
        create the user object. Returns the cleaned attribute.

        By default, returns the attribute unchanged.
        """
        return main_attribute

    def configure_user(self, user, attributes, attribute_mapping):
        """Configures a user after creation and returns the updated user.

        By default, returns the user with his attributes updated.
        """
        # SAML users never log in with a local password.
        user.set_unusable_password()
        return self.update_user(user, attributes, attribute_mapping,
                                force_save=True)

    def update_user(self, user, attributes, attribute_mapping,
                    force_save=False):
        """Update a user with a set of attributes and returns the updated user.

        By default it uses a mapping defined in the settings constant
        SAML_ATTRIBUTE_MAPPING. For each attribute, if the user object has
        that field defined it will be set, otherwise it will try to set
        it in the profile object.
        """
        if not attribute_mapping:
            return user

        try:
            profile = user.get_profile()
        except ObjectDoesNotExist:
            profile = None
        except SiteProfileNotAvailable:
            profile = None

        user_modified = False
        profile_modified = False
        for saml_attr, django_attrs in attribute_mapping.items():
            try:
                for attr in django_attrs:
                    if hasattr(user, attr):
                        modified = self._set_attribute(
                            user, attr, attributes[saml_attr][0])
                        user_modified = user_modified or modified
                    elif profile is not None and hasattr(profile, attr):
                        modified = self._set_attribute(
                            profile, attr, attributes[saml_attr][0])
                        profile_modified = profile_modified or modified
            except KeyError:
                # the saml attribute is missing
                pass

        logger.debug('Sending the pre_save signal')
        # Any truthy receiver response forces a save of user and profile.
        signal_modified = any(
            [response for receiver, response
             in pre_user_save.send_robust(sender=user,
                                          attributes=attributes,
                                          user_modified=user_modified)]
        )

        if user_modified or signal_modified or force_save:
            user.save()

        if (profile is not None
            and (profile_modified or signal_modified or force_save)):
            profile.save()

        return user

    def _set_attribute(self, obj, attr, value):
        """Set an attribute of an object to a specific value.

        Return True if the attribute was changed and False otherwise.
        """
        field = obj._meta.get_field_by_name(attr)
        # NOTE(review): assumes the mapped field defines max_length (a
        # char-like field); a None max_length would raise TypeError here --
        # TODO confirm mapped fields are always text fields.
        if len(value) > field[0].max_length:
            cleaned_value = value[:field[0].max_length]
            logger.warn('The attribute "%s" was trimmed from "%s" to "%s"' %
                        (attr, value, cleaned_value))
        else:
            cleaned_value = value

        old_value = getattr(obj, attr)
        if cleaned_value != old_value:
            setattr(obj, attr, cleaned_value)
            return True

        return False
| apache-2.0 |
damdam-s/purchase-workflow | framework_agreement/tests/test_price_list.py | 20 | 4797 | # -*- coding: utf-8 -*-
# Author: Nicolas Bessi, Leonardo Pistone
# Copyright 2013-2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import timedelta, date
from openerp import exceptions, fields
import openerp.tests.common as test_common
from .common import BaseAgreementTestMixin
class TestAgreementPriceList(test_common.TransactionCase,
                             BaseAgreementTestMixin):
    """Tests tiered price retrieval on a framework agreement price list."""

    def setUp(self):
        """Create a default agreement with four price lines:

        qty 0 price 70
        qty 200 price 60
        qty 500 price 50
        qty 1000 price 45
        """
        super(TestAgreementPriceList, self).setUp()
        self.commonsetUp()
        start_date = date.today() + timedelta(days=10)
        end_date = date.today() + timedelta(days=20)
        self.agreement = self.agreement_model.create({
            'portfolio_id': self.portfolio.id,
            'product_id': self.product.id,
            'start_date': fields.Date.to_string(start_date),
            'end_date': fields.Date.to_string(end_date),
            'delay': 5,
            'draft': False,
            'quantity': 1500,
        })
        # One EUR price list holding all the quantity tiers.
        pl = self.agreement_pl_model.create(
            {'framework_agreement_id': self.agreement.id,
             'currency_id': self.ref('base.EUR')}
        )
        self.agreement_line_model.create(
            {'framework_agreement_pricelist_id': pl.id,
             'quantity': 0,
             'price': 70.0}
        )
        self.agreement_line_model.create(
            {'framework_agreement_pricelist_id': pl.id,
             'quantity': 200,
             'price': 60.0}
        )
        self.agreement_line_model.create(
            {'framework_agreement_pricelist_id': pl.id,
             'quantity': 500,
             'price': 50.0}
        )
        self.agreement_line_model.create(
            {'framework_agreement_pricelist_id': pl.id,
             'quantity': 1000,
             'price': 45.0}
        )
        self.agreement.refresh()

    def test_00_test_qty(self):
        """Test if barem retrieval is correct.

        Expected price is the one of the highest tier whose minimum
        quantity does not exceed the requested quantity; quantities below
        the lowest tier fall back to the first tier.
        """
        self.assertEqual(
            self.agreement.get_price(0, currency=self.browse_ref('base.EUR')),
            70.0
        )
        self.assertEqual(
            self.agreement.get_price(
                100,
                currency=self.browse_ref('base.EUR')
            ),
            70.0
        )
        self.assertEqual(
            self.agreement.get_price(
                200,
                currency=self.browse_ref('base.EUR')
            ),
            60.0
        )
        self.assertEqual(
            self.agreement.get_price(
                210,
                currency=self.browse_ref('base.EUR')
            ),
            60.0
        )
        self.assertEqual(
            self.agreement.get_price(
                500,
                currency=self.browse_ref('base.EUR')),
            50.0
        )
        self.assertEqual(
            self.agreement.get_price(
                800,
                currency=self.browse_ref('base.EUR')
            ),
            50.0
        )
        self.assertEqual(
            self.agreement.get_price(
                999,
                currency=self.browse_ref('base.EUR')
            ),
            50.0
        )
        self.assertEqual(
            self.agreement.get_price(
                1000,
                currency=self.browse_ref('base.EUR')
            ),
            45.0
        )
        self.assertEqual(
            self.agreement.get_price(
                10000,
                currency=self.browse_ref('base.EUR')
            ),
            45.0
        )
        # Negative quantities fall back to the first tier as well.
        self.assertEqual(
            self.agreement.get_price(
                -10,
                currency=self.browse_ref('base.EUR')
            ),
            70.0
        )

    def test_01_failed_wrong_currency(self):
        """Tests that wrong currency raise an exception"""
        # The price list was created in EUR only, so USD must fail.
        with self.assertRaises(exceptions.Warning):
            self.agreement.get_price(0, currency=self.browse_ref('base.USD'))
| agpl-3.0 |
hefen1/chromium | tools/telemetry/telemetry/page/page_test_unittest.py | 11 | 6417 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import unittest
from telemetry import decorators
from telemetry.core import wpr_modes
from telemetry.page import page as page_module
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.unittest_util import options_for_unittests
from telemetry.unittest_util import page_test_test_case
from telemetry.wpr import archive_info
class PageTestThatFails(page_test.PageTest):
    """Page test that unconditionally fails; used to exercise failure paths."""

    def ValidateAndMeasurePage(self, page, tab, results):
        raise page_test.Failure
class PageTestForBlank(page_test.PageTest):
    """Checks that the loaded page body is exactly 'Hello world'."""

    def ValidateAndMeasurePage(self, page, tab, results):
        contents = tab.EvaluateJavaScript('document.body.textContent')
        if contents.strip() != 'Hello world':
            raise page_test.MeasurementFailure(
                'Page contents were: ' + contents)
class PageTestForReplay(page_test.PageTest):
    """Fails when the page was served by WPR's 404 handler (not archived)."""

    def ValidateAndMeasurePage(self, page, tab, results):
        # Web Page Replay returns '404 Not found' if a page is not in the archive.
        contents = tab.EvaluateJavaScript('document.body.textContent')
        if '404 Not Found' in contents.strip():
            raise page_test.MeasurementFailure('Page not in archive.')
class PageTestQueryParams(page_test.PageTest):
    """Checks that the URL query string survives navigation."""

    def ValidateAndMeasurePage(self, page, tab, results):
        query = tab.EvaluateJavaScript('window.location.search')
        expected = '?foo=1'
        if query.strip() != expected:
            raise page_test.MeasurementFailure(
                'query was %s, not %s.' % (query, expected))
class PageTestWithAction(page_test.PageTest):
    """Page test whose per-page action method is named 'RunTestAction'."""

    def __init__(self):
        super(PageTestWithAction, self).__init__('RunTestAction')

    def ValidateAndMeasurePage(self, page, tab, results):
        pass
class PageWithAction(page_module.Page):
    """Page that records whether its RunTestAction hook was invoked."""

    def __init__(self, url, ps):
        super(PageWithAction, self).__init__(url, ps, ps.base_dir)
        # Flipped to True by RunTestAction; inspected by the unit test.
        self.run_test_action_called = False

    def RunTestAction(self, _):
        self.run_test_action_called = True
class PageTestUnitTest(page_test_test_case.PageTestTestCase):
    """End-to-end tests for page_test measurements against local pages."""

    def setUp(self):
        # Run with Web Page Replay disabled unless a test overrides it.
        self._options = options_for_unittests.GetCopy()
        self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF

    def testGotToBlank(self):
        ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
        measurement = PageTestForBlank()
        all_results = self.RunMeasurement(measurement, ps, options=self._options)
        self.assertEquals(0, len(all_results.failures))

    def testGotQueryParams(self):
        ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html?foo=1')
        measurement = PageTestQueryParams()
        all_results = self.RunMeasurement(measurement, ps, options=self._options)
        self.assertEquals(0, len(all_results.failures))

    def testFailure(self):
        ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
        measurement = PageTestThatFails()
        all_results = self.RunMeasurement(measurement, ps, options=self._options)
        self.assertEquals(1, len(all_results.failures))

    # This test is disabled because it runs against live sites, and needs to be
    # fixed. crbug.com/179038
    @decorators.Disabled
    def testRecordAndReplay(self):
        test_archive = '/tmp/google.wpr'
        google_url = 'http://www.google.com/'
        foo_url = 'http://www.foo.com/'
        archive_info_template = ("""
        {
            "archives": {
                "%s": ["%s"]
            }
        }
        """)
        try:
            ps = page_set.PageSet()
            measurement = PageTestForReplay()

            # First record an archive with only www.google.com.
            self._options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
            # pylint: disable=protected-access
            ps._wpr_archive_info = archive_info.WprArchiveInfo(
                '', '', ps.bucket, json.loads(archive_info_template %
                                              (test_archive, google_url)))
            ps.pages = [page_module.Page(google_url, ps)]
            all_results = self.RunMeasurement(measurement, ps, options=self._options)
            self.assertEquals(0, len(all_results.failures))

            # Now replay it and verify that google.com is found but foo.com is not.
            self._options.browser_options.wpr_mode = wpr_modes.WPR_REPLAY
            # pylint: disable=protected-access
            ps._wpr_archive_info = archive_info.WprArchiveInfo(
                '', '', ps.bucket, json.loads(archive_info_template %
                                              (test_archive, foo_url)))
            ps.pages = [page_module.Page(foo_url, ps)]
            all_results = self.RunMeasurement(measurement, ps, options=self._options)
            self.assertEquals(1, len(all_results.failures))

            # pylint: disable=protected-access
            ps._wpr_archive_info = archive_info.WprArchiveInfo(
                '', '', ps.bucket, json.loads(archive_info_template %
                                              (test_archive, google_url)))
            ps.pages = [page_module.Page(google_url, ps)]
            all_results = self.RunMeasurement(measurement, ps, options=self._options)
            self.assertEquals(0, len(all_results.failures))

            self.assertTrue(os.path.isfile(test_archive))
        finally:
            # Always clean up the recorded archive, even on failure.
            if os.path.isfile(test_archive):
                os.remove(test_archive)

    def testRunActions(self):
        ps = self.CreateEmptyPageSet()
        page = PageWithAction('file://blank.html', ps)
        ps.AddUserStory(page)
        measurement = PageTestWithAction()
        self.RunMeasurement(measurement, ps, options=self._options)
        self.assertTrue(page.run_test_action_called)
class MultiTabPageTestUnitTest(unittest.TestCase):
    """Tests that is_multi_tab_test reflects the presence of TabForPage."""

    def testNoTabForPageReturnsFalse(self):
        class PageTestWithoutTabForPage(page_test.PageTest):
            def ValidateAndMeasurePage(self, *_):
                pass
        test = PageTestWithoutTabForPage()
        self.assertFalse(test.is_multi_tab_test)

    def testHasTabForPageReturnsTrue(self):
        class PageTestWithTabForPage(page_test.PageTest):
            def ValidateAndMeasurePage(self, *_):
                pass
            def TabForPage(self, *_):
                pass
        test = PageTestWithTabForPage()
        self.assertTrue(test.is_multi_tab_test)

    def testHasTabForPageInAncestor(self):
        # TabForPage inherited from a parent class must also count.
        class PageTestWithTabForPage(page_test.PageTest):
            def ValidateAndMeasurePage(self, *_):
                pass
            def TabForPage(self, *_):
                pass
        class PageTestWithTabForPageInParent(PageTestWithTabForPage):
            pass
        test = PageTestWithTabForPageInParent()
        self.assertTrue(test.is_multi_tab_test)
| bsd-3-clause |
windyuuy/opera | chromium/src/chrome/tools/build/win/make_policy_zip.py | 159 | 2874 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a zip archive with policy template files. The list of input files is
extracted from a grd file with grit. This is to keep the length of input
arguments below the limit on Windows.
"""
import optparse
import os
import sys
import zipfile
def add_files_to_zip(zip_file, base_dir, file_list):
    """Add every path in *file_list* (relative to *base_dir*) to *zip_file*.

    Args:
        zip_file: An already-open zipfile.ZipFile in write mode.
        base_dir: Base path of all input files in the real file system.
        file_list: Paths relative to base_dir; archive entries keep only
            this relative component.

    Returns 0 so callers can use the result as an exit status.
    """
    for relative_path in file_list:
        zip_file.write(base_dir + relative_path, relative_path)
    return 0
def get_grd_outputs(grit_cmd, grit_defines, grd_file, grd_strip_path_prefix):
    """Return grit's output file list for |grd_file| with the common
    |grd_strip_path_prefix| removed from every entry."""
    # grit_info lives next to the grit entry point; make it importable.
    grit_path = os.path.join(os.getcwd(), os.path.dirname(grit_cmd))
    sys.path.append(grit_path)
    import grit_info
    outputs = grit_info.Outputs(grd_file, grit_defines,
                                'GRIT_DIR/../gritsettings/resource_ids')
    result = []
    for item in outputs:
        # Every output must live under the prefix; keep only the relative
        # part so it matches the intended zip entry name.
        assert item.startswith(grd_strip_path_prefix)
        result.append(item[len(grd_strip_path_prefix):])
    return result
def main(argv):
    """Build the policy template zip.

    Command-line flags supply the output zip path, the base directory of
    the input files, the grit helper script, the grd file to query for
    outputs, and any extra inputs to include.  Returns the exit status
    from writing the archive.
    """
    parser = optparse.OptionParser()
    parser.add_option("--output", dest="output")
    parser.add_option("--basedir", dest="basedir")
    parser.add_option("--grit_info", dest="grit_info")
    parser.add_option("--grd_input", dest="grd_input")
    parser.add_option("--grd_strip_path_prefix", dest="grd_strip_path_prefix")
    parser.add_option("--extra_input", action="append", dest="extra_input",
                      default=[])
    parser.add_option("-D", action="append", dest="grit_defines", default=[])
    parser.add_option("-E", action="append", dest="grit_build_env", default=[])
    options, args = parser.parse_args(argv[1:])

    # Normalise so relative paths can simply be appended to basedir.
    if (options.basedir[-1] != '/'):
        options.basedir += '/'
    grit_defines = {}
    for define in options.grit_defines:
        grit_defines[define] = 1

    file_list = options.extra_input
    file_list += get_grd_outputs(options.grit_info, grit_defines,
                                 options.grd_input, options.grd_strip_path_prefix)
    zip_file = zipfile.ZipFile(options.output, 'w', zipfile.ZIP_DEFLATED)
    try:
        return add_files_to_zip(zip_file, options.basedir, file_list)
    finally:
        zip_file.close()


if '__main__' == __name__:
    sys.exit(main(sys.argv))
| bsd-3-clause |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/urplay.py | 1 | 1965 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class URPlayIE(InfoExtractor):
    """Extractor for urplay.se / urskola.se programme pages."""

    _VALID_URL = r'https?://(?:www\.)?ur(?:play|skola)\.se/(?:program|Produkter)/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://urplay.se/program/190031-tripp-trapp-trad-sovkudde',
        'md5': 'ad5f0de86f16ca4c8062cd103959a9eb',
        'info_dict': {
            'id': '190031',
            'ext': 'mp4',
            'title': 'Tripp, Trapp, Träd : Sovkudde',
            'description': 'md5:b86bffdae04a7e9379d1d7e5947df1d1',
        },
    }, {
        'url': 'http://urskola.se/Produkter/155794-Smasagor-meankieli-Grodan-i-vida-varlden',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page embeds a JSON blob passed to urPlayer.init(); parse it.
        urplayer_data = self._parse_json(self._search_regex(
            r'urPlayer\.init\(({.+?})\);', webpage, 'urplayer data'), video_id)
        # The load balancer answers with the streaming host to use.
        host = self._download_json('http://streaming-loadbalancer.ur.se/loadbalancer.json', video_id)['redirect']

        formats = []
        # NOTE: 'quality' and 'preference' are currently unused; only the
        # attribute suffix selects between the SD and HD manifest fields.
        for quality_attr, quality, preference in (('', 'sd', 0), ('_hd', 'hd', 1)):
            file_http = urplayer_data.get('file_http' + quality_attr) or urplayer_data.get('file_http_sub' + quality_attr)
            if file_http:
                formats.extend(self._extract_wowza_formats(
                    'http://%s/%splaylist.m3u8' % (host, file_http), video_id, skip_protocols=['rtmp', 'rtsp']))
        self._sort_formats(formats)

        subtitles = {}
        for subtitle in urplayer_data.get('subtitles', []):
            subtitle_url = subtitle.get('file')
            kind = subtitle.get('kind')
            # Keep only caption tracks (or tracks without an explicit kind).
            if not subtitle_url or (kind and kind != 'captions'):
                continue
            subtitles.setdefault(subtitle.get('label', 'Svenska'), []).append({
                'url': subtitle_url,
            })

        return {
            'id': video_id,
            'title': urplayer_data['title'],
            'description': self._og_search_description(webpage),
            'thumbnail': urplayer_data.get('image'),
            'series': urplayer_data.get('series_title'),
            'subtitles': subtitles,
            'formats': formats,
        }
| gpl-3.0 |
dymkowsk/mantid | Framework/PythonInterface/test/python/mantid/kernel/TimeSeriesPropertyTest.py | 3 | 3550 | from __future__ import (absolute_import, division, print_function)
import unittest
import numpy as np
import mantid
from mantid.kernel import (DateAndTime, BoolTimeSeriesProperty, FloatTimeSeriesProperty, Int64TimeSeriesProperty,
StringTimeSeriesProperty)
from testhelpers import run_algorithm
class TimeSeriesPropertyTest(unittest.TestCase):
    """Checks that time series logs of every supported type can be added to
    a workspace run and read back through the Python API."""

    # Workspace shared by all tests; built lazily by the first setUp() call.
    _test_ws = None
    # Number of entries expected in the float ("TEMP1") series.
    _ntemp = 5
    # Number of entries expected in the int ("raw_frames") series.
    _nframes = 6

    def setUp(self):
        if self._test_ws is not None:
            return
        alg = run_algorithm('CreateWorkspace', DataX=[1,2,3,4,5], DataY=[1,2,3,4,5],NSpec=1, child=True)
        ws = alg.getProperty("OutputWorkspace").value
        run = ws.run()

        start_time = DateAndTime("2008-12-18T17:58:38")
        nanosec = 1000000000

        # === Float type ===
        temp1 = FloatTimeSeriesProperty("TEMP1")
        tempvalue = -0.00161
        for i in range(self._ntemp):
            temp1.addValue(start_time + i*nanosec, tempvalue)
        run.addProperty(temp1.name, temp1,True)

        # === Int type ===
        raw_frames = Int64TimeSeriesProperty("raw_frames")
        values = [17,1436,2942,4448,5955,7461]
        # enumerate() gives each entry its own timestamp; previously the
        # stale index left over from the float loop above was reused, so
        # every value was stamped with the same time.
        for i, value in enumerate(values):
            raw_frames.addValue(start_time + i*nanosec, value)
        run.addProperty(raw_frames.name, raw_frames,True)

        # === String type ===
        icp_event = StringTimeSeriesProperty("icp_event")
        values = ['CHANGE_PERIOD 1','START_COLLECTION PERIOD 1 GF 0 RF 0 GUAH 0.000000',
                  'BEGIN','STOP_COLLECTION PERIOD 1 GF 1053 RF 1053 GUAH 0.000000 DUR 22']
        for i, value in enumerate(values):
            icp_event.addValue(start_time + i*nanosec, value)
        run.addProperty(icp_event.name, icp_event,True)

        # === Boolean type ===
        period_1 = BoolTimeSeriesProperty("period 1")
        values = [True]
        for i, value in enumerate(values):
            period_1.addValue(start_time + i*nanosec, value)
        run.addProperty(period_1.name, period_1,True)

        self.__class__._test_ws = ws

    def test_time_series_double_can_be_extracted(self):
        log_series = self._test_ws.getRun()["TEMP1"]
        self._check_has_time_series_attributes(log_series)
        self.assertEquals(log_series.size(), self._ntemp)
        self.assertAlmostEqual(log_series.nthValue(0), -0.00161)

    def test_time_series_int_can_be_extracted(self):
        log_series = self._test_ws.getRun()["raw_frames"]
        self._check_has_time_series_attributes(log_series)
        self.assertEquals(log_series.size(), self._nframes)
        self.assertEquals(log_series.nthValue(1), 1436)

    def test_time_series_string_can_be_extracted(self):
        log_series = self._test_ws.getRun()["icp_event"]
        self._check_has_time_series_attributes(log_series, list)
        self.assertEquals(log_series.size(), 4)
        self.assertEquals(log_series.nthValue(0).strip(), 'CHANGE_PERIOD 1')

    def test_time_series_bool_can_be_extracted(self):
        log_series = self._test_ws.getRun()["period 1"]
        self._check_has_time_series_attributes(log_series)
        self.assertEquals(log_series.size(), 1)

    def _check_has_time_series_attributes(self, log, values_type=np.ndarray):
        """Assert that `log` exposes the time-series API (value, times,
        getStatistics) and that len(value) matches size()."""
        self.assertTrue(hasattr(log, "value"))
        self.assertTrue(hasattr(log, "times"))
        self.assertTrue(hasattr(log, "getStatistics"))

        values = log.value
        self.assertTrue(isinstance(values, values_type))
        self.assertEquals(log.size(), len(values))
# Allow running this test file directly with python.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
7kbird/chrome | third_party/tlslite/tlslite/session.py | 117 | 4402 | # Authors:
# Trevor Perrin
# Dave Baggett (Arcode Corporation) - canonicalCipherName
#
# See the LICENSE file for legal information regarding use of this file.
"""Class representing a TLS session."""
from .utils.compat import *
from .mathtls import *
from .constants import *
class Session(object):
    """
    This class represents a TLS session.

    TLS distinguishes between connections and sessions.  A new
    handshake creates both a connection and a session.  Data is
    transmitted over the connection.

    The session contains a more permanent record of the handshake.  The
    session can be inspected to determine handshake results.  The
    session can also be used to create a new connection through
    "session resumption". If the client and server both support this,
    they can create a new connection based on an old session without
    the overhead of a full handshake.

    The session for a L{tlslite.TLSConnection.TLSConnection} can be
    retrieved from the connection's 'session' attribute.

    @type srpUsername: str
    @ivar srpUsername: The client's SRP username (or None).

    @type clientCertChain: L{tlslite.x509certchain.X509CertChain}
    @ivar clientCertChain: The client's certificate chain (or None).

    @type serverCertChain: L{tlslite.x509certchain.X509CertChain}
    @ivar serverCertChain: The server's certificate chain (or None).

    @type tackExt: L{tack.structures.TackExtension.TackExtension}
    @ivar tackExt: The server's TackExtension (or None).

    @type tackInHelloExt: L{bool}
    @ivar tackInHelloExt: True if a TACK was presented via TLS Extension.
    """

    def __init__(self):
        self.masterSecret = bytearray(0)
        self.sessionID = bytearray(0)
        self.cipherSuite = 0
        self.srpUsername = ""
        self.clientCertChain = None
        self.serverCertChain = None
        self.tackExt = None
        self.tackInHelloExt = False
        self.serverName = ""
        self.resumable = False

    def create(self, masterSecret, sessionID, cipherSuite,
               srpUsername, clientCertChain, serverCertChain,
               tackExt, tackInHelloExt, serverName, resumable=True):
        """Populate this session with the results of a handshake."""
        self.masterSecret = masterSecret
        self.sessionID = sessionID
        self.cipherSuite = cipherSuite
        self.srpUsername = srpUsername
        self.clientCertChain = clientCertChain
        self.serverCertChain = serverCertChain
        self.tackExt = tackExt
        self.tackInHelloExt = tackInHelloExt
        self.serverName = serverName
        self.resumable = resumable

    def _clone(self):
        """Return a shallow copy of this session."""
        other = Session()
        other.masterSecret = self.masterSecret
        other.sessionID = self.sessionID
        other.cipherSuite = self.cipherSuite
        other.srpUsername = self.srpUsername
        other.clientCertChain = self.clientCertChain
        other.serverCertChain = self.serverCertChain
        other.tackExt = self.tackExt
        other.tackInHelloExt = self.tackInHelloExt
        other.serverName = self.serverName
        other.resumable = self.resumable
        return other

    def valid(self):
        """If this session can be used for session resumption.

        @rtype: bool
        @return: If this session can be used for session resumption.
        """
        # bool() so the documented bool contract holds: previously this
        # returned the sessionID bytearray itself when resumable was set.
        return bool(self.resumable and self.sessionID)

    def _setResumable(self, boolean):
        #Only let it be set to True if the sessionID is non-null
        if (not boolean) or (boolean and self.sessionID):
            self.resumable = boolean

    def getTackId(self):
        """Return the TACK key ID presented by the server, or None."""
        if self.tackExt and self.tackExt.tack:
            return self.tackExt.tack.getTackId()
        else:
            return None

    def getBreakSigs(self):
        """Return the TACK break signatures presented by the server, or None."""
        if self.tackExt and self.tackExt.break_sigs:
            return self.tackExt.break_sigs
        else:
            return None

    def getCipherName(self):
        """Get the name of the cipher used with this connection.

        @rtype: str
        @return: The name of the cipher used with this connection.
        """
        return CipherSuite.canonicalCipherName(self.cipherSuite)

    def getMacName(self):
        """Get the name of the HMAC hash algo used with this connection.

        @rtype: str
        @return: The name of the HMAC hash algo used with this connection.
        """
        return CipherSuite.canonicalMacName(self.cipherSuite)
| bsd-3-clause |
farseerri/git_code | tests/system/suite_general/tst_installed_languages/test.py | 4 | 3662 | #############################################################################
##
## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://www.qt.io/licensing. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
def main():
    """Verify that Qt Creator starts translated into each language listed
    in languages.tsv.

    For every language this test starts Creator, switches the UI language
    under Tools > Options > Environment > General, restarts Creator, and
    then quits it via the *translated* File > Exit menu entry -- if that
    menu lookup fails, the translation is treated as missing.
    """
    for lang in testData.dataset("languages.tsv"):
        overrideStartApplication()
        startApplication("qtcreator" + SettingsPath)
        if not startedWithoutPluginError():
            return
        invokeMenuItem("Tools", "Options...")
        waitForObjectItem(":Options_QListView", "Environment")
        clickItem(":Options_QListView", "Environment", 14, 15, 0, Qt.LeftButton)
        clickOnTab(":Options.qt_tabwidget_tabbar_QTabBar", "General")
        languageName = testData.field(lang, "language")
        # Some "language" fields carry a "%1" country placeholder; resolve
        # it from the ISO column via QLocale.
        if "%1" in languageName:
            country = str(QLocale.countryToString(QLocale(testData.field(lang, "ISO")).country()))
            languageName = languageName.replace("%1", country)
        selectFromCombo(":User Interface.languageBox_QComboBox", languageName)
        clickButton(waitForObject(":Options.OK_QPushButton"))
        clickButton(waitForObject(":Restart required.OK_QPushButton"))
        invokeMenuItem("File", "Exit")
        waitForCleanShutdown()
        snooze(4) # wait for complete unloading of Creator
        # Restart Creator: it should now come up in the selected language.
        overrideStartApplication()
        startApplication("qtcreator" + SettingsPath)
        try:
            if platform.system() == 'Darwin':
                # temporary hack for handling wrong menus when using Squish 5.0.1 with Qt5.2
                fileMenu = waitForObjectItem(":Qt Creator.QtCreator.MenuBar_QMenuBar",
                                             testData.field(lang, "File"))
                activateItem(fileMenu)
                waitForObject("{type='QMenu' visible='1'}")
                activateItem(fileMenu)
                nativeType("<Command+q>")
            else:
                invokeMenuItem(testData.field(lang, "File"), testData.field(lang, "Exit"))
            test.passes("Creator was running in %s translation." % languageName)
        # NOTE(review): bare except is deliberate here -- any failure while
        # driving the translated menu is interpreted as "translation
        # missing", and Creator is closed via a synthetic close event.
        except:
            test.fail("Creator seems to be missing %s translation" % languageName)
            sendEvent("QCloseEvent", ":Qt Creator_Core::Internal::MainWindow")
        waitForCleanShutdown()
        __removeTestingDir__()
        copySettingsToTmpDir()
| lgpl-2.1 |
simonwydooghe/ansible | lib/ansible/executor/stats.py | 88 | 3295 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.utils.vars import merge_hash
class AggregateStats:
    ''' holds stats about per-host activity during playbook runs '''

    def __init__(self):
        # per-host counters; keys are host names
        self.processed = {}
        self.failures = {}
        self.ok = {}
        self.dark = {}
        self.changed = {}
        self.skipped = {}
        self.rescued = {}
        self.ignored = {}

        # user defined stats, which can be per host or global
        self.custom = {}

    def increment(self, what, host):
        ''' helper function to bump a statistic '''

        self.processed[host] = 1
        counter = getattr(self, what)
        counter[host] = counter.get(host, 0) + 1

    def decrement(self, what, host):
        ''' lower a statistic, clamping at zero (missing hosts become 0) '''

        counter = getattr(self, what)
        # get(host, 1) - 1 == 0 for a missing host; max() prevents negatives
        counter[host] = max(counter.get(host, 1) - 1, 0)

    def summarize(self, host):
        ''' return information about a particular host '''

        fields = (
            ('ok', self.ok),
            ('failures', self.failures),
            ('unreachable', self.dark),
            ('changed', self.changed),
            ('skipped', self.skipped),
            ('rescued', self.rescued),
            ('ignored', self.ignored),
        )
        return dict((name, data.get(host, 0)) for name, data in fields)

    def set_custom_stats(self, which, what, host=None):
        ''' allow setting of a custom stat'''

        host_key = '_run' if host is None else host
        self.custom.setdefault(host_key, {})[which] = what

    def update_custom_stats(self, which, what, host=None):
        ''' allow aggregation of a custom stat'''

        host_key = '_run' if host is None else host
        try:
            existing = self.custom[host_key][which]
        except KeyError:
            # nothing stored yet: fall back to a plain set
            return self.set_custom_stats(which, what, host_key)

        # mismatching types are silently ignored
        if not isinstance(what, type(existing)):
            return None

        if isinstance(what, MutableMapping):
            self.custom[host_key][which] = merge_hash(existing, what)
        else:
            # let overloaded + take care of other types
            self.custom[host_key][which] += what
| gpl-3.0 |
s-hertel/ansible | test/units/config/manager/test_find_ini_config_file.py | 35 | 10870 | # -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import stat
import pytest
from ansible.config.manager import find_ini_config_file
from ansible.module_utils._text import to_text
real_exists = os.path.exists
real_isdir = os.path.isdir
working_dir = os.path.dirname(__file__)
cfg_in_cwd = os.path.join(working_dir, 'ansible.cfg')
cfg_dir = os.path.join(working_dir, 'data')
cfg_file = os.path.join(cfg_dir, 'ansible.cfg')
alt_cfg_file = os.path.join(cfg_dir, 'test.cfg')
cfg_in_homedir = os.path.expanduser('~/.ansible.cfg')
@pytest.fixture
def setup_env(request):
    """Point ANSIBLE_CONFIG at the parametrized path for the duration of
    the test (or unset it when the param is None), restoring the original
    environment afterwards.

    The previous implementation assigned ``request.param[0]`` / the saved
    value directly into os.environ; when either was None that raised
    TypeError, since environ values must be strings.
    """
    cur_config = os.environ.get('ANSIBLE_CONFIG', None)
    cfg_path = request.param[0]

    if cfg_path is None:
        # pop() tolerates the variable being absent already
        os.environ.pop('ANSIBLE_CONFIG', None)
    else:
        os.environ['ANSIBLE_CONFIG'] = cfg_path

    yield

    # Restore whatever was there before the test ran
    if cur_config is None:
        os.environ.pop('ANSIBLE_CONFIG', None)
    else:
        os.environ['ANSIBLE_CONFIG'] = cur_config
@pytest.fixture
def setup_existing_files(request, monkeypatch):
    """Patch the filesystem checks so that exactly the parametrized paths
    appear to exist (and be accessible), with cwd set to the test dir."""
    existing = request.param[0]

    def fake_exists(path):
        return to_text(path) in existing

    def fake_access(path, access):
        # access mode is irrelevant: a listed path is always "accessible"
        return to_text(path) in existing

    # Enable user and system dirs so that we know cwd takes precedence
    monkeypatch.setattr("os.path.exists", fake_exists)
    monkeypatch.setattr("os.access", fake_access)
    monkeypatch.setattr("os.getcwd", lambda: os.path.dirname(cfg_dir))
    monkeypatch.setattr("os.path.isdir", lambda path: True if to_text(path) == cfg_dir else real_isdir(path))
class TestFindIniFile:
    """Tests for find_ini_config_file().

    ``setup_env`` (indirect param) controls ANSIBLE_CONFIG; the
    ``setup_existing_files`` indirect param lists the candidate config
    paths that should appear to exist.
    """

    # os.stat captured at class-definition time, i.e. before any
    # monkeypatching, so the fake below can delegate without recursing.
    _real_stat = staticmethod(os.stat)

    @classmethod
    def _world_writable_stat(cls, path):
        """os.stat stand-in that reports the test working dir as
        world-writable and behaves normally for every other path.

        Previously this helper was copy-pasted into four tests.
        """
        if path == working_dir:
            from posix import stat_result
            stat_info = list(cls._real_stat(path))
            stat_info[stat.ST_MODE] |= stat.S_IWOTH
            return stat_result(stat_info)
        return cls._real_stat(path)

    # This tells us to run twice, once with a file specified and once with a directory
    @pytest.mark.parametrize('setup_env, expected', (([alt_cfg_file], alt_cfg_file), ([cfg_dir], cfg_file)), indirect=['setup_env'])
    # This just passes the list of files that exist to the fixture
    @pytest.mark.parametrize('setup_existing_files',
                             [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, alt_cfg_file, cfg_file)]],
                             indirect=['setup_existing_files'])
    def test_env_has_cfg_file(self, setup_env, setup_existing_files, expected):
        """ANSIBLE_CONFIG is specified, use it"""
        warnings = set()
        assert find_ini_config_file(warnings) == expected
        assert warnings == set()

    @pytest.mark.parametrize('setup_env', ([alt_cfg_file], [cfg_dir]), indirect=['setup_env'])
    @pytest.mark.parametrize('setup_existing_files',
                             [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd)]],
                             indirect=['setup_existing_files'])
    def test_env_has_no_cfg_file(self, setup_env, setup_existing_files):
        """ANSIBLE_CONFIG is specified but the file does not exist"""

        warnings = set()
        # since the cfg file specified by ANSIBLE_CONFIG doesn't exist, the one at cwd that does
        # exist should be returned
        assert find_ini_config_file(warnings) == cfg_in_cwd
        assert warnings == set()

    # ANSIBLE_CONFIG not specified
    @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
    # All config files are present
    @pytest.mark.parametrize('setup_existing_files',
                             [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
                             indirect=['setup_existing_files'])
    def test_ini_in_cwd(self, setup_env, setup_existing_files):
        """ANSIBLE_CONFIG not specified. Use the cwd cfg"""
        warnings = set()
        assert find_ini_config_file(warnings) == cfg_in_cwd
        assert warnings == set()

    # ANSIBLE_CONFIG not specified
    @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
    # No config in cwd
    @pytest.mark.parametrize('setup_existing_files',
                             [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]],
                             indirect=['setup_existing_files'])
    def test_ini_in_homedir(self, setup_env, setup_existing_files):
        """First config found is in the homedir"""
        warnings = set()
        assert find_ini_config_file(warnings) == cfg_in_homedir
        assert warnings == set()

    # ANSIBLE_CONFIG not specified
    @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
    # No config in cwd or homedir
    @pytest.mark.parametrize('setup_existing_files', [[('/etc/ansible/ansible.cfg', cfg_file, alt_cfg_file)]], indirect=['setup_existing_files'])
    def test_ini_in_systemdir(self, setup_env, setup_existing_files):
        """First config found is the system config"""
        warnings = set()
        assert find_ini_config_file(warnings) == '/etc/ansible/ansible.cfg'
        assert warnings == set()

    # ANSIBLE_CONFIG not specified
    @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
    # No config in cwd
    @pytest.mark.parametrize('setup_existing_files',
                             [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]],
                             indirect=['setup_existing_files'])
    def test_cwd_does_not_exist(self, setup_env, setup_existing_files, monkeypatch):
        """Smoketest current working directory doesn't exist"""
        def _os_stat(path):
            raise OSError('%s does not exist' % path)
        monkeypatch.setattr('os.stat', _os_stat)

        warnings = set()
        assert find_ini_config_file(warnings) == cfg_in_homedir
        assert warnings == set()

    @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
    # No config present anywhere
    @pytest.mark.parametrize('setup_existing_files', [[list()]], indirect=['setup_existing_files'])
    def test_no_config(self, setup_env, setup_existing_files):
        """No config present, no config found"""
        warnings = set()
        assert find_ini_config_file(warnings) is None
        assert warnings == set()

    # ANSIBLE_CONFIG not specified
    @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
    # All config files are present except in cwd
    @pytest.mark.parametrize('setup_existing_files',
                             [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_file, alt_cfg_file)]],
                             indirect=['setup_existing_files'])
    def test_no_cwd_cfg_no_warning_on_writable(self, setup_env, setup_existing_files, monkeypatch):
        """If the cwd is writable but there is no config file there, move on with no warning"""
        monkeypatch.setattr('os.stat', self._world_writable_stat)

        warnings = set()
        assert find_ini_config_file(warnings) == cfg_in_homedir
        assert len(warnings) == 0

    # ANSIBLE_CONFIG not specified
    @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
    # All config files are present
    @pytest.mark.parametrize('setup_existing_files',
                             [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
                             indirect=['setup_existing_files'])
    def test_cwd_warning_on_writable(self, setup_env, setup_existing_files, monkeypatch):
        """If the cwd is writable, warn and skip it """
        monkeypatch.setattr('os.stat', self._world_writable_stat)

        warnings = set()
        assert find_ini_config_file(warnings) == cfg_in_homedir
        assert len(warnings) == 1
        warning = warnings.pop()
        assert u'Ansible is being run in a world writable directory' in warning
        assert u'ignoring it as an ansible.cfg source' in warning

    # ANSIBLE_CONFIG is specified
    @pytest.mark.parametrize('setup_env, expected', (([alt_cfg_file], alt_cfg_file), ([cfg_in_cwd], cfg_in_cwd)), indirect=['setup_env'])
    # All config files are present
    @pytest.mark.parametrize('setup_existing_files',
                             [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
                             indirect=['setup_existing_files'])
    def test_no_warning_on_writable_if_env_used(self, setup_env, setup_existing_files, monkeypatch, expected):
        """If the cwd is writable but ANSIBLE_CONFIG was used, no warning should be issued"""
        monkeypatch.setattr('os.stat', self._world_writable_stat)

        warnings = set()
        assert find_ini_config_file(warnings) == expected
        assert warnings == set()

    # ANSIBLE_CONFIG not specified
    @pytest.mark.parametrize('setup_env', [[None]], indirect=['setup_env'])
    # All config files are present
    @pytest.mark.parametrize('setup_existing_files',
                             [[('/etc/ansible/ansible.cfg', cfg_in_homedir, cfg_in_cwd, cfg_file, alt_cfg_file)]],
                             indirect=['setup_existing_files'])
    def test_cwd_warning_on_writable_no_warning_set(self, setup_env, setup_existing_files, monkeypatch):
        """Smoketest that the function succeeds even though no warning set was passed in"""
        monkeypatch.setattr('os.stat', self._world_writable_stat)

        assert find_ini_config_file() == cfg_in_homedir
| gpl-3.0 |
achadwick/mypaint | gui/drawutils.py | 3 | 18428 | # This file is part of MyPaint.
# Copyright (C) 2014 by Andrew Chadwick <a.t.chadwick@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Graphical rendering helpers (splines, alpha checks, brush preview)
See also: gui.style
"""
## Imports
from __future__ import division, print_function
import logging
logger = logging.getLogger(__name__)
import math
import numpy as np
import cairo
from lib.helpers import clamp
import gui.style
from lib.color import HCYColor, RGBColor
import gi
from gi.repository import GdkPixbuf
from gi.repository import Gdk
from gi.repository import Gtk
from lib.brush import Brush, BrushInfo
import lib.tiledsurface
from lib.pixbufsurface import render_as_pixbuf
## Module constants
# Normalized control points for the synthetic scribble traced by
# _variable_pressure_scribble(): coordinates are in [0..1] and get scaled
# to the preview's pixel size; pressure ramps up/down at the ends.
_BRUSH_PREVIEW_POINTS = [
    # px, py, press, xtilt, ytilt # px, py, press, xtilt, ytilt
    (0.00, 0.00, 0.00, 0.00, 0.00), (1.00, 0.05, 0.00, -0.06, 0.05),
    (0.10, 0.10, 0.20, 0.10, 0.05), (0.90, 0.15, 0.90, -0.05, 0.05),
    (0.11, 0.30, 0.90, 0.08, 0.05), (0.86, 0.35, 0.90, -0.04, 0.05),
    (0.13, 0.50, 0.90, 0.06, 0.05), (0.84, 0.55, 0.90, -0.03, 0.05),
    (0.17, 0.70, 0.90, 0.04, 0.05), (0.83, 0.75, 0.90, -0.02, 0.05),
    (0.25, 0.90, 0.20, 0.02, 0.00), (0.81, 0.95, 0.00, 0.00, 0.00),
    (0.41, 0.95, 0.00, 0.00, 0.00), (0.80, 1.00, 0.00, 0.00, 0.00),
]
## Drawing functions
def spline_4p(t, p_1, p0, p1, p2):
    """Interpolated point using a Catmull-Rom spline

    :param float t: Time parameter, between 0.0 and 1.0
    :param array p_1: Point p[-1]
    :param array p0: Point p[0]
    :param array p1: Point p[1]
    :param array p2: Point p[2]
    :returns: Interpolated point, between p0 and p1
    :rtype: array

    Stepping `t` from 0 (at p0) to 1 (at p1) for each consecutive group
    of four control points yields a smooth curve through every point
    except the first and last. See also:

    * `spline_iter()`
    * http://en.wikipedia.org/wiki/Cubic_Hermite_spline
    * http://stackoverflow.com/questions/1251438
    """
    # Catmull-Rom basis polynomials at t (each scaled by 2; halved below).
    w_1 = t*((2-t)*t - 1)
    w0 = t*t*(3*t - 5) + 2
    w1 = t*((4 - 3*t)*t + 1)
    w2 = (t-1)*t*t
    return (w_1*p_1 + w0*p0 + w1*p1 + w2*p2) / 2
def spline_iter(tuples, double_first=True, double_last=True):
    """Converts an list of control point tuples to interpolatable arrays

    :param list tuples: Sequence of tuples of floats
    :param bool double_first: Repeat 1st point, putting it in the result
    :param bool double_last: Repeat last point, putting it in the result
    :returns: Iterator producing (p-1, p0, p1, p2) tuples of arrays

    The resulting sequence of 4-tuples is intended to be fed into
    spline_4p().  The start and end points are therefore normally
    doubled, producing a curve that passes through them, along a vector
    aimed at the second or penultimate point respectively.

    Each yielded value is an independent tuple.  (Previously the same
    mutated list object was yielded every iteration, so materializing
    the iterator with list() produced N references to the final window.)
    """
    window = [None, None, None, None]

    def _shift_in(point):
        # Slide the 4-point window one step and append the new point.
        window[0:3] = window[1:4]
        window[3] = np.array(point)

    if double_first:
        _shift_in(tuples[0])
    for ctrlpt in tuples:
        _shift_in(ctrlpt)
        if not any((a is None) for a in window):
            yield tuple(window)
    if double_last:
        _shift_in(tuples[-1])
        yield tuple(window)
def _variable_pressure_scribble(w, h, tmult):
    """Yield synthetic (dtime, x, y, pressure, xtilt, ytilt) input events.

    Traces the _BRUSH_PREVIEW_POINTS scribble scaled to a w*h canvas,
    interpolating between control points with a Catmull-Rom spline.
    The `tmult` argument is accepted for interface compatibility but is
    not used here.
    """
    points = _BRUSH_PREVIEW_POINTS
    event_dtime = 0.005
    point_time = 0.1

    # Lead-in: long pause hovering over the first point, zero pressure.
    px, py, _press, xtilt, ytilt = points[0]
    yield (10, px*w, py*h, 0.0, xtilt, ytilt)

    for q_1, q0, q1, q2 in spline_iter(points, True, True):
        # Fixed-increment float accumulation, matching the event cadence.
        dt = 0.0
        while dt < point_time:
            px, py, press, xtilt, ytilt = spline_4p(
                dt/point_time, q_1, q0, q1, q2)
            yield (event_dtime, px*w, py*h, press, xtilt, ytilt)
            dt += event_dtime

    # Lead-out: pen lifted at the last point.
    px, py, _press, xtilt, ytilt = points[-1]
    yield (10, px*w, py*h, 0.0, xtilt, ytilt)
def render_brush_preview_pixbuf(brushinfo, max_edge_tiles=4):
    """Renders brush preview images

    :param lib.brush.BrushInfo brushinfo: settings to render
    :param int max_edge_tiles: Use at most this many tiles along an edge
    :returns: Preview image, at 128x128 pixels
    :rtype: GdkPixbuf

    This generates the preview image (128px icon) used for brushes which
    don't have saved ones. These include brushes picked from .ORA files
    where the parent_brush_name doesn't correspond to a brush in the
    user's MyPaint brushes - they're used as the default, and for the
    Auto button in the Brush Icon editor.

    Brushstrokes are inherently unpredictable in size, so the allowable
    area is grown until the brush fits or until the rendering becomes
    too big. `max_edge_tiles` limits this growth.
    """
    assert max_edge_tiles >= 1
    brushinfo = brushinfo.clone()  # avoid capturing a ref
    brush = Brush(brushinfo)
    surface = lib.tiledsurface.Surface()
    N = lib.tiledsurface.N
    # Try growing sizes up to and including max_edge_tiles.  The range
    # must cover max_edge_tiles itself: the old range(1, max_edge_tiles)
    # never tried the documented maximum, and with max_edge_tiles=1 the
    # loop body never ran at all, leaving `width` unbound below.
    for size_in_tiles in range(1, max_edge_tiles + 1):
        width = N * size_in_tiles
        height = N * size_in_tiles
        surface.clear()
        fg, spiral = _brush_preview_bg_fg(surface, size_in_tiles, brushinfo)
        brushinfo.set_color_rgb(fg)
        brush.reset()
        # Stroke the preview scribble onto the surface.
        shape = _variable_pressure_scribble(width, height, size_in_tiles)
        surface.begin_atomic()
        for dt, x, y, p, xt, yt in shape:
            brush.stroke_to(surface.backend, x, y, p, xt, yt, dt)
        surface.end_atomic()
        # Stop growing as soon as the rendered stroke stayed inside the
        # size_in_tiles x size_in_tiles tile grid.
        tposs = surface.tiledict.keys()
        outside = min({tx for tx, ty in tposs}) < 0
        outside = outside or (min({ty for tx, ty in tposs}) < 0)
        outside = outside or (max({tx for tx, ty in tposs}) >= size_in_tiles)
        outside = outside or (max({ty for tx, ty in tposs}) >= size_in_tiles)
        if not outside:
            break
    # Convert to pixbuf at the right scale
    rect = (0, 0, width, height)
    pixbuf = render_as_pixbuf(surface, *rect, alpha=True)
    if max(width, height) != 128:
        interp = (GdkPixbuf.InterpType.NEAREST if max(width, height) < 128
                  else GdkPixbuf.InterpType.BILINEAR)
        pixbuf = pixbuf.scale_simple(128, 128, interp)
    # Composite over a checquered bg via Cairo: shows erases
    size = gui.style.ALPHA_CHECK_SIZE
    nchecks = int(128 // size)
    cairo_surf = cairo.ImageSurface(cairo.FORMAT_ARGB32, 128, 128)
    cr = cairo.Context(cairo_surf)
    render_checks(cr, size, nchecks)
    Gdk.cairo_set_source_pixbuf(cr, pixbuf, 0, 0)
    cr.paint()
    cairo_surf.flush()
    return Gdk.pixbuf_get_from_surface(cairo_surf, 0, 0, 128, 128)
def _brush_preview_bg_fg(surface, size_in_tiles, brushinfo):
    """Render the background for brush previews, return paint color

    :param surface: lib.tiledsurface.Surface whose tiles get filled
    :param int size_in_tiles: edge length of the preview, in tiles
    :param brushinfo: lib.brush.BrushInfo being previewed
    :returns: (fgcol, spiral) - RGB tuple to paint with, and a bool hint
    """
    # The background color represents the overall nature of the brush
    col1 = (0.85, 0.85, 0.80) # Boring grey, with a hint of paper-yellow
    col2 = (0.80, 0.80, 0.80) # Grey, but will appear blueish in contrast
    fgcol = (0.05, 0.15, 0.20) # Hint of color shows off HSV varier brushes
    spiral = False
    N = lib.tiledsurface.N
    # Special-case palettes: (setting-name, col1, col2, spiral, fgcol).
    # The first entry whose setting has a large base value wins.
    fx = [
        (
            "eraser", # pink=rubber=eraser; red=danger
            (0.8, 0.7, 0.7), # pink/red tones: pencil eraser/danger
            (0.75, 0.60, 0.60),
            False, fgcol
        ),
        (
            "colorize",
            (0.8, 0.8, 0.8), # orange on gray
            (0.6, 0.6, 0.6),
            False, (0.6, 0.2, 0.0)
        ),
        (
            "smudge", # blue=water=wet, with some contrast
            (0.85, 0.85, 0.80), # same as the regular paper color
            (0.60, 0.60, 0.70), # bluer (water, wet); more contrast
            True, fgcol
        ),
    ]
    for cname, c1, c2, c_spiral, c_fg, in fx:
        if brushinfo.has_large_base_value(cname):
            col1 = c1
            col2 = c2
            fgcol = c_fg
            spiral = c_spiral
            break
    # Brushes that can never smudge get a flat background (no contrast
    # needed), unless they're primarily colorizers.
    never_smudger = (brushinfo.has_small_base_value("smudge") and
                     brushinfo.has_only_base_value("smudge"))
    colorizer = brushinfo.has_large_base_value("colorize")
    if never_smudger and not colorizer:
        col2 = col1
    # Convert both colors to fix15 RGBA (15-bit fixed point, opaque alpha).
    a = 1 << 15
    col1_fix15 = [c*a for c in col1] + [a]
    col2_fix15 = [c*a for c in col2] + [a]
    # Fill the tile grid: col1 above the anti-diagonal, col2 below it,
    # with a per-pixel diagonal blend inside the boundary tiles.
    for ty in range(0, size_in_tiles):
        tx_thres = max(0, size_in_tiles - ty - 1)
        for tx in range(0, size_in_tiles):
            topcol = col1_fix15
            botcol = col1_fix15
            if tx > tx_thres:
                topcol = col2_fix15
            if tx >= tx_thres:
                botcol = col2_fix15
            with surface.tile_request(tx, ty, readonly=False) as dst:
                if topcol == botcol:
                    dst[:] = topcol
                else:
                    # Diagonal split within the boundary tile.
                    # NOTE(review): assumes dst is indexed [row, col,
                    # channel] - confirm against lib.tiledsurface.
                    for i in range(N):
                        dst[0:N-i, i, ...] = topcol
                        dst[N-i:N, i, ...] = botcol
    return fgcol, spiral
def render_checks(cr, size, nchecks):
    """Render a checquerboard pattern to a cairo surface

    :param cairo.Context cr: context to draw into
    :param int size: side length of one check square, in pixels
    :param int nchecks: number of checks along each edge
    """
    # Flood with color 1, then stamp color 2 on the odd (i+j) squares.
    cr.set_source_rgb(*gui.style.ALPHA_CHECK_COLOR_1)
    cr.paint()
    cr.set_source_rgb(*gui.style.ALPHA_CHECK_COLOR_2)
    # range() instead of Python-2-only xrange(): the file already imports
    # from __future__, and the counts here are tiny.
    for i in range(0, nchecks):
        for j in range(0, nchecks):
            if (i+j) % 2 == 0:
                continue
            cr.rectangle(i*size, j*size, size, size)
            cr.fill()
def load_symbolic_icon(icon_name, size, fg=None, success=None,
                       warning=None, error=None, outline=None):
    """More Pythonic wrapper for gtk_icon_info_load_symbolic() etc.

    :param str icon_name: Name of the symbolic icon to render
    :param int size: Pixel size to render at
    :param tuple fg: foreground color (rgba tuple, values in [0..1])
    :param tuple success: success color (rgba tuple, values in [0..1])
    :param tuple warning: warning color (rgba tuple, values in [0..1])
    :param tuple error: error color (rgba tuple, values in [0..1])
    :param tuple outline: outline color (rgba tuple, values in [0..1])
    :returns: The rendered symbolic icon
    :rtype: GdkPixbuf.Pixbuf

    If the outline color is specified, a single-pixel outline is faked
    for the icon. Outlined renderings require a size 2 pixels larger
    than non-outlined if the central icon is to be of the same size.

    The returned value should be cached somewhere.
    """
    theme = Gtk.IconTheme.get_default()
    if outline is not None:
        size -= 2  # leave a 1px margin on each side for the faked outline
    info = theme.lookup_icon(icon_name, size, Gtk.IconLookupFlags(0))

    def rgba_or_none(tup):
        # Proper conditional instead of the old `cond and x or y` lambda,
        # which misbehaves whenever the middle operand is falsy.
        return Gdk.RGBA(*tup) if tup is not None else None

    icon_pixbuf, was_symbolic = info.load_symbolic(
        fg=rgba_or_none(fg),
        success_color=rgba_or_none(success),
        warning_color=rgba_or_none(warning),
        error_color=rgba_or_none(error),
    )
    assert was_symbolic
    if outline is None:
        return icon_pixbuf

    result = GdkPixbuf.Pixbuf.new(
        GdkPixbuf.Colorspace.RGB, True, 8,
        size+2, size+2,
    )
    result.fill(0x00000000)
    # Render the icon again in the outline color, at 1/3 the alpha.
    outline_rgba = list(outline)
    outline_rgba[3] /= 3.0
    outline_rgba = Gdk.RGBA(*outline_rgba)
    outline_stamp, was_symbolic = info.load_symbolic(
        fg=outline_rgba,
        success_color=outline_rgba,
        warning_color=outline_rgba,
        error_color=outline_rgba,
    )
    w = outline_stamp.get_width()
    h = outline_stamp.get_height()
    assert was_symbolic
    # Stamp the outline rendering at the 8 surrounding 1px offsets, then
    # composite the real icon on top, faking a halo.
    offsets = [
        (-1, -1), (0, -1), (1, -1),
        (-1, 0), (1, 0),
        (-1, 1), (0, 1), (1, 1),
    ]
    for dx, dy in offsets:
        outline_stamp.composite(
            result,
            dx+1, dy+1, w, h,
            dx+1, dy+1, 1, 1,
            GdkPixbuf.InterpType.NEAREST, 255,
        )
    icon_pixbuf.composite(
        result,
        1, 1, w, h,
        1, 1, 1, 1,
        GdkPixbuf.InterpType.NEAREST, 255,
    )
    return result
def render_round_floating_button(cr, x, y, color, pixbuf, z=2,
                                 radius=gui.style.FLOATING_BUTTON_RADIUS):
    """Draw a round floating button with a standard size.

    :param cairo.Context cr: Context in which to draw.
    :param float x: X coordinate of the center pixel.
    :param float y: Y coordinate of the center pixel.
    :param lib.color.UIColor color: Color for the button base.
    :param GdkPixbuf.Pixbuf pixbuf: Icon to render.
    :param int z: Simulated height of the button above the canvas.
    :param float radius: Button radius, in pixels.

    These are used within certain overlays tightly associated with
    particular interaction modes for manipulating things on the canvas.
    """
    # Snap the center to whole pixels so the chip and icon line up crisply.
    cx = round(float(x))
    cy = round(float(y))
    # Colored chip plus drop shadow forms the button base.
    render_round_floating_color_chip(cr, cx, cy, color, radius=radius, z=z)
    # Center the icon pixbuf over the chip.
    icon_w = pixbuf.get_width()
    icon_h = pixbuf.get_height()
    icon_x = cx - icon_w/2
    icon_y = cy - icon_h/2
    cr.save()
    cr.rectangle(icon_x, icon_y, icon_w, icon_h)
    cr.clip()
    Gdk.cairo_set_source_pixbuf(cr, pixbuf, icon_x, icon_y)
    cr.paint()
    cr.restore()
def _get_paint_chip_highlight(color):
    """Return the highlight edge color for a paint chip rendering."""
    y_mult = gui.style.PAINT_CHIP_HIGHLIGHT_HCY_Y_MULT
    c_mult = gui.style.PAINT_CHIP_HIGHLIGHT_HCY_C_MULT
    # Work in HCY so luma and chroma can be scaled independently.
    hi = HCYColor(color=color)
    hi.y = clamp(hi.y * y_mult, 0, 1)
    hi.c = clamp(hi.c * c_mult, 0, 1)
    return hi
def _get_paint_chip_shadow(color):
    """Return the shadow edge color for a paint chip rendering."""
    y_mult = gui.style.PAINT_CHIP_SHADOW_HCY_Y_MULT
    c_mult = gui.style.PAINT_CHIP_SHADOW_HCY_C_MULT
    # Work in HCY so luma and chroma can be scaled independently.
    lo = HCYColor(color=color)
    lo.y = clamp(lo.y * y_mult, 0, 1)
    lo.c = clamp(lo.c * c_mult, 0, 1)
    return lo
def render_round_floating_color_chip(cr, x, y, color, radius, z=2):
    """Draw a round color chip with a slight drop shadow.

    :param cairo.Context cr: Context in which to draw.
    :param float x: X coordinate of the center pixel.
    :param float y: Y coordinate of the center pixel.
    :param lib.color.UIColor color: Color for the chip.
    :param float radius: Circle radius, in pixels.
    :param int z: Simulated height of the object above the canvas.

    Currently used for accept/dismiss/delete buttons and control points
    on the painting canvas, in certain modes.

    The button's style is similar to that used for the paint chips in
    the dockable palette panel. As used here with drop shadows to
    indicate that the blob can be interacted with, the style is similar
    to Google's Material Design approach. This style adds a subtle edge
    highlight in a brighter variant of "color", which seems to help
    address adjacent color interactions.
    """
    # Whole-pixel geometry keeps the circle edge sharp.
    cx = round(float(x))
    cy = round(float(y))
    r = round(radius)
    # Fill color plus a brighter variant for the rim highlight.
    fill_col = RGBColor(color=color)
    edge_col = _get_paint_chip_highlight(fill_col)
    cr.save()
    cr.set_dash([], 0)
    cr.set_line_width(0)
    cr.arc(cx, cy, r, 0, 2*math.pi)
    # The shadow helper infers its width from the current line width.
    cr.set_line_width(2)
    render_drop_shadow(cr, z=z)
    cr.set_source_rgb(*fill_col.get_rgb())
    cr.fill_preserve()
    # Clip to the disc so the highlight stroke stays inside the edge.
    cr.clip_preserve()
    cr.set_source_rgb(*edge_col.get_rgb())
    cr.stroke()
    cr.restore()
def render_drop_shadow(cr, z=2, line_width=None):
    """Draws a drop shadow for the current path.

    :param int z: Simulated height of the object above the canvas.
    :param float line_width: Override width of the line to shadow.

    This function assumes that the object will be drawn immediately
    afterwards using the current path, so the current path and transform
    are preserved. The line width will be inferred automatically from
    the current path if it is not specified.

    These shadows are suitable for lines of a single brightish color
    drawn over them. The combined style indicates that the object can be
    moved or clicked.
    """
    if line_width is None:
        line_width = cr.get_line_width()
    # Remember the caller's path so it can be restored afterwards.
    saved_path = cr.copy_path()
    cr.save()
    # Offset the path in the shadow direction, scaled by height.
    shift_x = gui.style.DROP_SHADOW_X_OFFSET * z
    shift_y = gui.style.DROP_SHADOW_Y_OFFSET * z
    cr.translate(shift_x, shift_y)
    cr.new_path()
    cr.append_path(saved_path)
    # Fake a blur: stroke progressively narrower, more opaque rings.
    steps = int(math.ceil(gui.style.DROP_SHADOW_BLUR))
    alpha = gui.style.DROP_SHADOW_ALPHA / steps
    for ring in range(steps - 1, -1, -1):
        cr.set_source_rgba(0.0, 0.0, 0.0, alpha)
        cr.set_line_width(line_width + 2*ring)
        cr.stroke_preserve()
        alpha += alpha/2
    # Put the transform and path back the way the caller left them.
    cr.translate(-shift_x, -shift_y)
    cr.new_path()
    cr.append_path(saved_path)
    cr.restore()
def get_drop_shadow_offsets(line_width, z=2):
    """Get how much extra space is needed to draw the drop shadow.

    :param float line_width: Width of the line to shadow.
    :param int z: Simulated height of the object above the canvas.
    :returns: Offsets: (offs_left, offs_top, offs_right, offs_bottom)
    :rtype: tuple

    The offsets returned can be added to redraw bboxes, and are always
    positive. They reflect how much extra space is required around the
    bounding box for a line of the given width by the shadow rendered by
    render_drop_shadow().
    """
    dx = math.ceil(gui.style.DROP_SHADOW_X_OFFSET * z)
    dy = math.ceil(gui.style.DROP_SHADOW_Y_OFFSET * z)
    # Widest blur ring drawn by render_drop_shadow(), plus 1px slack.
    max_ring = int(math.ceil(gui.style.DROP_SHADOW_BLUR)) - 1
    pad = line_width + 2*max_ring + 1
    raw_offsets = (pad - dx, pad - dy, pad + dx, pad + dy)
    return tuple(int(max(0, offs)) for offs in raw_offsets)
## Test code

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    import sys
    import lib.pixbuf
    # For each .myb brush file given on the command line, render its
    # automatic preview and save it next to the brush as a PNG.
    for myb_file in sys.argv[1:]:
        if not myb_file.lower().endswith(".myb"):
            logger.warning("Ignored %r: not a .myb file", myb_file)
            continue
        with open(myb_file, 'r') as myb_fp:
            myb_json = myb_fp.read()
        myb_brushinfo = BrushInfo(myb_json)
        myb_pixbuf = render_brush_preview_pixbuf(myb_brushinfo)
        if myb_pixbuf is not None:
            # Strip the ".myb" suffix: write "name_autopreview.png",
            # not "name.myb_autopreview.png". (The basename was
            # previously computed but never used.)
            myb_basename = myb_file[:-4]
            png_file = "%s_autopreview.png" % (myb_basename,)
            logger.info("Saving to %r...", png_file)
            lib.pixbuf.save(myb_pixbuf, png_file, "png")
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.