| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
dednal/chromium.src | tools/profile_chrome/perf_controller.py | 16 | 6890 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import signal
import subprocess
import sys
import tempfile
from profile_chrome import controllers
from profile_chrome import ui
from pylib import android_commands
from pylib import constants
from pylib.perf import perf_control
sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT,
'tools',
'telemetry'))
try:
# pylint: disable=F0401
from telemetry.core.platform.profiler import android_profiling_helper
from telemetry.util import support_binaries
except ImportError:
android_profiling_helper = None
support_binaries = None
_PERF_OPTIONS = [
    # Sample across all processes and CPUs so that the current CPU gets
    # recorded to each sample.
'--all-cpus',
# In perf 3.13 --call-graph requires an argument, so use the -g short-hand
# which does not.
'-g',
# Increase priority to avoid dropping samples. Requires root.
'--realtime', '80',
# Record raw samples to get CPU information.
'--raw-samples',
# Increase sampling frequency for better coverage.
'--freq', '2000',
]
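# For reference, these options yield a device-side invocation along the lines
# of the following (illustrative; the output path is generated at runtime):
#   perf record --output <device-temp-file> --all-cpus -g --realtime 80 \
#       --raw-samples --freq 2000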
class _PerfProfiler(object):
def __init__(self, device, perf_binary, categories):
self._device = device
self._output_file = android_commands.DeviceTempFile(
self._device.old_interface, prefix='perf_output')
self._log_file = tempfile.TemporaryFile()
    # TODO(jbudorick) Look at providing a way to un-hand-roll this once the
    # adb rewrite has fully landed.
device_param = (['-s', str(self._device)] if str(self._device) else [])
cmd = ['adb'] + device_param + \
['shell', perf_binary, 'record',
'--output', self._output_file.name] + _PERF_OPTIONS
if categories:
cmd += ['--event', ','.join(categories)]
self._perf_control = perf_control.PerfControl(self._device)
self._perf_control.SetPerfProfilingMode()
self._perf_process = subprocess.Popen(cmd,
stdout=self._log_file,
stderr=subprocess.STDOUT)
def SignalAndWait(self):
self._device.KillAll('perf', signum=signal.SIGINT)
self._perf_process.wait()
self._perf_control.SetDefaultPerfMode()
def _FailWithLog(self, msg):
self._log_file.seek(0)
log = self._log_file.read()
raise RuntimeError('%s. Log output:\n%s' % (msg, log))
def PullResult(self, output_path):
if not self._device.FileExists(self._output_file.name):
self._FailWithLog('Perf recorded no data')
perf_profile = os.path.join(output_path,
os.path.basename(self._output_file.name))
self._device.PullFile(self._output_file.name, perf_profile)
if not os.stat(perf_profile).st_size:
os.remove(perf_profile)
self._FailWithLog('Perf recorded a zero-sized file')
self._log_file.close()
self._output_file.close()
return perf_profile
class PerfProfilerController(controllers.BaseController):
def __init__(self, device, categories):
controllers.BaseController.__init__(self)
self._device = device
self._categories = categories
self._perf_binary = self._PrepareDevice(device)
self._perf_instance = None
def __repr__(self):
return 'perf profile'
@staticmethod
def IsSupported():
return bool(android_profiling_helper)
@staticmethod
def _PrepareDevice(device):
    if 'BUILDTYPE' not in os.environ:
os.environ['BUILDTYPE'] = 'Release'
return android_profiling_helper.PrepareDeviceForPerf(device)
@classmethod
def GetCategories(cls, device):
perf_binary = cls._PrepareDevice(device)
return device.RunShellCommand('%s list' % perf_binary)
def StartTracing(self, _):
self._perf_instance = _PerfProfiler(self._device,
self._perf_binary,
self._categories)
def StopTracing(self):
if not self._perf_instance:
return
self._perf_instance.SignalAndWait()
@staticmethod
def _GetInteractivePerfCommand(perfhost_path, perf_profile, symfs_dir,
required_libs, kallsyms):
cmd = '%s report -n -i %s --symfs %s --kallsyms %s' % (
os.path.relpath(perfhost_path, '.'), perf_profile, symfs_dir, kallsyms)
for lib in required_libs:
lib = os.path.join(symfs_dir, lib[1:])
if not os.path.exists(lib):
continue
objdump_path = android_profiling_helper.GetToolchainBinaryPath(
lib, 'objdump')
if objdump_path:
cmd += ' --objdump %s' % os.path.relpath(objdump_path, '.')
break
return cmd
def PullTrace(self):
symfs_dir = os.path.join(tempfile.gettempdir(),
os.path.expandvars('$USER-perf-symfs'))
if not os.path.exists(symfs_dir):
os.makedirs(symfs_dir)
    # Download the recorded perf profile.
perf_profile = self._perf_instance.PullResult(symfs_dir)
required_libs = \
android_profiling_helper.GetRequiredLibrariesForPerfProfile(
perf_profile)
if not required_libs:
logging.warning('No libraries required by perf trace. Most likely there '
'are no samples in the trace.')
# Build a symfs with all the necessary libraries.
kallsyms = android_profiling_helper.CreateSymFs(self._device,
symfs_dir,
required_libs,
use_symlinks=False)
perfhost_path = support_binaries.FindPath(
android_profiling_helper.GetPerfhostName(), 'x86_64', 'linux')
ui.PrintMessage('\nNote: to view the profile in perf, run:')
ui.PrintMessage(' ' + self._GetInteractivePerfCommand(perfhost_path,
perf_profile, symfs_dir, required_libs, kallsyms))
# Convert the perf profile into JSON.
perf_script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'third_party', 'perf_to_tracing.py')
json_file_name = os.path.basename(perf_profile)
with open(os.devnull, 'w') as dev_null, \
open(json_file_name, 'w') as json_file:
cmd = [perfhost_path, 'script', '-s', perf_script_path, '-i',
perf_profile, '--symfs', symfs_dir, '--kallsyms', kallsyms]
if subprocess.call(cmd, stdout=json_file, stderr=dev_null):
logging.warning('Perf data to JSON conversion failed. The result will '
'not contain any perf samples. You can still view the '
'perf data manually as shown above.')
return None
return json_file_name
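# Usage sketch (illustrative, not part of the original module). Assuming a
# connected device object from pylib.android_commands and telemetry importable
# on sys.path, a profiling run might look like:
#
#   controller = PerfProfilerController(device, categories=['cpu-cycles'])
#   controller.StartTracing(None)
#   ...  # exercise the app under test
#   controller.StopTracing()
#   json_trace = controller.PullTrace()  # path to the converted JSON trace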
| bsd-3-clause |
manojhirway/ExistingImagesOnNFS | cinder/tests/unit/volume/drivers/netapp/eseries/fakes.py | 3 | 19127 | # Copyright (c) - 2015, Alex Meade
# Copyright (c) - 2015, Yogesh Kshirsagar
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp.eseries import utils
import cinder.volume.drivers.netapp.options as na_opts
def mock_netapp_lib(modules):
"""Inject fake netapp_lib module classes."""
netapp_lib = mock.Mock()
netapp_lib.api.rest.rest.WebserviceClient = mock.Mock()
for module in modules:
setattr(module, 'netapp_restclient', netapp_lib.api.rest.rest)
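# Usage sketch (illustrative; the exact driver module to patch is an
# assumption): inject the fake REST client before constructing the driver,
# e.g.
#
#   from cinder.volume.drivers.netapp.eseries import client as es_client
#   mock_netapp_lib([es_client])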
MULTIATTACH_HOST_GROUP = {
'clusterRef': '8500000060080E500023C7340036035F515B78FC',
'label': utils.MULTI_ATTACH_HOST_GROUP_NAME,
}
FOREIGN_HOST_GROUP = {
'clusterRef': '8500000060080E500023C7340036035F515B78FD',
'label': 'FOREIGN HOST GROUP',
}
STORAGE_POOL = {
'label': 'DDP',
'volumeGroupRef': 'fakevolgroupref',
'raidLevel': 'raidDiskPool',
}
VOLUME = {
'extremeProtection': False,
'pitBaseVolume': True,
'dssMaxSegmentSize': 131072,
'totalSizeInBytes': '1073741824',
'raidLevel': 'raid6',
'volumeRef': '0200000060080E500023BB34000003FB515C2293',
'listOfMappings': [],
'sectorOffset': '15',
'id': '0200000060080E500023BB34000003FB515C2293',
'wwn': '60080E500023BB3400001FC352D14CB2',
'capacity': '2147483648',
'mgmtClientAttribute': 0,
'label': 'CFDXJ67BLJH25DXCZFZD4NSF54',
'volumeFull': False,
'blkSize': 512,
'volumeCopyTarget': False,
'volumeGroupRef': '0400000060080E500023BB3400001F9F52CECC3F',
'preferredControllerId': '070000000000000000000001',
'currentManager': '070000000000000000000001',
'applicationTagOwned': False,
'status': 'optimal',
'segmentSize': 131072,
'volumeUse': 'standardVolume',
'action': 'none',
'preferredManager': '070000000000000000000001',
'volumeHandle': 15,
'offline': False,
'preReadRedundancyCheckEnabled': False,
'dssPreallocEnabled': False,
'name': 'bdm-vc-test-1',
'worldWideName': '60080E500023BB3400001FC352D14CB2',
'currentControllerId': '070000000000000000000001',
'protectionInformationCapable': False,
'mapped': False,
'reconPriority': 1,
'protectionType': 'type1Protection'
}
INITIATOR_NAME = 'iqn.1998-01.com.vmware:localhost-28a58148'
INITIATOR_NAME_2 = 'iqn.1998-01.com.vmware:localhost-28a58149'
INITIATOR_NAME_3 = 'iqn.1998-01.com.vmware:localhost-28a58150'
WWPN = '20130080E5322230'
WWPN_2 = '20230080E5322230'
FC_TARGET_WWPNS = [
'500a098280feeba5',
'500a098290feeba5',
'500a098190feeba5',
'500a098180feeba5'
]
FC_I_T_MAP = {
'20230080E5322230': [
'500a098280feeba5',
'500a098290feeba5'
],
'20130080E5322230': [
'500a098190feeba5',
'500a098180feeba5'
]
}
FC_FABRIC_MAP = {
'fabricB': {
'target_port_wwn_list': [
'500a098190feeba5',
'500a098180feeba5'
],
'initiator_port_wwn_list': [
'20130080E5322230'
]
},
'fabricA': {
'target_port_wwn_list': [
'500a098290feeba5',
'500a098280feeba5'
],
'initiator_port_wwn_list': [
'20230080E5322230'
]
}
}
HOST = {
'isSAControlled': False,
'confirmLUNMappingCreation': False,
'label': 'stlrx300s7-55',
'isLargeBlockFormatHost': False,
'clusterRef': '8500000060080E500023C7340036035F515B78FC',
'protectionInformationCapableAccessMethod': False,
'ports': [],
'hostRef': '8400000060080E500023C73400300381515BFBA3',
'hostTypeIndex': 6,
'hostSidePorts': [{
'label': 'NewStore',
'type': 'iscsi',
'address': INITIATOR_NAME}]
}
HOST_2 = {
'isSAControlled': False,
'confirmLUNMappingCreation': False,
'label': 'stlrx300s7-55',
'isLargeBlockFormatHost': False,
'clusterRef': utils.NULL_REF,
'protectionInformationCapableAccessMethod': False,
'ports': [],
'hostRef': '8400000060080E500023C73400300381515BFBA5',
'hostTypeIndex': 6,
'hostSidePorts': [{
'label': 'NewStore', 'type': 'iscsi',
'address': INITIATOR_NAME_2}]
}
# HOST_3 has all lun_ids in use.
HOST_3 = {
'isSAControlled': False,
'confirmLUNMappingCreation': False,
'label': 'stlrx300s7-55',
'isLargeBlockFormatHost': False,
'clusterRef': '8500000060080E500023C73400360351515B78FC',
'protectionInformationCapableAccessMethod': False,
'ports': [],
'hostRef': '8400000060080E501023C73400800381515BFBA5',
'hostTypeIndex': 6,
'hostSidePorts': [{
'label': 'NewStore', 'type': 'iscsi',
'address': INITIATOR_NAME_3}],
}
VOLUME_MAPPING = {
'lunMappingRef': '8800000000000000000000000000000000000000',
'lun': 0,
'ssid': 16384,
'perms': 15,
'volumeRef': VOLUME['volumeRef'],
'type': 'all',
'mapRef': HOST['hostRef']
}
# VOLUME_MAPPING_3 corresponding to HOST_3 has all lun_ids in use.
VOLUME_MAPPING_3 = {
'lunMappingRef': '8800000000000000000000000000000000000000',
'lun': range(255),
'ssid': 16384,
'perms': 15,
'volumeRef': VOLUME['volumeRef'],
'type': 'all',
'mapRef': HOST_3['hostRef'],
}
VOLUME_MAPPING_TO_MULTIATTACH_GROUP = copy.deepcopy(VOLUME_MAPPING)
VOLUME_MAPPING_TO_MULTIATTACH_GROUP.update(
{'mapRef': MULTIATTACH_HOST_GROUP['clusterRef']}
)
STORAGE_SYSTEM = {
'freePoolSpace': 11142431623168,
'driveCount': 24,
    'hostSparesUsed': 0,
    'id': '1fa6efb5-f07b-4de4-9f0e-52e5f7ff5d1b',
    'hotSpareSizeAsString': '0',
    'wwn': '60080E500023C73400000000515AF323',
    'parameters': {
        'minVolSize': 1048576,
        'maxSnapshotsPerBase': 16,
'maxDrives': 192,
'maxVolumes': 512,
'maxVolumesPerGroup': 256,
'maxMirrors': 0,
'maxMappingsPerVolume': 1,
'maxMappableLuns': 256,
'maxVolCopys': 511,
'maxSnapshots': 256
    },
    'hotSpareCount': 0,
'hostSpareCountInStandby': 0,
'status': 'needsattn',
'trayCount': 1,
'usedPoolSpaceAsString': '5313000380416',
'ip2': '10.63.165.216',
'ip1': '10.63.165.215',
'freePoolSpaceAsString': '11142431623168',
'types': 'SAS',
'name': 'stle2600-7_8',
'hotSpareSize': 0,
'usedPoolSpace': 5313000380416,
'driveTypes': ['sas'],
'unconfiguredSpaceByDriveType': {},
'unconfiguredSpaceAsStrings': '0',
'model': '2650',
'unconfiguredSpace': 0
}
SNAPSHOT_GROUP = {
'status': 'optimal',
'autoDeleteLimit': 0,
'maxRepositoryCapacity': '-65536',
'rollbackStatus': 'none',
'unusableRepositoryCapacity': '0',
'pitGroupRef':
'3300000060080E500023C7340000098D5294AC9A',
'clusterSize': 65536,
'label': 'C6JICISVHNG2TFZX4XB5ZWL7O',
'maxBaseCapacity': '476187142128128',
'repositoryVolume': '3600000060080E500023BB3400001FA952CEF12C',
'fullWarnThreshold': 99,
'repFullPolicy': 'purgepit',
'action': 'none',
'rollbackPriority': 'medium',
'creationPendingStatus': 'none',
'consistencyGroupRef': '0000000000000000000000000000000000000000',
'volumeHandle': 49153,
'consistencyGroup': False,
'baseVolume': '0200000060080E500023C734000009825294A534'
}
SNAPSHOT_IMAGE = {
'status': 'optimal',
'pitCapacity': '2147483648',
'pitTimestamp': '1389315375',
'pitGroupRef': '3300000060080E500023C7340000098D5294AC9A',
'creationMethod': 'user',
'repositoryCapacityUtilization': '2818048',
'activeCOW': True,
'isRollbackSource': False,
'pitRef': '3400000060080E500023BB3400631F335294A5A8',
'pitSequenceNumber': '19'
}
HARDWARE_INVENTORY = {
'iscsiPorts': [
{
'controllerId':
'070000000000000000000002',
'ipv4Enabled': True,
'ipv4Data': {
'ipv4Address': '0.0.0.0',
'ipv4AddressConfigMethod':
'configStatic',
'ipv4VlanId': {
'isEnabled': False,
'value': 0
},
'ipv4AddressData': {
'ipv4Address': '172.20.123.66',
'ipv4SubnetMask': '255.255.255.0',
'configState': 'configured',
'ipv4GatewayAddress': '0.0.0.0'
}
},
'tcpListenPort': 3260,
'interfaceRef': '2202040000000000000000000000000000000000',
'iqn': 'iqn.1992-01.com.lsi:2365.60080e500023c73400000000515af323'
}
],
'fibrePorts': [
{
"channel": 1,
"loopID": 126,
"speed": 800,
"hardAddress": 6,
"nodeName": "20020080E5322230",
"portName": "20130080E5322230",
"portId": "011700",
"topology": "fabric",
"part": "PM8032 ",
"revision": 8,
"chanMiswire": False,
"esmMiswire": False,
"linkStatus": "up",
"isDegraded": False,
"speedControl": "auto",
"maxSpeed": 800,
"speedNegError": False,
"reserved1": "000000000000000000000000",
"reserved2": "",
"ddsChannelState": 0,
"ddsStateReason": 0,
"ddsStateWho": 0,
"isLocal": True,
"channelPorts": [],
"currentInterfaceSpeed": "speed8gig",
"maximumInterfaceSpeed": "speed8gig",
"interfaceRef": "2202020000000000000000000000000000000000",
"physicalLocation": {
"trayRef": "0000000000000000000000000000000000000000",
"slot": 0,
"locationParent": {
"refType": "generic",
"controllerRef": None,
"symbolRef": "0000000000000000000000000000000000000000",
"typedReference": None
},
"locationPosition": 0
},
"isTrunkCapable": False,
"trunkMiswire": False,
"protectionInformationCapable": True,
"controllerId": "070000000000000000000002",
"interfaceId": "2202020000000000000000000000000000000000",
"addressId": "20130080E5322230",
"niceAddressId": "20:13:00:80:E5:32:22:30"
},
{
"channel": 2,
"loopID": 126,
"speed": 800,
"hardAddress": 7,
"nodeName": "20020080E5322230",
"portName": "20230080E5322230",
"portId": "011700",
"topology": "fabric",
"part": "PM8032 ",
"revision": 8,
"chanMiswire": False,
"esmMiswire": False,
"linkStatus": "up",
"isDegraded": False,
"speedControl": "auto",
"maxSpeed": 800,
"speedNegError": False,
"reserved1": "000000000000000000000000",
"reserved2": "",
"ddsChannelState": 0,
"ddsStateReason": 0,
"ddsStateWho": 0,
"isLocal": True,
"channelPorts": [],
"currentInterfaceSpeed": "speed8gig",
"maximumInterfaceSpeed": "speed8gig",
"interfaceRef": "2202030000000000000000000000000000000000",
"physicalLocation": {
"trayRef": "0000000000000000000000000000000000000000",
"slot": 0,
"locationParent": {
"refType": "generic",
"controllerRef": None,
"symbolRef": "0000000000000000000000000000000000000000",
"typedReference": None
},
"locationPosition": 0
},
"isTrunkCapable": False,
"trunkMiswire": False,
"protectionInformationCapable": True,
"controllerId": "070000000000000000000002",
"interfaceId": "2202030000000000000000000000000000000000",
"addressId": "20230080E5322230",
"niceAddressId": "20:23:00:80:E5:32:22:30"
},
]
}
VOLUME_COPY_JOB = {
"status": "complete",
"cloneCopy": True,
"pgRef": "3300000060080E500023C73400000ACA52D29454",
"volcopyHandle": 49160,
"idleTargetWriteProt": True,
"copyPriority": "priority2",
"volcopyRef": "1800000060080E500023C73400000ACF52D29466",
"worldWideName": "60080E500023C73400000ACF52D29466",
"copyCompleteTime": "0",
"sourceVolume": "3500000060080E500023C73400000ACE52D29462",
"currentManager": "070000000000000000000002",
"copyStartTime": "1389551671",
"reserved1": "00000000",
"targetVolume": "0200000060080E500023C73400000A8C52D10675",
}
FAKE_ENDPOINT_HTTP = 'http://host:80/endpoint'
FAKE_ENDPOINT_HTTPS = 'https://host:8443/endpoint'
FAKE_CLIENT_PARAMS = {
'scheme': 'http',
'host': '127.0.0.1',
'port': 8080,
'service_path': '/devmgr/vn',
'username': 'rw',
'password': 'rw',
}
def create_configuration_eseries():
config = conf.Configuration(None)
config.append_config_values(na_opts.netapp_connection_opts)
config.append_config_values(na_opts.netapp_transport_opts)
config.append_config_values(na_opts.netapp_basicauth_opts)
config.append_config_values(na_opts.netapp_provisioning_opts)
config.append_config_values(na_opts.netapp_eseries_opts)
config.netapp_storage_protocol = 'iscsi'
config.netapp_login = 'rw'
config.netapp_password = 'rw'
config.netapp_server_hostname = '127.0.0.1'
config.netapp_transport_type = 'http'
config.netapp_server_port = '8080'
config.netapp_storage_pools = 'DDP'
config.netapp_storage_family = 'eseries'
config.netapp_sa_password = 'saPass'
config.netapp_controller_ips = '10.11.12.13,10.11.12.14'
config.netapp_webservice_path = '/devmgr/v2'
config.netapp_enable_multiattach = False
return config
def deepcopy_return_value_method_decorator(fn):
"""Returns a deepcopy of the returned value of the wrapped function."""
def decorator(*args, **kwargs):
return copy.deepcopy(fn(*args, **kwargs))
return decorator
def deepcopy_return_value_class_decorator(cls):
"""Wraps 'non-protected' methods of a class with decorator.
Wraps all 'non-protected' methods of a class with the
deepcopy_return_value_method_decorator decorator.
"""
class NewClass(cls):
def __getattribute__(self, attr_name):
obj = super(NewClass, self).__getattribute__(attr_name)
if (hasattr(obj, '__call__') and not attr_name.startswith('_')
and not isinstance(obj, mock.Mock)):
return deepcopy_return_value_method_decorator(obj)
return obj
return NewClass
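# Usage sketch (illustrative): decorating a class makes every public method
# return defensive copies, so tests cannot mutate the shared fake data, e.g.
#
#   @deepcopy_return_value_class_decorator
#   class FakeClient(object):
#       def list_volumes(self):
#           return [VOLUME]
#
#   FakeClient().list_volumes()[0]['label'] = 'changed'  # VOLUME is untouched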
@deepcopy_return_value_class_decorator
class FakeEseriesClient(object):
def __init__(self, *args, **kwargs):
pass
def list_storage_pools(self):
return [STORAGE_POOL]
def register_storage_system(self, *args, **kwargs):
return {
'freePoolSpace': '17055871480319',
'driveCount': 24,
'wwn': '60080E500023C73400000000515AF323',
'id': '1',
'hotSpareSizeAsString': '0',
'hostSparesUsed': 0,
'types': '',
'hostSpareCountInStandby': 0,
'status': 'optimal',
'trayCount': 1,
'usedPoolSpaceAsString': '37452115456',
'ip2': '10.63.165.216',
'ip1': '10.63.165.215',
'freePoolSpaceAsString': '17055871480319',
'hotSpareCount': 0,
'hotSpareSize': '0',
'name': 'stle2600-7_8',
'usedPoolSpace': '37452115456',
'driveTypes': ['sas'],
'unconfiguredSpaceByDriveType': {},
'unconfiguredSpaceAsStrings': '0',
'model': '2650',
'unconfiguredSpace': '0'
}
def list_volumes(self):
return [VOLUME]
def delete_volume(self, vol):
pass
def create_host_group(self, name):
return MULTIATTACH_HOST_GROUP
def get_host_group(self, ref):
return MULTIATTACH_HOST_GROUP
def list_host_groups(self):
return [MULTIATTACH_HOST_GROUP]
def get_host_group_by_name(self, name, *args, **kwargs):
host_groups = self.list_host_groups()
return [host_group for host_group in host_groups
if host_group['label'] == name][0]
def set_host_group_for_host(self, *args, **kwargs):
pass
def create_host_with_ports(self, *args, **kwargs):
return HOST
def list_hosts(self):
return [HOST, HOST_2]
def get_host(self, *args, **kwargs):
return HOST
def create_volume_mapping(self, *args, **kwargs):
return VOLUME_MAPPING
def get_volume_mappings(self):
return [VOLUME_MAPPING]
def get_volume_mappings_for_volume(self, volume):
return [VOLUME_MAPPING]
def get_volume_mappings_for_host(self, host_ref):
return [VOLUME_MAPPING]
def get_volume_mappings_for_host_group(self, hg_ref):
return [VOLUME_MAPPING]
def delete_volume_mapping(self):
return
def move_volume_mapping_via_symbol(self, map_ref, to_ref, lun_id):
return {'lun': lun_id}
def list_storage_system(self):
return STORAGE_SYSTEM
def list_storage_systems(self):
return [STORAGE_SYSTEM]
def list_snapshot_groups(self):
return [SNAPSHOT_GROUP]
def list_snapshot_images(self):
return [SNAPSHOT_IMAGE]
def list_host_types(self):
return [
{
'id': '4',
'code': 'AIX',
'name': 'AIX',
'index': 4
},
{
'id': '5',
'code': 'IRX',
'name': 'IRX',
'index': 5
},
{
'id': '6',
'code': 'LnxALUA',
'name': 'LnxALUA',
'index': 6
}
]
def list_hardware_inventory(self):
return HARDWARE_INVENTORY
def create_volume_copy_job(self, *args, **kwargs):
return VOLUME_COPY_JOB
def list_vol_copy_job(self, *args, **kwargs):
return VOLUME_COPY_JOB
def delete_vol_copy_job(self, *args, **kwargs):
pass
def delete_snapshot_volume(self, *args, **kwargs):
pass
def list_target_wwpns(self, *args, **kwargs):
return [WWPN_2]
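# Usage sketch (illustrative): a test can pair the fake client with the
# helper configuration defined above, e.g.
#
#   config = create_configuration_eseries()
#   client = FakeEseriesClient()
#   pool = client.list_storage_pools()[0]
#   assert pool['label'] == config.netapp_storage_pools  # both are 'DDP'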
| apache-2.0 |
Dandandan/wikiprogramming | jsrepl/extern/python/unclosured/lib/python2.7/audiodev.py | 286 | 7597 | """Classes for manipulating audio devices (currently only for Sun and SGI)"""
from warnings import warnpy3k
warnpy3k("the audiodev module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
__all__ = ["error","AudioDev"]
class error(Exception):
pass
class Play_Audio_sgi:
# Private instance variables
## if 0: access frameratelist, nchannelslist, sampwidthlist, oldparams, \
## params, config, inited_outrate, inited_width, \
## inited_nchannels, port, converter, classinited: private
classinited = 0
frameratelist = nchannelslist = sampwidthlist = None
def initclass(self):
import AL
self.frameratelist = [
(48000, AL.RATE_48000),
(44100, AL.RATE_44100),
(32000, AL.RATE_32000),
(22050, AL.RATE_22050),
(16000, AL.RATE_16000),
(11025, AL.RATE_11025),
( 8000, AL.RATE_8000),
]
self.nchannelslist = [
(1, AL.MONO),
(2, AL.STEREO),
(4, AL.QUADRO),
]
self.sampwidthlist = [
(1, AL.SAMPLE_8),
(2, AL.SAMPLE_16),
(3, AL.SAMPLE_24),
]
self.classinited = 1
def __init__(self):
import al, AL
if not self.classinited:
self.initclass()
self.oldparams = []
self.params = [AL.OUTPUT_RATE, 0]
self.config = al.newconfig()
self.inited_outrate = 0
self.inited_width = 0
self.inited_nchannels = 0
self.converter = None
self.port = None
return
def __del__(self):
if self.port:
self.stop()
if self.oldparams:
import al, AL
al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
self.oldparams = []
def wait(self):
if not self.port:
return
import time
while self.port.getfilled() > 0:
time.sleep(0.1)
self.stop()
def stop(self):
if self.port:
self.port.closeport()
self.port = None
if self.oldparams:
import al, AL
al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
self.oldparams = []
def setoutrate(self, rate):
for (raw, cooked) in self.frameratelist:
if rate == raw:
self.params[1] = cooked
self.inited_outrate = 1
break
else:
raise error, 'bad output rate'
def setsampwidth(self, width):
for (raw, cooked) in self.sampwidthlist:
if width == raw:
self.config.setwidth(cooked)
self.inited_width = 1
break
else:
if width == 0:
import AL
self.inited_width = 0
self.config.setwidth(AL.SAMPLE_16)
self.converter = self.ulaw2lin
else:
raise error, 'bad sample width'
def setnchannels(self, nchannels):
for (raw, cooked) in self.nchannelslist:
if nchannels == raw:
self.config.setchannels(cooked)
self.inited_nchannels = 1
break
else:
raise error, 'bad # of channels'
def writeframes(self, data):
if not (self.inited_outrate and self.inited_nchannels):
raise error, 'params not specified'
if not self.port:
import al, AL
self.port = al.openport('Python', 'w', self.config)
self.oldparams = self.params[:]
al.getparams(AL.DEFAULT_DEVICE, self.oldparams)
al.setparams(AL.DEFAULT_DEVICE, self.params)
if self.converter:
data = self.converter(data)
self.port.writesamps(data)
def getfilled(self):
if self.port:
return self.port.getfilled()
else:
return 0
def getfillable(self):
if self.port:
return self.port.getfillable()
else:
return self.config.getqueuesize()
# private methods
## if 0: access *: private
def ulaw2lin(self, data):
import audioop
return audioop.ulaw2lin(data, 2)
class Play_Audio_sun:
## if 0: access outrate, sampwidth, nchannels, inited_outrate, inited_width, \
## inited_nchannels, converter: private
def __init__(self):
self.outrate = 0
self.sampwidth = 0
self.nchannels = 0
self.inited_outrate = 0
self.inited_width = 0
self.inited_nchannels = 0
self.converter = None
self.port = None
return
def __del__(self):
self.stop()
def setoutrate(self, rate):
self.outrate = rate
self.inited_outrate = 1
def setsampwidth(self, width):
self.sampwidth = width
self.inited_width = 1
def setnchannels(self, nchannels):
self.nchannels = nchannels
self.inited_nchannels = 1
def writeframes(self, data):
if not (self.inited_outrate and self.inited_width and self.inited_nchannels):
raise error, 'params not specified'
if not self.port:
import sunaudiodev, SUNAUDIODEV
self.port = sunaudiodev.open('w')
info = self.port.getinfo()
info.o_sample_rate = self.outrate
info.o_channels = self.nchannels
if self.sampwidth == 0:
info.o_precision = 8
self.o_encoding = SUNAUDIODEV.ENCODING_ULAW
# XXX Hack, hack -- leave defaults
else:
info.o_precision = 8 * self.sampwidth
info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR
self.port.setinfo(info)
if self.converter:
data = self.converter(data)
self.port.write(data)
def wait(self):
if not self.port:
return
self.port.drain()
self.stop()
def stop(self):
if self.port:
self.port.flush()
self.port.close()
self.port = None
def getfilled(self):
if self.port:
return self.port.obufcount()
else:
return 0
## # Nobody remembers what this method does, and it's broken. :-(
## def getfillable(self):
## return BUFFERSIZE - self.getfilled()
def AudioDev():
# Dynamically try to import and use a platform specific module.
try:
import al
except ImportError:
try:
import sunaudiodev
return Play_Audio_sun()
except ImportError:
try:
import Audio_mac
except ImportError:
raise error, 'no audio device'
else:
return Audio_mac.Play_Audio_mac()
else:
return Play_Audio_sgi()
def test(fn = None):
import sys
if sys.argv[1:]:
fn = sys.argv[1]
else:
fn = 'f:just samples:just.aif'
import aifc
af = aifc.open(fn, 'r')
print fn, af.getparams()
p = AudioDev()
p.setoutrate(af.getframerate())
p.setsampwidth(af.getsampwidth())
p.setnchannels(af.getnchannels())
BUFSIZ = af.getframerate()/af.getsampwidth()/af.getnchannels()
while 1:
data = af.readframes(BUFSIZ)
if not data: break
print len(data)
p.writeframes(data)
p.wait()
if __name__ == '__main__':
test()
| mit |
andrejb/cloudant_bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/packaging/zip.py | 61 | 1775 | """SCons.Tool.Packaging.zip
The zip SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/zip.py 5134 2010/08/16 23:02:40 bdeegan"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Zip']
bld.set_suffix('.zip')
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
return bld(env, target, source)
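# Usage sketch (illustrative): within an SConstruct this packager is normally
# reached through the generic Package builder, e.g.
#
#   env = Environment(tools=['default', 'packaging'])
#   env.Package(NAME='foo', VERSION='1.0', PACKAGETYPE='zip',
#               source=env.Install('/bin', 'foo'))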
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
ArthurGarnier/SickRage | sickbeard/history.py | 5 | 4481 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import datetime
import db
from sickbeard.common import FAILED, Quality, SNATCHED, SUBTITLED
from sickrage.helper.encoding import ss
from sickrage.show.History import History
def _logHistoryItem(action, showid, season, episode, quality, resource, provider, version=-1):
"""
Insert a history item in DB
:param action: action taken (snatch, download, etc)
:param showid: showid this entry is about
:param season: show season
:param episode: show episode
:param quality: media quality
:param resource: resource used
:param provider: provider used
:param version: tracked version of file (defaults to -1)
"""
logDate = datetime.datetime.today().strftime(History.date_format)
resource = ss(resource)
main_db_con = db.DBConnection()
main_db_con.action(
"INSERT INTO history (action, date, showid, season, episode, quality, resource, provider, version) VALUES (?,?,?,?,?,?,?,?,?)",
[action, logDate, showid, season, episode, quality, resource, provider, version])
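# Illustrative call (hypothetical values; the action is a composite of a
# status constant and a quality constant from sickbeard.common):
#   _logHistoryItem(Quality.compositeStatus(SNATCHED, Quality.HDTV),
#                   showid=12345, season=1, episode=2, quality=Quality.HDTV,
#                   resource='Show.S01E02.HDTV.x264', provider='example')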
def logSnatch(searchResult):
"""
Log history of snatch
:param searchResult: search result object
"""
for curEpObj in searchResult.episodes:
showid = int(curEpObj.show.indexerid)
season = int(curEpObj.season)
episode = int(curEpObj.episode)
quality = searchResult.quality
version = searchResult.version
providerClass = searchResult.provider
if providerClass is not None:
provider = providerClass.name
else:
provider = "unknown"
action = Quality.compositeStatus(SNATCHED, searchResult.quality)
resource = searchResult.name
_logHistoryItem(action, showid, season, episode, quality, resource, provider, version)
def logDownload(episode, filename, new_ep_quality, release_group=None, version=-1):
"""
Log history of download
:param episode: episode of show
:param filename: file on disk where the download is
:param new_ep_quality: Quality of download
:param release_group: Release group
:param version: Version of file (defaults to -1)
"""
showid = int(episode.show.indexerid)
season = int(episode.season)
epNum = int(episode.episode)
quality = new_ep_quality
# store the release group as the provider if possible
if release_group:
provider = release_group
else:
provider = -1
action = episode.status
_logHistoryItem(action, showid, season, epNum, quality, filename, provider, version)
def logSubtitle(showid, season, episode, status, subtitleResult):
"""
Log download of subtitle
:param showid: Showid of download
:param season: Show season
:param episode: Show episode
:param status: Status of download
:param subtitleResult: Result object
"""
resource = subtitleResult.language.opensubtitles
provider = subtitleResult.provider_name
status, quality = Quality.splitCompositeStatus(status)
action = Quality.compositeStatus(SUBTITLED, quality)
_logHistoryItem(action, showid, season, episode, quality, resource, provider)
def logFailed(epObj, release, provider=None):
"""
Log a failed download
:param epObj: Episode object
:param release: Release group
:param provider: Provider used for snatch
"""
showid = int(epObj.show.indexerid)
season = int(epObj.season)
epNum = int(epObj.episode)
status, quality = Quality.splitCompositeStatus(epObj.status)
action = Quality.compositeStatus(FAILED, quality)
_logHistoryItem(action, showid, season, epNum, quality, release, provider)
| gpl-3.0 |
tanzquotient/tq_website | cms_plugins/migrations/0001_squashed_0008_upcomingeventsandcoursespluginmodel.py | 2 | 4898 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-15 11:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.image
class Migration(migrations.Migration):
replaces = [('cms_plugins', '0001_initial'), ('cms_plugins', '0002_thumbnailpluginmodel'), ('cms_plugins', '0003_auto_20160905_1307'), ('cms_plugins', '0004_auto_20170203_1940'), ('cms_plugins', '0005_countdownpluginmodel'), ('cms_plugins', '0006_countdownpluginmodel_text'), ('cms_plugins', '0007_countdownpluginmodel_finish_text'), ('cms_plugins', '0008_upcomingeventsandcoursespluginmodel')]
initial = True
dependencies = [
('cms', '0001_initial'),
('cms', '0016_auto_20160608_1535'),
('filer', '0001_initial'),
('djangocms_link', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ButtonPluginModel',
fields=[
('link_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='djangocms_link.Link')),
('emphasize', models.BooleanField(default=False, help_text='If this button should be visually emphasized.')),
],
options={
'abstract': False,
},
bases=('djangocms_link.link',),
),
migrations.CreateModel(
name='PageTitlePluginModel',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cms_plugins_pagetitlepluginmodel', serialize=False, to='cms.CMSPlugin')),
('title', models.CharField(blank=True, help_text="The title to be displayed. Leave empty to display the page's title.", max_length=30, null=True)),
('subtitle', models.CharField(blank=True, help_text='The subtitle to be displayed.', max_length=50, null=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='ThumbnailPluginModel',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cms_plugins_thumbnailpluginmodel', serialize=False, to='cms.CMSPlugin')),
('image', filer.fields.image.FilerImageField(blank=True, help_text='Image to show thumbnail for.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.FILER_IMAGE_MODEL)),
('crop', models.BooleanField(default=False, help_text='If this thumbnail should be cropped to fit given size.')),
('url', models.URLField(blank=True, help_text='URL to display on image click.', max_length=500, null=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='CountdownPluginModel',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cms_plugins_countdownpluginmodel', serialize=False, to='cms.CMSPlugin')),
('finish_datetime', models.DateTimeField(help_text='Countdown finish date and time.')),
('hide', models.BooleanField(default=True, help_text='Hide Countdown after finished.')),
('text', models.TextField(blank=True, max_length=255, null=True)),
('finish_text', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='UpcomingEventsAndCoursesPluginModel',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cms_plugins_upcomingeventsandcoursespluginmodel', serialize=False, to='cms.CMSPlugin')),
('delta_days', models.IntegerField(blank=True, help_text='Events and courses within the time delta (in days) from now on are shown.Leave empty to make no restrictions.', null=True)),
('max_displayed', models.IntegerField(blank=True, help_text='Maximum number of items to be displayed. Leave empty to make no restrictions.', null=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| gpl-2.0 |
ShuboshaKuro/SimpleGameEngine | Test.py | 1 | 1251 | import numpy as np
import os
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# has to change whenever noise_width and noise_height change in the PerlinNoise.hpp file
DIMENSION1 = 200
DIMENSION2 = 200
# resolve the input file relative to this script's directory
path = os.path.dirname(os.path.realpath(__file__))
FILENAME = os.path.join(path, "input0.txt")
if __name__ == '__main__':
    with open(FILENAME, 'r') as noise_file:
        noise = np.fromstring(noise_file.read(), sep=" ",
                              dtype=float).reshape(DIMENSION2, DIMENSION1)
# Build a grid by the 2 dimensions
Xr = np.arange(DIMENSION1)
Yr = np.arange(DIMENSION2)
X, Y = np.meshgrid(Xr, Yr)
# Build a figure with 2 subplots, the first is 3D
fig = plt.figure()
fig.suptitle("3D and 2D heighmap")
colormap = 'coolwarm'
ax = fig.add_subplot(2, 1, 1, projection='3d')
surf = ax.plot_surface(X, Y, noise, rstride=1, cstride=1, cmap=colormap, linewidth=0, antialiased=False)
ax2 = fig.add_subplot(2, 1, 2)
im = ax2.imshow(noise, cmap=colormap, interpolation='nearest')
# swap the Y axis so it aligns with the 3D plot
ax2.invert_yaxis()
# add an explanatory colour bar
plt.colorbar(im, orientation='horizontal')
# Show the image
plt.show()
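# To try this script without running the engine first, a compatible dummy
# input file can be generated (illustrative):
#   np.savetxt(FILENAME, np.random.rand(DIMENSION2, DIMENSION1))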
| mit |
Southpaw-TACTIC/Team | src/python/Lib/encodings/iso8859_11.py | 93 | 12898 | """ Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-11',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
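# Usage sketch (illustrative; in the stdlib this codec is found automatically
# via the 'encodings' package search path):
#   u'\u0e01'.encode('iso8859-11')   # -> '\xa1' (THAI CHARACTER KO KAI)
#   '\xa1'.decode('iso8859-11')      # -> u'\u0e01'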
| epl-1.0 |
barbarubra/Don-t-know-What-i-m-doing. | python-build/python-libs/gdata/samples/oauth/oauth_on_appengine/appengine_utilities/cache.py | 129 | 9763 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# main python imports
import datetime
import pickle
import random
import __main__
# google appengine import
from google.appengine.ext import db
from google.appengine.api import memcache
# settings
DEFAULT_TIMEOUT = 3600 # cache expires after one hour (3600 sec)
CLEAN_CHECK_PERCENT = 50 # 50% of all requests will clean the database
MAX_HITS_TO_CLEAN = 100 # the maximum number of cache hits to clean per attempt
class _AppEngineUtilities_Cache(db.Model):
# It's up to the application to determine the format of their keys
cachekey = db.StringProperty()
createTime = db.DateTimeProperty(auto_now_add=True)
timeout = db.DateTimeProperty()
value = db.BlobProperty()
class Cache(object):
"""
Cache is used for storing pregenerated output and/or objects in the Big
Table datastore to minimize the amount of queries needed for page
displays. The idea is that complex queries that generate the same
results really should only be run once. Cache can be used to store
pregenerated value made from queries (or other calls such as
urlFetch()), or the query objects themselves.
"""
def __init__(self, clean_check_percent = CLEAN_CHECK_PERCENT,
max_hits_to_clean = MAX_HITS_TO_CLEAN,
default_timeout = DEFAULT_TIMEOUT):
"""
Initializer
Args:
clean_check_percent: how often cache initialization should
run the cache cleanup
max_hits_to_clean: maximum number of stale hits to clean
default_timeout: default length a cache item is good for
"""
self.clean_check_percent = clean_check_percent
self.max_hits_to_clean = max_hits_to_clean
self.default_timeout = default_timeout
if random.randint(1, 100) < self.clean_check_percent:
self._clean_cache()
if 'AEU_Events' in __main__.__dict__:
__main__.AEU_Events.fire_event('cacheInitialized')
def _clean_cache(self):
"""
_clean_cache is a routine that is run to find and delete cache
items that are old. This helps keep the size of your over all
datastore down.
"""
query = _AppEngineUtilities_Cache.all()
query.filter('timeout < ', datetime.datetime.now())
results = query.fetch(self.max_hits_to_clean)
db.delete(results)
#for result in results:
# result.delete()
def _validate_key(self, key):
if key == None:
raise KeyError
def _validate_value(self, value):
if value == None:
raise ValueError
def _validate_timeout(self, timeout):
if timeout == None:
            timeout = datetime.datetime.now() + \
                datetime.timedelta(seconds=self.default_timeout)
if type(timeout) == type(1):
timeout = datetime.datetime.now() + \
datetime.timedelta(seconds = timeout)
if type(timeout) != datetime.datetime:
raise TypeError
if timeout < datetime.datetime.now():
raise ValueError
return timeout
def add(self, key = None, value = None, timeout = None):
"""
add adds an entry to the cache, if one does not already
exist.
"""
self._validate_key(key)
self._validate_value(value)
timeout = self._validate_timeout(timeout)
if key in self:
raise KeyError
cacheEntry = _AppEngineUtilities_Cache()
cacheEntry.cachekey = key
cacheEntry.value = pickle.dumps(value)
cacheEntry.timeout = timeout
# try to put the entry, if it fails silently pass
# failures may happen due to timeouts, the datastore being read
# only for maintenance or other applications. However, cache
# not being able to write to the datastore should not
# break the application
try:
cacheEntry.put()
except:
pass
memcache_timeout = timeout - datetime.datetime.now()
memcache.set('cache-'+key, value, int(memcache_timeout.seconds))
if 'AEU_Events' in __main__.__dict__:
__main__.AEU_Events.fire_event('cacheAdded')
def set(self, key = None, value = None, timeout = None):
"""
add adds an entry to the cache, overwriting an existing value
if one already exists.
"""
self._validate_key(key)
self._validate_value(value)
timeout = self._validate_timeout(timeout)
cacheEntry = self._read(key)
if not cacheEntry:
cacheEntry = _AppEngineUtilities_Cache()
cacheEntry.cachekey = key
cacheEntry.value = pickle.dumps(value)
cacheEntry.timeout = timeout
try:
cacheEntry.put()
except:
pass
memcache_timeout = timeout - datetime.datetime.now()
memcache.set('cache-'+key, value, int(memcache_timeout.seconds))
if 'AEU_Events' in __main__.__dict__:
__main__.AEU_Events.fire_event('cacheSet')
def _read(self, key = None):
"""
_read returns a cache object determined by the key. It's set
to private because it returns a db.Model object, and also
does not handle the unpickling of objects making it not the
best candidate for use. The special method __getitem__ is the
preferred access method for cache data.
"""
query = _AppEngineUtilities_Cache.all()
query.filter('cachekey', key)
query.filter('timeout > ', datetime.datetime.now())
results = query.fetch(1)
        if len(results) == 0:
return None
        if 'AEU_Events' in __main__.__dict__:
            __main__.AEU_Events.fire_event('cacheReadFromDatastore')
            __main__.AEU_Events.fire_event('cacheRead')
        return results[0]
def delete(self, key = None):
"""
Deletes a cache object determined by the key.
"""
memcache.delete('cache-'+key)
result = self._read(key)
if result:
if 'AEU_Events' in __main__.__dict__:
__main__.AEU_Events.fire_event('cacheDeleted')
result.delete()
def get(self, key):
"""
get is used to return the cache value associated with the key passed.
"""
mc = memcache.get('cache-'+key)
if mc:
if 'AEU_Events' in __main__.__dict__:
__main__.AEU_Events.fire_event('cacheReadFromMemcache')
if 'AEU_Events' in __main__.__dict__:
__main__.AEU_Events.fire_event('cacheRead')
return mc
result = self._read(key)
if result:
timeout = result.timeout - datetime.datetime.now()
# print timeout.seconds
memcache.set('cache-'+key, pickle.loads(result.value),
int(timeout.seconds))
return pickle.loads(result.value)
else:
raise KeyError
def get_many(self, keys):
"""
Returns a dict mapping each key in keys to its value. If the given
key is missing, it will be missing from the response dict.
"""
        result = {}
        for key in keys:
            try:
                result[key] = self.get(key)
            except KeyError:
                # missing keys are omitted from the response dict
                pass
        return result
def __getitem__(self, key):
"""
__getitem__ is necessary for this object to emulate a container.
"""
return self.get(key)
def __setitem__(self, key, value):
"""
__setitem__ is necessary for this object to emulate a container.
"""
return self.set(key, value)
def __delitem__(self, key):
"""
Implement the 'del' keyword
"""
return self.delete(key)
def __contains__(self, key):
"""
Implements "in" operator
"""
try:
r = self.__getitem__(key)
except KeyError:
return False
return True
def has_key(self, keyname):
"""
Equivalent to k in a, use that form in new code
"""
return self.__contains__(keyname)
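# Usage sketch (illustrative, assuming it runs inside a configured App Engine
# environment where the datastore and memcache services are available):
#
#   cache = Cache()
#   cache['greeting'] = 'hello'   # same as cache.set('greeting', 'hello')
#   value = cache['greeting']     # served from memcache when possible
#   'greeting' in cache           # -> True
#   del cache['greeting']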
| apache-2.0 |
OCA/account-financial-tools | account_journal_lock_date/tests/test_journal_lock_date.py | 1 | 8511 | # Copyright 2017 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from datetime import date, timedelta
from odoo import tools
from odoo.modules import get_module_resource
from odoo.tests import common
from ..exceptions import JournalLockDateError
class TestJournalLockDate(common.TransactionCase):
def setUp(self):
super(TestJournalLockDate, self).setUp()
tools.convert_file(
self.cr,
"account",
get_module_resource("account", "test", "account_minimal_test.xml"),
{},
"init",
False,
"test",
)
self.account_move_obj = self.env["account.move"]
self.account_move_line_obj = self.env["account.move.line"]
self.company_id = self.ref("base.main_company")
self.partner = self.browse_ref("base.res_partner_12")
self.account = self.browse_ref("account.a_recv")
self.account2 = self.browse_ref("account.a_expense")
self.journal = self.browse_ref("account.bank_journal")
    def test_journal_lock_date(self):
        # Drop admin and Adviser rights so this user counts as a
        # non-adviser for the journal lock date checks.
        self.env.user.write({"groups_id": [(3, self.ref("base.group_system"))]})
self.env.user.write(
{"groups_id": [(3, self.ref("account.group_account_manager"))]}
)
self.assertFalse(self.env.user.has_group("account.group_account_manager"))
# create a move and post it
move = self.account_move_obj.create(
{
"date": date.today(),
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
move.post()
# lock journal, set 'Lock Date for Non-Advisers'
self.journal.period_lock_date = date.today() + timedelta(days=2)
# Test that the move cannot be created, written, or cancelled
with self.assertRaises(JournalLockDateError):
self.account_move_obj.create(
{
"date": date.today(),
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
with self.assertRaises(JournalLockDateError):
move.write({"name": "TEST"})
        # allow cancelling the posted move
self.journal.update_posted = True
with self.assertRaises(JournalLockDateError):
move.button_cancel()
# create a move after the 'Lock Date for Non-Advisers' and post it
move3 = self.account_move_obj.create(
{
"date": self.journal.period_lock_date + timedelta(days=3),
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
move3.post()
def test_journal_lock_date_adviser(self):
""" The journal lock date is ignored for Advisers """
self.env.user.write(
{"groups_id": [(4, self.ref("account.group_account_manager"))]}
)
self.assertTrue(self.env.user.has_group("account.group_account_manager"))
# create a move and post it
move = self.account_move_obj.create(
{
"date": date.today(),
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
move.post()
# lock journal. Set 'Lock Date'
self.journal.fiscalyear_lock_date = date.today() + timedelta(days=2)
# lock journal. Set 'Lock Date for Non-Advisers'
self.journal.period_lock_date = date.today() + timedelta(days=4)
# Advisers cannot create, write, or cancel moves before 'Lock Date'
with self.assertRaises(JournalLockDateError):
self.account_move_obj.create(
{
"date": date.today(),
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
with self.assertRaises(JournalLockDateError):
move.write({"name": "TEST"})
        # allow cancelling the posted move
self.journal.update_posted = True
with self.assertRaises(JournalLockDateError):
move.button_cancel()
        # Advisers can create moves dated after the 'Lock Date', even when
        # that date falls on or before the 'Lock Date for Non-Advisers'
        # (self.journal.period_lock_date)
move3 = self.account_move_obj.create(
{
"date": self.journal.period_lock_date,
"journal_id": self.journal.id,
"line_ids": [
(
0,
0,
{
"account_id": self.account.id,
"credit": 1000.0,
"name": "Credit line",
},
),
(
0,
0,
{
"account_id": self.account2.id,
"debit": 1000.0,
"name": "Debit line",
},
),
],
}
)
move3.post()
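    # The tests above build the same balanced two-line move dict five times.
    # A shared helper along these lines (a sketch, not part of the original
    # module) would remove that duplication:
    def _create_move(self, date):
        """Create a balanced two-line move on self.journal at ``date``."""
        return self.account_move_obj.create(
            {
                "date": date,
                "journal_id": self.journal.id,
                "line_ids": [
                    (0, 0, {
                        "account_id": self.account.id,
                        "credit": 1000.0,
                        "name": "Credit line",
                    }),
                    (0, 0, {
                        "account_id": self.account2.id,
                        "debit": 1000.0,
                        "name": "Debit line",
                    }),
                ],
            }
        )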
| agpl-3.0 |
zenefits/sentry | src/sentry/south_migrations/0277_auto__add_commitfilechange__add_unique_commitfilechange_commit_filenam.py | 4 | 64322 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CommitFileChange'
db.create_table('sentry_commitfilechange', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('organization_id', self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(db_index=True)),
('commit', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Commit'])),
('filename', self.gf('django.db.models.fields.CharField')(max_length=255)),
('type', self.gf('django.db.models.fields.CharField')(max_length=1)),
))
db.send_create_signal('sentry', ['CommitFileChange'])
# Adding unique constraint on 'CommitFileChange', fields ['commit', 'filename']
db.create_unique('sentry_commitfilechange', ['commit_id', 'filename'])
# Adding field 'Repository.url'
db.add_column('sentry_repository', 'url',
self.gf('django.db.models.fields.URLField')(max_length=200, null=True),
keep_default=False)
# Adding field 'Repository.provider'
db.add_column('sentry_repository', 'provider',
self.gf('django.db.models.fields.CharField')(max_length=64, null=True),
keep_default=False)
# Adding field 'Repository.external_id'
db.add_column('sentry_repository', 'external_id',
self.gf('django.db.models.fields.CharField')(max_length=64, null=True),
keep_default=False)
# Adding field 'Repository.config'
db.add_column('sentry_repository', 'config',
self.gf('jsonfield.fields.JSONField')(default={}),
keep_default=False)
# Adding field 'Repository.status'
db.add_column('sentry_repository', 'status',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0, db_index=True),
keep_default=False)
# Adding unique constraint on 'Repository', fields ['organization_id', 'provider', 'external_id']
db.create_unique('sentry_repository', ['organization_id', 'provider', 'external_id'])
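        # To apply or revert just this migration with South (an illustrative
        # invocation, assuming a standard Django manage.py entry point and
        # that the preceding migration is numbered 0276):
        #   python manage.py migrate sentry 0277
        #   python manage.py migrate sentry 0276   # revert via backwards()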
def backwards(self, orm):
# Removing unique constraint on 'Repository', fields ['organization_id', 'provider', 'external_id']
db.delete_unique('sentry_repository', ['organization_id', 'provider', 'external_id'])
# Removing unique constraint on 'CommitFileChange', fields ['commit', 'filename']
db.delete_unique('sentry_commitfilechange', ['commit_id', 'filename'])
# Deleting model 'CommitFileChange'
db.delete_table('sentry_commitfilechange')
# Deleting field 'Repository.url'
db.delete_column('sentry_repository', 'url')
# Deleting field 'Repository.provider'
db.delete_column('sentry_repository', 'provider')
# Deleting field 'Repository.external_id'
db.delete_column('sentry_repository', 'external_id')
# Deleting field 'Repository.config'
db.delete_column('sentry_repository', 'config')
# Deleting field 'Repository.status'
db.delete_column('sentry_repository', 'status')
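    # For reference, the frozen ORM entry for 'sentry.commitfilechange' below
    # corresponds roughly to this declarative model (a sketch inferred from
    # this migration, not the actual sentry.models source):
    #
    #   class CommitFileChange(Model):
    #       organization_id = BoundedPositiveIntegerField(db_index=True)
    #       commit = FlexibleForeignKey('sentry.Commit')
    #       filename = models.CharField(max_length=255)
    #       type = models.CharField(max_length=1)
    #
    #       class Meta:
    #           app_label = 'sentry'
    #           db_table = 'sentry_commitfilechange'
    #           unique_together = (('commit', 'filename'),)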
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True'}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 11, 29, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'),)", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'nWSQmbINKkiwvRzlFaq4iWFfAr22O7g3'", 'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry'] | bsd-3-clause |
janhahne/nest-simulator | extras/ConnPlotter/examples/simple.py | 20 | 3445 | # -*- coding: utf-8 -*-
#
# simple.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
Simple example model.
"""
def simple():
"""
Build lists representing a simple network model.
Returns:
layerList, connectList, modelList
"""
def modCopy(orig, diff):
"""Create copy of dict orig, update with diff, return."""
assert (isinstance(orig, dict))
assert (isinstance(diff, dict))
tmp = orig.copy()
tmp.update(diff)
return tmp
N = 40
modelList = [('iaf_psc_alpha', m, {}) for m in ['E', 'I']]
layerList = [('IG', 'poisson_generator', [N, N], [1., 1.]),
('RG_E', 'E', [N, N], [1., 1.]),
('RG_I', 'I', [N, N], [1., 1.])]
common_connspec = {'rule': 'pairwise_bernoulli'}
common_synspec = {'synapse_model': 'static_synapse',
'delay': 1.0}
connectList = [
('IG', 'RG_E',
modCopy(common_connspec, {'mask': {'circular': {'radius': 0.2}},
'p': 0.8}),
modCopy(common_synspec, {'weight': 2.0})),
('IG', 'RG_I',
modCopy(common_connspec, {'mask': {'circular': {'radius': 0.3}},
'p': 0.4}),
modCopy(common_synspec, {'weight': 2.0})),
('RG_E', 'RG_E',
modCopy(common_connspec, {'mask': {'rectangular':
{'lower_left': [-0.4, -0.2],
'upper_right': [0.4, 0.2]}},
'p': 1.0}),
modCopy(common_synspec, {'weight': 2.0})),
('RG_E', 'RG_E',
modCopy(common_connspec, {'mask': {'rectangular':
{'lower_left': [-0.2, -0.4],
'upper_right': [0.2, 0.4]}},
'p': 1.0}),
modCopy(common_synspec, {'weight': 2.0})),
('RG_E', 'RG_I',
modCopy(common_connspec, {'mask': {'circular': {'radius': 0.5}},
'p': 'nest.spatial_distributions.gaussian(nest.spatial.distance, std=0.1)'}),
modCopy(common_synspec, {'weight': 5.0})),
('RG_I', 'RG_E',
modCopy(common_connspec, {'mask': {'circular': {'radius': 0.25}},
'p': 'nest.spatial_distributions.gaussian(nest.spatial.distance, std=0.2)'}),
modCopy(common_synspec, {'weight': -3.0})),
('RG_I', 'RG_I',
modCopy(common_connspec, {'mask': {'circular': {'radius': 1.0}},
'p': 0.5}),
modCopy(common_synspec, {'weight': -0.5}))
]
return layerList, connectList, modelList
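# Editor's note: a minimal usage sketch, not part of the original example.
# It only inspects the lists built by simple() and makes no assumptions
# about the ConnPlotter API itself.
if __name__ == '__main__':
    layers, connections, models = simple()
    print('layers:', [entry[0] for entry in layers])
    print('number of connections:', len(connections))
    print('neuron models:', [name for _, name, _ in models])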
| gpl-2.0 |
michigraber/scikit-learn | sklearn/cluster/dbscan_.py | 106 | 11120 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None,
random_state=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if random_state is not None:
warnings.warn("The parameter random_state is deprecated in 0.16 "
"and will be removed in version 0.18. "
"DBSCAN is deterministic except for rare border cases.",
category=DeprecationWarning)
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
# neighborhood of point i; while true, this is useless information).
if metric == 'precomputed':
D = pairwise_distances(X, metric=metric)
neighborhoods = np.empty(X.shape[0], dtype=object)
neighborhoods[:] = [np.where(x <= eps)[0] for x in D]
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
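# Editor's sketch (not part of the original module): a doctest-style
# illustration of the functional API defined above; the sample data is
# invented. The two nearby points become core samples of cluster 0 and the
# distant point is labeled noise (-1).
#
#   >>> import numpy as np
#   >>> X = np.array([[1.0, 1.1], [1.2, 0.9], [10.0, 10.0]])
#   >>> core_samples, labels = dbscan(X, eps=0.5, min_samples=2)
#   >>> labels
#   array([ 0,  0, -1])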
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, random_state=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.random_state = random_state
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
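# Editor's note: a small, self-contained demo of the estimator API above,
# added for illustration; the sample data is invented.
if __name__ == '__main__':
    import numpy as np
    X = np.array([[1.0, 1.1], [1.2, 0.9], [10.0, 10.0]])
    db = DBSCAN(eps=0.5, min_samples=2).fit(X)
    print(db.core_sample_indices_)  # expected: [0 1]
    print(db.labels_)               # expected: [ 0  0 -1]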
| bsd-3-clause |
Tao-Ma/gpdb | gpMgmt/bin/ext/yaml/representer.py | 120 | 17705 |
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from error import *
from nodes import *
import datetime
try:
set
except NameError:
from sets import Set as set
import sys, copy_reg, types
class RepresenterError(YAMLError):
pass
class BaseRepresenter(object):
yaml_representers = {}
yaml_multi_representers = {}
def __init__(self, default_style=None, default_flow_style=None):
self.default_style = default_style
self.default_flow_style = default_flow_style
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent(self, data):
node = self.represent_data(data)
self.serialize(node)
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def get_classobj_bases(self, cls):
bases = [cls]
for base in cls.__bases__:
bases.extend(self.get_classobj_bases(base))
return bases
def represent_data(self, data):
if self.ignore_aliases(data):
self.alias_key = None
else:
self.alias_key = id(data)
if self.alias_key is not None:
if self.alias_key in self.represented_objects:
node = self.represented_objects[self.alias_key]
#if node is None:
# raise RepresenterError("recursive objects are not allowed: %r" % data)
return node
#self.represented_objects[alias_key] = None
self.object_keeper.append(data)
data_types = type(data).__mro__
if type(data) is types.InstanceType:
data_types = self.get_classobj_bases(data.__class__)+list(data_types)
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
for data_type in data_types:
if data_type in self.yaml_multi_representers:
node = self.yaml_multi_representers[data_type](self, data)
break
else:
if None in self.yaml_multi_representers:
node = self.yaml_multi_representers[None](self, data)
elif None in self.yaml_representers:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, unicode(data))
#if alias_key is not None:
# self.represented_objects[alias_key] = node
return node
def add_representer(cls, data_type, representer):
if not 'yaml_representers' in cls.__dict__:
cls.yaml_representers = cls.yaml_representers.copy()
cls.yaml_representers[data_type] = representer
add_representer = classmethod(add_representer)
def add_multi_representer(cls, data_type, representer):
if not 'yaml_multi_representers' in cls.__dict__:
cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
cls.yaml_multi_representers[data_type] = representer
add_multi_representer = classmethod(add_multi_representer)
def represent_scalar(self, tag, value, style=None):
if style is None:
style = self.default_style
node = ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
def represent_sequence(self, tag, sequence, flow_style=None):
value = []
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_mapping(self, tag, mapping, flow_style=None):
value = []
node = MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
mapping.sort()
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def ignore_aliases(self, data):
return False
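# Editor's sketch (added for illustration): registering a custom representer
# through the classmethod API above, as typically done via the top-level
# yaml.add_representer() helper. Point and point_representer are invented
# example names.
#
#   >>> import yaml
#   >>> class Point(object):
#   ...     def __init__(self, x, y):
#   ...         self.x, self.y = x, y
#   >>> def point_representer(dumper, data):
#   ...     return dumper.represent_mapping(u'!Point', {'x': data.x, 'y': data.y})
#   >>> yaml.add_representer(Point, point_representer)
#   >>> print yaml.dump(Point(1, 2))
#   !Point {x: 1, y: 2}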
class SafeRepresenter(BaseRepresenter):
def ignore_aliases(self, data):
if data in [None, ()]:
return True
if isinstance(data, (str, unicode, bool, int, float)):
return True
def represent_none(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:null',
u'null')
def represent_str(self, data):
tag = None
style = None
try:
data = unicode(data, 'ascii')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
try:
data = unicode(data, 'utf-8')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
data = data.encode('base64')
tag = u'tag:yaml.org,2002:binary'
style = '|'
return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:str', data)
def represent_bool(self, data):
if data:
value = u'true'
else:
value = u'false'
return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
def represent_int(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
def represent_long(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
inf_value = 1e300
while repr(inf_value) != repr(inf_value*inf_value):
inf_value *= inf_value
def represent_float(self, data):
if data != data or (data == 0.0 and data == 1.0):
value = u'.nan'
elif data == self.inf_value:
value = u'.inf'
elif data == -self.inf_value:
value = u'-.inf'
else:
value = unicode(repr(data)).lower()
# Note that in some cases `repr(data)` represents a float number
# without the decimal parts. For instance:
# >>> repr(1e17)
# '1e17'
# Unfortunately, this is not a valid float representation according
# to the definition of the `!!float` tag. We fix this by adding
# '.0' before the 'e' symbol.
if u'.' not in value and u'e' in value:
value = value.replace(u'e', u'.0e', 1)
return self.represent_scalar(u'tag:yaml.org,2002:float', value)
def represent_list(self, data):
#pairs = (len(data) > 0 and isinstance(data, list))
#if pairs:
# for item in data:
# if not isinstance(item, tuple) or len(item) != 2:
# pairs = False
# break
#if not pairs:
return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
#value = []
#for item_key, item_value in data:
# value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
# [(item_key, item_value)]))
#return SequenceNode(u'tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
def represent_set(self, data):
value = {}
for key in data:
value[key] = None
return self.represent_mapping(u'tag:yaml.org,2002:set', value)
def represent_date(self, data):
value = unicode(data.isoformat())
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
value = unicode(data.isoformat(' '))
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
raise RepresenterError("cannot represent an object: %s" % data)
SafeRepresenter.add_representer(type(None),
SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
SafeRepresenter.represent_str)
SafeRepresenter.add_representer(unicode,
SafeRepresenter.represent_unicode)
SafeRepresenter.add_representer(bool,
SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
SafeRepresenter.represent_int)
SafeRepresenter.add_representer(long,
SafeRepresenter.represent_long)
SafeRepresenter.add_representer(float,
SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
def represent_str(self, data):
tag = None
style = None
try:
data = unicode(data, 'ascii')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
try:
data = unicode(data, 'utf-8')
tag = u'tag:yaml.org,2002:python/str'
except UnicodeDecodeError:
data = data.encode('base64')
tag = u'tag:yaml.org,2002:binary'
style = '|'
return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
tag = None
try:
data.encode('ascii')
tag = u'tag:yaml.org,2002:python/unicode'
except UnicodeEncodeError:
tag = u'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data)
def represent_long(self, data):
tag = u'tag:yaml.org,2002:int'
if int(data) is not data:
tag = u'tag:yaml.org,2002:python/long'
return self.represent_scalar(tag, unicode(data))
def represent_complex(self, data):
if data.imag == 0.0:
data = u'%r' % data.real
elif data.real == 0.0:
data = u'%rj' % data.imag
elif data.imag > 0:
data = u'%r+%rj' % (data.real, data.imag)
else:
data = u'%r%rj' % (data.real, data.imag)
return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
def represent_tuple(self, data):
return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
def represent_name(self, data):
name = u'%s.%s' % (data.__module__, data.__name__)
return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
def represent_module(self, data):
return self.represent_scalar(
u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
def represent_instance(self, data):
# For instances of classic classes, we use __getinitargs__ and
# __getstate__ to serialize the data.
# If data.__getinitargs__ exists, the object must be reconstructed by
# calling cls(**args), where args is a tuple returned by
# __getinitargs__. Otherwise, the cls.__init__ method should never be
# called and the class instance is created by instantiating a trivial
# class and assigning to the instance's __class__ variable.
# If data.__getstate__ exists, it returns the state of the object.
# Otherwise, the state of the object is data.__dict__.
# We produce either a !!python/object or !!python/object/new node.
# If data.__getinitargs__ does not exist and state is a dictionary, we
# produce a !!python/object node. Otherwise we produce a
# !!python/object/new node.
cls = data.__class__
class_name = u'%s.%s' % (cls.__module__, cls.__name__)
args = None
state = None
if hasattr(data, '__getinitargs__'):
args = list(data.__getinitargs__())
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__
if args is None and isinstance(state, dict):
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+class_name, state)
if isinstance(state, dict) and not state:
return self.represent_sequence(
u'tag:yaml.org,2002:python/object/new:'+class_name, args)
value = {}
if args:
value['args'] = args
value['state'] = state
return self.represent_mapping(
u'tag:yaml.org,2002:python/object/new:'+class_name, value)
def represent_object(self, data):
# We use __reduce__ API to save the data. data.__reduce__ returns
# a tuple of length 2-5:
# (function, args, state, listitems, dictitems)
# For reconstructing, we call function(*args), then set its state,
# listitems, and dictitems if they are not None.
# A special case is when function.__name__ == '__newobj__'. In this
# case we create the object with args[0].__new__(*args).
# Another special case is when __reduce__ returns a string - we don't
# support it.
# We produce a !!python/object, !!python/object/new or
# !!python/object/apply node.
cls = type(data)
if cls in copy_reg.dispatch_table:
reduce = copy_reg.dispatch_table[cls](data)
elif hasattr(data, '__reduce_ex__'):
reduce = data.__reduce_ex__(2)
elif hasattr(data, '__reduce__'):
reduce = data.__reduce__()
else:
raise RepresenterError("cannot represent object: %r" % data)
reduce = (list(reduce)+[None]*5)[:5]
function, args, state, listitems, dictitems = reduce
args = list(args)
if state is None:
state = {}
if listitems is not None:
listitems = list(listitems)
if dictitems is not None:
dictitems = dict(dictitems)
if function.__name__ == '__newobj__':
function = args[0]
args = args[1:]
tag = u'tag:yaml.org,2002:python/object/new:'
newobj = True
else:
tag = u'tag:yaml.org,2002:python/object/apply:'
newobj = False
function_name = u'%s.%s' % (function.__module__, function.__name__)
if not args and not listitems and not dictitems \
and isinstance(state, dict) and newobj:
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+function_name, state)
if not listitems and not dictitems \
and isinstance(state, dict) and not state:
return self.represent_sequence(tag+function_name, args)
value = {}
if args:
value['args'] = args
if state or not isinstance(state, dict):
value['state'] = state
if listitems:
value['listitems'] = listitems
if dictitems:
value['dictitems'] = dictitems
return self.represent_mapping(tag+function_name, value)
Representer.add_representer(str,
Representer.represent_str)
Representer.add_representer(unicode,
Representer.represent_unicode)
Representer.add_representer(long,
Representer.represent_long)
Representer.add_representer(complex,
Representer.represent_complex)
Representer.add_representer(tuple,
Representer.represent_tuple)
Representer.add_representer(type,
Representer.represent_name)
Representer.add_representer(types.ClassType,
Representer.represent_name)
Representer.add_representer(types.FunctionType,
Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
Representer.represent_name)
Representer.add_representer(types.ModuleType,
Representer.represent_module)
Representer.add_multi_representer(types.InstanceType,
Representer.represent_instance)
Representer.add_multi_representer(object,
Representer.represent_object)
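# Editor's note: a hedged demonstration added for illustration. It exercises
# SafeRepresenter.represent_float()'s '.0e' fix-up through the public
# yaml.safe_dump() helper, which is assumed to be wired to SafeRepresenter.
if __name__ == '__main__':
    import yaml
    print yaml.safe_dump(1e17),  # emits 1.0e+17, not the invalid 1e17
    print yaml.safe_dump({'pi': 3.14, 'nan': float('nan')}),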
| apache-2.0 |
ros2/rclpy | rclpy/rclpy/action/client.py | 1 | 22966 | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import uuid
import weakref
from action_msgs.msg import GoalStatus
from action_msgs.srv import CancelGoal
from rclpy.executors import await_or_execute
from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy
from rclpy.qos import qos_profile_action_status_default
from rclpy.qos import qos_profile_services_default
from rclpy.qos import QoSProfile
from rclpy.task import Future
from rclpy.type_support import check_for_type_support
from rclpy.waitable import NumberOfEntities, Waitable
from unique_identifier_msgs.msg import UUID
class ClientGoalHandle():
"""Goal handle for working with Action Clients."""
def __init__(self, action_client, goal_id, goal_response):
self._action_client = action_client
self._goal_id = goal_id
self._goal_response = goal_response
self._status = GoalStatus.STATUS_UNKNOWN
def __eq__(self, other):
return self._goal_id == other.goal_id
def __ne__(self, other):
return self._goal_id != other.goal_id
def __repr__(self):
return 'ClientGoalHandle <id={0}, accepted={1}, status={2}>'.format(
self.goal_id.uuid,
self.accepted,
self.status)
@property
def goal_id(self):
return self._goal_id
@property
def stamp(self):
return self._goal_response.stamp
@property
def accepted(self):
return self._goal_response.accepted
@property
def status(self):
return self._status
def cancel_goal(self):
"""
Send a cancel request for the goal and wait for the response.
Do not call this method in a callback or a deadlock may occur.
:return: The cancel response.
"""
return self._action_client._cancel_goal(self)
def cancel_goal_async(self):
"""
Asynchronously request that the goal be canceled.
:return: a Future instance that completes when the server responds.
:rtype: :class:`rclpy.task.Future` instance
"""
return self._action_client._cancel_goal_async(self)
def get_result(self):
"""
Request the result for the goal and wait for the response.
Do not call this method in a callback or a deadlock may occur.
:return: The result response.
"""
return self._action_client._get_result(self)
def get_result_async(self):
"""
Asynchronously request the goal result.
:return: a Future instance that completes when the result is ready.
:rtype: :class:`rclpy.task.Future` instance
"""
return self._action_client._get_result_async(self)
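# Editor's sketch (added; names are illustrative): the typical life cycle of
# a ClientGoalHandle obtained from ActionClient.send_goal_async() below:
#
#   goal_handle = send_goal_future.result()
#   if goal_handle.accepted:
#       result_future = goal_handle.get_result_async()
#       # ... spin until result_future completes, then:
#       status = goal_handle.status  # e.g. GoalStatus.STATUS_SUCCEEDED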
class ActionClient(Waitable):
"""ROS Action client."""
def __init__(
self,
node,
action_type,
action_name,
*,
callback_group=None,
goal_service_qos_profile=qos_profile_services_default,
result_service_qos_profile=qos_profile_services_default,
cancel_service_qos_profile=qos_profile_services_default,
feedback_sub_qos_profile=QoSProfile(depth=10),
status_sub_qos_profile=qos_profile_action_status_default
):
"""
Create an ActionClient.
:param node: The ROS node to add the action client to.
:param action_type: Type of the action.
:param action_name: Name of the action.
Used as part of the underlying topic and service names.
:param callback_group: Callback group to add the action client to.
If None, then the node's default callback group is used.
:param goal_service_qos_profile: QoS profile for the goal service.
:param result_service_qos_profile: QoS profile for the result service.
:param cancel_service_qos_profile: QoS profile for the cancel service.
:param feedback_sub_qos_profile: QoS profile for the feedback subscriber.
:param status_sub_qos_profile: QoS profile for the status subscriber.
"""
if callback_group is None:
callback_group = node.default_callback_group
super().__init__(callback_group)
# Import the typesupport for the action module if not already done
check_for_type_support(action_type)
self._node = node
self._action_type = action_type
self._action_name = action_name
with node.handle:
self._client_handle = _rclpy.ActionClient(
node.handle,
action_type,
action_name,
goal_service_qos_profile.get_c_qos_profile(),
result_service_qos_profile.get_c_qos_profile(),
cancel_service_qos_profile.get_c_qos_profile(),
feedback_sub_qos_profile.get_c_qos_profile(),
status_sub_qos_profile.get_c_qos_profile()
)
self._is_ready = False
# key: UUID in bytes, value: weak reference to ClientGoalHandle
self._goal_handles = {}
# key: goal request sequence_number, value: Future for goal response
self._pending_goal_requests = {}
# key: goal request sequence_number, value: UUID
self._sequence_number_to_goal_id = {}
# key: cancel request sequence number, value: Future for cancel response
self._pending_cancel_requests = {}
# key: result request sequence number, value: Future for result response
self._pending_result_requests = {}
# key: UUID in bytes, value: callback function
self._feedback_callbacks = {}
callback_group.add_entity(self)
self._node.add_waitable(self)
def _generate_random_uuid(self):
return UUID(uuid=list(uuid.uuid4().bytes))
def _remove_pending_request(self, future, pending_requests):
"""
Remove a future from the list of pending requests.
This prevents a future from receiving a response and executing its done callbacks.
:param future: a future returned from one of :meth:`send_goal_async`,
:meth:`_cancel_goal_async`, or :meth:`_get_result_async`.
:type future: rclpy.task.Future
:param pending_requests: The list of pending requests.
:type pending_requests: dict
:return: The sequence number associated with the removed future, or
None if the future was not found in the list.
"""
for seq, req_future in list(pending_requests.items()):
if future == req_future:
try:
del pending_requests[seq]
except KeyError:
pass
else:
self.remove_future(future)
return seq
return None
def _remove_pending_goal_request(self, future):
seq = self._remove_pending_request(future, self._pending_goal_requests)
if seq in self._sequence_number_to_goal_id:
del self._sequence_number_to_goal_id[seq]
def _remove_pending_cancel_request(self, future):
self._remove_pending_request(future, self._pending_cancel_requests)
def _remove_pending_result_request(self, future):
self._remove_pending_request(future, self._pending_result_requests)
# Start Waitable API
def is_ready(self, wait_set):
"""Return True if one or more entities are ready in the wait set."""
ready_entities = self._client_handle.is_ready(wait_set)
self._is_feedback_ready = ready_entities[0]
self._is_status_ready = ready_entities[1]
self._is_goal_response_ready = ready_entities[2]
self._is_cancel_response_ready = ready_entities[3]
self._is_result_response_ready = ready_entities[4]
return any(ready_entities)
def take_data(self):
"""Take stuff from lower level so the wait set doesn't immediately wake again."""
data = {}
if self._is_goal_response_ready:
taken_data = self._client_handle.take_goal_response(
self._action_type.Impl.SendGoalService.Response)
# If take fails, then we get (None, None)
if all(taken_data):
data['goal'] = taken_data
if self._is_cancel_response_ready:
taken_data = self._client_handle.take_cancel_response(
self._action_type.Impl.CancelGoalService.Response)
# If take fails, then we get (None, None)
if all(taken_data):
data['cancel'] = taken_data
if self._is_result_response_ready:
taken_data = self._client_handle.take_result_response(
self._action_type.Impl.GetResultService.Response)
# If take fails, then we get (None, None)
if all(taken_data):
data['result'] = taken_data
if self._is_feedback_ready:
taken_data = self._client_handle.take_feedback(
self._action_type.Impl.FeedbackMessage)
# If take fails, then we get None
if taken_data is not None:
data['feedback'] = taken_data
if self._is_status_ready:
taken_data = self._client_handle.take_status(
self._action_type.Impl.GoalStatusMessage)
# If take fails, then we get None
if taken_data is not None:
data['status'] = taken_data
return data
async def execute(self, taken_data):
"""
Execute work after data has been taken from a ready wait set.
This will set results for Future objects for any received service responses and
call any user-defined callbacks (e.g. feedback).
"""
if 'goal' in taken_data:
sequence_number, goal_response = taken_data['goal']
if sequence_number in self._sequence_number_to_goal_id:
goal_handle = ClientGoalHandle(
self,
self._sequence_number_to_goal_id[sequence_number],
goal_response)
if goal_handle.accepted:
goal_uuid = bytes(goal_handle.goal_id.uuid)
if goal_uuid in self._goal_handles:
raise RuntimeError(
'Two goals were accepted with the same ID ({})'.format(goal_handle))
self._goal_handles[goal_uuid] = weakref.ref(goal_handle)
self._pending_goal_requests[sequence_number].set_result(goal_handle)
else:
self._node.get_logger().warning(
'Ignoring unexpected goal response. There may be more than '
f"one action server for the action '{self._action_name}'"
)
if 'cancel' in taken_data:
sequence_number, cancel_response = taken_data['cancel']
if sequence_number in self._pending_cancel_requests:
self._pending_cancel_requests[sequence_number].set_result(cancel_response)
else:
self._node.get_logger().warning(
'Ignoring unexpected cancel response. There may be more than '
f"one action server for the action '{self._action_name}'"
)
if 'result' in taken_data:
sequence_number, result_response = taken_data['result']
if sequence_number in self._pending_result_requests:
self._pending_result_requests[sequence_number].set_result(result_response)
else:
self._node.get_logger().warning(
'Ignoring unexpected result response. There may be more than '
f"one action server for the action '{self._action_name}'"
)
if 'feedback' in taken_data:
feedback_msg = taken_data['feedback']
goal_uuid = bytes(feedback_msg.goal_id.uuid)
# Call a registered callback if there is one
if goal_uuid in self._feedback_callbacks:
await await_or_execute(self._feedback_callbacks[goal_uuid], feedback_msg)
if 'status' in taken_data:
# Update the status of all goal handles maintained by this Action Client
for status_msg in taken_data['status'].status_list:
goal_uuid = bytes(status_msg.goal_info.goal_id.uuid)
status = status_msg.status
if goal_uuid in self._goal_handles:
goal_handle = self._goal_handles[goal_uuid]()
if goal_handle is not None:
goal_handle._status = status
# Remove "done" goals from the list
if (GoalStatus.STATUS_SUCCEEDED == status or
GoalStatus.STATUS_CANCELED == status or
GoalStatus.STATUS_ABORTED == status):
del self._goal_handles[goal_uuid]
else:
# Weak reference is None
del self._goal_handles[goal_uuid]
def get_num_entities(self):
"""Return number of each type of entity used in the wait set."""
num_entities = self._client_handle.get_num_entities()
return NumberOfEntities(*num_entities)
def add_to_wait_set(self, wait_set):
"""Add entities to wait set."""
self._client_handle.add_to_waitset(wait_set)
# End Waitable API
def send_goal(self, goal, **kwargs):
"""
Send a goal and wait for the result.
Do not call this method in a callback or a deadlock may occur.
See :meth:`send_goal_async` for more info about keyword arguments.
Unlike :meth:`send_goal_async`, this method returns the final result of the
action (not a goal handle).
:param goal: The goal request.
:type goal: action_type.Goal
:return: The result response.
:rtype: action_type.Result
:raises: TypeError if the type of the passed goal isn't an instance of
the Goal type of the provided action when the service was
constructed.
"""
if not isinstance(goal, self._action_type.Goal):
raise TypeError()
event = threading.Event()
def unblock(future):
nonlocal event
event.set()
send_goal_future = self.send_goal_async(goal, **kwargs)
send_goal_future.add_done_callback(unblock)
event.wait()
if send_goal_future.exception() is not None:
raise send_goal_future.exception()
goal_handle = send_goal_future.result()
result = self._get_result(goal_handle)
return result
def send_goal_async(self, goal, feedback_callback=None, goal_uuid=None):
"""
Send a goal and asynchronously get the result.
The result of the returned Future is set to a ClientGoalHandle when receipt of the goal
is acknowledged by an action server.
:param goal: The goal request.
:type goal: action_type.Goal
:param feedback_callback: Callback function for feedback associated with the goal.
:type feedback_callback: function
:param goal_uuid: Universally unique identifier for the goal.
If None, then a random UUID is generated.
:type: unique_identifier_msgs.UUID
:return: a Future instance to a goal handle that completes when the goal request
has been accepted or rejected.
:rtype: :class:`rclpy.task.Future` instance
:raises: TypeError if the type of the passed goal isn't an instance of
the Goal type of the provided action when the service was
constructed.
"""
if not isinstance(goal, self._action_type.Goal):
raise TypeError()
request = self._action_type.Impl.SendGoalService.Request()
request.goal_id = self._generate_random_uuid() if goal_uuid is None else goal_uuid
request.goal = goal
sequence_number = self._client_handle.send_goal_request(request)
if sequence_number in self._pending_goal_requests:
raise RuntimeError(
'Sequence ({}) conflicts with pending goal request'.format(sequence_number))
if feedback_callback is not None:
# TODO(jacobperron): Move conversion function to a general-use package
goal_uuid = bytes(request.goal_id.uuid)
self._feedback_callbacks[goal_uuid] = feedback_callback
future = Future()
self._pending_goal_requests[sequence_number] = future
self._sequence_number_to_goal_id[sequence_number] = request.goal_id
future.add_done_callback(self._remove_pending_goal_request)
# Add future so executor is aware
self.add_future(future)
return future
def _cancel_goal(self, goal_handle):
"""
Send a cancel request for an active goal and wait for the response.
Do not call this method in a callback or a deadlock may occur.
:param goal_handle: Handle to the goal to cancel.
:type goal_handle: :class:`ClientGoalHandle`
:return: The cancel response.
"""
event = threading.Event()
def unblock(future):
nonlocal event
event.set()
future = self._cancel_goal_async(goal_handle)
future.add_done_callback(unblock)
event.wait()
if future.exception() is not None:
raise future.exception()
return future.result()
def _cancel_goal_async(self, goal_handle):
"""
Send a cancel request for an active goal and asynchronously get the result.
:param goal_handle: Handle to the goal to cancel.
:type goal_handle: :class:`ClientGoalHandle`
:return: a Future instance that completes when the cancel request has been processed.
:rtype: :class:`rclpy.task.Future` instance
"""
if not isinstance(goal_handle, ClientGoalHandle):
raise TypeError(
'Expected type ClientGoalHandle but received {}'.format(type(goal_handle)))
cancel_request = CancelGoal.Request()
cancel_request.goal_info.goal_id = goal_handle.goal_id
sequence_number = self._client_handle.send_cancel_request(cancel_request)
if sequence_number in self._pending_cancel_requests:
raise RuntimeError(
'Sequence ({}) conflicts with pending cancel request'.format(sequence_number))
future = Future()
self._pending_cancel_requests[sequence_number] = future
future.add_done_callback(self._remove_pending_cancel_request)
# Add future so executor is aware
self.add_future(future)
return future
def _get_result(self, goal_handle):
"""
Request the result for an active goal and wait for the response.
Do not call this method in a callback or a deadlock may occur.
:param goal_handle: Handle to the goal to get the result for.
:type goal_handle: :class:`ClientGoalHandle`
:return: The result response.
"""
event = threading.Event()
def unblock(future):
nonlocal event
event.set()
future = self._get_result_async(goal_handle)
future.add_done_callback(unblock)
event.wait()
if future.exception() is not None:
raise future.exception()
return future.result()
def _get_result_async(self, goal_handle):
"""
Request the result for an active goal asynchronously.
:param goal_handle: Handle to the goal to cancel.
:type goal_handle: :class:`ClientGoalHandle`
:return: a Future instance that completes when the get result request has been processed.
:rtype: :class:`rclpy.task.Future` instance
"""
if not isinstance(goal_handle, ClientGoalHandle):
raise TypeError(
'Expected type ClientGoalHandle but received {}'.format(type(goal_handle)))
result_request = self._action_type.Impl.GetResultService.Request()
result_request.goal_id = goal_handle.goal_id
sequence_number = self._client_handle.send_result_request(result_request)
if sequence_number in self._pending_result_requests:
raise RuntimeError(
'Sequence ({}) conflicts with pending result request'.format(sequence_number))
future = Future()
self._pending_result_requests[sequence_number] = future
future.add_done_callback(self._remove_pending_result_request)
# Add future so executor is aware
self.add_future(future)
return future
def server_is_ready(self):
"""
Check if there is an action server ready to process requests from this client.
:return: True if an action server is ready, False otherwise.
"""
with self._node.handle:
return self._client_handle.is_action_server_available()
def wait_for_server(self, timeout_sec=None):
"""
Wait for an action server to be ready.
Returns as soon as an action server is ready for this client.
:param timeout_sec: Number of seconds to wait until an action server is available.
If None, then wait indefinitely.
:return: True if an action server is available, False if the timeout is exceeded.
"""
# TODO(jacobperron): Remove arbitrary sleep time and return as soon as server is ready
# See https://github.com/ros2/rclpy/issues/58
sleep_time = 0.25
if timeout_sec is None:
timeout_sec = float('inf')
while self._node.context.ok() and not self.server_is_ready() and timeout_sec > 0.0:
time.sleep(sleep_time)
timeout_sec -= sleep_time
return self.server_is_ready()
def destroy(self):
"""Destroy the underlying action client handle."""
if self._client_handle is None:
return
with self._node.handle:
self._client_handle.destroy_when_not_in_use()
self._node.remove_waitable(self)
self._client_handle = None
def __del__(self):
"""Destroy the underlying action client handle."""
self.destroy()
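# Editor's note: a hedged end-to-end sketch, added for illustration. It
# assumes a sourced ROS 2 environment, the `test_msgs` package for the
# Fibonacci action type, and an action server running under the name
# 'fibonacci'; none of this is part of the original module.
if __name__ == '__main__':
    import rclpy
    from test_msgs.action import Fibonacci

    rclpy.init()
    node = rclpy.create_node('fibonacci_action_client')
    client = ActionClient(node, Fibonacci, 'fibonacci')
    if client.wait_for_server(timeout_sec=5.0):
        # Send the goal and block until it is accepted or rejected.
        goal_future = client.send_goal_async(Fibonacci.Goal(order=5))
        rclpy.spin_until_future_complete(node, goal_future)
        goal_handle = goal_future.result()
        if goal_handle.accepted:
            # Block until the action server produces a result.
            result_future = goal_handle.get_result_async()
            rclpy.spin_until_future_complete(node, result_future)
            node.get_logger().info(
                'result: {}'.format(result_future.result().result.sequence))
    client.destroy()
    node.destroy_node()
    rclpy.shutdown()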
| apache-2.0 |
iuliat/nova | nova/tests/unit/virt/libvirt/volume/test_gpfs.py | 46 | 1302 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import gpfs
class LibvirtGPFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
def test_libvirt_gpfs_driver_get_config(self):
libvirt_driver = gpfs.LibvirtGPFSVolumeDriver(self.fake_conn)
connection_info = {
'driver_volume_type': 'gpfs',
'data': {
'device_path': '/gpfs/foo',
},
'serial': 'fake_serial',
}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertEqual('file', tree.get('type'))
self.assertEqual('fake_serial', tree.find('./serial').text)
| apache-2.0 |
tomsilver/nupic | nupic/support/features.py | 7 | 4619 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
'''
This module implements the Feature Flags system.
Summary:
Depending on the environment your program is run in, you may want various
features to be displayed and others not.
Depending on the type of user who is using NuPIC, you may want them to see
various features and not others.
These two things can be accomplished using Feature Flags.
Feature Flags wrap sections of code in IF/THEN blocks to isolate functionality
from running code.
This allows you to inline new features without branching files into file.py and
file2.py.
This also allows you to release un-tested, underdevelopment, or experimental
features directly to production with confidence that those code paths won't
get run until such time as they are 'released' to a group of users for testing
or to all users for a launch.
DEV NOTE: To set personal Feature Flag over-rides copy conf/devconf-example.py
to devconf.py and edit as desired.
'''
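# Editor's sketch of the IF/THEN pattern described above; the feature name
# and the two render functions are illustrative only:
#
#   if Features.hasFeature('new_visualization'):
#       render_new_visualization()
#   else:
#       render_legacy_visualization()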
import os
import sys
from nupic.support.features_list import FEATURES_LIST
from nupic.support.feature_groups import GROUPS
class Features(object):
'''
This class can be used to retrieve available and calculated features for
a given combination of running environment and user
The class is for documentation purposes as all the methods are statically
accessible.
Modeled after Ron's excellent Configuration class
'''
###############################################################################
@staticmethod
def hasFeature(feature, group = False, addList = False, removeList = False):
'''
This is the primary method of the class that will return True or False
based on the current environment and user.
This is the 'Flag' part of Feature Flags.
'''
return (feature in Features.getFeatures(group, addList, removeList))
@staticmethod
def getFeatures(group = False, addList = False, removeList = False):
'''
Returns a list of all the active features for the current env/user
groups: This is a list of groups the current user belongs to
addList: A list of one-off features to add
removeList: A list of one-off features to remove
'''
# Calculate groups
if group:
featureList = Features._getFeaturesForGroup(group)
else:
featureList = Features._getFeaturesForGroup('BASE')
# Calculate user one-offs
if addList:
featureList.extend(addList)
if removeList:
for item in removeList:
if item in featureList:
featureList.remove(item)
# Calculate developer configuration
if 'TRUNK' in os.environ:
confDir = os.path.join(os.environ['TRUNK'], 'config')
confFilepath = os.path.join(confDir, 'developer.py')
if os.path.exists(confFilepath):
sys.path.append(confDir)
import developer
featureList.extend(developer.FEATURES['ADD'])
for item in developer.FEATURES['REMOVE']:
if item in featureList:
featureList.remove(item)
return featureList
@staticmethod
def _getFeaturesForGroup(group):
'''
Using feature_groups.py, calculate the set of features available to the given
group
'''
return GROUPS[group]
@staticmethod
def getAllFeatures():
'''
Prints all known features (essentially the contents of features_list.py)
'''
for feature in FEATURES_LIST:
print feature['name'] + '\t\t' + feature['description']
@staticmethod
def getAllGroups():
'''
Returns a list of all known feature groups
'''
return [group for group, features in GROUPS.iteritems()]
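if __name__ == '__main__':
    # Editor's demo (added): 'BASE' is assumed to be a defined group in
    # feature_groups.py; the feature name below is illustrative only.
    print Features.getFeatures(group='BASE')
    print Features.hasFeature('example_feature', group='BASE')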
| gpl-3.0 |
ahu-odoo/odoo | addons/account_asset/wizard/__init__.py | 445 | 1122 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_change_duration
import wizard_asset_compute
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Centre-Alt-Rendiment-Esportiu/att | old_project/Python/win_libs/scipy/weave/tests/test_slice_handler.py | 96 | 5756 | from __future__ import absolute_import, print_function
import parser
from numpy.testing import TestCase, assert_equal, run_module_suite
from scipy.weave import slice_handler
from scipy.weave.slice_handler import indexed_array_pattern
from scipy.weave.ast_tools import ast_to_string, find_first_pattern
from weave_test_utils import remove_whitespace
class TestBuildSliceAtom(TestCase):
def generic_check(self,slice_vars,desired):
pos = slice_vars['pos']
ast_list = slice_handler.build_slice_atom(slice_vars,pos)
actual = ast_to_string(ast_list)
assert_equal(actual,desired)
def test_exclusive_end(self):
slice_vars = {'begin':'1', 'end':'2', 'step':'_stp',
'single_index':'_index','pos':0}
desired = 'slice(1,2-1)'
self.generic_check(slice_vars,desired)
class TestSlice(TestCase):
def generic_check(self,suite_string,desired):
ast_tuple = parser.suite(suite_string).totuple()
found, data = find_first_pattern(ast_tuple,indexed_array_pattern)
subscript = data['subscript_list'][1] # [0] is symbol, [1] is the subscript
actual = slice_handler.slice_ast_to_dict(subscript)
assert_equal(actual,desired,suite_string)
def test_empty_2_slice(self):
# match slice from a[:]
test = "a[:]"
desired = {'begin':'_beg', 'end':'_end', 'step':'_stp',
'single_index':'_index'}
self.generic_check(test,desired)
def test_begin_2_slice(self):
# match slice from a[1:]
test = "a[1:]"
desired = {'begin':'1', 'end':'_end', 'step':'_stp',
'single_index':'_index'}
self.generic_check(test,desired)
def test_end_2_slice(self):
# match slice from a[:2]
test = "a[:2]"
desired = {'begin':'_beg', 'end':'2', 'step':'_stp',
'single_index':'_index'}
self.generic_check(test,desired)
def test_begin_end_2_slice(self):
# match slice from a[1:2]
test = "a[1:2]"
desired = {'begin':'1', 'end':'2', 'step':'_stp',
'single_index':'_index'}
self.generic_check(test,desired)
def test_empty_3_slice(self):
# match slice from a[::]
test = "a[::]"
desired = {'begin':'_beg', 'end':'_end', 'step':'_stp',
'single_index':'_index'}
self.generic_check(test,desired)
def test_begin_3_slice(self):
# match slice from a[1::]
test = "a[1::]"
desired = {'begin':'1', 'end':'_end', 'step':'_stp',
'single_index':'_index'}
self.generic_check(test,desired)
def test_end_3_slice(self):
# match slice from a[:2:]
test = "a[:2:]"
desired = {'begin':'_beg', 'end':'2', 'step':'_stp',
'single_index':'_index'}
self.generic_check(test,desired)
def test_stp3_slice(self):
# match slice from a[::3]
test = "a[::3]"
desired = {'begin':'_beg', 'end':'_end', 'step':'3',
'single_index':'_index'}
self.generic_check(test,desired)
def test_begin_end_3_slice(self):
# match slice from a[1:2:]
test = "a[1:2:]"
desired = {'begin':'1', 'end':'2','step':'_stp',
'single_index':'_index'}
self.generic_check(test,desired)
def test_begin_step_3_slice(self):
# match slice from a[1::3]
test = "a[1::3]"
desired = {'begin':'1', 'end':'_end','step':'3',
'single_index':'_index'}
self.generic_check(test,desired)
def test_end_step_3_slice(self):
# match slice from a[:2:3]
test = "a[:2:3]"
desired = {'begin':'_beg', 'end':'2', 'step':'3',
'single_index':'_index'}
self.generic_check(test,desired)
def test_begin_end_stp3_slice(self):
# match slice from a[1:2:3]
test = "a[1:2:3]"
desired = {'begin':'1', 'end':'2', 'step':'3','single_index':'_index'}
self.generic_check(test,desired)
def test_expr_3_slice(self):
# match slice from a[:1+i+2:]
test = "a[:1+i+2:]"
desired = {'begin':'_beg', 'end':"1+i+2",'step':'_stp',
'single_index':'_index'}
self.generic_check(test,desired)
def test_single_index(self):
# match slice from a[0]
test = "a[0]"
desired = {'begin':'_beg', 'end':"_end",'step':'_stp',
'single_index':'0'}
self.generic_check(test,desired)
class TestTransformSlices(TestCase):
def generic_check(self,suite_string,desired):
ast_list = parser.suite(suite_string).tolist()
slice_handler.transform_slices(ast_list)
actual = ast_to_string(ast_list)
# Remove white space from expressions so that equivalent
# but differently formatted string will compare equally
actual = remove_whitespace(actual)
desired = remove_whitespace(desired)
assert_equal(actual,desired,suite_string)
def test_simple_expr1(self):
# transform a[:] to slice notation
test = "a[:]"
desired = 'a[slice(_beg,_end)]'
self.generic_check(test,desired)
def test_simple_expr2(self):
test = "a[:,:] = b[:,1:1+2:3] *(c[1-2+i:,:] - c[:,:])"
desired = " a[slice(_beg,_end),slice(_beg,_end)] = "\
" b[slice(_beg,_end), slice(1,1+2-1,3)] *"\
" (c[slice(1-2+i,_end), slice(_beg,_end)] -"\
" c[slice(_beg,_end), slice(_beg,_end)])"
self.generic_check(test,desired)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
errx/django | django/contrib/flatpages/views.py | 105 | 2813 | from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.shortcuts import get_current_site
from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.template import loader, RequestContext
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
DEFAULT_TEMPLATE = 'flatpages/default.html'
# This view is called from FlatpageFallbackMiddleware.process_response
# when a 404 is raised, which often means CsrfViewMiddleware.process_view
# has not been called even if CsrfViewMiddleware is installed. So we need
# to use @csrf_protect, in case the template needs {% csrf_token %}.
# However, we can't just wrap this view; if no matching flatpage exists,
# or a redirect is required for authentication, the 404 needs to be returned
# without any CSRF checks. Therefore, we only
# CSRF protect the internal implementation.
def flatpage(request, url):
"""
Public interface to the flat page view.
Models: `flatpages.flatpages`
Templates: Uses the template defined by the ``template_name`` field,
or :template:`flatpages/default.html` if template_name is not defined.
Context:
flatpage
`flatpages.flatpages` object
"""
if not url.startswith('/'):
url = '/' + url
site_id = get_current_site(request).id
try:
f = get_object_or_404(FlatPage,
url=url, sites=site_id)
except Http404:
if not url.endswith('/') and settings.APPEND_SLASH:
url += '/'
f = get_object_or_404(FlatPage,
url=url, sites=site_id)
return HttpResponsePermanentRedirect('%s/' % request.path)
else:
raise
return render_flatpage(request, f)
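# Usage sketch (illustrative, not part of this module): besides being invoked
# by FlatpageFallbackMiddleware, the view can be wired directly into a
# URLconf, e.g.
#
# from django.conf.urls import url
# from django.contrib.flatpages import views
#
# urlpatterns = [
#     url(r'^(?P<url>.*/)$', views.flatpage),
# ]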
@csrf_protect
def render_flatpage(request, f):
"""
Internal interface to the flat page view.
"""
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
if f.template_name:
t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
t = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
c = RequestContext(request, {
'flatpage': f,
})
response = HttpResponse(t.render(c))
return response
| bsd-3-clause |
Tao-Ma/gpdb | src/test/tinc/tincrepo/resource_management/memory_accounting/test_oom.py | 3 | 8712 | import tinctest
import unittest2 as unittest
from mpp.models import MPPTestCase
from mpp.lib.PSQL import PSQL
from gppylib.commands.base import Command
from mpp.lib.gpConfig import GpConfig
from tinctest.models.scenario import ScenarioTestCase
class OOMTestCase(MPPTestCase, ScenarioTestCase):
"""
@product_version gpdb:[4.3.0.0-MAIN], hawq: [1.2.1.0-]
"""
@classmethod
def tearDownClass(cls):
# Reset GUC gp_vmem_protect_limit to default
Command('Run gpconfig to set GUC gp_vmem_protect_limit' ,
'source $GPHOME/greenplum_path.sh;gpconfig -c gp_vmem_protect_limit -m 8192 -v 8192; gpconfig -c gp_vmem_limit_per_query -v 0 --skipvalidation').run(validateAfter=True)
# Restart DB
Command('Restart database for GUCs to take effect',
'source $GPHOME/greenplum_path.sh && gpstop -ar').run(validateAfter=True)
def gp_version(self):
"""
@todo: ScenarioTestCase does not have the product attribute from MPPTestCase; the method needs to live in ScenarioTestCase.
This is only a hack.
"""
result = PSQL.run_sql_command( sql_cmd='select version()', flags='-t -q' )
if "HAWQ" in result:
return "hawq"
else:
return "gpdb"
def setUp(self):
# Set GUC gp_vmem_protect_limit
self.prd = "_hawq"
if self.gp_version() == "gpdb":
self.prd = ""
gpconfig = GpConfig()
expected_vmem = '20'
expected_runaway_perc = '0'
restart_db = False
if self.name == "OOMTestCase.test_07_OOM_abort_query":
gpconfig.setParameter('gp_vmem_limit_per_query', '2MB', '2MB', '--skipvalidation')
restart_db = True
(vmem, _) = gpconfig.getParameter('gp_vmem_protect_limit')
(runaway_perc, _) = gpconfig.getParameter('runaway_detector_activation_percent')
if runaway_perc == expected_runaway_perc and vmem == expected_vmem:
tinctest.logger.info('gp_vmem_protect_limit and runaway_detector_activation_percent GUCs already set correctly')
else:
tinctest.logger.info('Setting GUC and restarting DB')
gpconfig.setParameter('runaway_detector_activation_percent', expected_runaway_perc, expected_runaway_perc)
gpconfig.setParameter('gp_vmem_protect_limit', expected_vmem, expected_vmem)
restart_db = True
if restart_db:
# Restart DB
Command('Restart database for GUCs to take effect',
'source $GPHOME/greenplum_path.sh && gpstop -ar').run(validateAfter=True)
super(OOMTestCase, self).setUp()
def test_01_OOM_with_singlequery(self):
"""
@description Run a single query OOM and verify log
"""
test_case_list1 = []
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_singlequery_oom')
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_singlequery_oom%s' % self.prd)
self.test_case_scenario.append(test_case_list2)
def test_02_OOM_concurrent_sleeps(self):
"""
@description Run a single query OOM while multiple other queries are sleeping and verify log
"""
test_case_list1 = []
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_sleep')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_sleep')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom')
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_concurrent_sleeps_oom%s' %self.prd)
self.test_case_scenario.append(test_case_list2)
def test_03_OOM_multiple_random(self):
"""
@description Test where multiple active queries randomly hit OOM
"""
test_case_list1 = []
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_mixed_1')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_mixed_2')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_mixed_2')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_mixed_2')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_simple')
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_multiple_random_oom%s' % self.prd)
self.test_case_scenario.append(test_case_list2)
# Skipping this test for 1.3.1.0 since it takes hours to run
# def test_04_multipleslice_singlequery(self):
# """
# @description Test where single query with multiple slices per segment runs OOM
# """
# test_case_list1 = []
# test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_multislice_oom')
# self.test_case_scenario.append(test_case_list1)
#
# test_case_list2 = []
# test_case_list2.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_multislice_oom%s' % self.prd)
# self.test_case_scenario.append(test_case_list2)
#
# test_case_list3 = []
# test_case_list3.append('resource_management.memory_accounting.scenario.oom_test.runsql.verify.test_oom_count')
# self.test_case_scenario.append(test_case_list3)
# QA-2748: needs at least 48GB of RAM to run this test.
# GPDB should use the DCA; for HAWQ, use gpdb26.rel.dh.greenplum.com.
# This test is dependent on how much memory the machine has available.
@unittest.skip("QA-2748, issue with test on different platform with different memory")
def test_06_OOM_massivequery(self):
"""
@description Test where smaller queries pass while the massive violator dies
"""
test_case_list1 = []
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_massive')
self.test_case_scenario.append(test_case_list1)
test_case_list2 = []
test_case_list2.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_massive_oom%s' % self.prd)
self.test_case_scenario.append(test_case_list2)
def test_07_OOM_abort_query(self):
"""
@description Need a mechanism to abort query before gp_vmem_protect_limit is hit
@note Depending on the machine, we may get "VM Protect failed to allocate memory"
or "Per-query VM protect limit reached: current limit is 102400 kB, requested 8388608 bytes, available 2 MB"
"""
test_case_list1 = []
test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_oom_abort_query')
self.test_case_scenario.append(test_case_list1)
| apache-2.0 |
defance/edx-platform | common/djangoapps/terrain/stubs/comments.py | 85 | 5565 | """
Stub implementation of cs_comments_service for acceptance tests
"""
import re
import urlparse
from .http import StubHttpRequestHandler, StubHttpService
class StubCommentsServiceHandler(StubHttpRequestHandler):
@property
def _params(self):
return urlparse.parse_qs(urlparse.urlparse(self.path).query)
def do_GET(self):
pattern_handlers = {
"/api/v1/users/(?P<user_id>\\d+)/active_threads$": self.do_user_profile,
"/api/v1/users/(?P<user_id>\\d+)$": self.do_user,
"/api/v1/search/threads$": self.do_search_threads,
"/api/v1/threads$": self.do_threads,
"/api/v1/threads/(?P<thread_id>\\w+)$": self.do_thread,
"/api/v1/comments/(?P<comment_id>\\w+)$": self.do_comment,
"/api/v1/(?P<commentable_id>\\w+)/threads$": self.do_commentable,
}
if self.match_pattern(pattern_handlers):
return
self.send_response(404, content="404 Not Found")
def match_pattern(self, pattern_handlers):
path = urlparse.urlparse(self.path).path
for pattern in pattern_handlers:
match = re.match(pattern, path)
if match:
pattern_handlers[pattern](**match.groupdict())
return True
return None
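# Dispatch sketch: given the table in do_GET above, a request such as
#   GET /api/v1/users/42?course_id=demo
# matches "/api/v1/users/(?P<user_id>\\d+)$" (the query string is stripped
# before matching) and is routed to self.do_user(user_id='42'), with the
# query parameters available through self._params.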
def do_PUT(self):
if self.path.startswith('/set_config'):
return StubHttpRequestHandler.do_PUT(self)
pattern_handlers = {
"/api/v1/users/(?P<user_id>\\d+)$": self.do_put_user,
}
if self.match_pattern(pattern_handlers):
return
self.send_response(204, "")
def do_put_user(self, user_id):
self.server.config['default_sort_key'] = self.post_dict.get("default_sort_key", "date")
self.send_json_response({'username': self.post_dict.get("username"), 'external_id': self.post_dict.get("external_id")})
def do_DELETE(self):
pattern_handlers = {
"/api/v1/comments/(?P<comment_id>\\w+)$": self.do_delete_comment
}
if self.match_pattern(pattern_handlers):
return
self.send_json_response({})
def do_user(self, user_id):
response = {
"id": user_id,
"default_sort_key": self.server.config.get("default_sort_key", "date"),
"upvoted_ids": [],
"downvoted_ids": [],
"subscribed_thread_ids": [],
}
if 'course_id' in self._params:
response.update({
"threads_count": 1,
"comments_count": 2
})
self.send_json_response(response)
def do_user_profile(self, user_id):
if 'active_threads' in self.server.config:
user_threads = self.server.config['active_threads'][:]
params = self._params
page = int(params.get("page", ["1"])[0])
per_page = int(params.get("per_page", ["20"])[0])
num_pages = max(len(user_threads) - 1, 1) / per_page + 1
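# Worked example of the Python 2 integer arithmetic above: with 21 threads
# and per_page=20, num_pages = max(20, 1) / 20 + 1 = 2; with 20 threads,
# num_pages = 19 / 20 + 1 = 1. For non-empty lists the expression behaves
# like ceil(len(user_threads) / per_page).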
user_threads = user_threads[(page - 1) * per_page:page * per_page]
self.send_json_response({
"collection": user_threads,
"page": page,
"num_pages": num_pages
})
else:
self.send_response(404, content="404 Not Found")
def do_thread(self, thread_id):
if thread_id in self.server.config.get('threads', {}):
thread = self.server.config['threads'][thread_id].copy()
params = urlparse.parse_qs(urlparse.urlparse(self.path).query)
if "recursive" in params and params["recursive"][0] == "True":
thread.setdefault('children', [])
resp_total = thread.setdefault('resp_total', len(thread['children']))
resp_skip = int(params.get("resp_skip", ["0"])[0])
resp_limit = int(params.get("resp_limit", ["10000"])[0])
thread['children'] = thread['children'][resp_skip:(resp_skip + resp_limit)]
self.send_json_response(thread)
else:
self.send_response(404, content="404 Not Found")
def do_threads(self):
threads = self.server.config.get('threads', {})
threads_data = list(threads.values())
self.send_json_response({"collection": threads_data, "page": 1, "num_pages": 1})
def do_search_threads(self):
self.send_json_response(self.server.config.get('search_result', {}))
def do_comment(self, comment_id):
# django_comment_client issues a GET for the comment before doing a DELETE;
# this handler exists to support that.
if comment_id in self.server.config.get('comments', {}):
comment = self.server.config['comments'][comment_id]
self.send_json_response(comment)
def do_delete_comment(self, comment_id):
"""Handle comment deletion. Returns a JSON representation of the
deleted comment."""
if comment_id in self.server.config.get('comments', {}):
comment = self.server.config['comments'][comment_id]
self.send_json_response(comment)
def do_commentable(self, commentable_id):
self.send_json_response({
"collection": [
thread
for thread in self.server.config.get('threads', {}).values()
if thread.get('commentable_id') == commentable_id
],
"page": 1,
"num_pages": 1,
})
class StubCommentsService(StubHttpService):
HANDLER_CLASS = StubCommentsServiceHandler
| agpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/stats/tests/test_stats.py | 2 | 170720 | """ Test functions for stats module
WRITTEN BY LOUIS LUANGKESORN <lluang@yahoo.com> FOR THE STATS MODULE
BASED ON WILKINSON'S STATISTICS QUIZ
http://www.stanford.edu/~clint/bench/wilk.txt
Additional tests by a host of SciPy developers.
"""
from __future__ import division, print_function, absolute_import
import os
import sys
import warnings
from collections import namedtuple
from numpy.testing import (assert_, assert_equal,
assert_almost_equal, assert_array_almost_equal,
assert_array_equal, assert_approx_equal,
assert_allclose)
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
import numpy.ma.testutils as mat
from numpy import array, arange, float32, float64, power
import numpy as np
import scipy.stats as stats
import scipy.stats.mstats as mstats
import scipy.stats.mstats_basic as mstats_basic
from scipy._lib._version import NumpyVersion
from scipy._lib.six import xrange
from .common_tests import check_named_results
""" Numbers in docstrings beginning with 'W' refer to the section numbers
and headings found in the STATISTICS QUIZ of Leland Wilkinson. These are
considered to be essential functionality. True testing and
evaluation of a statistics package requires use of the
NIST Statistical test data. See McCullough (1999), Assessing The Reliability
of Statistical Software for a test methodology and its
implementation in testing SAS, SPSS, and S-Plus
"""
# Datasets
# These data sets are from the nasty.dat sets used by Wilkinson
# For completeness, I should write the relevant tests and count them as failures
# Somewhat acceptable, since this is still beta software. It would count as a
# good target for 1.0 status
X = array([1,2,3,4,5,6,7,8,9], float)
ZERO = array([0,0,0,0,0,0,0,0,0], float)
BIG = array([99999991,99999992,99999993,99999994,99999995,99999996,99999997,
99999998,99999999], float)
LITTLE = array([0.99999991,0.99999992,0.99999993,0.99999994,0.99999995,0.99999996,
0.99999997,0.99999998,0.99999999], float)
HUGE = array([1e+12,2e+12,3e+12,4e+12,5e+12,6e+12,7e+12,8e+12,9e+12], float)
TINY = array([1e-12,2e-12,3e-12,4e-12,5e-12,6e-12,7e-12,8e-12,9e-12], float)
ROUND = array([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5], float)
class TestTrimmedStats(object):
# TODO: write these tests to handle missing values properly
dprec = np.finfo(np.float64).precision
def test_tmean(self):
y = stats.tmean(X, (2, 8), (True, True))
assert_approx_equal(y, 5.0, significant=self.dprec)
y1 = stats.tmean(X, limits=(2, 8), inclusive=(False, False))
y2 = stats.tmean(X, limits=None)
assert_approx_equal(y1, y2, significant=self.dprec)
def test_tvar(self):
y = stats.tvar(X, limits=(2, 8), inclusive=(True, True))
assert_approx_equal(y, 4.6666666666666661, significant=self.dprec)
y = stats.tvar(X, limits=None)
assert_approx_equal(y, X.var(ddof=1), significant=self.dprec)
def test_tstd(self):
y = stats.tstd(X, (2, 8), (True, True))
assert_approx_equal(y, 2.1602468994692865, significant=self.dprec)
y = stats.tstd(X, limits=None)
assert_approx_equal(y, X.std(ddof=1), significant=self.dprec)
def test_tmin(self):
assert_equal(stats.tmin(4), 4)
x = np.arange(10)
assert_equal(stats.tmin(x), 0)
assert_equal(stats.tmin(x, lowerlimit=0), 0)
assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), 1)
x = x.reshape((5, 2))
assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), [2, 1])
assert_equal(stats.tmin(x, axis=1), [0, 2, 4, 6, 8])
assert_equal(stats.tmin(x, axis=None), 0)
x = np.arange(10.)
x[9] = np.nan
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(stats.tmin(x), np.nan)
assert_equal(stats.tmin(x, nan_policy='omit'), 0.)
assert_raises(ValueError, stats.tmin, x, nan_policy='raise')
assert_raises(ValueError, stats.tmin, x, nan_policy='foobar')
msg = "'propagate', 'raise', 'omit'"
with assert_raises(ValueError, message=msg):
stats.tmin(x, nan_policy='foo')
def test_tmax(self):
assert_equal(stats.tmax(4), 4)
x = np.arange(10)
assert_equal(stats.tmax(x), 9)
assert_equal(stats.tmax(x, upperlimit=9), 9)
assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), 8)
x = x.reshape((5, 2))
assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), [8, 7])
assert_equal(stats.tmax(x, axis=1), [1, 3, 5, 7, 9])
assert_equal(stats.tmax(x, axis=None), 9)
x = np.arange(10.)
x[6] = np.nan
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(stats.tmax(x), np.nan)
assert_equal(stats.tmax(x, nan_policy='omit'), 9.)
assert_raises(ValueError, stats.tmax, x, nan_policy='raise')
assert_raises(ValueError, stats.tmax, x, nan_policy='foobar')
def test_tsem(self):
y = stats.tsem(X, limits=(3, 8), inclusive=(False, True))
y_ref = np.array([4, 5, 6, 7, 8])
assert_approx_equal(y, y_ref.std(ddof=1) / np.sqrt(y_ref.size),
significant=self.dprec)
assert_approx_equal(stats.tsem(X, limits=[-1, 10]),
stats.tsem(X, limits=None),
significant=self.dprec)
class TestCorrPearsonr(object):
""" W.II.D. Compute a correlation matrix on all the variables.
All the correlations, except for ZERO and MISS, should be exactly 1.
ZERO and MISS should have undefined or missing correlations with the
other variables. The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_pXX(self):
y = stats.pearsonr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXBIG(self):
y = stats.pearsonr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXLITTLE(self):
y = stats.pearsonr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXHUGE(self):
y = stats.pearsonr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXTINY(self):
y = stats.pearsonr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXROUND(self):
y = stats.pearsonr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGBIG(self):
y = stats.pearsonr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGLITTLE(self):
y = stats.pearsonr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGHUGE(self):
y = stats.pearsonr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGTINY(self):
y = stats.pearsonr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGROUND(self):
y = stats.pearsonr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLELITTLE(self):
y = stats.pearsonr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEHUGE(self):
y = stats.pearsonr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLETINY(self):
y = stats.pearsonr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEROUND(self):
y = stats.pearsonr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEHUGE(self):
y = stats.pearsonr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGETINY(self):
y = stats.pearsonr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEROUND(self):
y = stats.pearsonr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYTINY(self):
y = stats.pearsonr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYROUND(self):
y = stats.pearsonr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pROUNDROUND(self):
y = stats.pearsonr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_r_exactly_pos1(self):
a = arange(3.0)
b = a
r, prob = stats.pearsonr(a,b)
assert_equal(r, 1.0)
assert_equal(prob, 0.0)
def test_r_exactly_neg1(self):
a = arange(3.0)
b = -a
r, prob = stats.pearsonr(a,b)
assert_equal(r, -1.0)
assert_equal(prob, 0.0)
def test_basic(self):
# A basic test, with a correlation coefficient
# that is not 1 or -1.
a = array([-1, 0, 1])
b = array([0, 0, 3])
r, prob = stats.pearsonr(a, b)
assert_approx_equal(r, np.sqrt(3)/2)
assert_approx_equal(prob, 1.0/3)
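# Hand check of the expected values (a derivation, not scipy internals):
# with a = [-1, 0, 1] and b = [0, 0, 3], mean(a) = 0, mean(b) = 1,
# cov(a, b) = ((-1)*0 + 0*0 + 1*3)/3 = 1, var(a) = 2/3, var(b) = 2, so
# r = 1/sqrt((2/3)*2) = sqrt(3)/2. The p-value is consistent with the t
# distribution on n-2 = 1 degrees of freedom (a Cauchy law):
# t = r*sqrt(1/(1-r**2)) = sqrt(3), p = 2*(1/2 - arctan(sqrt(3))/pi) = 1/3.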
class TestFisherExact(object):
"""Some tests to show that fisher_exact() works correctly.
Note that in SciPy 0.9.0 this was not working well for large numbers due to
inaccuracy of the hypergeom distribution (see #1218). Fixed now.
Also note that R and Scipy have different argument formats for their
hypergeometric distribution functions.
R:
> phyper(18999, 99000, 110000, 39000, lower.tail = FALSE)
[1] 1.701815e-09
"""
def test_basic(self):
fisher_exact = stats.fisher_exact
res = fisher_exact([[14500, 20000], [30000, 40000]])[1]
assert_approx_equal(res, 0.01106, significant=4)
res = fisher_exact([[100, 2], [1000, 5]])[1]
assert_approx_equal(res, 0.1301, significant=4)
res = fisher_exact([[2, 7], [8, 2]])[1]
assert_approx_equal(res, 0.0230141, significant=6)
res = fisher_exact([[5, 1], [10, 10]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 15], [20, 20]])[1]
assert_approx_equal(res, 0.0958044, significant=6)
res = fisher_exact([[5, 16], [20, 25]])[1]
assert_approx_equal(res, 0.1725862, significant=6)
res = fisher_exact([[10, 5], [10, 1]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 0], [1, 4]])[1]
assert_approx_equal(res, 0.04761904, significant=6)
res = fisher_exact([[0, 1], [3, 2]])[1]
assert_approx_equal(res, 1.0)
res = fisher_exact([[0, 2], [6, 4]])[1]
assert_approx_equal(res, 0.4545454545)
res = fisher_exact([[2, 7], [8, 2]])
assert_approx_equal(res[1], 0.0230141, significant=6)
assert_approx_equal(res[0], 4.0 / 56)
def test_precise(self):
# results from R
#
# R defines oddsratio differently (see Notes section of fisher_exact
# docstring), so those will not match. We leave them in anyway, in
# case they will be useful later on. We test only the p-value.
tablist = [
([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)),
([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)),
([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)),
([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)),
([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)),
([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)),
([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)),
([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)),
([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)),
([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)),
([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000))
]
for table, res_r in tablist:
res = stats.fisher_exact(np.asarray(table))
np.testing.assert_almost_equal(res[1], res_r[1], decimal=11,
verbose=True)
@pytest.mark.slow
def test_large_numbers(self):
# Test with some large numbers. Regression test for #1401
pvals = [5.56e-11, 2.666e-11, 1.363e-11] # from R
for pval, num in zip(pvals, [75, 76, 77]):
res = stats.fisher_exact([[17704, 496], [1065, num]])[1]
assert_approx_equal(res, pval, significant=4)
res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1]
assert_approx_equal(res, 0.2751, significant=4)
def test_raises(self):
# test we raise an error for wrong shape of input.
assert_raises(ValueError, stats.fisher_exact,
np.arange(6).reshape(2, 3))
def test_row_or_col_zero(self):
tables = ([[0, 0], [5, 10]],
[[5, 10], [0, 0]],
[[0, 5], [0, 10]],
[[5, 0], [10, 0]])
for table in tables:
oddsratio, pval = stats.fisher_exact(table)
assert_equal(pval, 1.0)
assert_equal(oddsratio, np.nan)
def test_less_greater(self):
tables = (
# Some tables to compare with R:
[[2, 7], [8, 2]],
[[200, 7], [8, 300]],
[[28, 21], [6, 1957]],
[[190, 800], [200, 900]],
# Some tables with simple exact values
# (includes regression test for ticket #1568):
[[0, 2], [3, 0]],
[[1, 1], [2, 1]],
[[2, 0], [1, 2]],
[[0, 1], [2, 3]],
[[1, 0], [1, 4]],
)
pvals = (
# from R:
[0.018521725952066501, 0.9990149169715733],
[1.0, 2.0056578803889148e-122],
[1.0, 5.7284374608319831e-44],
[0.7416227, 0.2959826],
# Exact:
[0.1, 1.0],
[0.7, 0.9],
[1.0, 0.3],
[2./3, 1.0],
[1.0, 1./3],
)
for table, pval in zip(tables, pvals):
res = []
res.append(stats.fisher_exact(table, alternative="less")[1])
res.append(stats.fisher_exact(table, alternative="greater")[1])
assert_allclose(res, pval, atol=0, rtol=1e-7)
def test_gh3014(self):
# check if issue #3014 has been fixed.
# before the fix, this would have raised a ValueError
odds, pvalue = stats.fisher_exact([[1, 2], [9, 84419233]])
class TestCorrSpearmanr(object):
""" W.II.D. Compute a correlation matrix on all the variables.
All the correlations, except for ZERO and MISS, should be exactly 1.
ZERO and MISS should have undefined or missing correlations with the
other variables. The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_scalar(self):
y = stats.spearmanr(4., 2.)
assert_(np.isnan(y).all())
def test_uneven_lengths(self):
assert_raises(ValueError, stats.spearmanr, [1, 2, 1], [8, 9])
assert_raises(ValueError, stats.spearmanr, [1, 2, 1], 8)
def test_nan_policy(self):
x = np.arange(10.)
x[9] = np.nan
assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
assert_array_equal(stats.spearmanr(x, x, nan_policy='omit'),
(1.0, 0.0))
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
def test_sXX(self):
y = stats.spearmanr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXBIG(self):
y = stats.spearmanr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXLITTLE(self):
y = stats.spearmanr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXHUGE(self):
y = stats.spearmanr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXTINY(self):
y = stats.spearmanr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXROUND(self):
y = stats.spearmanr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGBIG(self):
y = stats.spearmanr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGLITTLE(self):
y = stats.spearmanr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGHUGE(self):
y = stats.spearmanr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGTINY(self):
y = stats.spearmanr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGROUND(self):
y = stats.spearmanr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLELITTLE(self):
y = stats.spearmanr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEHUGE(self):
y = stats.spearmanr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLETINY(self):
y = stats.spearmanr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEROUND(self):
y = stats.spearmanr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEHUGE(self):
y = stats.spearmanr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGETINY(self):
y = stats.spearmanr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEROUND(self):
y = stats.spearmanr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYTINY(self):
y = stats.spearmanr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYROUND(self):
y = stats.spearmanr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sROUNDROUND(self):
y = stats.spearmanr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_spearmanr_result_attributes(self):
res = stats.spearmanr(X, X)
attributes = ('correlation', 'pvalue')
check_named_results(res, attributes)
def test_spearmanr():
# Cross-check with R:
# cor.test(c(1,2,3,4,5),c(5,6,7,8,7),method="spearman")
x1 = [1, 2, 3, 4, 5]
x2 = [5, 6, 7, 8, 7]
expected = (0.82078268166812329, 0.088587005313543798)
res = stats.spearmanr(x1, x2)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
attributes = ('correlation', 'pvalue')
res = stats.spearmanr(x1, x2)
check_named_results(res, attributes)
# with only ties in one or both inputs
with np.errstate(invalid="ignore"):
assert_equal(stats.spearmanr([2,2,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.spearmanr([2,0,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.spearmanr([2,2,2], [2,0,2]), (np.nan, np.nan))
# empty arrays provided as input
assert_equal(stats.spearmanr([], []), (np.nan, np.nan))
np.random.seed(7546)
x = np.array([np.random.normal(loc=1, scale=1, size=500),
np.random.normal(loc=1, scale=1, size=500)])
corr = [[1.0, 0.3],
[0.3, 1.0]]
x = np.dot(np.linalg.cholesky(corr), x)
expected = (0.28659685838743354, 6.579862219051161e-11)
res = stats.spearmanr(x[0], x[1])
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
assert_approx_equal(stats.spearmanr([1,1,2], [1,1,2])[0], 1.0)
# test nan_policy
x = np.arange(10.)
x[9] = np.nan
assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan))
assert_allclose(stats.spearmanr(x, x, nan_policy='omit'),
(1.0, 0))
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise')
assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar')
# test unequal length inputs
x = np.arange(10.)
y = np.arange(20.)
assert_raises(ValueError, stats.spearmanr, x, y)
# test paired value
x1 = [1, 2, 3, 4]
x2 = [8, 7, 6, np.nan]
res1 = stats.spearmanr(x1, x2, nan_policy='omit')
res2 = stats.spearmanr(x1[:3], x2[:3], nan_policy='omit')
assert_equal(res1, res2)
# Regression test for GitHub issue #6061 - Overflow on Windows
x = list(range(2000))
y = list(range(2000))
y[0], y[9] = y[9], y[0]
y[10], y[434] = y[434], y[10]
y[435], y[1509] = y[1509], y[435]
# rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
# = 1 - (1 / 500)
# = 0.998
x.append(np.nan)
y.append(3.0)
assert_almost_equal(stats.spearmanr(x, y, nan_policy='omit')[0], 0.998)
class TestCorrSpearmanrTies(object):
"""Some tests of tie-handling by the spearmanr function."""
def test_tie1(self):
# Data
x = [1.0, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 2.0, 3.0]
# Ranks of the data, with tie-handling.
xr = [1.0, 2.0, 3.0, 4.0]
yr = [1.0, 2.5, 2.5, 4.0]
# Result of spearmanr should be the same as applying
# pearsonr to the ranks.
sr = stats.spearmanr(x, y)
pr = stats.pearsonr(xr, yr)
assert_almost_equal(sr, pr)
def test_tie2(self):
# Test tie-handling if inputs contain nan's
# Data without nan's
x1 = [1, 2, 2.5, 2]
y1 = [1, 3, 2.5, 4]
# Same data with nan's
x2 = [1, 2, 2.5, 2, np.nan]
y2 = [1, 3, 2.5, 4, np.nan]
# Results for two data sets should be the same if nan's are ignored
sr1 = stats.spearmanr(x1, y1)
sr2 = stats.spearmanr(x2, y2, nan_policy='omit')
assert_almost_equal(sr1, sr2)
# W.II.E. Tabulate X against X, using BIG as a case weight. The values
# should appear on the diagonal and the total should be 899999955.
# If the table cannot hold these values, forget about working with
# census data. You can also tabulate HUGE against TINY. There is no
# reason a tabulation program should not be able to distinguish
# different values regardless of their magnitude.
# I need to figure out how to do this one.
def test_kendalltau():
# with some ties
# Cross-check with R:
# cor.test(c(12,2,1,12,2),c(1,4,7,1,0),method="kendall",exact=FALSE)
x1 = [12, 2, 1, 12, 2]
x2 = [1, 4, 7, 1, 0]
expected = (-0.47140452079103173, 0.28274545993277478)
res = stats.kendalltau(x1, x2)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# test for namedtuple attribute results
attributes = ('correlation', 'pvalue')
res = stats.kendalltau(x1, x2)
check_named_results(res, attributes)
# with only ties in one or both inputs
assert_equal(stats.kendalltau([2,2,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.kendalltau([2,0,2], [2,2,2]), (np.nan, np.nan))
assert_equal(stats.kendalltau([2,2,2], [2,0,2]), (np.nan, np.nan))
# empty arrays provided as input
assert_equal(stats.kendalltau([], []), (np.nan, np.nan))
# check with larger arrays
np.random.seed(7546)
x = np.array([np.random.normal(loc=1, scale=1, size=500),
np.random.normal(loc=1, scale=1, size=500)])
corr = [[1.0, 0.3],
[0.3, 1.0]]
x = np.dot(np.linalg.cholesky(corr), x)
expected = (0.19291382765531062, 1.1337095377742629e-10)
res = stats.kendalltau(x[0], x[1])
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# and do we get a tau of 1 for identical inputs?
assert_approx_equal(stats.kendalltau([1,1,2], [1,1,2])[0], 1.0)
# test nan_policy
x = np.arange(10.)
x[9] = np.nan
assert_array_equal(stats.kendalltau(x, x), (np.nan, np.nan))
assert_allclose(stats.kendalltau(x, x, nan_policy='omit'),
(1.0, 0.00017455009626808976), rtol=1e-06)
assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='raise')
assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='foobar')
# test unequal length inputs
x = np.arange(10.)
y = np.arange(20.)
assert_raises(ValueError, stats.kendalltau, x, y)
# test all ties
tau, p_value = stats.kendalltau([], [])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
tau, p_value = stats.kendalltau([0], [0])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
# Regression test for GitHub issue #6061 - Overflow on Windows
x = np.arange(2000, dtype=float)
x = np.ma.masked_greater(x, 1995)
y = np.arange(2000, dtype=float)
y = np.concatenate((y[1000:], y[:1000]))
assert_(np.isfinite(stats.kendalltau(x,y)[1]))
def test_kendalltau_vs_mstats_basic():
np.random.seed(42)
for s in range(2,10):
a = []
# Generate rankings with ties
for i in range(s):
a += [i]*i
b = list(a)
np.random.shuffle(a)
np.random.shuffle(b)
expected = mstats_basic.kendalltau(a, b)
actual = stats.kendalltau(a, b)
assert_approx_equal(actual[0], expected[0])
assert_approx_equal(actual[1], expected[1])
def test_kendalltau_nan_2nd_arg():
# regression test for gh-6134: nans in the second arg were not handled
x = [1., 2., 3., 4.]
y = [np.nan, 2.4, 3.4, 3.4]
r1 = stats.kendalltau(x, y, nan_policy='omit')
r2 = stats.kendalltau(x[1:], y[1:])
assert_allclose(r1.correlation, r2.correlation, atol=1e-15)
def test_weightedtau():
x = [12, 2, 1, 12, 2]
y = [1, 4, 7, 1, 0]
tau, p_value = stats.weightedtau(x, y)
assert_approx_equal(tau, -0.56694968153682723)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(x, y, additive=False)
assert_approx_equal(tau, -0.62205716951801038)
assert_equal(np.nan, p_value)
# This must be exactly Kendall's tau
tau, p_value = stats.weightedtau(x, y, weigher=lambda x: 1)
assert_approx_equal(tau, -0.47140452079103173)
assert_equal(np.nan, p_value)
# Asymmetric, ranked version
tau, p_value = stats.weightedtau(x, y, rank=None)
assert_approx_equal(tau, -0.4157652301037516)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(y, x, rank=None)
assert_approx_equal(tau, -0.7181341329699029)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(x, y, rank=None, additive=False)
assert_approx_equal(tau, -0.40644850966246893)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(y, x, rank=None, additive=False)
assert_approx_equal(tau, -0.83766582937355172)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(x, y, rank=False)
assert_approx_equal(tau, -0.51604397940261848)
assert_equal(np.nan, p_value)
# This must be exactly Kendall's tau
tau, p_value = stats.weightedtau(x, y, rank=True, weigher=lambda x: 1)
assert_approx_equal(tau, -0.47140452079103173)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau(y, x, rank=True, weigher=lambda x: 1)
assert_approx_equal(tau, -0.47140452079103173)
assert_equal(np.nan, p_value)
# Test argument conversion
tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), y)
assert_approx_equal(tau, -0.56694968153682723)
tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.int16), y)
assert_approx_equal(tau, -0.56694968153682723)
tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), np.asarray(y, dtype=np.float64))
assert_approx_equal(tau, -0.56694968153682723)
# All ties
tau, p_value = stats.weightedtau([], [])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
tau, p_value = stats.weightedtau([0], [0])
assert_equal(np.nan, tau)
assert_equal(np.nan, p_value)
# Size mismatches
assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1, 2])
assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1], [0])
# NaNs
x = [12, 2, 1, 12, 2]
y = [1, 4, 7, 1, np.nan]
tau, p_value = stats.weightedtau(x, y)
assert_approx_equal(tau, -0.56694968153682723)
x = [12, 2, np.nan, 12, 2]
tau, p_value = stats.weightedtau(x, y)
assert_approx_equal(tau, -0.56694968153682723)
def test_weightedtau_vs_quadratic():
# Trivial quadratic implementation, all parameters mandatory
def wkq(x, y, rank, weigher, add):
tot = conc = disc = u = v = 0
for i in range(len(x)):
for j in range(len(x)):
w = weigher(rank[i]) + weigher(rank[j]) if add else weigher(rank[i]) * weigher(rank[j])
tot += w
if x[i] == x[j]:
u += w
if y[i] == y[j]:
v += w
if x[i] < x[j] and y[i] < y[j] or x[i] > x[j] and y[i] > y[j]:
conc += w
elif x[i] < x[j] and y[i] > y[j] or x[i] > x[j] and y[i] < y[j]:
disc += w
return (conc - disc) / np.sqrt(tot - u) / np.sqrt(tot - v)
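# Note on the formula above: the return value mirrors a tau-b style
# normalization -- conc - disc is the weighted difference between concordant
# and discordant pairs, while u and v subtract the weight of pairs tied in x
# and in y respectively (every pair with i == j is counted in both), leaving
# the weighted analogue of sqrt((n0 - n1)*(n0 - n2)) in the denominator.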
np.random.seed(42)
for s in range(3,10):
a = []
# Generate rankings with ties
for i in range(s):
a += [i]*i
b = list(a)
np.random.shuffle(a)
np.random.shuffle(b)
# First pass: use element indices as ranks
rank = np.arange(len(a), dtype=np.intp)
for _ in range(2):
for add in [True, False]:
expected = wkq(a, b, rank, lambda x: 1./(x+1), add)
actual = stats.weightedtau(a, b, rank, lambda x: 1./(x+1), add).correlation
assert_approx_equal(expected, actual)
# Second pass: use a random rank
np.random.shuffle(rank)
class TestFindRepeats(object):
def test_basic(self):
a = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 5]
res, nums = stats.find_repeats(a)
assert_array_equal(res, [1, 2, 3, 4])
assert_array_equal(nums, [3, 3, 2, 2])
def test_empty_result(self):
# Check that empty arrays are returned when there are no repeats.
for a in [[10, 20, 50, 30, 40], []]:
repeated, counts = stats.find_repeats(a)
assert_array_equal(repeated, [])
assert_array_equal(counts, [])
class TestRegression(object):
def test_linregressBIGX(self):
# W.II.F. Regress BIG on X.
# The constant should be 99999990 and the regression coefficient should be 1.
y = stats.linregress(X,BIG)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,99999990)
assert_almost_equal(r,1.0)
def test_regressXX(self):
# W.IV.B. Regress X on X.
# The constant should be exactly 0 and the regression coefficient should be 1.
# This is a perfectly valid regression. The program should not complain.
y = stats.linregress(X,X)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,0.0)
assert_almost_equal(r,1.0)
# W.IV.C. Regress X on BIG and LITTLE (two predictors). The program
# should tell you that this model is "singular" because BIG and
# LITTLE are linear combinations of each other. Cryptic error
# messages are unacceptable here. Singularity is the most
# fundamental regression error.
# Need to figure out how to handle multiple linear regression. Not obvious
def test_regressZEROX(self):
# W.IV.D. Regress ZERO on X.
# The program should inform you that ZERO has no variance or it should
# go ahead and compute the regression and report a correlation and
# total sum of squares of exactly 0.
y = stats.linregress(X,ZERO)
intercept = y[1]
r = y[2]
assert_almost_equal(intercept,0.0)
assert_almost_equal(r,0.0)
def test_regress_simple(self):
# Regress a line with sinusoidal noise.
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
res = stats.linregress(x, y)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_simple_onearg_rows(self):
# Regress a line with sinusoidal noise, with a single input of shape (2, N).
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
rows = np.vstack((x, y))
res = stats.linregress(rows)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_simple_onearg_cols(self):
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
cols = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1)))
res = stats.linregress(cols)
assert_almost_equal(res[4], 2.3957814497838803e-3)
def test_regress_shape_error(self):
# Check that a single input argument to linregress with wrong shape
# results in a ValueError.
assert_raises(ValueError, stats.linregress, np.ones((3, 3)))
def test_linregress(self):
# compared with multivariate ols with pinv
x = np.arange(11)
y = np.arange(5,16)
y[[1, -2]] -= 1
y[[0, -1]] += 1
res = (1.0, 5.0, 0.98229948625750, 7.45259691e-008, 0.063564172616372733)
assert_array_almost_equal(stats.linregress(x,y),res,decimal=14)
def test_regress_simple_negative_cor(self):
# If the slope of the regression is negative, the factor R tends to -1, not 1.
# Sometimes rounding errors make it < -1, leading to stderr being NaN.
a, n = 1e-71, 100000
x = np.linspace(a, 2 * a, n)
y = np.linspace(2 * a, a, n)
stats.linregress(x, y)
res = stats.linregress(x, y)
assert_(res[2] >= -1) # propagated numerical errors were not corrected
assert_almost_equal(res[2], -1) # perfect negative correlation case
assert_(not np.isnan(res[4])) # stderr should stay finite
def test_linregress_result_attributes(self):
# Regress a line with sinusoidal noise.
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
res = stats.linregress(x, y)
attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
check_named_results(res, attributes)
def test_regress_two_inputs(self):
# Regress a simple line formed by two points.
x = np.arange(2)
y = np.arange(3, 5)
res = stats.linregress(x, y)
assert_almost_equal(res[3], 0.0) # non-horizontal line
assert_almost_equal(res[4], 0.0) # zero stderr
def test_regress_two_inputs_horizontal_line(self):
# Regress a horizontal line formed by two points.
x = np.arange(2)
y = np.ones(2)
res = stats.linregress(x, y)
assert_almost_equal(res[3], 1.0) # horizontal line
assert_almost_equal(res[4], 0.0) # zero stderr
def test_nist_norris(self):
x = [0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6, 777.0,
558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6, 556.0, 228.1,
995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1, 887.2, 999.0, 779.0,
11.1, 118.3, 229.2, 669.1, 448.9, 0.5]
y = [0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1, 778.9,
559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8, 557.7, 228.3,
998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3, 888.0, 998.5, 778.9,
10.2, 117.6, 228.9, 668.4, 449.2, 0.2]
# Expected values
exp_slope = 1.00211681802045
exp_intercept = -0.262323073774029
exp_rvalue = 0.999993745883712
actual = stats.linregress(x, y)
assert_almost_equal(actual.slope, exp_slope)
assert_almost_equal(actual.intercept, exp_intercept)
assert_almost_equal(actual.rvalue, exp_rvalue, decimal=5)
def test_empty_input(self):
assert_raises(ValueError, stats.linregress, [], [])
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.linregress(x, x),
(np.nan, np.nan, np.nan, np.nan, np.nan))
def test_theilslopes():
# Basic slope test.
slope, intercept, lower, upper = stats.theilslopes([0,1,1])
assert_almost_equal(slope, 0.5)
assert_almost_equal(intercept, 0.5)
# Test of confidence intervals.
x = [1, 2, 3, 4, 10, 12, 18]
y = [9, 15, 19, 20, 45, 55, 78]
slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07)
assert_almost_equal(slope, 4)
assert_almost_equal(upper, 4.38, decimal=2)
assert_almost_equal(lower, 3.71, decimal=2)
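# Hand check of the basic case (a sketch of the Theil-Sen definition, not of
# scipy's implementation details): for y = [0, 1, 1] at the default
# x = [0, 1, 2], the pairwise slopes are (1-0)/1 = 1, (1-0)/2 = 0.5 and
# (1-1)/1 = 0, whose median is 0.5; the intercept then follows as
# median(y) - slope*median(x) = 1 - 0.5*1 = 0.5.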
def test_cumfreq():
x = [1, 4, 2, 1, 3, 1]
cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4)
assert_array_almost_equal(cumfreqs, np.array([3., 4., 5., 6.]))
cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4,
defaultreallimits=(1.5, 5))
assert_(extrapoints == 3)
# test for namedtuple attribute results
attributes = ('cumcount', 'lowerlimit', 'binsize', 'extrapoints')
res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
check_named_results(res, attributes)
def test_relfreq():
a = np.array([1, 4, 2, 1, 3, 1])
relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4)
assert_array_almost_equal(relfreqs,
array([0.5, 0.16666667, 0.16666667, 0.16666667]))
# test for namedtuple attribute results
attributes = ('frequency', 'lowerlimit', 'binsize', 'extrapoints')
res = stats.relfreq(a, numbins=4)
check_named_results(res, attributes)
# check array_like input is accepted
relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1],
numbins=4)
assert_array_almost_equal(relfreqs, relfreqs2)
class TestGMean(object):
def test_1D_list(self):
a = (1,2,3,4)
actual = stats.gmean(a)
desired = power(1*2*3*4,1./4.)
assert_almost_equal(actual, desired,decimal=14)
desired1 = stats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
def test_1D_array(self):
a = array((1,2,3,4), float32)
actual = stats.gmean(a)
desired = power(1*2*3*4,1./4.)
assert_almost_equal(actual, desired, decimal=7)
desired1 = stats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=7)
def test_2D_array_default(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
actual = stats.gmean(a)
desired = array((1,2,3,4))
assert_array_almost_equal(actual, desired, decimal=14)
desired1 = stats.gmean(a,axis=0)
assert_array_almost_equal(actual, desired1, decimal=14)
def test_2D_array_dim1(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
actual = stats.gmean(a, axis=1)
v = power(1*2*3*4,1./4.)
desired = array((v,v,v))
assert_array_almost_equal(actual, desired, decimal=14)
def test_large_values(self):
a = array([1e100, 1e200, 1e300])
actual = stats.gmean(a)
assert_approx_equal(actual, 1e200, significant=13)
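# The large-value case passes because the geometric mean can be computed in
# log space, exp(mean(log(a))), so intermediate products never overflow:
# the mean of the base-10 exponents (100, 200, 300) is 200, giving 1e200.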
class TestHMean(object):
def test_1D_list(self):
a = (1,2,3,4)
actual = stats.hmean(a)
desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
assert_almost_equal(actual, desired, decimal=14)
desired1 = stats.hmean(array(a),axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
def test_1D_array(self):
a = array((1,2,3,4), float64)
actual = stats.hmean(a)
desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
assert_almost_equal(actual, desired, decimal=14)
desired1 = stats.hmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
def test_2D_array_default(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
actual = stats.hmean(a)
desired = array((1.,2.,3.,4.))
assert_array_almost_equal(actual, desired, decimal=14)
actual1 = stats.hmean(a,axis=0)
assert_array_almost_equal(actual1, desired, decimal=14)
def test_2D_array_dim1(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
v = 4. / (1./1 + 1./2 + 1./3 + 1./4)
desired1 = array((v,v,v))
actual1 = stats.hmean(a, axis=1)
assert_array_almost_equal(actual1, desired1, decimal=14)
class TestScoreatpercentile(object):
def setup_method(self):
self.a1 = [3, 4, 5, 10, -3, -5, 6]
self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]
def test_basic(self):
x = arange(8) * 0.5
assert_equal(stats.scoreatpercentile(x, 0), 0.)
assert_equal(stats.scoreatpercentile(x, 100), 3.5)
assert_equal(stats.scoreatpercentile(x, 50), 1.75)
def test_fraction(self):
scoreatperc = stats.scoreatpercentile
# Test defaults
assert_equal(scoreatperc(list(range(10)), 50), 4.5)
assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5)
assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10,100)), 55)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10)), 5.5)
# explicitly specify interpolation_method 'fraction' (the default)
assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7),
interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8),
interpolation_method='fraction'),
4.5)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10, 100),
interpolation_method='fraction'),
55)
assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10),
interpolation_method='fraction'),
5.5)
def test_lower_higher(self):
scoreatperc = stats.scoreatpercentile
# interpolation_method 'lower'/'higher'
assert_equal(scoreatperc(list(range(10)), 50,
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(10)), 50,
interpolation_method='higher'), 5)
assert_equal(scoreatperc(list(range(10)), 50, (2,7),
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7),
interpolation_method='higher'), 5)
assert_equal(scoreatperc(list(range(100)), 50, (1,8),
interpolation_method='lower'), 4)
assert_equal(scoreatperc(list(range(100)), 50, (1,8),
interpolation_method='higher'), 5)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100),
interpolation_method='lower'), 10)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100),
interpolation_method='higher'), 100)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10),
interpolation_method='lower'), 1)
assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10),
interpolation_method='higher'), 10)
def test_sequence_per(self):
x = arange(8) * 0.5
expected = np.array([0, 3.5, 1.75])
res = stats.scoreatpercentile(x, [0, 100, 50])
assert_allclose(res, expected)
assert_(isinstance(res, np.ndarray))
# Test with ndarray. Regression test for gh-2861
assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])),
expected)
# Also test combination of 2-D array, axis not None and array-like per
res2 = stats.scoreatpercentile(np.arange(12).reshape((3,4)),
np.array([0, 1, 100, 100]), axis=1)
expected2 = array([[0, 4, 8],
[0.03, 4.03, 8.03],
[3, 7, 11],
[3, 7, 11]])
assert_allclose(res2, expected2)
def test_axis(self):
scoreatperc = stats.scoreatpercentile
x = arange(12).reshape(3, 4)
assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0])
r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0)
r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]]
assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1)
x = array([[1, 1, 1],
[1, 1, 1],
[4, 4, 3],
[1, 1, 1],
[1, 1, 1]])
score = stats.scoreatpercentile(x, 50)
assert_equal(score.shape, ())
assert_equal(score, 1.0)
score = stats.scoreatpercentile(x, 50, axis=0)
assert_equal(score.shape, (3,))
assert_equal(score, [1, 1, 1])
def test_exception(self):
assert_raises(ValueError, stats.scoreatpercentile, [1, 2], 56,
interpolation_method='foobar')
assert_raises(ValueError, stats.scoreatpercentile, [1], 101)
assert_raises(ValueError, stats.scoreatpercentile, [1], -1)
def test_empty(self):
assert_equal(stats.scoreatpercentile([], 50), np.nan)
assert_equal(stats.scoreatpercentile(np.array([[], []]), 50), np.nan)
assert_equal(stats.scoreatpercentile([], [50, 99]), [np.nan, np.nan])
class TestItemfreq(object):
a = [5, 7, 1, 2, 1, 5, 7] * 10
b = [1, 2, 5, 7]
def test_numeric_types(self):
# Check itemfreq works for all dtypes (adapted from np.unique tests)
def _check_itemfreq(dt):
a = np.array(self.a, dt)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
v = stats.itemfreq(a)
assert_array_equal(v[:, 0], [1, 2, 5, 7])
assert_array_equal(v[:, 1], np.array([20, 10, 20, 20], dtype=dt))
dtypes = [np.int32, np.int64, np.float32, np.float64,
np.complex64, np.complex128]
for dt in dtypes:
_check_itemfreq(dt)
def test_object_arrays(self):
a, b = self.a, self.b
dt = 'O'
aa = np.empty(len(a), dt)
aa[:] = a
bb = np.empty(len(b), dt)
bb[:] = b
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
v = stats.itemfreq(aa)
assert_array_equal(v[:, 0], bb)
def test_structured_arrays(self):
a, b = self.a, self.b
dt = [('', 'i'), ('', 'i')]
aa = np.array(list(zip(a, a)), dt)
bb = np.array(list(zip(b, b)), dt)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
v = stats.itemfreq(aa)
        # Arrays don't compare equal because v[:, 0] is an object array
assert_equal(tuple(v[2, 0]), tuple(bb[2]))
class TestMode(object):
def test_empty(self):
vals, counts = stats.mode([])
assert_equal(vals, np.array([]))
assert_equal(counts, np.array([]))
def test_scalar(self):
vals, counts = stats.mode(4.)
assert_equal(vals, np.array([4.]))
assert_equal(counts, np.array([1]))
def test_basic(self):
data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
vals = stats.mode(data1)
assert_equal(vals[0][0], 6)
assert_equal(vals[1][0], 3)
def test_axes(self):
data1 = [10, 10, 30, 40]
data2 = [10, 10, 10, 10]
data3 = [20, 10, 20, 20]
data4 = [30, 30, 30, 30]
data5 = [40, 30, 30, 30]
arr = np.array([data1, data2, data3, data4, data5])
vals = stats.mode(arr, axis=None)
assert_equal(vals[0], np.array([30]))
assert_equal(vals[1], np.array([8]))
vals = stats.mode(arr, axis=0)
assert_equal(vals[0], np.array([[10, 10, 30, 30]]))
assert_equal(vals[1], np.array([[2, 3, 3, 2]]))
vals = stats.mode(arr, axis=1)
assert_equal(vals[0], np.array([[10], [10], [20], [30], [30]]))
assert_equal(vals[1], np.array([[2], [4], [3], [4], [3]]))
def test_strings(self):
data1 = ['rain', 'showers', 'showers']
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, ".*checked for nan values")
vals = stats.mode(data1)
assert_equal(len(r), 1)
assert_equal(vals[0][0], 'showers')
assert_equal(vals[1][0], 2)
@pytest.mark.xfail(sys.version_info > (3,), reason='numpy github issue 641')
def test_mixed_objects(self):
objects = [10, True, np.nan, 'hello', 10]
arr = np.empty((5,), dtype=object)
arr[:] = objects
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, ".*checked for nan values")
vals = stats.mode(arr)
assert_equal(len(r), 1)
assert_equal(vals[0][0], 10)
assert_equal(vals[1][0], 2)
def test_objects(self):
# Python objects must be sortable (le + eq) and have ne defined
# for np.unique to work. hash is for set.
class Point(object):
def __init__(self, x):
self.x = x
def __eq__(self, other):
return self.x == other.x
def __ne__(self, other):
return self.x != other.x
def __lt__(self, other):
return self.x < other.x
def __hash__(self):
return hash(self.x)
points = [Point(x) for x in [1, 2, 3, 4, 3, 2, 2, 2]]
arr = np.empty((8,), dtype=object)
arr[:] = points
assert_(len(set(points)) == 4)
assert_equal(np.unique(arr).shape, (4,))
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, ".*checked for nan values")
vals = stats.mode(arr)
assert_equal(len(r), 1)
assert_equal(vals[0][0], Point(2))
assert_equal(vals[1][0], 4)
def test_mode_result_attributes(self):
data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
data2 = []
actual = stats.mode(data1)
attributes = ('mode', 'count')
check_named_results(actual, attributes)
actual2 = stats.mode(data2)
check_named_results(actual2, attributes)
def test_mode_nan(self):
data1 = [3, np.nan, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6]
actual = stats.mode(data1)
assert_equal(actual, (6, 3))
actual = stats.mode(data1, nan_policy='omit')
assert_equal(actual, (6, 3))
assert_raises(ValueError, stats.mode, data1, nan_policy='raise')
assert_raises(ValueError, stats.mode, data1, nan_policy='foobar')
class TestVariability(object):
testcase = [1,2,3,4]
scalar_testcase = 4.
def test_sem(self):
        # This is not in R, so tested by using:
# sqrt(var(testcase)*3/4)/sqrt(3)
# y = stats.sem(self.shoes[0])
# assert_approx_equal(y,0.775177399)
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
y = stats.sem(self.scalar_testcase)
assert_(np.isnan(y))
y = stats.sem(self.testcase)
assert_approx_equal(y, 0.6454972244)
n = len(self.testcase)
assert_allclose(stats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
stats.sem(self.testcase, ddof=2))
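        # sem(ddof=k) is sqrt(sum((x - mean)**2) / (n - k)) / sqrt(n), so the
        # ddof=0 and ddof=2 results differ exactly by a factor sqrt(n/(n-2)).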
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.sem(x), np.nan)
assert_equal(stats.sem(x, nan_policy='omit'), 0.9128709291752769)
assert_raises(ValueError, stats.sem, x, nan_policy='raise')
assert_raises(ValueError, stats.sem, x, nan_policy='foobar')
def test_zmap(self):
# not in R, so tested by using:
# (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)
y = stats.zmap(self.testcase,self.testcase)
desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired,y,decimal=12)
def test_zmap_axis(self):
# Test use of 'axis' keyword in zmap.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 2.0],
[2.0, 0.0, 2.0, 0.0]])
t1 = 1.0/np.sqrt(2.0/3)
t2 = np.sqrt(3.)/3
t3 = np.sqrt(2.)
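        # E.g. the first column [0, 1, 2] has mean 1 and population standard
        # deviation sqrt(2/3), so its z-scores are (-t1, 0, t1).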
z0 = stats.zmap(x, x, axis=0)
z1 = stats.zmap(x, x, axis=1)
z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
[0.0, t3, -t3/2, t1],
[t1, -t3/2, t3, -t1]]
z1_expected = [[-1.0, -1.0, 1.0, 1.0],
[-t2, -t2, -t2, np.sqrt(3.)],
[1.0, -1.0, 1.0, -1.0]]
assert_array_almost_equal(z0, z0_expected)
assert_array_almost_equal(z1, z1_expected)
def test_zmap_ddof(self):
# Test use of 'ddof' keyword in zmap.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 2.0, 3.0]])
z = stats.zmap(x, x, axis=1, ddof=1)
z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
assert_array_almost_equal(z[0], z0_expected)
assert_array_almost_equal(z[1], z1_expected)
def test_zscore(self):
# not in R, so tested by using:
# (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4)
y = stats.zscore(self.testcase)
desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999])
assert_array_almost_equal(desired,y,decimal=12)
def test_zscore_axis(self):
# Test use of 'axis' keyword in zscore.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 2.0],
[2.0, 0.0, 2.0, 0.0]])
t1 = 1.0/np.sqrt(2.0/3)
t2 = np.sqrt(3.)/3
t3 = np.sqrt(2.)
z0 = stats.zscore(x, axis=0)
z1 = stats.zscore(x, axis=1)
z0_expected = [[-t1, -t3/2, -t3/2, 0.0],
[0.0, t3, -t3/2, t1],
[t1, -t3/2, t3, -t1]]
z1_expected = [[-1.0, -1.0, 1.0, 1.0],
[-t2, -t2, -t2, np.sqrt(3.)],
[1.0, -1.0, 1.0, -1.0]]
assert_array_almost_equal(z0, z0_expected)
assert_array_almost_equal(z1, z1_expected)
def test_zscore_ddof(self):
# Test use of 'ddof' keyword in zscore.
x = np.array([[0.0, 0.0, 1.0, 1.0],
[0.0, 1.0, 2.0, 3.0]])
z = stats.zscore(x, axis=1, ddof=1)
z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3))
z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3))
assert_array_almost_equal(z[0], z0_expected)
assert_array_almost_equal(z[1], z1_expected)
class _numpy_version_warn_context_mgr(object):
"""
    A simple context manager class to avoid retyping the same code for
    different versions of numpy when the only difference is that older
    versions raise warnings.
    This manager does not apply to cases where the old code returns
    different values.
"""
def __init__(self, min_numpy_version, warning_type, num_warnings):
if NumpyVersion(np.__version__) < min_numpy_version:
self.numpy_is_old = True
self.warning_type = warning_type
self.num_warnings = num_warnings
self.delegate = warnings.catch_warnings(record = True)
else:
self.numpy_is_old = False
def __enter__(self):
if self.numpy_is_old:
self.warn_list = self.delegate.__enter__()
warnings.simplefilter("always")
return None
def __exit__(self, exc_type, exc_value, traceback):
if self.numpy_is_old:
self.delegate.__exit__(exc_type, exc_value, traceback)
_check_warnings(self.warn_list, self.warning_type, self.num_warnings)
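# Minimal usage sketch for the manager above (hypothetical arguments): on
# numpy older than the given version it records warnings and checks their
# type and count on exit; on newer numpy it is a no-op.
#
#     with _numpy_version_warn_context_mgr('1.9.0a', RuntimeWarning, 1):
#         stats.iqr(np.ones(4), keepdims=True)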
def _check_warnings(warn_list, expected_type, expected_len):
"""
    Checks that all of the warnings from a list returned by
    `warnings.catch_warnings(record=True)` are of the required type and that
    the list contains the expected number of warnings.
"""
assert_equal(len(warn_list), expected_len, "number of warnings")
for warn_ in warn_list:
assert_(warn_.category is expected_type)
class TestIQR(object):
def test_basic(self):
x = np.arange(8) * 0.5
np.random.shuffle(x)
assert_equal(stats.iqr(x), 1.75)
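        # With linear interpolation the 25th and 75th percentiles of
        # [0, 0.5, ..., 3.5] are 0.875 and 2.625, hence an IQR of 1.75.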
def test_api(self):
d = np.ones((5, 5))
stats.iqr(d)
stats.iqr(d, None)
stats.iqr(d, 1)
stats.iqr(d, (0, 1))
stats.iqr(d, None, (10, 90))
stats.iqr(d, None, (30, 20), 'raw')
stats.iqr(d, None, (25, 75), 1.5, 'propagate')
if NumpyVersion(np.__version__) >= '1.9.0a':
stats.iqr(d, None, (50, 50), 'normal', 'raise', 'linear')
stats.iqr(d, None, (25, 75), -0.4, 'omit', 'lower', True)
def test_empty(self):
assert_equal(stats.iqr([]), np.nan)
assert_equal(stats.iqr(np.arange(0)), np.nan)
def test_constant(self):
# Constant array always gives 0
x = np.ones((7, 4))
assert_equal(stats.iqr(x), 0.0)
assert_array_equal(stats.iqr(x, axis=0), np.zeros(4))
assert_array_equal(stats.iqr(x, axis=1), np.zeros(7))
# Even for older versions, 'linear' does not raise a warning
with _numpy_version_warn_context_mgr('1.9.0a', RuntimeWarning, 4):
assert_equal(stats.iqr(x, interpolation='linear'), 0.0)
assert_equal(stats.iqr(x, interpolation='midpoint'), 0.0)
assert_equal(stats.iqr(x, interpolation='nearest'), 0.0)
assert_equal(stats.iqr(x, interpolation='lower'), 0.0)
assert_equal(stats.iqr(x, interpolation='higher'), 0.0)
# 0 only along constant dimensions
# This also tests much of `axis`
y = np.ones((4, 5, 6)) * np.arange(6)
assert_array_equal(stats.iqr(y, axis=0), np.zeros((5, 6)))
assert_array_equal(stats.iqr(y, axis=1), np.zeros((4, 6)))
assert_array_equal(stats.iqr(y, axis=2), 2.5 * np.ones((4, 5)))
assert_array_equal(stats.iqr(y, axis=(0, 1)), np.zeros(6))
assert_array_equal(stats.iqr(y, axis=(0, 2)), 3. * np.ones(5))
assert_array_equal(stats.iqr(y, axis=(1, 2)), 3. * np.ones(4))
def test_scalarlike(self):
x = np.arange(1) + 7.0
assert_equal(stats.iqr(x[0]), 0.0)
assert_equal(stats.iqr(x), 0.0)
if NumpyVersion(np.__version__) >= '1.9.0a':
assert_array_equal(stats.iqr(x, keepdims=True), [0.0])
else:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_array_equal(stats.iqr(x, keepdims=True), 0.0)
_check_warnings(w, RuntimeWarning, 1)
def test_2D(self):
x = np.arange(15).reshape((3, 5))
assert_equal(stats.iqr(x), 7.0)
assert_array_equal(stats.iqr(x, axis=0), 5. * np.ones(5))
assert_array_equal(stats.iqr(x, axis=1), 2. * np.ones(3))
assert_array_equal(stats.iqr(x, axis=(0, 1)), 7.0)
assert_array_equal(stats.iqr(x, axis=(1, 0)), 7.0)
def test_axis(self):
# The `axis` keyword is also put through its paces in `test_keepdims`.
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10) # x.shape = (71, 23, 10)
q = stats.iqr(o)
assert_equal(stats.iqr(x, axis=(0, 1)), q)
x = np.rollaxis(x, -1, 0) # x.shape = (10, 71, 23)
assert_equal(stats.iqr(x, axis=(2, 1)), q)
x = x.swapaxes(0, 1) # x.shape = (71, 10, 23)
assert_equal(stats.iqr(x, axis=(0, 2)), q)
x = x.swapaxes(0, 1) # x.shape = (10, 71, 23)
assert_equal(stats.iqr(x, axis=(0, 1, 2)),
stats.iqr(x, axis=None))
assert_equal(stats.iqr(x, axis=(0,)),
stats.iqr(x, axis=0))
d = np.arange(3 * 5 * 7 * 11)
# Older versions of numpy only shuffle along axis=0.
        # Not sure about newer versions; it does not matter here.
np.random.shuffle(d)
d = d.reshape((3, 5, 7, 11))
assert_equal(stats.iqr(d, axis=(0, 1, 2))[0],
stats.iqr(d[:,:,:, 0].ravel()))
assert_equal(stats.iqr(d, axis=(0, 1, 3))[1],
stats.iqr(d[:,:, 1,:].ravel()))
assert_equal(stats.iqr(d, axis=(3, 1, -4))[2],
stats.iqr(d[:,:, 2,:].ravel()))
assert_equal(stats.iqr(d, axis=(3, 1, 2))[2],
stats.iqr(d[2,:,:,:].ravel()))
assert_equal(stats.iqr(d, axis=(3, 2))[2, 1],
stats.iqr(d[2, 1,:,:].ravel()))
assert_equal(stats.iqr(d, axis=(1, -2))[2, 1],
stats.iqr(d[2, :, :, 1].ravel()))
assert_equal(stats.iqr(d, axis=(1, 3))[2, 2],
stats.iqr(d[2, :, 2,:].ravel()))
if NumpyVersion(np.__version__) >= '1.9.0a':
assert_raises(IndexError, stats.iqr, d, axis=4)
else:
assert_raises(ValueError, stats.iqr, d, axis=4)
assert_raises(ValueError, stats.iqr, d, axis=(0, 0))
def test_rng(self):
x = np.arange(5)
assert_equal(stats.iqr(x), 2)
assert_equal(stats.iqr(x, rng=(25, 87.5)), 2.5)
assert_equal(stats.iqr(x, rng=(12.5, 75)), 2.5)
        assert_almost_equal(stats.iqr(x, rng=(10, 50)), 1.6)  # 2 - 0.4
assert_raises(ValueError, stats.iqr, x, rng=(0, 101))
assert_raises(ValueError, stats.iqr, x, rng=(np.nan, 25))
assert_raises(TypeError, stats.iqr, x, rng=(0, 50, 60))
def test_interpolation(self):
x = np.arange(5)
y = np.arange(4)
# Default
assert_equal(stats.iqr(x), 2)
assert_equal(stats.iqr(y), 1.5)
if NumpyVersion(np.__version__) >= '1.9.0a':
# Linear
assert_equal(stats.iqr(x, interpolation='linear'), 2)
assert_equal(stats.iqr(y, interpolation='linear'), 1.5)
# Higher
assert_equal(stats.iqr(x, interpolation='higher'), 2)
assert_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 3)
assert_equal(stats.iqr(y, interpolation='higher'), 2)
            # Lower (will generally, but not always, be the same as 'higher')
assert_equal(stats.iqr(x, interpolation='lower'), 2)
assert_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2)
assert_equal(stats.iqr(y, interpolation='lower'), 2)
# Nearest
assert_equal(stats.iqr(x, interpolation='nearest'), 2)
assert_equal(stats.iqr(y, interpolation='nearest'), 1)
# Midpoint
if NumpyVersion(np.__version__) >= '1.11.0a':
assert_equal(stats.iqr(x, interpolation='midpoint'), 2)
assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.5)
assert_equal(stats.iqr(y, interpolation='midpoint'), 2)
else:
# midpoint did not work correctly before numpy 1.11.0
assert_equal(stats.iqr(x, interpolation='midpoint'), 2)
assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2)
assert_equal(stats.iqr(y, interpolation='midpoint'), 2)
else:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# Linear
assert_equal(stats.iqr(x, interpolation='linear'), 2)
assert_equal(stats.iqr(y, interpolation='linear'), 1.5)
# Higher
assert_equal(stats.iqr(x, interpolation='higher'), 2)
assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 2.2)
assert_equal(stats.iqr(y, interpolation='higher'), 1.5)
# Lower
assert_equal(stats.iqr(x, interpolation='lower'), 2)
assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2.2)
assert_equal(stats.iqr(y, interpolation='lower'), 1.5)
# Nearest
assert_equal(stats.iqr(x, interpolation='nearest'), 2)
assert_equal(stats.iqr(y, interpolation='nearest'), 1.5)
# Midpoint
assert_equal(stats.iqr(x, interpolation='midpoint'), 2)
assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.2)
assert_equal(stats.iqr(y, interpolation='midpoint'), 1.5)
_check_warnings(w, RuntimeWarning, 11)
if NumpyVersion(np.__version__) >= '1.9.0a':
assert_raises(ValueError, stats.iqr, x, interpolation='foobar')
else:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_equal(stats.iqr(x, interpolation='foobar'), 2)
_check_warnings(w, RuntimeWarning, 1)
def test_keepdims(self):
numpy_version = NumpyVersion(np.__version__)
# Also tests most of `axis`
x = np.ones((3, 5, 7, 11))
assert_equal(stats.iqr(x, axis=None, keepdims=False).shape, ())
assert_equal(stats.iqr(x, axis=2, keepdims=False).shape, (3, 5, 11))
assert_equal(stats.iqr(x, axis=(0, 1), keepdims=False).shape, (7, 11))
assert_equal(stats.iqr(x, axis=(0, 3), keepdims=False).shape, (5, 7))
assert_equal(stats.iqr(x, axis=(1,), keepdims=False).shape, (3, 7, 11))
assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=False).shape, ())
assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=False).shape, (7,))
if numpy_version >= '1.9.0a':
assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, (1, 1, 1, 1))
assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 1, 11))
assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11))
assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1))
assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 1, 7, 11))
assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1))
assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1))
else:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, ())
assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 11))
assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (7, 11))
assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (5, 7))
assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 7, 11))
assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, ())
assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (7,))
_check_warnings(w, RuntimeWarning, 7)
def test_nanpolicy(self):
numpy_version = NumpyVersion(np.__version__)
x = np.arange(15.0).reshape((3, 5))
# No NaNs
assert_equal(stats.iqr(x, nan_policy='propagate'), 7)
assert_equal(stats.iqr(x, nan_policy='omit'), 7)
assert_equal(stats.iqr(x, nan_policy='raise'), 7)
# Yes NaNs
x[1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
if numpy_version < '1.10.0a':
# Fails over to mishmash of omit/propagate, but mostly omit
# The first case showcases the "incorrect" behavior of np.percentile
assert_equal(stats.iqr(x, nan_policy='propagate'), 8)
assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5])
if numpy_version < '1.9.0a':
assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, 3, 2])
else:
# some fixes to percentile nan handling in 1.9
assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])
_check_warnings(w, RuntimeWarning, 3)
else:
assert_equal(stats.iqr(x, nan_policy='propagate'), np.nan)
assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5])
assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
if numpy_version < '1.9.0a':
# Fails over to mishmash of omit/propagate, but mostly omit
assert_equal(stats.iqr(x, nan_policy='omit'), 8)
assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), [5, 5, np.nan, 5, 5])
assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 3, 2])
_check_warnings(w, RuntimeWarning, 3)
else:
assert_equal(stats.iqr(x, nan_policy='omit'), 7.5)
assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), 5 * np.ones(5))
assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 2.5, 2])
assert_raises(ValueError, stats.iqr, x, nan_policy='raise')
assert_raises(ValueError, stats.iqr, x, axis=0, nan_policy='raise')
assert_raises(ValueError, stats.iqr, x, axis=1, nan_policy='raise')
# Bad policy
assert_raises(ValueError, stats.iqr, x, nan_policy='barfood')
def test_scale(self):
numpy_version = NumpyVersion(np.__version__)
x = np.arange(15.0).reshape((3, 5))
# No NaNs
assert_equal(stats.iqr(x, scale='raw'), 7)
assert_almost_equal(stats.iqr(x, scale='normal'), 7 / 1.3489795)
assert_equal(stats.iqr(x, scale=2.0), 3.5)
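        # 1.3489795 is the IQR of the standard normal, 2 * norm.ppf(0.75),
        # so scale='normal' rescales the IQR into an estimate of the
        # standard deviation.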
# Yes NaNs
x[1, 2] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
if numpy_version < '1.10.0a':
# Fails over to mishmash of omit/propagate, but mostly omit
assert_equal(stats.iqr(x, scale='raw', nan_policy='propagate'), 8)
assert_almost_equal(stats.iqr(x, scale='normal',
nan_policy='propagate'),
8 / 1.3489795)
assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), 4)
# axis=1 chosen to show behavior with both nans and without
if numpy_version < '1.9.0a':
assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, 3, 2])
assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
nan_policy='propagate'),
np.array([2, 3, 2]) / 1.3489795)
assert_equal(stats.iqr(x, axis=1, scale=2.0,
nan_policy='propagate'), [1, 1.5, 1])
else:
# some fixes to percentile nan handling in 1.9
assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2])
assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
nan_policy='propagate'),
np.array([2, np.nan, 2]) / 1.3489795)
assert_equal(stats.iqr(x, axis=1, scale=2.0,
nan_policy='propagate'), [1, np.nan, 1])
_check_warnings(w, RuntimeWarning, 6)
else:
assert_equal(stats.iqr(x, scale='raw', nan_policy='propagate'), np.nan)
assert_equal(stats.iqr(x, scale='normal', nan_policy='propagate'), np.nan)
assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), np.nan)
# axis=1 chosen to show behavior with both nans and without
assert_equal(stats.iqr(x, axis=1, scale='raw',
nan_policy='propagate'), [2, np.nan, 2])
assert_almost_equal(stats.iqr(x, axis=1, scale='normal',
nan_policy='propagate'),
np.array([2, np.nan, 2]) / 1.3489795)
assert_equal(stats.iqr(x, axis=1, scale=2.0, nan_policy='propagate'),
[1, np.nan, 1])
_check_warnings(w, RuntimeWarning, 6)
if numpy_version < '1.9.0a':
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# Fails over to mishmash of omit/propagate, but mostly omit
assert_equal(stats.iqr(x, scale='raw', nan_policy='omit'), 8)
assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),
8 / 1.3489795)
assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 4)
_check_warnings(w, RuntimeWarning, 3)
else:
assert_equal(stats.iqr(x, scale='raw', nan_policy='omit'), 7.5)
assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'),
7.5 / 1.3489795)
assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 3.75)
# Bad scale
assert_raises(ValueError, stats.iqr, x, scale='foobar')
class TestMoments(object):
"""
Comparison numbers are found using R v.1.5.1
note that length(testcase) = 4
testmathworks comes from documentation for the
Statistics Toolbox for Matlab and can be found at both
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml
Note that both test cases came from here.
"""
testcase = [1,2,3,4]
scalar_testcase = 4.
np.random.seed(1234)
testcase_moment_accuracy = np.random.rand(42)
testmathworks = [1.165, 0.6268, 0.0751, 0.3516, -0.6965]
def test_moment(self):
        # mean((testcase - mean(testcase))**power, axis=0)
y = stats.moment(self.scalar_testcase)
assert_approx_equal(y, 0.0)
y = stats.moment(self.testcase, 0)
assert_approx_equal(y, 1.0)
y = stats.moment(self.testcase, 1)
assert_approx_equal(y, 0.0, 10)
y = stats.moment(self.testcase, 2)
assert_approx_equal(y, 1.25)
y = stats.moment(self.testcase, 3)
assert_approx_equal(y, 0.0)
y = stats.moment(self.testcase, 4)
assert_approx_equal(y, 2.5625)
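        # E.g. the mean of testcase is 2.5, so the second central moment is
        # (1.5**2 + 0.5**2 + 0.5**2 + 1.5**2) / 4 = 1.25.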
# check array_like input for moment
y = stats.moment(self.testcase, [1, 2, 3, 4])
assert_allclose(y, [0, 1.25, 0, 2.5625])
# check moment input consists only of integers
y = stats.moment(self.testcase, 0.0)
assert_approx_equal(y, 1.0)
assert_raises(ValueError, stats.moment, self.testcase, 1.2)
y = stats.moment(self.testcase, [1.0, 2, 3, 4.0])
assert_allclose(y, [0, 1.25, 0, 2.5625])
# test empty input
y = stats.moment([])
assert_equal(y, np.nan)
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.moment(x, 2), np.nan)
assert_almost_equal(stats.moment(x, nan_policy='omit'), 0.0)
assert_raises(ValueError, stats.moment, x, nan_policy='raise')
assert_raises(ValueError, stats.moment, x, nan_policy='foobar')
def test_moment_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
mm = stats.moment(a, 2, axis=1, nan_policy="propagate")
np.testing.assert_allclose(mm, [1.25, np.nan], atol=1e-15)
def test_variation(self):
# variation = samplestd / mean
y = stats.variation(self.scalar_testcase)
assert_approx_equal(y, 0.0)
y = stats.variation(self.testcase)
assert_approx_equal(y, 0.44721359549996, 10)
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.variation(x), np.nan)
assert_almost_equal(stats.variation(x, nan_policy='omit'),
0.6454972243679028)
assert_raises(ValueError, stats.variation, x, nan_policy='raise')
assert_raises(ValueError, stats.variation, x, nan_policy='foobar')
def test_variation_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
vv = stats.variation(a, axis=1, nan_policy="propagate")
np.testing.assert_allclose(vv, [0.7453559924999299, np.nan], atol=1e-15)
def test_skewness(self):
# Scalar test case
y = stats.skew(self.scalar_testcase)
assert_approx_equal(y, 0.0)
# sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0) /
# ((sqrt(var(testmathworks)*4/5))**3)/5
y = stats.skew(self.testmathworks)
assert_approx_equal(y, -0.29322304336607, 10)
y = stats.skew(self.testmathworks, bias=0)
assert_approx_equal(y, -0.437111105023940, 10)
y = stats.skew(self.testcase)
assert_approx_equal(y, 0.0, 10)
x = np.arange(10.)
x[9] = np.nan
with np.errstate(invalid='ignore'):
assert_equal(stats.skew(x), np.nan)
assert_equal(stats.skew(x, nan_policy='omit'), 0.)
assert_raises(ValueError, stats.skew, x, nan_policy='raise')
assert_raises(ValueError, stats.skew, x, nan_policy='foobar')
def test_skewness_scalar(self):
# `skew` must return a scalar for 1-dim input
assert_equal(stats.skew(arange(10)), 0.0)
def test_skew_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
with np.errstate(invalid='ignore'):
s = stats.skew(a, axis=1, nan_policy="propagate")
np.testing.assert_allclose(s, [0, np.nan], atol=1e-15)
def test_kurtosis(self):
# Scalar test case
y = stats.kurtosis(self.scalar_testcase)
assert_approx_equal(y, -3.0)
# sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4
# sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5
# Set flags for axis = 0 and
        # fisher=0 (Pearson's definition of kurtosis, for compatibility with Matlab)
y = stats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
assert_approx_equal(y, 2.1658856802973, 10)
        # Note that MATLAB has confusing docs for the following case
        # kurtosis(x, 0) gives an unbiased estimate of Pearson's kurtosis
        # kurtosis(x) gives a biased estimate of Fisher's kurtosis (Pearson - 3)
        # The MATLAB docs imply that both should give Fisher's
y = stats.kurtosis(self.testmathworks, fisher=0, bias=0)
assert_approx_equal(y, 3.663542721189047, 10)
y = stats.kurtosis(self.testcase, 0, 0)
assert_approx_equal(y, 1.64)
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.kurtosis(x), np.nan)
assert_almost_equal(stats.kurtosis(x, nan_policy='omit'), -1.230000)
assert_raises(ValueError, stats.kurtosis, x, nan_policy='raise')
assert_raises(ValueError, stats.kurtosis, x, nan_policy='foobar')
def test_kurtosis_array_scalar(self):
assert_equal(type(stats.kurtosis([1,2,3])), float)
def test_kurtosis_propagate_nan(self):
# Check that the shape of the result is the same for inputs
# with and without nans, cf gh-5817
a = np.arange(8).reshape(2, -1).astype(float)
a[1, 0] = np.nan
k = stats.kurtosis(a, axis=1, nan_policy="propagate")
np.testing.assert_allclose(k, [-1.36, np.nan], atol=1e-15)
def test_moment_accuracy(self):
# 'moment' must have a small enough error compared to the slower
# but very accurate numpy.power() implementation.
tc_no_mean = self.testcase_moment_accuracy - \
np.mean(self.testcase_moment_accuracy)
assert_allclose(np.power(tc_no_mean, 42).mean(),
stats.moment(self.testcase_moment_accuracy, 42))
class TestStudentTest(object):
X1 = np.array([-1, 0, 1])
X2 = np.array([0, 1, 2])
T1_0 = 0
P1_0 = 1
T1_1 = -1.732051
P1_1 = 0.2254033
T1_2 = -3.464102
P1_2 = 0.0741799
T2_0 = 1.732051
P2_0 = 0.2254033
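    # For example, T1_1 = -sqrt(3): X1 has mean 0 and sample std 1 (n=3), so
    # testing against popmean=1 gives t = (0 - 1) / (1 / sqrt(3)) ~ -1.732051.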
def test_onesample(self):
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
t, p = stats.ttest_1samp(4., 3.)
assert_(np.isnan(t))
assert_(np.isnan(p))
t, p = stats.ttest_1samp(self.X1, 0)
assert_array_almost_equal(t, self.T1_0)
assert_array_almost_equal(p, self.P1_0)
res = stats.ttest_1samp(self.X1, 0)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
t, p = stats.ttest_1samp(self.X2, 0)
assert_array_almost_equal(t, self.T2_0)
assert_array_almost_equal(p, self.P2_0)
t, p = stats.ttest_1samp(self.X1, 1)
assert_array_almost_equal(t, self.T1_1)
assert_array_almost_equal(p, self.P1_1)
t, p = stats.ttest_1samp(self.X1, 2)
assert_array_almost_equal(t, self.T1_2)
assert_array_almost_equal(p, self.P1_2)
# check nan policy
np.random.seed(7654567)
x = stats.norm.rvs(loc=5, scale=10, size=51)
x[50] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.ttest_1samp(x, 5.0), (np.nan, np.nan))
assert_array_almost_equal(stats.ttest_1samp(x, 5.0, nan_policy='omit'),
(-1.6412624074367159, 0.107147027334048005))
assert_raises(ValueError, stats.ttest_1samp, x, 5.0, nan_policy='raise')
assert_raises(ValueError, stats.ttest_1samp, x, 5.0,
nan_policy='foobar')
def test_percentileofscore():
pcos = stats.percentileofscore
assert_equal(pcos([1,2,3,4,5,6,7,8,9,10],4), 40.0)
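    # kind='weak' counts scores <= score, 'strict' counts scores < score,
    # 'mean' averages the two, and the default 'rank' averages the percentile
    # ranks of tied values; for the data above that gives 40, 30, 35 and 40.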
for (kind, result) in [('mean', 35.0),
('strict', 30.0),
('weak', 40.0)]:
assert_equal(pcos(np.arange(10) + 1, 4, kind=kind), result)
# multiple - 2
for (kind, result) in [('rank', 45.0),
('strict', 30.0),
('weak', 50.0),
('mean', 40.0)]:
assert_equal(pcos([1,2,3,4,4,5,6,7,8,9], 4, kind=kind), result)
# multiple - 3
assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4), 50.0)
for (kind, result) in [('rank', 50.0),
('mean', 45.0),
('strict', 30.0),
('weak', 60.0)]:
assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4, kind=kind), result)
# missing
for kind in ('rank', 'mean', 'strict', 'weak'):
assert_equal(pcos([1,2,3,5,6,7,8,9,10,11], 4, kind=kind), 30)
# larger numbers
for (kind, result) in [('mean', 35.0),
('strict', 30.0),
('weak', 40.0)]:
assert_equal(
pcos([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 40,
kind=kind), result)
for (kind, result) in [('mean', 45.0),
('strict', 30.0),
('weak', 60.0)]:
assert_equal(
pcos([10, 20, 30, 40, 40, 40, 50, 60, 70, 80],
40, kind=kind), result)
for kind in ('rank', 'mean', 'strict', 'weak'):
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
40, kind=kind), 30.0)
# boundaries
for (kind, result) in [('rank', 10.0),
('mean', 5.0),
('strict', 0.0),
('weak', 10.0)]:
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
10, kind=kind), result)
for (kind, result) in [('rank', 100.0),
('mean', 95.0),
('strict', 90.0),
('weak', 100.0)]:
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
110, kind=kind), result)
# out of bounds
for (kind, score, result) in [('rank', 200, 100.0),
('mean', 200, 100.0),
('mean', 0, 0.0)]:
assert_equal(
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
score, kind=kind), result)
assert_raises(ValueError, pcos, [1, 2, 3, 3, 4], 3, kind='unrecognized')
PowerDivCase = namedtuple('Case', ['f_obs', 'f_exp', 'ddof', 'axis',
'chi2', # Pearson's
'log', # G-test (log-likelihood)
'mod_log', # Modified log-likelihood
'cr', # Cressie-Read (lambda=2/3)
])
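# For reference, the power-divergence statistic for a finite nonzero lambda_
# is 2 / (lambda_ * (lambda_ + 1)) * sum(f_obs * ((f_obs / f_exp)**lambda_ - 1)).
# lambda_=1 gives Pearson's chi-squared, the limits lambda_ -> 0 and
# lambda_ -> -1 give the log- and modified-log-likelihood statistics, and the
# 'cr' values below use lambda_=2/3, hence the division by 5/9.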
# The details of the first two elements in power_div_1d_cases are used
# in a test in TestPowerDivergence. Check that code before making
# any changes here.
power_div_1d_cases = [
# Use the default f_exp.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=None, ddof=0, axis=None,
chi2=4,
log=2*(4*np.log(4/8) + 12*np.log(12/8)),
mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
# Give a non-uniform f_exp.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=[2, 16, 12, 2], ddof=0, axis=None,
chi2=24,
log=2*(4*np.log(4/2) + 8*np.log(8/16) + 8*np.log(8/2)),
mod_log=2*(2*np.log(2/4) + 16*np.log(16/8) + 2*np.log(2/8)),
cr=(4*((4/2)**(2/3) - 1) + 8*((8/16)**(2/3) - 1) +
8*((8/2)**(2/3) - 1))/(5/9)),
# f_exp is a scalar.
PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=8, ddof=0, axis=None,
chi2=4,
log=2*(4*np.log(4/8) + 12*np.log(12/8)),
mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)),
cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)),
# f_exp equal to f_obs.
PowerDivCase(f_obs=[3, 5, 7, 9], f_exp=[3, 5, 7, 9], ddof=0, axis=0,
chi2=0, log=0, mod_log=0, cr=0),
]
power_div_empty_cases = [
# Shape is (0,)--a data set with length 0. The computed
# test statistic should be 0.
PowerDivCase(f_obs=[],
f_exp=None, ddof=0, axis=0,
chi2=0, log=0, mod_log=0, cr=0),
# Shape is (0, 3). This is 3 data sets, but each data set has
# length 0, so the computed test statistic should be [0, 0, 0].
PowerDivCase(f_obs=np.array([[],[],[]]).T,
f_exp=None, ddof=0, axis=0,
chi2=[0, 0, 0],
log=[0, 0, 0],
mod_log=[0, 0, 0],
cr=[0, 0, 0]),
# Shape is (3, 0). This represents an empty collection of
# data sets in which each data set has length 3. The test
# statistic should be an empty array.
PowerDivCase(f_obs=np.array([[],[],[]]),
f_exp=None, ddof=0, axis=0,
chi2=[],
log=[],
mod_log=[],
cr=[]),
]
class TestPowerDivergence(object):
def check_power_divergence(self, f_obs, f_exp, ddof, axis, lambda_,
expected_stat):
f_obs = np.asarray(f_obs)
if axis is None:
num_obs = f_obs.size
else:
b = np.broadcast(f_obs, f_exp)
num_obs = b.shape[axis]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice")
stat, p = stats.power_divergence(
f_obs=f_obs, f_exp=f_exp, ddof=ddof,
axis=axis, lambda_=lambda_)
assert_allclose(stat, expected_stat)
if lambda_ == 1 or lambda_ == "pearson":
# Also test stats.chisquare.
stat, p = stats.chisquare(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
axis=axis)
assert_allclose(stat, expected_stat)
ddof = np.asarray(ddof)
expected_p = stats.distributions.chi2.sf(expected_stat,
num_obs - 1 - ddof)
assert_allclose(p, expected_p)
def test_basic(self):
for case in power_div_1d_cases:
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
None, case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
1, case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
2/3, case.cr)
def test_basic_masked(self):
for case in power_div_1d_cases:
mobs = np.ma.array(case.f_obs)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
None, case.chi2)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
1, case.chi2)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
self.check_power_divergence(
mobs, case.f_exp, case.ddof, case.axis,
2/3, case.cr)
def test_axis(self):
case0 = power_div_1d_cases[0]
case1 = power_div_1d_cases[1]
f_obs = np.vstack((case0.f_obs, case1.f_obs))
f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
case1.f_exp))
# Check the four computational code paths in power_divergence
# using a 2D array with axis=1.
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"pearson", [case0.chi2, case1.chi2])
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"log-likelihood", [case0.log, case1.log])
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"mod-log-likelihood", [case0.mod_log, case1.mod_log])
self.check_power_divergence(
f_obs, f_exp, 0, 1,
"cressie-read", [case0.cr, case1.cr])
# Reshape case0.f_obs to shape (2,2), and use axis=None.
# The result should be the same.
self.check_power_divergence(
np.array(case0.f_obs).reshape(2, 2), None, 0, None,
"pearson", case0.chi2)
def test_ddof_broadcasting(self):
# Test that ddof broadcasts correctly.
# ddof does not affect the test statistic. It is broadcast
# with the computed test statistic for the computation of
# the p value.
case0 = power_div_1d_cases[0]
case1 = power_div_1d_cases[1]
# Create 4x2 arrays of observed and expected frequencies.
f_obs = np.vstack((case0.f_obs, case1.f_obs)).T
f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs),
case1.f_exp)).T
expected_chi2 = [case0.chi2, case1.chi2]
# ddof has shape (2, 1). This is broadcast with the computed
# statistic, so p will have shape (2,2).
ddof = np.array([[0], [1]])
stat, p = stats.power_divergence(f_obs, f_exp, ddof=ddof)
assert_allclose(stat, expected_chi2)
# Compute the p values separately, passing in scalars for ddof.
stat0, p0 = stats.power_divergence(f_obs, f_exp, ddof=ddof[0,0])
stat1, p1 = stats.power_divergence(f_obs, f_exp, ddof=ddof[1,0])
assert_array_equal(p, np.vstack((p0, p1)))
def test_empty_cases(self):
with warnings.catch_warnings():
for case in power_div_empty_cases:
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"pearson", case.chi2)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"log-likelihood", case.log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"mod-log-likelihood", case.mod_log)
self.check_power_divergence(
case.f_obs, case.f_exp, case.ddof, case.axis,
"cressie-read", case.cr)
def test_power_divergence_result_attributes(self):
f_obs = power_div_1d_cases[0].f_obs
f_exp = power_div_1d_cases[0].f_exp
ddof = power_div_1d_cases[0].ddof
axis = power_div_1d_cases[0].axis
res = stats.power_divergence(f_obs=f_obs, f_exp=f_exp, ddof=ddof,
axis=axis, lambda_="pearson")
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_chisquare_masked_arrays():
# Test masked arrays.
obs = np.array([[8, 8, 16, 32, -1], [-1, -1, 3, 4, 5]]).T
mask = np.array([[0, 0, 0, 0, 1], [1, 1, 0, 0, 0]]).T
mobs = np.ma.masked_array(obs, mask)
expected_chisq = np.array([24.0, 0.5])
expected_g = np.array([2*(2*8*np.log(0.5) + 32*np.log(2.0)),
2*(3*np.log(0.75) + 5*np.log(1.25))])
chi2 = stats.distributions.chi2
chisq, p = stats.chisquare(mobs)
mat.assert_array_equal(chisq, expected_chisq)
mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
mobs.count(axis=0) - 1))
g, p = stats.power_divergence(mobs, lambda_='log-likelihood')
mat.assert_array_almost_equal(g, expected_g, decimal=15)
mat.assert_array_almost_equal(p, chi2.sf(expected_g,
mobs.count(axis=0) - 1))
chisq, p = stats.chisquare(mobs.T, axis=1)
mat.assert_array_equal(chisq, expected_chisq)
mat.assert_array_almost_equal(p, chi2.sf(expected_chisq,
mobs.T.count(axis=1) - 1))
g, p = stats.power_divergence(mobs.T, axis=1, lambda_="log-likelihood")
mat.assert_array_almost_equal(g, expected_g, decimal=15)
mat.assert_array_almost_equal(p, chi2.sf(expected_g,
mobs.count(axis=0) - 1))
obs1 = np.ma.array([3, 5, 6, 99, 10], mask=[0, 0, 0, 1, 0])
exp1 = np.ma.array([2, 4, 8, 10, 99], mask=[0, 0, 0, 0, 1])
chi2, p = stats.chisquare(obs1, f_exp=exp1)
# Because of the mask at index 3 of obs1 and at index 4 of exp1,
# only the first three elements are included in the calculation
# of the statistic.
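    # Explicitly: (3-2)**2/2 + (5-4)**2/4 + (6-8)**2/8 = 1/2 + 1/4 + 4/8.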
mat.assert_array_equal(chi2, 1/2 + 1/4 + 4/8)
# When axis=None, the two values should have type np.float64.
chisq, p = stats.chisquare(np.ma.array([1,2,3]), axis=None)
assert_(isinstance(chisq, np.float64))
assert_(isinstance(p, np.float64))
assert_equal(chisq, 1.0)
assert_almost_equal(p, stats.distributions.chi2.sf(1.0, 2))
# Empty arrays:
# A data set with length 0 returns a masked scalar.
with np.errstate(invalid='ignore'):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice")
chisq, p = stats.chisquare(np.ma.array([]))
assert_(isinstance(chisq, np.ma.MaskedArray))
assert_equal(chisq.shape, ())
assert_(chisq.mask)
empty3 = np.ma.array([[],[],[]])
# empty3 is a collection of 0 data sets (whose lengths would be 3, if
# there were any), so the return value is an array with length 0.
chisq, p = stats.chisquare(empty3)
assert_(isinstance(chisq, np.ma.MaskedArray))
mat.assert_array_equal(chisq, [])
# empty3.T is an array containing 3 data sets, each with length 0,
# so an array of size (3,) is returned, with all values masked.
with np.errstate(invalid='ignore'):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "Mean of empty slice")
chisq, p = stats.chisquare(empty3.T)
assert_(isinstance(chisq, np.ma.MaskedArray))
assert_equal(chisq.shape, (3,))
assert_(np.all(chisq.mask))
def test_power_divergence_against_cressie_read_data():
# Test stats.power_divergence against tables 4 and 5 from
    # Cressie and Read, "Multinomial Goodness-of-Fit Tests",
# J. R. Statist. Soc. B (1984), Vol 46, No. 3, pp. 440-464.
# This tests the calculation for several values of lambda.
# `table4` holds just the second and third columns from Table 4.
table4 = np.array([
# observed, expected,
15, 15.171,
11, 13.952,
14, 12.831,
17, 11.800,
5, 10.852,
11, 9.9796,
10, 9.1777,
4, 8.4402,
8, 7.7620,
10, 7.1383,
7, 6.5647,
9, 6.0371,
11, 5.5520,
3, 5.1059,
6, 4.6956,
1, 4.3183,
1, 3.9713,
4, 3.6522,
]).reshape(-1, 2)
table5 = np.array([
# lambda, statistic
-10.0, 72.2e3,
-5.0, 28.9e1,
-3.0, 65.6,
-2.0, 40.6,
-1.5, 34.0,
-1.0, 29.5,
-0.5, 26.5,
0.0, 24.6,
0.5, 23.4,
0.67, 23.1,
1.0, 22.7,
1.5, 22.6,
2.0, 22.9,
3.0, 24.8,
5.0, 35.5,
10.0, 21.4e1,
]).reshape(-1, 2)
for lambda_, expected_stat in table5:
stat, p = stats.power_divergence(table4[:,0], table4[:,1],
lambda_=lambda_)
assert_allclose(stat, expected_stat, rtol=5e-3)
def test_friedmanchisquare():
# see ticket:113
# verified with matlab and R
# From Demsar "Statistical Comparisons of Classifiers over Multiple Data Sets"
# 2006, Xf=9.28 (no tie handling, tie corrected Xf >=9.28)
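    # For reference, without ties the statistic is
    #   Xf = 12 / (n * k * (k + 1)) * sum(R_j**2) - 3 * n * (k + 1)
    # for k treatments ranked within each of n blocks, R_j being the rank sums.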
x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 0.583,
0.775, 1.0, 0.94, 0.619, 0.972, 0.957]),
array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583,
0.838, 1.0, 0.962, 0.666, 0.981, 0.978]),
array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563,
0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]),
array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625,
0.875, 1.0, 0.962, 0.669, 0.975, 0.970])]
# From "Bioestadistica para las ciencias de la salud" Xf=18.95 p<0.001:
x2 = [array([4,3,5,3,5,3,2,5,4,4,4,3]),
array([2,2,1,2,3,1,2,3,2,1,1,3]),
array([2,4,3,3,4,3,3,4,4,1,2,1]),
array([3,5,4,3,4,4,3,3,3,4,4,4])]
    # From Jerrold H. Zar, "Biostatistical Analysis" (example 12.6), Xf=10.68, 0.005 < p < 0.01:
    # The probability from this example is inexact using the chi-square
    # approximation of the Friedman chi-square statistic.
x3 = [array([7.0,9.9,8.5,5.1,10.3]),
array([5.3,5.7,4.7,3.5,7.7]),
array([4.9,7.6,5.5,2.8,8.4]),
array([8.8,8.9,8.1,3.3,9.1])]
assert_array_almost_equal(stats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),
(10.2283464566929, 0.0167215803284414))
assert_array_almost_equal(stats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
(18.9428571428571, 0.000280938375189499))
assert_array_almost_equal(stats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),
(10.68, 0.0135882729582176))
assert_raises(ValueError, stats.friedmanchisquare,x3[0],x3[1])
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.friedmanchisquare(*x1)
check_named_results(res, attributes)
# test using mstats
assert_array_almost_equal(mstats.friedmanchisquare(x1[0], x1[1],
x1[2], x1[3]),
(10.2283464566929, 0.0167215803284414))
# the following fails
# assert_array_almost_equal(mstats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),
# (18.9428571428571, 0.000280938375189499))
assert_array_almost_equal(mstats.friedmanchisquare(x3[0], x3[1],
x3[2], x3[3]),
(10.68, 0.0135882729582176))
assert_raises(ValueError, mstats.friedmanchisquare,x3[0],x3[1])
def test_kstest():
# comparing with values from R
x = np.linspace(-1,1,9)
D,p = stats.kstest(x,'norm')
assert_almost_equal(D, 0.15865525393145705, 12)
assert_almost_equal(p, 0.95164069201518386, 1)
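    # Here D equals norm.cdf(-1) ~ 0.158655: the empirical CDF is still 0
    # just below the smallest sample point x = -1, where the normal CDF has
    # already reached norm.cdf(-1).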
x = np.linspace(-15,15,9)
D,p = stats.kstest(x,'norm')
assert_almost_equal(D, 0.44435602715924361, 15)
assert_almost_equal(p, 0.038850140086788665, 8)
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.kstest(x, 'norm')
check_named_results(res, attributes)
    # the following tests rely on deterministically replicated rvs
np.random.seed(987654321)
x = stats.norm.rvs(loc=0.2, size=100)
D,p = stats.kstest(x, 'norm', mode='asymp')
assert_almost_equal(D, 0.12464329735846891, 15)
assert_almost_equal(p, 0.089444888711820769, 15)
assert_almost_equal(np.array(stats.kstest(x, 'norm', mode='asymp')),
np.array((0.12464329735846891, 0.089444888711820769)), 15)
assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='less')),
np.array((0.12464329735846891, 0.040989164077641749)), 15)
# this 'greater' test fails with precision of decimal=14
assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='greater')),
np.array((0.0072115233216310994, 0.98531158590396228)), 12)
# missing: no test that uses *args
def test_ks_2samp():
# exact small sample solution
data1 = np.array([1.0,2.0])
data2 = np.array([1.0,2.0,3.0])
assert_almost_equal(np.array(stats.ks_2samp(data1+0.01,data2)),
np.array((0.33333333333333337, 0.99062316386915694)))
assert_almost_equal(np.array(stats.ks_2samp(data1-0.01,data2)),
np.array((0.66666666666666674, 0.42490954988801982)))
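    # For data1+0.01 vs data2 the largest CDF gap is 1/3, attained e.g. on
    # [1, 1.01), where the empirical CDFs are 0 and 1/3 respectively.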
# these can also be verified graphically
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,100)+2+0.1)),
np.array((0.030000000000000027, 0.99999999996005062)))
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,100)+2-0.1)),
np.array((0.020000000000000018, 0.99999999999999933)))
# these are just regression tests
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,110)+20.1)),
np.array((0.21090909090909091, 0.015880386730710221)))
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,110)+20-0.1)),
np.array((0.20818181818181825, 0.017981441789762638)))
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.ks_2samp(data1 - 0.01, data2)
check_named_results(res, attributes)
def test_ttest_rel():
# regression test
tr,pr = 0.81248591389165692, 0.41846234511362157
tpr = ([tr,-tr],[pr,pr])
rvs1 = np.linspace(1,100,100)
rvs2 = np.linspace(1.01,99.989,100)
rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)])
rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)])
t,p = stats.ttest_rel(rvs1, rvs2, axis=0)
assert_array_almost_equal([t,p],(tr,pr))
t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0)
assert_array_almost_equal([t,p],tpr)
t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal([t,p],tpr)
# test scalars
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
t, p = stats.ttest_rel(4., 3.)
assert_(np.isnan(t))
assert_(np.isnan(p))
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.ttest_rel(rvs1, rvs2, axis=0)
check_named_results(res, attributes)
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_rel(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
# check nan policy
np.random.seed(12345678)
x = stats.norm.rvs(loc=5, scale=10, size=501)
x[500] = np.nan
y = (stats.norm.rvs(loc=5, scale=10, size=501) +
stats.norm.rvs(scale=0.2, size=501))
y[500] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.ttest_rel(x, x), (np.nan, np.nan))
assert_array_almost_equal(stats.ttest_rel(x, y, nan_policy='omit'),
(0.25299925303978066, 0.8003729814201519))
assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='raise')
assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='foobar')
# test zero division problem
t, p = stats.ttest_rel([0, 0, 0], [1, 1, 1])
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(invalid="ignore"):
assert_equal(stats.ttest_rel([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
    # check that a nan in the input array results in nan output
anan = np.array([[1, np.nan], [-1, 1]])
assert_equal(stats.ttest_rel(anan, np.zeros((2, 2))),
([0, np.nan], [1, np.nan]))
    # test that an incorrect input shape raises an error
x = np.arange(24)
assert_raises(ValueError, stats.ttest_rel, x.reshape((8, 3)),
x.reshape((2, 3, 4)))
def test_ttest_rel_nan_2nd_arg():
# regression test for gh-6134: nans in the second arg were not handled
x = [np.nan, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 1.0, 2.0]
r1 = stats.ttest_rel(x, y, nan_policy='omit')
r2 = stats.ttest_rel(y, x, nan_policy='omit')
assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)
assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)
# NB: arguments are paired when NaNs are dropped
r3 = stats.ttest_rel(y[1:], x[1:])
assert_allclose(r2, r3, atol=1e-15)
# .. and this is consistent with R. R code:
# x = c(NA, 2.0, 3.0, 4.0)
# y = c(1.0, 2.0, 1.0, 2.0)
# t.test(x, y, paired=TRUE)
assert_allclose(r2, (-2, 0.1835), atol=1e-4)
def _desc_stats(x1, x2, axis=0):
def _stats(x, axis=0):
x = np.asarray(x)
mu = np.mean(x, axis=axis)
std = np.std(x, axis=axis, ddof=1)
nobs = x.shape[axis]
return mu, std, nobs
return _stats(x1, axis) + _stats(x2, axis)
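# Note: the 6-tuple returned by _desc_stats unpacks directly into
# stats.ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2), which
# is how the tests below use it via *args.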
def test_ttest_ind():
# regression test
tr = 1.0912746897927283
pr = 0.27647818616351882
tpr = ([tr,-tr],[pr,pr])
rvs2 = np.linspace(1,100,100)
rvs1 = np.linspace(5,105,100)
rvs1_2D = np.array([rvs1, rvs2])
rvs2_2D = np.array([rvs2, rvs1])
t,p = stats.ttest_ind(rvs1, rvs2, axis=0)
assert_array_almost_equal([t,p],(tr,pr))
# test from_stats API
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs2)),
[t, p])
t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
[t, p])
t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args),
[t, p])
# test scalars
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
t, p = stats.ttest_ind(4., 3.)
assert_(np.isnan(t))
assert_(np.isnan(p))
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
# check nan policy
np.random.seed(12345678)
x = stats.norm.rvs(loc=5, scale=10, size=501)
x[500] = np.nan
y = stats.norm.rvs(loc=5, scale=10, size=500)
with np.errstate(invalid="ignore"):
assert_array_equal(stats.ttest_ind(x, y), (np.nan, np.nan))
assert_array_almost_equal(stats.ttest_ind(x, y, nan_policy='omit'),
(0.24779670949091914, 0.80434267337517906))
assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='raise')
assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='foobar')
# test zero division problem
t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1])
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(invalid="ignore"):
assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0]), (np.nan, np.nan))
    # check that a nan in the input array results in nan output
anan = np.array([[1, np.nan], [-1, 1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2, 2))),
([0, np.nan], [1, np.nan]))
def test_ttest_ind_with_uneq_var():
# check vs. R
a = (1, 2, 3)
b = (1.1, 2.9, 4.2)
pr = 0.53619490753126731
tr = -0.68649512735572582
t, p = stats.ttest_ind(a, b, equal_var=False)
assert_array_almost_equal([t,p], [tr, pr])
# test from desc stats API
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
equal_var=False),
[t, p])
a = (1, 2, 3, 4)
pr = 0.84354139131608286
tr = -0.2108663315950719
t, p = stats.ttest_ind(a, b, equal_var=False)
assert_array_almost_equal([t,p], [tr, pr])
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b),
equal_var=False),
[t, p])
# regression test
tr = 1.0912746897927283
tr_uneq_n = 0.66745638708050492
pr = 0.27647831993021388
pr_uneq_n = 0.50873585065616544
tpr = ([tr,-tr],[pr,pr])
rvs3 = np.linspace(1,100, 25)
rvs2 = np.linspace(1,100,100)
rvs1 = np.linspace(5,105,100)
rvs1_2D = np.array([rvs1, rvs2])
rvs2_2D = np.array([rvs2, rvs1])
t,p = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)
assert_array_almost_equal([t,p],(tr,pr))
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs2),
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1, rvs3, axis=0, equal_var=False)
assert_array_almost_equal([t,p], (tr_uneq_n, pr_uneq_n))
assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1,
rvs3),
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, equal_var=False)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D.T, rvs2_2D.T)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
equal_var=False),
(t, p))
t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1, equal_var=False)
assert_array_almost_equal([t,p],tpr)
args = _desc_stats(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal(stats.ttest_ind_from_stats(*args,
equal_var=False),
(t, p))
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False)
check_named_results(res, attributes)
# test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1, equal_var=False)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
args = _desc_stats(rvs1_3D, rvs2_3D, axis=1)
t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2),
axis=2, equal_var=False)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
args = _desc_stats(np.rollaxis(rvs1_3D, 2),
np.rollaxis(rvs2_3D, 2), axis=2)
t, p = stats.ttest_ind_from_stats(*args, equal_var=False)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
# test zero division problem
t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(all='ignore'):
assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0], equal_var=False),
(np.nan, np.nan))
    # check that a nan in the input array results in nan output
anan = np.array([[1, np.nan], [-1, 1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2, 2)), equal_var=False),
([0, np.nan], [1, np.nan]))
def test_ttest_ind_nan_2nd_arg():
# regression test for gh-6134: nans in the second arg were not handled
x = [np.nan, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 1.0, 2.0]
r1 = stats.ttest_ind(x, y, nan_policy='omit')
r2 = stats.ttest_ind(y, x, nan_policy='omit')
assert_allclose(r2.statistic, -r1.statistic, atol=1e-15)
assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15)
# NB: arguments are not paired when NaNs are dropped
r3 = stats.ttest_ind(y, x[1:])
assert_allclose(r2, r3, atol=1e-15)
# .. and this is consistent with R. R code:
# x = c(NA, 2.0, 3.0, 4.0)
# y = c(1.0, 2.0, 1.0, 2.0)
# t.test(x, y, var.equal=TRUE)
assert_allclose(r2, (-2.5354627641855498, 0.052181400457057901), atol=1e-15)
def test_gh5686():
mean1, mean2 = np.array([1, 2]), np.array([3, 4])
std1, std2 = np.array([5, 3]), np.array([4, 5])
nobs1, nobs2 = np.array([130, 140]), np.array([100, 150])
# This will raise a TypeError unless gh-5686 is fixed.
stats.ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2)
def test_ttest_1samp_new():
n1, n2, n3 = (10,15,20)
rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3))
# check multidimensional array and correct axis handling
# deterministic rvn1 and rvn2 would be better as in test_ttest_rel
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0)
t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n2,n3))
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n3)),axis=1)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1)
t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n1,n3))
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2)),axis=2)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2)
t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n1,n2))
# test zero division problem
t, p = stats.ttest_1samp([0, 0, 0], 1)
assert_equal((np.abs(t), p), (np.inf, 0))
with np.errstate(all='ignore'):
assert_equal(stats.ttest_1samp([0, 0, 0], 0), (np.nan, np.nan))
    # check that nan in input array results in nan output
anan = np.array([[1, np.nan],[-1, 1]])
assert_equal(stats.ttest_1samp(anan, 0), ([0, np.nan], [1, np.nan]))
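# Editorial sketch, not part of the original suite: the statistic checked
# above is t = (mean(x) - popmean) / (s / sqrt(n)), with s the ddof=1
# standard deviation, which is why a zero-variance sample produces +/-inf
# (nonzero numerator) or nan (0/0) in the zero-division checks.
def _manual_ttest_1samp_statistic(x, popmean):
    x = np.asarray(x, dtype=float)
    denom = x.std(ddof=1) / np.sqrt(x.size)
    return (x.mean() - popmean) / denom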
class TestDescribe(object):
def test_describe_scalar(self):
with suppress_warnings() as sup, np.errstate(invalid="ignore"):
sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice")
n, mm, m, v, sk, kurt = stats.describe(4.)
assert_equal(n, 1)
assert_equal(mm, (4.0, 4.0))
assert_equal(m, 4.0)
assert_(np.isnan(v))
assert_array_almost_equal(sk, 0.0, decimal=13)
assert_array_almost_equal(kurt, -3.0, decimal=13)
def test_describe_numbers(self):
x = np.vstack((np.ones((3,4)), 2 * np.ones((2,4))))
nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))
mc = np.array([1.4, 1.4, 1.4, 1.4])
vc = np.array([0.3, 0.3, 0.3, 0.3])
skc = [0.40824829046386357] * 4
kurtc = [-1.833333333333333] * 4
n, mm, m, v, sk, kurt = stats.describe(x)
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc, decimal=13)
assert_array_almost_equal(kurt, kurtc, decimal=13)
n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1)
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc, decimal=13)
assert_array_almost_equal(kurt, kurtc, decimal=13)
x = np.arange(10.)
x[9] = np.nan
nc, mmc = (9, (0.0, 8.0))
mc = 4.0
vc = 7.5
skc = 0.0
kurtc = -1.2300000000000002
n, mm, m, v, sk, kurt = stats.describe(x, nan_policy='omit')
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc)
assert_array_almost_equal(kurt, kurtc, decimal=13)
assert_raises(ValueError, stats.describe, x, nan_policy='raise')
assert_raises(ValueError, stats.describe, x, nan_policy='foobar')
def test_describe_result_attributes(self):
actual = stats.describe(np.arange(5))
attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis')
check_named_results(actual, attributes)
def test_describe_ddof(self):
x = np.vstack((np.ones((3, 4)), 2 * np.ones((2, 4))))
nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.]))
mc = np.array([1.4, 1.4, 1.4, 1.4])
vc = np.array([0.24, 0.24, 0.24, 0.24])
skc = [0.40824829046386357] * 4
kurtc = [-1.833333333333333] * 4
n, mm, m, v, sk, kurt = stats.describe(x, ddof=0)
assert_equal(n, nc)
assert_allclose(mm, mmc, rtol=1e-15)
assert_allclose(m, mc, rtol=1e-15)
assert_allclose(v, vc, rtol=1e-15)
assert_array_almost_equal(sk, skc, decimal=13)
assert_array_almost_equal(kurt, kurtc, decimal=13)
def test_describe_axis_none(self):
x = np.vstack((np.ones((3, 4)), 2 * np.ones((2, 4))))
# expected values
e_nobs, e_minmax = (20, (1.0, 2.0))
e_mean = 1.3999999999999999
e_var = 0.25263157894736848
e_skew = 0.4082482904638634
e_kurt = -1.8333333333333333
# actual values
a = stats.describe(x, axis=None)
assert_equal(a.nobs, e_nobs)
assert_almost_equal(a.minmax, e_minmax)
assert_almost_equal(a.mean, e_mean)
assert_almost_equal(a.variance, e_var)
assert_array_almost_equal(a.skewness, e_skew, decimal=13)
assert_array_almost_equal(a.kurtosis, e_kurt, decimal=13)
def test_describe_empty(self):
assert_raises(ValueError, stats.describe, [])
def test_normalitytests():
assert_raises(ValueError, stats.skewtest, 4.)
assert_raises(ValueError, stats.kurtosistest, 4.)
assert_raises(ValueError, stats.normaltest, 4.)
# numbers verified with R: dagoTest in package fBasics
st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734)
pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019)
x = np.array((-2,-1,0,1,2,3)*4)**2
attributes = ('statistic', 'pvalue')
assert_array_almost_equal(stats.normaltest(x), (st_normal, pv_normal))
check_named_results(stats.normaltest(x), attributes)
assert_array_almost_equal(stats.skewtest(x), (st_skew, pv_skew))
check_named_results(stats.skewtest(x), attributes)
assert_array_almost_equal(stats.kurtosistest(x), (st_kurt, pv_kurt))
check_named_results(stats.kurtosistest(x), attributes)
# Test axis=None (equal to axis=0 for 1-D input)
assert_array_almost_equal(stats.normaltest(x, axis=None),
(st_normal, pv_normal))
assert_array_almost_equal(stats.skewtest(x, axis=None),
(st_skew, pv_skew))
assert_array_almost_equal(stats.kurtosistest(x, axis=None),
(st_kurt, pv_kurt))
x = np.arange(10.)
x[9] = np.nan
with np.errstate(invalid="ignore"):
assert_array_equal(stats.skewtest(x), (np.nan, np.nan))
expected = (1.0184643553962129, 0.30845733195153502)
assert_array_almost_equal(stats.skewtest(x, nan_policy='omit'), expected)
with np.errstate(all='ignore'):
assert_raises(ValueError, stats.skewtest, x, nan_policy='raise')
assert_raises(ValueError, stats.skewtest, x, nan_policy='foobar')
x = np.arange(30.)
x[29] = np.nan
with np.errstate(all='ignore'):
assert_array_equal(stats.kurtosistest(x), (np.nan, np.nan))
expected = (-2.2683547379505273, 0.023307594135872967)
assert_array_almost_equal(stats.kurtosistest(x, nan_policy='omit'),
expected)
assert_raises(ValueError, stats.kurtosistest, x, nan_policy='raise')
assert_raises(ValueError, stats.kurtosistest, x, nan_policy='foobar')
with np.errstate(all='ignore'):
assert_array_equal(stats.normaltest(x), (np.nan, np.nan))
expected = (6.2260409514287449, 0.04446644248650191)
assert_array_almost_equal(stats.normaltest(x, nan_policy='omit'), expected)
assert_raises(ValueError, stats.normaltest, x, nan_policy='raise')
assert_raises(ValueError, stats.normaltest, x, nan_policy='foobar')
class TestRankSums(object):
def test_ranksums_result_attributes(self):
res = stats.ranksums(np.arange(5), np.arange(25))
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestJarqueBera(object):
def test_jarque_bera_stats(self):
np.random.seed(987654321)
x = np.random.normal(0, 1, 100000)
y = np.random.chisquare(10000, 100000)
z = np.random.rayleigh(1, 100000)
assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(y)[1])
assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(z)[1])
assert_(stats.jarque_bera(y)[1] > stats.jarque_bera(z)[1])
def test_jarque_bera_array_like(self):
np.random.seed(987654321)
x = np.random.normal(0, 1, 100000)
JB1, p1 = stats.jarque_bera(list(x))
JB2, p2 = stats.jarque_bera(tuple(x))
JB3, p3 = stats.jarque_bera(x.reshape(2, 50000))
assert_(JB1 == JB2 == JB3)
assert_(p1 == p2 == p3)
def test_jarque_bera_size(self):
assert_raises(ValueError, stats.jarque_bera, [])
def test_skewtest_too_few_samples():
# Regression test for ticket #1492.
# skewtest requires at least 8 samples; 7 should raise a ValueError.
x = np.arange(7.0)
assert_raises(ValueError, stats.skewtest, x)
def test_kurtosistest_too_few_samples():
# Regression test for ticket #1425.
# kurtosistest requires at least 5 samples; 4 should raise a ValueError.
x = np.arange(4.0)
assert_raises(ValueError, stats.kurtosistest, x)
class TestMannWhitneyU(object):
X = [19.8958398126694, 19.5452691647182, 19.0577309166425, 21.716543054589,
20.3269502208702, 20.0009273294025, 19.3440043632957, 20.4216806548105,
19.0649894736528, 18.7808043120398, 19.3680942943298, 19.4848044069953,
20.7514611265663, 19.0894948874598, 19.4975522356628, 18.9971170734274,
20.3239606288208, 20.6921298083835, 19.0724259532507, 18.9825187935021,
19.5144462609601, 19.8256857844223, 20.5174677102032, 21.1122407995892,
17.9490854922535, 18.2847521114727, 20.1072217648826, 18.6439891962179,
20.4970638083542, 19.5567594734914]
Y = [19.2790668029091, 16.993808441865, 18.5416338448258, 17.2634018833575,
19.1577183624616, 18.5119655377495, 18.6068455037221, 18.8358343362655,
19.0366413269742, 18.1135025515417, 19.2201873866958, 17.8344909022841,
18.2894380745856, 18.6661374133922, 19.9688601693252, 16.0672254617636,
19.00596360572, 19.201561539032, 19.0487501090183, 19.0847908674356]
significant = 14
def test_mannwhitneyu_one_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='less')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='greater')
u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative='greater')
u4, p4 = stats.mannwhitneyu(self.Y, self.X, alternative='less')
assert_equal(p1, p2)
assert_equal(p3, p4)
assert_(p1 != p3)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_equal(u3, 498)
assert_equal(u4, 102)
assert_approx_equal(p1, 0.999957683256589, significant=self.significant)
assert_approx_equal(p3, 4.5941632666275e-05, significant=self.significant)
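        # Editorial note, not part of the original test: the two statistics
        # are complementary, U1 + U2 == len(X) * len(Y) == 30 * 20 == 600,
        # which is why 498 and 102 always appear as a pair above.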
def test_mannwhitneyu_two_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='two-sided')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='two-sided')
assert_equal(p1, p2)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_approx_equal(p1, 9.188326533255e-05,
significant=self.significant)
def test_mannwhitneyu_default(self):
# The default value for alternative is None
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling `mannwhitneyu` without .*`alternative`")
u1, p1 = stats.mannwhitneyu(self.X, self.Y)
u2, p2 = stats.mannwhitneyu(self.Y, self.X)
u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative=None)
assert_equal(p1, p2)
assert_equal(p1, p3)
assert_equal(u1, 102)
assert_equal(u2, 102)
assert_equal(u3, 102)
assert_approx_equal(p1, 4.5941632666275e-05,
significant=self.significant)
def test_mannwhitneyu_no_correct_one_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
alternative='less')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
alternative='greater')
u3, p3 = stats.mannwhitneyu(self.X, self.Y, False,
alternative='greater')
u4, p4 = stats.mannwhitneyu(self.Y, self.X, False,
alternative='less')
assert_equal(p1, p2)
assert_equal(p3, p4)
assert_(p1 != p3)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_equal(u3, 498)
assert_equal(u4, 102)
assert_approx_equal(p1, 0.999955905990004, significant=self.significant)
assert_approx_equal(p3, 4.40940099958089e-05, significant=self.significant)
def test_mannwhitneyu_no_correct_two_sided(self):
u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
alternative='two-sided')
u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
alternative='two-sided')
assert_equal(p1, p2)
assert_equal(u1, 498)
assert_equal(u2, 102)
assert_approx_equal(p1, 8.81880199916178e-05,
significant=self.significant)
def test_mannwhitneyu_no_correct_default(self):
# The default value for alternative is None
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling `mannwhitneyu` without .*`alternative`")
u1, p1 = stats.mannwhitneyu(self.X, self.Y, False)
u2, p2 = stats.mannwhitneyu(self.Y, self.X, False)
u3, p3 = stats.mannwhitneyu(self.X, self.Y, False,
alternative=None)
assert_equal(p1, p2)
assert_equal(p1, p3)
assert_equal(u1, 102)
assert_equal(u2, 102)
assert_equal(u3, 102)
assert_approx_equal(p1, 4.40940099958089e-05,
significant=self.significant)
def test_mannwhitneyu_ones(self):
x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1.])
y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
1., 1., 1., 1.])
# p-value verified with matlab and R to 5 significant digits
assert_array_almost_equal(stats.stats.mannwhitneyu(x, y,
alternative='less'),
(16980.5, 2.8214327656317373e-005),
decimal=12)
def test_mannwhitneyu_result_attributes(self):
# test for namedtuple attribute results
attributes = ('statistic', 'pvalue')
res = stats.mannwhitneyu(self.X, self.Y, alternative="less")
check_named_results(res, attributes)
def test_pointbiserial():
# same as mstats test except for the nan
# Test data: http://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output
x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
0,0,0,0,1]
y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
0.8,0.7,0.6,0.5,0.2,0.2,0.1]
assert_almost_equal(stats.pointbiserialr(x, y)[0], 0.36149, 5)
# test for namedtuple attribute results
attributes = ('correlation', 'pvalue')
res = stats.pointbiserialr(x, y)
check_named_results(res, attributes)
def test_obrientransform():
    # A couple of tests calculated by hand.
x1 = np.array([0, 2, 4])
t1 = stats.obrientransform(x1)
expected = [7, -2, 7]
assert_allclose(t1[0], expected)
x2 = np.array([0, 3, 6, 9])
t2 = stats.obrientransform(x2)
expected = np.array([30, 0, 0, 30])
assert_allclose(t2[0], expected)
# Test two arguments.
a, b = stats.obrientransform(x1, x2)
assert_equal(a, t1[0])
assert_equal(b, t2[0])
# Test three arguments.
a, b, c = stats.obrientransform(x1, x2, x1)
assert_equal(a, t1[0])
assert_equal(b, t2[0])
assert_equal(c, t1[0])
# This is a regression test to check np.var replacement.
# The author of this test didn't separately verify the numbers.
x1 = np.arange(5)
result = np.array(
[[5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667],
[21.66666667, 4.16666667, -1.66666667, 4.16666667, 21.66666667]])
assert_array_almost_equal(stats.obrientransform(x1, 2*x1), result, decimal=8)
# Example from "O'Brien Test for Homogeneity of Variance"
# by Herve Abdi.
values = range(5, 11)
reps = np.array([5, 11, 9, 3, 2, 2])
data = np.repeat(values, reps)
transformed_values = np.array([3.1828, 0.5591, 0.0344,
1.6086, 5.2817, 11.0538])
expected = np.repeat(transformed_values, reps)
result = stats.obrientransform(data)
assert_array_almost_equal(result[0], expected, decimal=4)
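    # Editorial addition, not part of the original test: a defining property
    # of the O'Brien transform is that the mean of the transformed scores
    # equals the ddof=1 sample variance of the input group.
    assert_allclose(np.mean(stats.obrientransform([0, 2, 4])[0]), 4.0)
    assert_allclose(np.mean(stats.obrientransform([0, 3, 6, 9])[0]), 15.0)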
class HarMeanTestCase:
def test_1dlist(self):
# Test a 1d list
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
b = 34.1417152147
self.do(a, b)
def test_1darray(self):
# Test a 1d array
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 34.1417152147
self.do(a, b)
def test_1dma(self):
# Test a 1d masked array
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 34.1417152147
self.do(a, b)
def test_1dmavalue(self):
# Test a 1d masked array with a masked value
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
mask=[0,0,0,0,0,0,0,0,0,1])
b = 31.8137186141
self.do(a, b)
# Note the next tests use axis=None as default, not axis=0
def test_2dlist(self):
# Test a 2d list
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 38.6696271841
self.do(a, b)
def test_2darray(self):
# Test a 2d array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 38.6696271841
self.do(np.array(a), b)
def test_2dma(self):
# Test a 2d masked array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 38.6696271841
self.do(np.ma.array(a), b)
def test_2daxis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545])
self.do(a, b, axis=0)
def test_2daxis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([19.2, 63.03939962, 103.80078637])
self.do(a, b, axis=1)
    def test_2dmatrixaxis0(self):
        # Test a 2d matrix with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]])
self.do(np.matrix(a), b, axis=0)
def test_2dmatrixaxis1(self):
        # Test a 2d matrix with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[19.2, 63.03939962, 103.80078637]]).T
self.do(np.matrix(a), b, axis=1)
class TestHarMean(HarMeanTestCase):
def do(self, a, b, axis=None, dtype=None):
x = stats.hmean(a, axis=axis, dtype=dtype)
assert_almost_equal(b, x)
assert_equal(x.dtype, dtype)
class GeoMeanTestCase:
def test_1dlist(self):
# Test a 1d list
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
b = 45.2872868812
self.do(a, b)
def test_1darray(self):
# Test a 1d array
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 45.2872868812
self.do(a, b)
def test_1dma(self):
# Test a 1d masked array
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 45.2872868812
self.do(a, b)
def test_1dmavalue(self):
# Test a 1d masked array with a masked value
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0,0,0,0,0,0,0,0,0,1])
b = 41.4716627439
self.do(a, b)
# Note the next tests use axis=None as default, not axis=0
def test_2dlist(self):
# Test a 2d list
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 52.8885199
self.do(a, b)
def test_2darray(self):
# Test a 2d array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 52.8885199
self.do(np.array(a), b)
def test_2dma(self):
# Test a 2d masked array
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 52.8885199
self.do(np.ma.array(a), b)
def test_2daxis0(self):
# Test a 2d list with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371])
self.do(a, b, axis=0)
def test_2daxis1(self):
# Test a 2d list with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([22.13363839, 64.02171746, 104.40086817])
self.do(a, b, axis=1)
    def test_2dmatrixaxis0(self):
        # Test a 2d matrix with axis=0
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]])
self.do(np.matrix(a), b, axis=0)
def test_2dmatrixaxis1(self):
        # Test a 2d matrix with axis=1
a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[22.13363839, 64.02171746, 104.40086817]]).T
self.do(np.matrix(a), b, axis=1)
def test_1dlist0(self):
# Test a 1d list with zero element
a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0]
b = 0.0 # due to exp(-inf)=0
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
def test_1darray0(self):
# Test a 1d array with zero element
a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
b = 0.0 # due to exp(-inf)=0
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
def test_1dma0(self):
# Test a 1d masked array with zero element
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
b = 41.4716627439
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
def test_1dmainf(self):
# Test a 1d masked array with negative element
a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1])
b = 41.4716627439
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
class TestGeoMean(GeoMeanTestCase):
def do(self, a, b, axis=None, dtype=None):
# Note this doesn't test when axis is not specified
x = stats.gmean(a, axis=axis, dtype=dtype)
assert_almost_equal(b, x)
assert_equal(x.dtype, dtype)
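# Editorial sketch, not part of the original suite: gmean is equivalent to
# exp(mean(log(a))), which is why the zero-element tests above expect 0.0
# via exp(-inf).
def _manual_gmean(a):
    a = np.asarray(a, dtype=float)
    return np.exp(np.mean(np.log(a)))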
def test_binomtest():
# precision tests compared to R for ticket:986
pp = np.concatenate((np.linspace(0.1,0.2,5), np.linspace(0.45,0.65,5),
np.linspace(0.85,0.95,5)))
n = 501
x = 450
results = [0.0, 0.0, 1.0159969301994141e-304,
2.9752418572150531e-275, 7.7668382922535275e-250,
2.3381250925167094e-099, 7.8284591587323951e-081,
9.9155947819961383e-065, 2.8729390725176308e-050,
1.7175066298388421e-037, 0.0021070691951093692,
0.12044570587262322, 0.88154763174802508, 0.027120993063129286,
2.6102587134694721e-006]
for p, res in zip(pp,results):
        assert_approx_equal(stats.binom_test(x, n, p), res,
                            significant=12, err_msg='fail for p=%f' % p)
    assert_approx_equal(stats.binom_test(50, 100, 0.1), 5.8320387857343647e-024,
                        significant=12, err_msg='fail for p=0.1')
def test_binomtest2():
# test added for issue #2384
res2 = [
[1.0, 1.0],
[0.5,1.0,0.5],
[0.25,1.00,1.00,0.25],
[0.125,0.625,1.000,0.625,0.125],
[0.0625,0.3750,1.0000,1.0000,0.3750,0.0625],
[0.03125,0.21875,0.68750,1.00000,0.68750,0.21875,0.03125],
[0.015625,0.125000,0.453125,1.000000,1.000000,0.453125,0.125000,0.015625],
[0.0078125,0.0703125,0.2890625,0.7265625,1.0000000,0.7265625,0.2890625,
0.0703125,0.0078125],
[0.00390625,0.03906250,0.17968750,0.50781250,1.00000000,1.00000000,
0.50781250,0.17968750,0.03906250,0.00390625],
[0.001953125,0.021484375,0.109375000,0.343750000,0.753906250,1.000000000,
0.753906250,0.343750000,0.109375000,0.021484375,0.001953125]
]
for k in range(1, 11):
res1 = [stats.binom_test(v, k, 0.5) for v in range(k + 1)]
assert_almost_equal(res1, res2[k-1], decimal=10)
def test_binomtest3():
# test added for issue #2384
# test when x == n*p and neighbors
res3 = [stats.binom_test(v, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
assert_equal(res3, np.ones(len(res3), int))
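    # Editorial note, not part of the original test: with x == n*p here, x is
    # the mode of Binomial(n, p), so every outcome has probability <= P(x),
    # counts as "at least as extreme", and the two-sided p-value sums to 1.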
#> bt=c()
#> for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i-1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}}
binom_testm1 = np.array([
0.5, 0.5555555555555556, 0.578125, 0.5904000000000003,
0.5981224279835393, 0.603430543396034, 0.607304096221924,
0.610255656871054, 0.612579511000001, 0.625, 0.670781893004115,
0.68853759765625, 0.6980101120000006, 0.703906431368616,
0.70793209416498, 0.7108561134173507, 0.713076544331419,
0.714820192935702, 0.6875, 0.7268709038256367, 0.7418963909149174,
0.74986110468096, 0.7548015520398076, 0.7581671424768577,
0.760607984787832, 0.762459425024199, 0.7639120677676575, 0.7265625,
0.761553963657302, 0.774800934828818, 0.7818005980538996,
0.78613491480358, 0.789084353140195, 0.7912217659828884,
0.79284214559524, 0.794112956558801, 0.75390625, 0.7856929451142176,
0.7976688481430754, 0.8039848974727624, 0.807891868948366,
0.8105487660137676, 0.812473307174702, 0.8139318233591120,
0.815075399104785, 0.7744140625, 0.8037322594985427,
0.814742863657656, 0.8205425178645808, 0.8241275984172285,
0.8265645374416, 0.8283292196088257, 0.829666291102775,
0.8307144686362666, 0.7905273437499996, 0.8178712053954738,
0.828116983756619, 0.833508948940494, 0.8368403871552892,
0.839104213210105, 0.840743186196171, 0.84198481438049,
0.8429580531563676, 0.803619384765625, 0.829338573944648,
0.8389591907548646, 0.84401876783902, 0.84714369697889,
0.8492667010581667, 0.850803474598719, 0.851967542858308,
0.8528799045949524, 0.8145294189453126, 0.838881732845347,
0.847979024541911, 0.852760894015685, 0.8557134656773457,
0.8577190131799202, 0.85917058278431, 0.860270010472127,
0.861131648404582, 0.823802947998047, 0.846984756807511,
0.855635653643743, 0.860180994825685, 0.86298688573253,
0.864892525675245, 0.866271647085603, 0.867316125625004,
0.8681346531755114
])
# > bt=c()
# > for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i+1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}}
binom_testp1 = np.array([
0.5, 0.259259259259259, 0.26171875, 0.26272, 0.2632244513031551,
0.2635138663069203, 0.2636951804161073, 0.2638162407564354,
0.2639010709000002, 0.625, 0.4074074074074074, 0.42156982421875,
0.4295746560000003, 0.43473045988554, 0.4383309503172684,
0.4409884859402103, 0.4430309389962837, 0.444649849401104, 0.6875,
0.4927602499618962, 0.5096031427383425, 0.5189636628480,
0.5249280070771274, 0.5290623300865124, 0.5320974248125793,
0.5344204730474308, 0.536255847400756, 0.7265625, 0.5496019313526808,
0.5669248746708034, 0.576436455045805, 0.5824538812831795,
0.5866053321547824, 0.589642781414643, 0.5919618019300193,
0.593790427805202, 0.75390625, 0.590868349763505, 0.607983393277209,
0.617303847446822, 0.623172512167948, 0.627208862156123,
0.6301556891501057, 0.632401894928977, 0.6341708982290303,
0.7744140625, 0.622562037497196, 0.639236102912278, 0.648263335014579,
0.65392850011132, 0.657816519817211, 0.660650782947676,
0.662808780346311, 0.6645068560246006, 0.7905273437499996,
0.6478843304312477, 0.6640468318879372, 0.6727589686071775,
0.6782129857784873, 0.681950188903695, 0.684671508668418,
0.686741824999918, 0.688369886732168, 0.803619384765625,
0.668716055304315, 0.684360013879534, 0.6927642396829181,
0.6980155964704895, 0.701609591890657, 0.7042244320992127,
0.7062125081341817, 0.707775152962577, 0.8145294189453126,
0.686243374488305, 0.7013873696358975, 0.709501223328243,
0.714563595144314, 0.718024953392931, 0.7205416252126137,
0.722454130389843, 0.723956813292035, 0.823802947998047,
0.701255953767043, 0.715928221686075, 0.723772209289768,
0.7286603031173616, 0.7319999279787631, 0.7344267920995765,
0.736270323773157, 0.737718376096348
])
res4_p1 = [stats.binom_test(v+1, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
res4_m1 = [stats.binom_test(v-1, v*k, 1./k) for v in range(1, 11)
for k in range(2, 11)]
assert_almost_equal(res4_p1, binom_testp1, decimal=13)
assert_almost_equal(res4_m1, binom_testm1, decimal=13)
class TestTrim(object):
# test trim functions
def test_trim1(self):
a = np.arange(11)
assert_equal(np.sort(stats.trim1(a, 0.1)), np.arange(10))
assert_equal(np.sort(stats.trim1(a, 0.2)), np.arange(9))
assert_equal(np.sort(stats.trim1(a, 0.2, tail='left')),
np.arange(2, 11))
assert_equal(np.sort(stats.trim1(a, 3/11., tail='left')),
np.arange(3, 11))
assert_equal(stats.trim1(a, 1.0), [])
assert_equal(stats.trim1(a, 1.0, tail='left'), [])
# empty input
assert_equal(stats.trim1([], 0.1), [])
assert_equal(stats.trim1([], 3/11., tail='left'), [])
assert_equal(stats.trim1([], 4/6.), [])
def test_trimboth(self):
a = np.arange(11)
assert_equal(np.sort(stats.trimboth(a, 3/11.)), np.arange(3, 8))
assert_equal(np.sort(stats.trimboth(a, 0.2)),
np.array([2, 3, 4, 5, 6, 7, 8]))
assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(6, 4), 0.2)),
np.arange(4, 20).reshape(4, 4))
assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(4, 6).T,
2/6.)),
np.array([[2, 8, 14, 20], [3, 9, 15, 21]]))
assert_raises(ValueError, stats.trimboth,
np.arange(24).reshape(4, 6).T, 4/6.)
# empty input
assert_equal(stats.trimboth([], 0.1), [])
assert_equal(stats.trimboth([], 3/11.), [])
assert_equal(stats.trimboth([], 4/6.), [])
def test_trim_mean(self):
# don't use pre-sorted arrays
a = np.array([4, 8, 2, 0, 9, 5, 10, 1, 7, 3, 6])
idx = np.array([3, 5, 0, 1, 2, 4])
a2 = np.arange(24).reshape(6, 4)[idx, :]
a3 = np.arange(24).reshape(6, 4, order='F')[idx, :]
assert_equal(stats.trim_mean(a3, 2/6.),
np.array([2.5, 8.5, 14.5, 20.5]))
assert_equal(stats.trim_mean(a2, 2/6.),
np.array([10., 11., 12., 13.]))
idx4 = np.array([1, 0, 3, 2])
a4 = np.arange(24).reshape(4, 6)[idx4, :]
assert_equal(stats.trim_mean(a4, 2/6.),
np.array([9., 10., 11., 12., 13., 14.]))
# shuffled arange(24) as array_like
a = [7, 11, 12, 21, 16, 6, 22, 1, 5, 0, 18, 10, 17, 9, 19, 15, 23,
20, 2, 14, 4, 13, 8, 3]
assert_equal(stats.trim_mean(a, 2/6.), 11.5)
assert_equal(stats.trim_mean([5,4,3,1,2,0], 2/6.), 2.5)
# check axis argument
np.random.seed(1234)
a = np.random.randint(20, size=(5, 6, 4, 7))
for axis in [0, 1, 2, 3, -1]:
res1 = stats.trim_mean(a, 2/6., axis=axis)
res2 = stats.trim_mean(np.rollaxis(a, axis), 2/6.)
assert_equal(res1, res2)
res1 = stats.trim_mean(a, 2/6., axis=None)
res2 = stats.trim_mean(a.ravel(), 2/6.)
assert_equal(res1, res2)
assert_raises(ValueError, stats.trim_mean, a, 0.6)
# empty input
assert_equal(stats.trim_mean([], 0.0), np.nan)
assert_equal(stats.trim_mean([], 0.6), np.nan)
class TestSigmaClip(object):
def test_sigmaclip1(self):
a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))
fact = 4 # default
c, low, upp = stats.sigmaclip(a)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, a.size)
def test_sigmaclip2(self):
a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5)))
fact = 1.5
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, 4)
assert_equal(a.size, 36) # check original array unchanged
def test_sigmaclip3(self):
a = np.concatenate((np.linspace(9.5, 10.5, 11),
np.linspace(-100, -50, 3)))
fact = 1.8
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min() > low)
assert_(c.max() < upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c, np.linspace(9.5, 10.5, 11))
def test_sigmaclip_result_attributes(self):
a = np.concatenate((np.linspace(9.5, 10.5, 11),
np.linspace(-100, -50, 3)))
fact = 1.8
res = stats.sigmaclip(a, fact, fact)
attributes = ('clipped', 'lower', 'upper')
check_named_results(res, attributes)
def test_std_zero(self):
# regression test #8632
x = np.ones(10)
assert_equal(stats.sigmaclip(x)[0], x)
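# Editorial sketch, not part of the original suite: the invariants asserted
# above follow from sigma clipping's fixed point -- iterate until no point
# falls outside mean +/- fact*std of the surviving sample; the returned
# bounds come from the final pass. Inclusive comparisons keep a constant
# array intact, matching the test_std_zero regression case.
def _manual_sigmaclip(a, fact=4.0):
    c = np.asarray(a).ravel()
    while True:
        low = c.mean() - fact * c.std()
        upp = c.mean() + fact * c.std()
        kept = c[(c >= low) & (c <= upp)]
        if kept.size == c.size:
            return kept, low, upp
        c = kept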
class TestFOneWay(object):
def test_trivial(self):
# A trivial test of stats.f_oneway, with F=0.
F, p = stats.f_oneway([0,2], [0,2])
assert_equal(F, 0.0)
def test_basic(self):
# Despite being a floating point calculation, this data should
# result in F being exactly 2.0.
F, p = stats.f_oneway([0,2], [2,4])
assert_equal(F, 2.0)
def test_large_integer_array(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
F, p = stats.f_oneway(a, b)
assert_almost_equal(F, 0.77450216931805538)
def test_result_attributes(self):
a = np.array([655, 788], dtype=np.uint16)
b = np.array([789, 772], dtype=np.uint16)
res = stats.f_oneway(a, b)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_nist(self):
        # These are the NIST ANOVA files. They can be found at:
# http://www.itl.nist.gov/div898/strd/anova/anova.html
filenames = ['SiRstv.dat', 'SmLs01.dat', 'SmLs02.dat', 'SmLs03.dat',
'AtmWtAg.dat', 'SmLs04.dat', 'SmLs05.dat', 'SmLs06.dat',
'SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat']
for test_case in filenames:
rtol = 1e-7
fname = os.path.abspath(os.path.join(os.path.dirname(__file__),
'data/nist_anova', test_case))
with open(fname, 'r') as f:
content = f.read().split('\n')
certified = [line.split() for line in content[40:48]
if line.strip()]
dataf = np.loadtxt(fname, skiprows=60)
y, x = dataf.T
y = y.astype(int)
caty = np.unique(y)
f = float(certified[0][-1])
xlist = [x[y == i] for i in caty]
res = stats.f_oneway(*xlist)
# With the hard test cases we relax the tolerance a bit.
hard_tc = ('SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat')
if test_case in hard_tc:
rtol = 1e-4
assert_allclose(res[0], f, rtol=rtol,
err_msg='Failing testcase: %s' % test_case)
class TestKruskal(object):
def test_simple(self):
x = [1]
y = [2]
h, p = stats.kruskal(x, y)
assert_equal(h, 1.0)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))
h, p = stats.kruskal(np.array(x), np.array(y))
assert_equal(h, 1.0)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 1))
def test_basic(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
h, p = stats.kruskal(x, y)
assert_approx_equal(h, 3./11, significant=10)
assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))
h, p = stats.kruskal(np.array(x), np.array(y))
assert_approx_equal(h, 3./11, significant=10)
assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1))
def test_simple_tie(self):
x = [1]
y = [1, 2]
h_uncorr = 1.5**2 + 2*2.25**2 - 12
corr = 0.75
expected = h_uncorr / corr # 0.5
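        # Editorial derivation, not in the original test: the pooled sample
        # [1, 1, 2] gets average ranks [1.5, 1.5, 3.0] for the tie, so
        #   H = 12/(N*(N+1)) * sum(R_j**2 / n_j) - 3*(N+1)
        #     = 12/12 * (1.5**2/1 + 4.5**2/2) - 12 = 0.375 = h_uncorr,
        # and the tie correction is 1 - (2**3 - 2)/(3**3 - 3) = 0.75.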
h, p = stats.kruskal(x, y)
# Since the expression is simple and the exact answer is 0.5, it
# should be safe to use assert_equal().
assert_equal(h, expected)
def test_another_tie(self):
x = [1, 1, 1, 2]
y = [2, 2, 2, 2]
h_uncorr = (12. / 8. / 9.) * 4 * (3**2 + 6**2) - 3 * 9
corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
expected = h_uncorr / corr
h, p = stats.kruskal(x, y)
assert_approx_equal(h, expected)
def test_three_groups(self):
# A test of stats.kruskal with three groups, with ties.
x = [1, 1, 1]
y = [2, 2, 2]
z = [2, 2]
h_uncorr = (12. / 8. / 9.) * (3*2**2 + 3*6**2 + 2*6**2) - 3 * 9 # 5.0
corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8)
expected = h_uncorr / corr # 7.0
h, p = stats.kruskal(x, y, z)
assert_approx_equal(h, expected)
assert_approx_equal(p, stats.distributions.chi2.sf(h, 2))
    def test_empty(self):
        # A test of stats.kruskal where one group is empty.
x = [1, 1, 1]
y = [2, 2, 2]
z = []
assert_equal(stats.kruskal(x, y, z), (np.nan, np.nan))
def test_kruskal_result_attributes(self):
x = [1, 3, 5, 7, 9]
y = [2, 4, 6, 8, 10]
res = stats.kruskal(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_nan_policy(self):
x = np.arange(10.)
x[9] = np.nan
assert_equal(stats.kruskal(x, x), (np.nan, np.nan))
assert_almost_equal(stats.kruskal(x, x, nan_policy='omit'), (0.0, 1.0))
assert_raises(ValueError, stats.kruskal, x, x, nan_policy='raise')
assert_raises(ValueError, stats.kruskal, x, x, nan_policy='foobar')
class TestCombinePvalues(object):
def test_fisher(self):
# Example taken from http://en.wikipedia.org/wiki/Fisher's_exact_test#Example
xsq, p = stats.combine_pvalues([.01, .2, .3], method='fisher')
assert_approx_equal(p, 0.02156, significant=4)
def test_stouffer(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer')
assert_approx_equal(p, 0.01651, significant=4)
def test_stouffer2(self):
Z, p = stats.combine_pvalues([.5, .5, .5], method='stouffer')
assert_approx_equal(p, 0.5, significant=4)
def test_weighted_stouffer(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',
weights=np.ones(3))
assert_approx_equal(p, 0.01651, significant=4)
def test_weighted_stouffer2(self):
Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer',
weights=np.array((1, 4, 9)))
assert_approx_equal(p, 0.1464, significant=4)
class TestCdfDistanceValidation(object):
"""
Test that _cdf_distance() (via wasserstein_distance()) raises ValueErrors
for bad inputs.
"""
def test_distinct_value_and_weight_lengths(self):
# When the number of weights does not match the number of values,
# a ValueError should be raised.
assert_raises(ValueError, stats.wasserstein_distance,
[1], [2], [4], [3, 1])
assert_raises(ValueError, stats.wasserstein_distance, [1], [2], [1, 0])
def test_zero_weight(self):
# When a distribution is given zero weight, a ValueError should be
# raised.
assert_raises(ValueError, stats.wasserstein_distance,
[0, 1], [2], [0, 0])
assert_raises(ValueError, stats.wasserstein_distance,
[0, 1], [2], [3, 1], [0])
def test_negative_weights(self):
# A ValueError should be raised if there are any negative weights.
assert_raises(ValueError, stats.wasserstein_distance,
[0, 1], [2, 2], [1, 1], [3, -1])
def test_empty_distribution(self):
# A ValueError should be raised when trying to measure the distance
# between something and nothing.
assert_raises(ValueError, stats.wasserstein_distance, [], [2, 2])
assert_raises(ValueError, stats.wasserstein_distance, [1], [])
def test_inf_weight(self):
# An inf weight is not valid.
assert_raises(ValueError, stats.wasserstein_distance,
[1, 2, 1], [1, 1], [1, np.inf, 1], [1, 1])
class TestWassersteinDistance(object):
""" Tests for wasserstein_distance() output values.
"""
def test_simple(self):
# For basic distributions, the value of the Wasserstein distance is
# straightforward.
assert_almost_equal(
stats.wasserstein_distance([0, 1], [0], [1, 1], [1]),
.5)
assert_almost_equal(stats.wasserstein_distance(
[0, 1], [0], [3, 1], [1]),
.25)
assert_almost_equal(stats.wasserstein_distance(
[0, 2], [0], [1, 1], [1]),
1)
assert_almost_equal(stats.wasserstein_distance(
[0, 1, 2], [1, 2, 3]),
1)
def test_same_distribution(self):
# Any distribution moved to itself should have a Wasserstein distance of
# zero.
assert_equal(stats.wasserstein_distance([1, 2, 3], [2, 1, 3]), 0)
assert_equal(
stats.wasserstein_distance([1, 1, 1, 4], [4, 1],
[1, 1, 1, 1], [1, 3]),
0)
def test_shift(self):
# If the whole distribution is shifted by x, then the Wasserstein
# distance should be x.
assert_almost_equal(stats.wasserstein_distance([0], [1]), 1)
assert_almost_equal(stats.wasserstein_distance([-5], [5]), 10)
assert_almost_equal(
stats.wasserstein_distance([1, 2, 3, 4, 5], [11, 12, 13, 14, 15]),
10)
assert_almost_equal(
stats.wasserstein_distance([4.5, 6.7, 2.1], [4.6, 7, 9.2],
[3, 1, 1], [1, 3, 1]),
2.5)
def test_combine_weights(self):
# Assigning a weight w to a value is equivalent to including that value
# w times in the value array with weight of 1.
assert_almost_equal(
stats.wasserstein_distance(
[0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],
[1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),
stats.wasserstein_distance([5, 0, 1], [0, 4, 3],
[1, 2, 4], [1, 2, 4]))
def test_collapse(self):
        # Collapsing a distribution to a point distribution at zero is
        # equivalent to taking the average of the absolute values of the data.
u = np.arange(-10, 30, 0.3)
v = np.zeros_like(u)
assert_almost_equal(
stats.wasserstein_distance(u, v),
np.mean(np.abs(u)))
u_weights = np.arange(len(u))
v_weights = u_weights[::-1]
assert_almost_equal(
stats.wasserstein_distance(u, v, u_weights, v_weights),
np.average(np.abs(u), weights=u_weights))
def test_zero_weight(self):
# Values with zero weight have no impact on the Wasserstein distance.
assert_almost_equal(
stats.wasserstein_distance([1, 2, 100000], [1, 1],
[1, 1, 0], [1, 1]),
stats.wasserstein_distance([1, 2], [1, 1], [1, 1], [1, 1]))
def test_inf_values(self):
# Inf values can lead to an inf distance or trigger a RuntimeWarning
# (and return NaN) if the distance is undefined.
assert_equal(
stats.wasserstein_distance([1, 2, np.inf], [1, 1]),
np.inf)
assert_equal(
stats.wasserstein_distance([1, 2, np.inf], [-np.inf, 1]),
np.inf)
assert_equal(
stats.wasserstein_distance([1, -np.inf, np.inf], [1, 1]),
np.inf)
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(
stats.wasserstein_distance([1, 2, np.inf], [np.inf, 1]),
np.nan)
class TestEnergyDistance(object):
""" Tests for energy_distance() output values.
"""
def test_simple(self):
# For basic distributions, the value of the energy distance is
# straightforward.
assert_almost_equal(
stats.energy_distance([0, 1], [0], [1, 1], [1]),
np.sqrt(2) * .5)
assert_almost_equal(stats.energy_distance(
[0, 1], [0], [3, 1], [1]),
np.sqrt(2) * .25)
assert_almost_equal(stats.energy_distance(
[0, 2], [0], [1, 1], [1]),
2 * .5)
assert_almost_equal(
stats.energy_distance([0, 1, 2], [1, 2, 3]),
np.sqrt(2) * (3*(1./3**2))**.5)
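        # Editorial note, not in the original test: the last value follows
        # from energy_distance == sqrt(2) * (l2 norm of the CDF difference);
        # the empirical CDFs of [0, 1, 2] and [1, 2, 3] differ by 1/3 on
        # each of three unit intervals, giving sqrt(2) * sqrt(3*(1/3)**2).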
def test_same_distribution(self):
        # Any distribution moved to itself should have an energy distance of
# zero.
assert_equal(stats.energy_distance([1, 2, 3], [2, 1, 3]), 0)
assert_equal(
stats.energy_distance([1, 1, 1, 4], [4, 1], [1, 1, 1, 1], [1, 3]),
0)
def test_shift(self):
# If a single-point distribution is shifted by x, then the energy
# distance should be sqrt(2) * sqrt(x).
assert_almost_equal(stats.energy_distance([0], [1]), np.sqrt(2))
assert_almost_equal(
stats.energy_distance([-5], [5]),
np.sqrt(2) * 10**.5)
def test_combine_weights(self):
# Assigning a weight w to a value is equivalent to including that value
# w times in the value array with weight of 1.
assert_almost_equal(
stats.energy_distance([0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4],
[1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]),
stats.energy_distance([5, 0, 1], [0, 4, 3], [1, 2, 4], [1, 2, 4]))
def test_zero_weight(self):
# Values with zero weight have no impact on the energy distance.
assert_almost_equal(
stats.energy_distance([1, 2, 100000], [1, 1], [1, 1, 0], [1, 1]),
stats.energy_distance([1, 2], [1, 1], [1, 1], [1, 1]))
def test_inf_values(self):
# Inf values can lead to an inf distance or trigger a RuntimeWarning
# (and return NaN) if the distance is undefined.
assert_equal(stats.energy_distance([1, 2, np.inf], [1, 1]), np.inf)
assert_equal(
stats.energy_distance([1, 2, np.inf], [-np.inf, 1]),
np.inf)
assert_equal(
stats.energy_distance([1, -np.inf, np.inf], [1, 1]),
np.inf)
with suppress_warnings() as sup:
r = sup.record(RuntimeWarning, "invalid value*")
assert_equal(
stats.energy_distance([1, 2, np.inf], [np.inf, 1]),
np.nan)
| gpl-3.0 |
iambibhas/django | tests/admin_custom_urls/models.py | 78 | 2482 | from functools import update_wrapper
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.db import models
from django.http import HttpResponseRedirect
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Action(models.Model):
name = models.CharField(max_length=50, primary_key=True)
description = models.CharField(max_length=70)
def __str__(self):
return self.name
class ActionAdmin(admin.ModelAdmin):
"""
A ModelAdmin for the Action model that changes the URL of the add_view
    to '<app name>/<model name>/!add/'.
The Action model has a CharField PK.
"""
list_display = ('name', 'description')
def remove_url(self, name):
"""
Remove all entries named 'name' from the ModelAdmin instance URL
patterns list
"""
return [url for url in super(ActionAdmin, self).get_urls() if url.name != name]
def get_urls(self):
# Add the URL of our custom 'add_view' view to the front of the URLs
# list. Remove the existing one(s) first
from django.conf.urls import url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
view_name = '%s_%s_add' % info
return [
url(r'^!add/$', wrap(self.add_view), name=view_name),
] + self.remove_url(view_name)
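    # Editorial note, not part of the original app: because the custom route
    # reuses the standard '<app_label>_<model_name>_add' URL name, it stays
    # reversible the usual way, e.g.
    #   reverse('admin:admin_custom_urls_action_add')  # resolves to '!add/'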
class Person(models.Model):
name = models.CharField(max_length=20)
class PersonAdmin(admin.ModelAdmin):
def response_post_save_add(self, request, obj):
return HttpResponseRedirect(
reverse('admin:admin_custom_urls_person_history', args=[obj.pk]))
def response_post_save_change(self, request, obj):
return HttpResponseRedirect(
reverse('admin:admin_custom_urls_person_delete', args=[obj.pk]))
class Car(models.Model):
name = models.CharField(max_length=20)
class CarAdmin(admin.ModelAdmin):
def response_add(self, request, obj, post_url_continue=None):
return super(CarAdmin, self).response_add(
request, obj, post_url_continue=reverse('admin:admin_custom_urls_car_history', args=[obj.pk]))
admin.site.register(Action, ActionAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(Car, CarAdmin)
| bsd-3-clause |
nikhilprathapani/python-for-android | sl4atools/fullscreenwrapper2/examples/fullscreenwrapper2demo/fullscreenwrapper2demo.py | 44 | 3252 | '''
@copyright: Hariharan Srinath, 2012
@license: This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/
'''
xmldata = """<?xml version="1.0" encoding="utf-8"?>
<LinearLayout
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff314859"
android:orientation="vertical"
xmlns:android="http://schemas.android.com/apk/res/android">
<TextView
android:layout_width="fill_parent"
android:layout_height="0px"
android:textSize="16dp"
android:text="FullScreenWrapper2 Demo"
android:textColor="#ffffffff"
android:layout_weight="20"
android:gravity="center"/>
<TextView
android:layout_width="fill_parent"
android:layout_height="0px"
android:background="#ff000000"
android:id="@+id/txt_colorbox"
android:layout_weight="60"
android:gravity="center"/>
<LinearLayout
android:layout_width="fill_parent"
android:layout_height="0px"
android:orientation="horizontal"
android:layout_weight="20">
<Button
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff66a3d2"
android:text = "Random Color"
android:layout_weight="1"
android:id="@+id/but_change"
android:textSize="14dp"
android:gravity="center"/>
<Button
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:background="#ff25567b"
android:layout_weight="1"
android:text = "Exit"
android:textSize="14dp"
android:id="@+id/but_exit"
android:gravity="center"/>
</LinearLayout>
</LinearLayout>"""
import android, random
from fullscreenwrapper2 import *
class DemoLayout(Layout):
def __init__(self):
super(DemoLayout,self).__init__(xmldata,"FullScreenWrapper Demo")
def on_show(self):
self.add_event(key_EventHandler(handler_function=self.close_app))
self.views.but_change.add_event(click_EventHandler(self.views.but_change, self.change_color))
self.views.but_exit.add_event(click_EventHandler(self.views.but_exit, self.close_app))
def on_close(self):
pass
def close_app(self,view,event):
FullScreenWrapper2App.exit_FullScreenWrapper2App()
def change_color(self,view, event):
colorvalue = "#ff"+self.get_rand_hex_byte()+self.get_rand_hex_byte()+self.get_rand_hex_byte()
self.views.txt_colorbox.background=colorvalue
def get_rand_hex_byte(self):
j = random.randint(0,255)
hexrep = hex(j)[2:]
if(len(hexrep)==1):
hexrep = '0'+hexrep
return hexrep
if __name__ == '__main__':
droid = android.Android()
random.seed()
FullScreenWrapper2App.initialize(droid)
FullScreenWrapper2App.show_layout(DemoLayout())
FullScreenWrapper2App.eventloop()
| apache-2.0 |
nhomar/odoo-mirror | addons/hr_timesheet_sheet/__init__.py | 434 | 1127 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_sheet
import wizard
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rvs/gpdb | gpMgmt/bin/lib/gppinggpfdist.py | 54 | 1089 | #!/usr/bin/env python
import sys, httplib, getopt, socket
def usage(exitarg):
print 'usage: %s [-q] host:port' % sys.argv[0]
print
print ' -q: quiet mode'
print
print 'e.g. %s localhost:8080' % sys.argv[0]
print
sys.exit(exitarg)
gpfdist = ''
quiet = 0
uri = ''
try:
(options, args) = getopt.getopt(sys.argv[1:], 'q')
except Exception, e:
usage('Error: ' + str(e))
for (switch, val) in options:
if (switch == '-q'): quiet = 1
if len(args) != 1:
usage('Error: please specify uri.')
host_port = args[0]
try:
conn = httplib.HTTPConnection(host_port)
conn.request('GET', '/')
r = conn.getresponse()
gpfdist = r.getheader('X-GPFDIST-VERSION', '')
except socket.error:
if not quiet:
print 'Error: gpfdist is not running (reason: socket error)'
print 'Exit: 1'
sys.exit(1)
if not gpfdist:
if not quiet:
        print 'Error: gpfdist port is taken by some other program'
print 'Exit: 2'
sys.exit(2)
if not quiet:
print 'Okay, gpfdist version "%s" is running on %s.' % (gpfdist, host_port)
sys.exit(0)
| apache-2.0 |
suiyuan2009/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test_utils.py | 81 | 2065 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util import tf_inspect
def assert_estimator_contract(tester, estimator_class):
"""Asserts whether given estimator satisfies the expected contract.
This doesn't check every details of contract. This test is used for that a
function is not forgotten to implement in a precanned Estimator.
Args:
tester: A tf.test.TestCase.
estimator_class: 'type' object of pre-canned estimator.
"""
attributes = tf_inspect.getmembers(estimator_class)
attribute_names = [a[0] for a in attributes]
tester.assertTrue('config' in attribute_names)
tester.assertTrue('evaluate' in attribute_names)
tester.assertTrue('export' in attribute_names)
tester.assertTrue('fit' in attribute_names)
tester.assertTrue('get_variable_names' in attribute_names)
tester.assertTrue('get_variable_value' in attribute_names)
tester.assertTrue('model_dir' in attribute_names)
tester.assertTrue('predict' in attribute_names)
def assert_in_range(min_value, max_value, key, metrics):
actual_value = metrics[key]
if actual_value < min_value:
raise ValueError('%s: %s < %s.' % (key, actual_value, min_value))
if actual_value > max_value:
raise ValueError('%s: %s > %s.' % (key, actual_value, max_value))
| apache-2.0 |
jlegendary/orange | Orange/OrangeCanvas/help/intersphinx.py | 6 | 2259 | """
Parsers for intersphinx inventory files
Taken from `sphinx.ext.intersphinx`
"""
import re
import codecs
import zlib
b = str
UTF8StreamReader = codecs.lookup('utf-8')[2]
def read_inventory_v1(f, uri, join):
f = UTF8StreamReader(f)
invdata = {}
line = f.next()
projname = line.rstrip()[11:]
line = f.next()
version = line.rstrip()[11:]
for line in f:
name, type, location = line.rstrip().split(None, 2)
location = join(uri, location)
# version 1 did not add anchors to the location
if type == 'mod':
type = 'py:module'
location += '#module-' + name
else:
type = 'py:' + type
location += '#' + name
invdata.setdefault(type, {})[name] = (projname, version, location, '-')
return invdata
def read_inventory_v2(f, uri, join, bufsize=16*1024):
invdata = {}
line = f.readline()
projname = line.rstrip()[11:].decode('utf-8')
line = f.readline()
version = line.rstrip()[11:].decode('utf-8')
line = f.readline().decode('utf-8')
if 'zlib' not in line:
raise ValueError
def read_chunks():
decompressor = zlib.decompressobj()
for chunk in iter(lambda: f.read(bufsize), b('')):
yield decompressor.decompress(chunk)
yield decompressor.flush()
def split_lines(iter):
buf = b('')
for chunk in iter:
buf += chunk
lineend = buf.find(b('\n'))
while lineend != -1:
yield buf[:lineend].decode('utf-8')
buf = buf[lineend+1:]
lineend = buf.find(b('\n'))
assert not buf
for line in split_lines(read_chunks()):
# be careful to handle names with embedded spaces correctly
m = re.match(r'(?x)(.+?)\s+(\S*:\S*)\s+(\S+)\s+(\S+)\s+(.*)',
line.rstrip())
if not m:
continue
name, type, prio, location, dispname = m.groups()
if location.endswith(u'$'):
location = location[:-1] + name
location = join(uri, location)
invdata.setdefault(type, {})[name] = (projname, version,
location, dispname)
return invdata
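def _example_read_inventory(f, base_uri):
    # Editorial usage sketch, not part of the original module: callers are
    # expected to consume the '# Sphinx inventory version N' banner line
    # first and dispatch on it; the base_uri/join choices are illustrative.
    import posixpath
    line = f.readline()
    if line.rstrip().endswith('2'):
        return read_inventory_v2(f, base_uri, posixpath.join)
    return read_inventory_v1(f, base_uri, posixpath.join)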
| gpl-3.0 |
biocore/qiime | scripts/multiple_rarefactions_even_depth.py | 15 | 3623 | #!/usr/bin/env python
# File created on 09 Feb 2010
from __future__ import division
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"
from qiime.util import parse_command_line_parameters
from qiime.util import make_option
import os.path
from qiime.rarefaction import RarefactionMaker
script_info = {}
script_info[
'brief_description'] = """Perform multiple rarefactions on a single otu table, at one depth of sequences/sample"""
script_info[
'script_description'] = """To perform bootstrap, jackknife, and rarefaction analyses, the otu table must be subsampled (rarefied). This script rarefies, or subsamples, an OTU table. This does not provide curves of diversity by number of sequences in a sample. Rather it creates a subsampled OTU table by random sampling (without replacement) of the input OTU table. Samples that have fewer sequences then the requested rarefaction depth are omitted from the ouput otu tables. The pseudo-random number generator used for rarefaction by subsampling is NumPy's default - an implementation of the Mersenne twister PRNG."""
script_info['script_usage'] = []
script_info['script_usage'].append(
("""Example:""",
"""subsample otu_table.biom at 100 seqs/sample (-d) 10 times (-n) and write results to files (e.g., rarefaction_400_0.biom) in 'rarefied_otu_tables/' (-o).""",
"""%prog -i otu_table.biom -o rarefied_otu_tables/ -d 100 -n 10"""))
script_info[
'output_description'] = """The results of this script consist of n subsampled OTU tables, written to the directory specified by -o. The file has the same otu table format as the input otu_table.biom. Note: if the output files would be empty, no files are written."""
script_info['required_options'] = [
make_option('-i', '--input_path', type='existing_filepath',
help='input otu table filepath'),
make_option('-o', '--output_path', type='new_dirpath',
help="write output rarefied otu tables files to this dir (makes dir if it doesn't exist)"),
make_option('-d', '--depth', type='int',
help='sequences per sample to subsample'),
]
script_info['optional_options'] = [
make_option('-n', '--num_reps', dest='num_reps', default=10, type='int',
help='num iterations at each seqs/sample level [default: %default]'),
make_option('--lineages_included', dest='lineages_included', default=False,
action="store_true",
help="""output rarefied otu tables will include taxonomic (lineage) information for each otu, if present in input otu table [default: %default]"""),
make_option('-k', '--keep_empty_otus', default=False, action='store_true',
help='otus (rows) of all zeros are usually omitted from the output otu tables, with -k they will not be removed from the output files [default: %default]'),
]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
if not os.path.exists(opts.output_path):
os.makedirs(opts.output_path)
maker = RarefactionMaker(opts.input_path, opts.depth, opts.depth,
1, opts.num_reps)
maker.rarefy_to_files(opts.output_path, False,
include_lineages=opts.lineages_included,
empty_otus_removed=(not opts.keep_empty_otus))
if __name__ == "__main__":
main()
| gpl-2.0 |
vilorious/pyload | module/plugins/hoster/MystoreTo.py | 13 | 1208 | # -*- coding: utf-8 -*-
#
# Test link:
# http://mystore.to/dl/mxcA50jKfP
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class MystoreTo(SimpleHoster):
__name__ = "MystoreTo"
__type__ = "hoster"
__version__ = "0.04"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?mystore\.to/dl/.+'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
__description__ = """Mystore.to hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zapp-brannigan", "")]
NAME_PATTERN = r'<h1>(?P<N>.+?)<'
SIZE_PATTERN = r'FILESIZE: (?P<S>[\d\.,]+) (?P<U>[\w^_]+)'
OFFLINE_PATTERN = r'>file not found<'
def setup(self):
self.chunk_limit = 1
self.resume_download = True
self.multiDL = True
def handle_free(self, pyfile):
try:
fid = re.search(r'wert="(.+?)"', self.html).group(1)
except AttributeError:
self.error(_("File-ID not found"))
self.link = self.load("http://mystore.to/api/download",
post={'FID': fid})
getInfo = create_getInfo(MystoreTo)
| gpl-3.0 |
formath/mxnet | benchmark/python/sparse/cast_storage.py | 57 | 3691 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ctypes
from mxnet.test_utils import *
import os
import time
import argparse
from mxnet.base import check_call, _LIB
parser = argparse.ArgumentParser(description="Benchmark cast storage operators",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-omp-threads', type=int, default=1, help='number of omp threads to set in MXNet')
args = parser.parse_args()
def measure_cost(repeat, f, *args, **kwargs):
start = time.time()
for i in range(repeat):
(f(*args, **kwargs)).wait_to_read()
end = time.time()
diff = end - start
return diff / repeat
def run_cast_storage_synthetic():
def dense_to_sparse(m, n, density, ctx, repeat, stype):
set_default_context(ctx)
data_shape = (m, n)
dns_data = rand_ndarray(data_shape, stype, density).tostype('default')
dns_data.wait_to_read()
# do one warm up run, verify correctness
assert same(mx.nd.cast_storage(dns_data, stype).asnumpy(), dns_data.asnumpy())
# start benchmarking
cost = measure_cost(repeat, mx.nd.cast_storage, dns_data, stype)
results = '{:10.1f} {:>10} {:8d} {:8d} {:10.2f}'.format(density*100, str(ctx), m, n, cost*1000)
print(results)
check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(args.num_omp_threads)))
# params
# m number of rows
# n number of columns
# density density of the matrix
# num_repeat number of benchmark runs to average over
# contexts mx.cpu(), mx.gpu()
# note: benchmark different contexts separately; to benchmark cpu, compile without CUDA
# benchmarks dns_to_csr, dns_to_rsp
m = [ 512, 512]
n = [50000, 100000]
density = [1.00, 0.80, 0.60, 0.40, 0.20, 0.10, 0.05, 0.02, 0.01]
num_repeat = 10
contexts = [mx.gpu()]
benchmarks = ["dns_to_csr", "dns_to_rsp"]
# run benchmark
for b in benchmarks:
stype = ''
print("==================================================")
if b is "dns_to_csr":
stype = 'csr'
print(" cast_storage benchmark: dense to csr, size m x n ")
elif b is "dns_to_rsp":
stype = 'row_sparse'
print(" cast_storage benchmark: dense to rsp, size m x n ")
else:
print("invalid benchmark: %s" %b)
continue
print("==================================================")
headline = '{:>10} {:>10} {:>8} {:>8} {:>10}'.format('density(%)', 'context', 'm', 'n', 'time(ms)')
print(headline)
for i in range(len(n)):
for ctx in contexts:
for den in density:
dense_to_sparse(m[i], n[i], den, ctx, num_repeat, stype)
print("")
print("")
if __name__ == "__main__":
run_cast_storage_synthetic()
| apache-2.0 |
markredballoon/clivemizen | wp-content/themes/redballoon/bootstrap/node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py | 437 | 43606 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
"""Adds a tool to the four dictionaries used to process settings.
This only defines the tool. Each setting also needs to be added.
Args:
tool: The _Tool object to be added.
"""
_msvs_validators[tool.msvs_name] = {}
_msbuild_validators[tool.msbuild_name] = {}
_msvs_to_msbuild_converters[tool.msvs_name] = {}
_msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
"""A setting that's just a string."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
"""A settings that's a list of strings."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
if isinstance(value, list):
return [ConvertVCMacrosToMSBuild(i) for i in value]
else:
return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
"""Boolean settings, can have the values 'false' or 'true'."""
def _Validate(self, value):
if value != 'true' and value != 'false':
raise ValueError('expected bool; got %r' % value)
def ValidateMSVS(self, value):
self._Validate(value)
def ValidateMSBuild(self, value):
self._Validate(value)
def ConvertToMSBuild(self, value):
self._Validate(value)
return value
class _Integer(_Type):
"""Integer settings."""
def __init__(self, msbuild_base=10):
_Type.__init__(self)
self._msbuild_base = msbuild_base
def ValidateMSVS(self, value):
# Try to convert, this will raise ValueError if invalid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
# Try to convert, this will raise ValueError if invalid.
int(value, self._msbuild_base)
def ConvertToMSBuild(self, value):
msbuild_format = (self._msbuild_base == 10) and '%d' or '0x%04x'
return msbuild_format % int(value)
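# Illustrative note (added): with msbuild_base=16, values are rendered as
# hex, e.g. _Integer(msbuild_base=16).ConvertToMSBuild('4100') == '0x1004';
# this form is used for the resource compiler's Culture setting below.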
class _Enumeration(_Type):
"""Type of settings that is an enumeration.
In MSVS, the values are indexes like '0', '1', and '2'.
MSBuild uses text labels that are more representative, like 'Win32'.
Constructor args:
label_list: an array of MSBuild labels that correspond to the MSVS index.
In the rare cases where MSVS has skipped an index value, None is
used in the array to indicate the unused spot.
new: an array of labels that are new to MSBuild.
"""
def __init__(self, label_list, new=None):
_Type.__init__(self)
self._label_list = label_list
self._msbuild_values = set(value for value in label_list
if value is not None)
if new is not None:
self._msbuild_values.update(new)
def ValidateMSVS(self, value):
# Try to convert. It will raise an exception if not valid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
if value not in self._msbuild_values:
raise ValueError('unrecognized enumerated value %s' % value)
def ConvertToMSBuild(self, value):
index = int(value)
if index < 0 or index >= len(self._label_list):
raise ValueError('index value (%d) not in expected range [0, %d)' %
(index, len(self._label_list)))
label = self._label_list[index]
if label is None:
raise ValueError('converted value for %s not specified.' % value)
return label
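# Illustrative usage (added sketch; not in the original gyp source). An
# _Enumeration maps MSVS index strings to MSBuild labels, e.g. with the
# compiler 'Optimization' labels defined further below:
def _example_enumeration_usage():
  opt = _Enumeration(['Disabled', 'MinSpace', 'MaxSpeed', 'Full'])
  assert opt.ConvertToMSBuild('2') == 'MaxSpeed'
  opt.ValidateMSBuild('MaxSpeed')  # passes; unknown labels raise ValueError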
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
"""Defines a setting that has the same name in MSVS and MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
"""Defines a setting for which the name has changed.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting.
msbuild_name: the name of the MSBuild setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
_msbuild_validators[tool.msbuild_name][msbuild_name] = (
setting_type.ValidateMSBuild)
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
_MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
msbuild_settings_name, setting_type):
"""Defines a setting that may have moved to a new section.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_settings_name: the MSVS name of the setting.
msbuild_tool_name: the name of the MSBuild tool to place the setting under.
msbuild_settings_name: the MSBuild name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_settings_name] = (
setting_type.ValidateMSVS)
validator = setting_type.ValidateMSBuild
_msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSVS.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(unused_value, unused_msbuild_settings):
# Since this is for MSVS only settings, no translation will happen.
pass
_msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
"""Defines a setting that's handled via a command line option in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting that if 'true' becomes a flag
flag: the flag to insert at the end of the AdditionalOptions
"""
def _Translate(value, msbuild_settings):
if value == 'true':
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if 'AdditionalOptions' in tool_settings:
new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
else:
new_flags = flag
tool_settings['AdditionalOptions'] = new_flags
_msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
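# Illustrative usage (added sketch): once 'DefaultCharIsUnsigned' is
# registered below with flag '/J', translating a 'true' value appends the
# flag to ClCompile's AdditionalOptions:
def _example_converted_to_additional_option():
  msbuild_settings = {}
  translate = (
      _msvs_to_msbuild_converters['VCCLCompilerTool']['DefaultCharIsUnsigned'])
  translate('true', msbuild_settings)
  # msbuild_settings == {'ClCompile': {'AdditionalOptions': '/J'}}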
def _CustomGeneratePreprocessedFile(tool, msvs_name):
def _Translate(value, msbuild_settings):
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if value == '0':
tool_settings['PreprocessToFile'] = 'false'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '1': # /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '2': # /EP /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'true'
else:
raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
# Create a bogus validator that looks for '0', '1', or '2'
msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS
_msvs_validators[tool.msvs_name][msvs_name] = msvs_validator
msbuild_validator = _boolean.ValidateMSBuild
msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
def FixVCMacroSlashes(s):
"""Replace macros which have excessive following slashes.
These macros are known to have a built-in trailing slash. Furthermore, many
scripts hiccup on processing paths with extra slashes in the middle.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
s = fix_vc_macro_slashes_regex.sub(r'\1', s)
return s
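# Illustrative usage (added sketch; not in the original gyp source):
def _example_fix_vc_macro_slashes():
  assert FixVCMacroSlashes('$(IntDir)\\obj\\a.obj') == '$(IntDir)obj\\a.obj'
  assert FixVCMacroSlashes('$(OutDir)//bin') == '$(OutDir)bin'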
def ConvertVCMacrosToMSBuild(s):
"""Convert the the MSVS macros found in the string to the MSBuild equivalent.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
replace_map = {
'$(ConfigurationName)': '$(Configuration)',
'$(InputDir)': '%(RootDir)%(Directory)',
'$(InputExt)': '%(Extension)',
'$(InputFileName)': '%(Filename)%(Extension)',
'$(InputName)': '%(Filename)',
'$(InputPath)': '%(FullPath)',
'$(ParentName)': '$(ProjectFileName)',
'$(PlatformName)': '$(Platform)',
'$(SafeInputName)': '%(Filename)',
}
for old, new in replace_map.iteritems():
s = s.replace(old, new)
s = FixVCMacroSlashes(s)
return s
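# Illustrative usage (added sketch): MSVS per-file macros become MSBuild
# item metadata, and the slash cleanup above is applied afterwards:
def _example_convert_vc_macros():
  assert ConvertVCMacrosToMSBuild('$(InputName).obj') == '%(Filename).obj'
  assert (ConvertVCMacrosToMSBuild('$(IntDir)\\$(InputName).obj') ==
          '$(IntDir)%(Filename).obj')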
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except ValueError, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
# We don't know this setting. Give a warning.
print >> stderr, ('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting))
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
def ValidateMSVSSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSBuild.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except ValueError, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
print >> stderr, ('Warning: unrecognized setting %s/%s' %
(tool_name, setting))
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MSBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall'])) # /Gz
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2'])) # /arch:SSE2
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
new=['Send'])) # /errorReport:send"
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true', # /clr
'Pure', # /clr:pure
'Safe', # /clr:safe
'OldSyntax'])) # /clr:oldSyntax
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
'ForcePureILImage', # /CLRIMAGETYPE:PURE
'ForceSafeILImage'])) # /CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# These settings generate correctly in the MSVS output files when using
# e.g. DelayLoadDLLs! or AdditionalDependencies! to exclude files from
# configuration entries, but result in spurious artifacts which can be
# safely ignored here. See crbug.com/246570
_MSVSOnly(_link, 'AdditionalLibraryDirectories_excluded', _folder_list)
_MSVSOnly(_link, 'DelayLoadDLLs_excluded', _file_list)
_MSVSOnly(_link, 'AdditionalDependencies_excluded', _file_list)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in Visual Studio 2008 to set the following properties.
# However they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
_Enumeration([], new=['Stub', # /server stub
'None'])) # /server none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
# TODO(jeanluc) I don't think these are genuine settings but byproducts of Gyp.
_MSVSOnly(_lib, 'AdditionalLibraryDirectories_excluded', _folder_list)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Lib settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
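# Illustrative end-to-end usage (added sketch; not in the original file):
# converts a minimal MSVS settings dictionary using the directives above.
def _example_convert_to_msbuild_settings():
  msvs = {'VCCLCompilerTool': {'Optimization': '2', 'WarnAsError': 'true'}}
  msbuild = ConvertToMSBuildSettings(msvs)
  # msbuild == {'ClCompile': {'Optimization': 'MaxSpeed',
  #                           'TreatWarningAsError': 'true'}}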
| gpl-2.0 |
armersong/letsencrypt | letsencrypt/tests/client_test.py | 11 | 8012 | """Tests for letsencrypt.client."""
import os
import shutil
import tempfile
import unittest
import configobj
import OpenSSL
import mock
from acme import jose
from letsencrypt import account
from letsencrypt import configuration
from letsencrypt import errors
from letsencrypt import le_util
from letsencrypt.tests import test_util
KEY = test_util.load_vector("rsa512_key.pem")
CSR_SAN = test_util.load_vector("csr-san.der")
class RegisterTest(unittest.TestCase):
"""Tests for letsencrypt.client.register."""
def setUp(self):
self.config = mock.MagicMock(rsa_key_size=1024)
self.account_storage = account.AccountMemoryStorage()
self.tos_cb = mock.MagicMock()
def _call(self):
from letsencrypt.client import register
return register(self.config, self.account_storage, self.tos_cb)
def test_no_tos(self):
with mock.patch("letsencrypt.client.acme_client.Client") as mock_client:
mock_client.register().terms_of_service = "http://tos"
with mock.patch("letsencrypt.account.report_new_account"):
self.tos_cb.return_value = False
self.assertRaises(errors.Error, self._call)
self.tos_cb.return_value = True
self._call()
self.tos_cb = None
self._call()
def test_it(self):
with mock.patch("letsencrypt.client.acme_client.Client"):
with mock.patch("letsencrypt.account."
"report_new_account"):
self._call()
class ClientTest(unittest.TestCase):
"""Tests for letsencrypt.client.Client."""
def setUp(self):
self.config = mock.MagicMock(
no_verify_ssl=False, config_dir="/etc/letsencrypt")
# pylint: disable=star-args
self.account = mock.MagicMock(**{"key.pem": KEY})
from letsencrypt.client import Client
with mock.patch("letsencrypt.client.acme_client.Client") as acme:
self.acme_client = acme
self.acme = acme.return_value = mock.MagicMock()
self.client = Client(
config=self.config, account_=self.account,
dv_auth=None, installer=None)
def test_init_acme_verify_ssl(self):
self.acme_client.assert_called_once_with(
directory=mock.ANY, key=mock.ANY, verify_ssl=True)
def _mock_obtain_certificate(self):
self.client.auth_handler = mock.MagicMock()
self.acme.request_issuance.return_value = mock.sentinel.certr
self.acme.fetch_chain.return_value = mock.sentinel.chain
def _check_obtain_certificate(self):
self.client.auth_handler.get_authorizations.assert_called_once_with(
["example.com", "www.example.com"])
self.acme.request_issuance.assert_called_once_with(
jose.ComparableX509(OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, CSR_SAN)),
self.client.auth_handler.get_authorizations())
self.acme.fetch_chain.assert_called_once_with(mock.sentinel.certr)
def test_obtain_certificate_from_csr(self):
self._mock_obtain_certificate()
self.assertEqual(
(mock.sentinel.certr, mock.sentinel.chain),
self.client.obtain_certificate_from_csr(le_util.CSR(
form="der", file=None, data=CSR_SAN)))
self._check_obtain_certificate()
@mock.patch("letsencrypt.client.crypto_util")
def test_obtain_certificate(self, mock_crypto_util):
self._mock_obtain_certificate()
csr = le_util.CSR(form="der", file=None, data=CSR_SAN)
mock_crypto_util.init_save_csr.return_value = csr
mock_crypto_util.init_save_key.return_value = mock.sentinel.key
domains = ["example.com", "www.example.com"]
self.assertEqual(
self.client.obtain_certificate(domains),
(mock.sentinel.certr, mock.sentinel.chain, mock.sentinel.key, csr))
mock_crypto_util.init_save_key.assert_called_once_with(
self.config.rsa_key_size, self.config.key_dir)
mock_crypto_util.init_save_csr.assert_called_once_with(
mock.sentinel.key, domains, self.config.cert_dir)
self._check_obtain_certificate()
@mock.patch("letsencrypt.client.zope.component.getUtility")
def test_report_renewal_status(self, mock_zope):
# pylint: disable=protected-access
cert = mock.MagicMock()
cert.configuration = configobj.ConfigObj()
cert.cli_config = configuration.RenewerConfiguration(self.config)
cert.configuration["autorenew"] = "True"
cert.configuration["autodeploy"] = "True"
self.client._report_renewal_status(cert)
msg = mock_zope().add_message.call_args[0][0]
self.assertTrue("renewal and deployment has been" in msg)
self.assertTrue(cert.cli_config.renewal_configs_dir in msg)
cert.configuration["autorenew"] = "False"
self.client._report_renewal_status(cert)
msg = mock_zope().add_message.call_args[0][0]
self.assertTrue("deployment but not automatic renewal" in msg)
self.assertTrue(cert.cli_config.renewal_configs_dir in msg)
cert.configuration["autodeploy"] = "False"
self.client._report_renewal_status(cert)
msg = mock_zope().add_message.call_args[0][0]
self.assertTrue("renewal and deployment has not" in msg)
self.assertTrue(cert.cli_config.renewal_configs_dir in msg)
cert.configuration["autorenew"] = "True"
self.client._report_renewal_status(cert)
msg = mock_zope().add_message.call_args[0][0]
self.assertTrue("renewal but not automatic deployment" in msg)
self.assertTrue(cert.cli_config.renewal_configs_dir in msg)
def test_save_certificate(self):
certs = ["matching_cert.pem", "cert.pem", "cert-san.pem"]
tmp_path = tempfile.mkdtemp()
os.chmod(tmp_path, 0o755) # TODO: really??
certr = mock.MagicMock(body=test_util.load_cert(certs[0]))
cert1 = test_util.load_cert(certs[1])
cert2 = test_util.load_cert(certs[2])
candidate_cert_path = os.path.join(tmp_path, "certs", "cert.pem")
candidate_chain_path = os.path.join(tmp_path, "chains", "chain.pem")
cert_path, chain_path = self.client.save_certificate(
certr, [cert1, cert2], candidate_cert_path, candidate_chain_path)
self.assertEqual(os.path.dirname(cert_path),
os.path.dirname(candidate_cert_path))
self.assertEqual(os.path.dirname(chain_path),
os.path.dirname(candidate_chain_path))
with open(cert_path, "r") as cert_file:
cert_contents = cert_file.read()
self.assertEqual(cert_contents, test_util.load_vector(certs[0]))
with open(chain_path, "r") as chain_file:
chain_contents = chain_file.read()
self.assertEqual(chain_contents, test_util.load_vector(certs[1]) +
test_util.load_vector(certs[2]))
shutil.rmtree(tmp_path)
class RollbackTest(unittest.TestCase):
"""Tests for letsencrypt.client.rollback."""
def setUp(self):
self.m_install = mock.MagicMock()
@classmethod
def _call(cls, checkpoints, side_effect):
from letsencrypt.client import rollback
with mock.patch("letsencrypt.client"
".display_ops.pick_installer") as mock_pick_installer:
mock_pick_installer.side_effect = side_effect
rollback(None, checkpoints, {}, mock.MagicMock())
def test_no_problems(self):
self._call(1, self.m_install)
self.assertEqual(self.m_install().rollback_checkpoints.call_count, 1)
self.assertEqual(self.m_install().restart.call_count, 1)
def test_no_installer(self):
self._call(1, None) # Just make sure no exceptions are raised
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/test/test_popen2.py | 45 | 4315 | """Test script for popen2.py"""
import warnings
warnings.filterwarnings("ignore", ".*popen2 module is deprecated.*",
DeprecationWarning)
warnings.filterwarnings("ignore", "os\.popen. is deprecated.*",
DeprecationWarning)
import os
import sys
import unittest
import popen2
from test.test_support import run_unittest, reap_children
if sys.platform[:4] == 'beos' or sys.platform[:6] == 'atheos':
# Locks get messed up or something. Generally we're supposed
# to avoid mixing "posix" fork & exec with native threads, and
# they may be right about that after all.
raise unittest.SkipTest("popen2() doesn't work on " + sys.platform)
# if we don't have os.popen, check that
# we have os.fork. if not, skip the test
# (by raising an ImportError)
try:
from os import popen
del popen
except ImportError:
from os import fork
del fork
class Popen2Test(unittest.TestCase):
cmd = "cat"
if os.name == "nt":
cmd = "more"
teststr = "ab cd\n"
# "more" doesn't act the same way across Windows flavors,
# sometimes adding an extra newline at the start or the
# end. So we strip whitespace off both ends for comparison.
expected = teststr.strip()
def setUp(self):
popen2._cleanup()
# When the test runs, there shouldn't be any open pipes
self.assertFalse(popen2._active, "Active pipes when test starts" +
repr([c.cmd for c in popen2._active]))
def tearDown(self):
for inst in popen2._active:
inst.wait()
popen2._cleanup()
self.assertFalse(popen2._active, "popen2._active not empty")
# The os.popen*() API delegates to the subprocess module (on Unix)
import subprocess
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
reap_children()
def validate_output(self, teststr, expected_out, r, w, e=None):
w.write(teststr)
w.close()
got = r.read()
self.assertEqual(expected_out, got.strip(), "wrote %r read %r" %
(teststr, got))
if e is not None:
got = e.read()
self.assertFalse(got, "unexpected %r on stderr" % got)
def test_popen2(self):
r, w = popen2.popen2(self.cmd)
self.validate_output(self.teststr, self.expected, r, w)
def test_popen3(self):
if os.name == 'posix':
r, w, e = popen2.popen3([self.cmd])
self.validate_output(self.teststr, self.expected, r, w, e)
r, w, e = popen2.popen3(self.cmd)
self.validate_output(self.teststr, self.expected, r, w, e)
def test_os_popen2(self):
# same test as test_popen2(), but using the os.popen*() API
if os.name == 'posix':
w, r = os.popen2([self.cmd])
self.validate_output(self.teststr, self.expected, r, w)
w, r = os.popen2(["echo", self.teststr])
got = r.read()
self.assertEqual(got, self.teststr + "\n")
w, r = os.popen2(self.cmd)
self.validate_output(self.teststr, self.expected, r, w)
def test_os_popen3(self):
# same test as test_popen3(), but using the os.popen*() API
if os.name == 'posix':
w, r, e = os.popen3([self.cmd])
self.validate_output(self.teststr, self.expected, r, w, e)
w, r, e = os.popen3(["echo", self.teststr])
got = r.read()
self.assertEqual(got, self.teststr + "\n")
got = e.read()
self.assertFalse(got, "unexpected %r on stderr" % got)
w, r, e = os.popen3(self.cmd)
self.validate_output(self.teststr, self.expected, r, w, e)
def test_os_popen4(self):
if os.name == 'posix':
w, r = os.popen4([self.cmd])
self.validate_output(self.teststr, self.expected, r, w)
w, r = os.popen4(["echo", self.teststr])
got = r.read()
self.assertEqual(got, self.teststr + "\n")
w, r = os.popen4(self.cmd)
self.validate_output(self.teststr, self.expected, r, w)
def test_main():
run_unittest(Popen2Test)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
mmalyska/eve-wspace | evewspace/Map/views.py | 1 | 36404 | # Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime, timedelta
import json
import csv
import pytz
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.template.response import TemplateResponse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.models import Group, Permission
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
from Map.models import *
from Map import utils, signals
from core.utils import get_config
# Decorator to check map permissions. Takes request and map_id
# Permissions are 0 = None, 1 = View, 2 = Change
# When used without a permission=x specification, requires Change access
def require_map_permission(permission=2):
def _dec(view_func):
def _view(request, map_id, *args, **kwargs):
current_map = get_object_or_404(Map, pk=map_id)
if current_map.get_permission(request.user) < permission:
raise PermissionDenied
else:
return view_func(request, map_id, *args, **kwargs)
_view.__name__ = view_func.__name__
_view.__doc__ = view_func.__doc__
_view.__dict__ = view_func.__dict__
return _view
return _dec
@login_required
@require_map_permission(permission=1)
def get_map(request, map_id):
"""Get the map and determine if we have permissions to see it.
If we do, then return a TemplateResponse for the map. If map does not
exist, return 404. If we don't have permission, return PermissionDenied.
"""
current_map = get_object_or_404(Map, pk=map_id)
context = {
'map': current_map,
'access': current_map.get_permission(request.user),
}
return TemplateResponse(request, 'map.html', context)
@login_required
@require_map_permission(permission=1)
def map_checkin(request, map_id):
# Initialize json return dict
json_values = {}
current_map = get_object_or_404(Map, pk=map_id)
# AJAX requests should post a JSON datetime called loadtime
# back that we use to get recent logs.
if 'loadtime' not in request.POST:
return HttpResponse(json.dumps({'error': "No loadtime"}),
mimetype="application/json")
time_string = request.POST['loadtime']
load_time = datetime.strptime(time_string, "%Y-%m-%d %H:%M:%S.%f")
load_time = load_time.replace(tzinfo=pytz.utc)
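# (For reference, an acceptable loadtime value looks like
# '2013-05-01 12:34:56.789000'; the value shown is illustrative.)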
if request.is_igb_trusted:
dialog_html = _checkin_igb_trusted(request, current_map)
if dialog_html is not None:
json_values.update({'dialogHTML': dialog_html})
log_list = MapLog.objects.filter(timestamp__gt=load_time,
visible=True,
map=current_map)
log_string = render_to_string('log_div.html', {'logs': log_list})
json_values.update({'logs': log_string})
return HttpResponse(json.dumps(json_values), mimetype="application/json")
@login_required
@require_map_permission(permission=1)
def map_refresh(request, map_id):
"""
Returns an HttpResponse with the updated systemJSON for an asynchronous
map refresh.
"""
if not request.is_ajax():
raise PermissionDenied
current_map = get_object_or_404(Map, pk=map_id)
result = [
datetime.strftime(datetime.now(pytz.utc),
"%Y-%m-%d %H:%M:%S.%f"),
utils.MapJSONGenerator(current_map,
request.user).get_systems_json()
]
return HttpResponse(json.dumps(result))
def _checkin_igb_trusted(request, current_map):
"""
Runs the specific code for the case that the request came from an igb that
trusts us, returns None if no further action is required, returns a string
containing the html for a system add dialog if we detect that a new system
needs to be added
"""
current_system = System.objects.get(name=request.eve_systemname)
old_system = None
result = None
threshold = datetime.now(pytz.utc) - timedelta(minutes=5)
recently_active = request.user.locations.filter(
timestamp__gt=threshold,
charactername=request.eve_charname
).all()
if recently_active.count():
old_system = request.user.locations.get(
charactername=request.eve_charname
).system
#Conditions for the system to be automagically added to the map.
if (
old_system in current_map
and current_system not in current_map
and not _is_moving_from_kspace_to_kspace(old_system, current_system)
and recently_active.count()
):
context = {
'oldsystem': current_map.systems.filter(
system=old_system).all()[0],
'newsystem': current_system,
'wormholes': utils.get_possible_wh_types(old_system,
current_system),
}
result = render_to_string('igb_system_add_dialog.html', context,
context_instance=RequestContext(request))
current_system.add_active_pilot(request.user, request.eve_charname,
request.eve_shipname,
request.eve_shiptypename)
return result
def _is_moving_from_kspace_to_kspace(old_system, current_system):
"""
returns whether we are moving through kspace
:param old_system:
:param current_system:
:return:
"""
return old_system.is_kspace() and current_system.is_kspace()
def get_system_context(ms_id):
map_system = get_object_or_404(MapSystem, pk=ms_id)
#If map_system represents a k-space system get the relevant KSystem object
if map_system.system.is_kspace():
system = map_system.system.ksystem
else:
system = map_system.system.wsystem
scan_threshold = datetime.now(pytz.utc) - timedelta(
hours=int(get_config("MAP_SCAN_WARNING", None).value)
)
interest_offset = int(get_config("MAP_INTEREST_TIME", None).value)
interest_threshold = (datetime.now(pytz.utc)
- timedelta(minutes=interest_offset))
scan_warning = system.lastscanned < scan_threshold
if interest_offset > 0:
interest = (map_system.interesttime and
map_system.interesttime > interest_threshold)
else:
interest = map_system.interesttime
# Include any SiteTracker fleets that are active
st_fleets = map_system.system.stfleets.filter(ended=None).all()
return {'system': system, 'mapsys': map_system,
'scanwarning': scan_warning, 'isinterest': interest,
'stfleets': st_fleets}
@login_required
@require_map_permission(permission=2)
def add_system(request, map_id):
"""
AJAX view to add a system to a current_map. Requires POST containing:
topMsID: map_system ID of the parent map_system
bottomSystem: Name of the new system
topType: WormholeType name of the parent side
bottomType: WormholeType name of the new side
timeStatus: Wormhole time status integer value
massStatus: Wormhole mass status integer value
topBubbled: 1 if Parent side bubbled
bottomBubbled: 1 if new side bubbled
friendlyName: Friendly name for the new map_system
"""
if not request.is_ajax():
raise PermissionDenied
try:
# Prepare data
current_map = Map.objects.get(pk=map_id)
top_ms = MapSystem.objects.get(pk=request.POST.get('topMsID'))
bottom_sys = System.objects.get(
name=request.POST.get('bottomSystem')
)
top_type = WormholeType.objects.get(
name=request.POST.get('topType')
)
bottom_type = WormholeType.objects.get(
name=request.POST.get('bottomType')
)
time_status = int(request.POST.get('timeStatus'))
mass_status = int(request.POST.get('massStatus'))
top_bubbled = "1" == request.POST.get('topBubbled')
bottom_bubbled = "1" == request.POST.get('bottomBubbled')
# Add System
bottom_ms = current_map.add_system(
request.user, bottom_sys,
request.POST.get('friendlyName'), top_ms
)
# Add Wormhole
bottom_ms.connect_to(top_ms, top_type, bottom_type, top_bubbled,
bottom_bubbled, time_status, mass_status)
return HttpResponse()
except ObjectDoesNotExist:
return HttpResponse(status=400)
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def remove_system(request, map_id, ms_id):
"""
Removes the supplied map_system from a map.
"""
system = get_object_or_404(MapSystem, pk=ms_id)
system.remove_system(request.user)
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_details(request, map_id, ms_id):
"""
Returns a html div representing details of the System given by ms_id in
map map_id
"""
if not request.is_ajax():
raise PermissionDenied
return render(request, 'system_details.html', get_system_context(ms_id))
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_menu(request, map_id, ms_id):
"""
Returns the html for system menu
"""
if not request.is_ajax():
raise PermissionDenied
return render(request, 'system_menu.html', get_system_context(ms_id))
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def system_tooltips(request, map_id):
"""
Returns the system tooltips for map_id
"""
if not request.is_ajax():
raise PermissionDenied
ms_list = MapSystem.objects.filter(map_id=map_id)\
.select_related('parent_wormhole', 'system__region')\
.iterator()
return render(request, 'system_tooltip.html', {'map_systems': ms_list})
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=1)
def wormhole_tooltips(request, map_id):
"""Takes a POST request from AJAX with a Wormhole ID and renders the
wormhole tooltip for that ID to response.
"""
if not request.is_ajax():
raise PermissionDenied
cur_map = get_object_or_404(Map, pk=map_id)
ms_list = MapSystem.objects.filter(map=cur_map).all()
whs = Wormhole.objects.filter(top__in=ms_list).all()
return render(request, 'wormhole_tooltip.html', {'wormholes': whs})
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def collapse_system(request, map_id, ms_id):
"""
Mark the system as collapsed.
"""
if not request.is_ajax():
raise PermissionDenied
map_sys = get_object_or_404(MapSystem, pk=ms_id)
parent_wh = map_sys.parent_wormhole
parent_wh.collapsed = True
parent_wh.save()
return HttpResponse()
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def resurrect_system(request, map_id, ms_id):
"""
Unmark the system as collapsed.
"""
if not request.is_ajax():
raise PermissionDenied
map_sys = get_object_or_404(MapSystem, pk=ms_id)
parent_wh = map_sys.parent_wormhole
parent_wh.collapsed = False
parent_wh.save()
return HttpResponse()
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def mark_scanned(request, map_id, ms_id):
"""Takes a POST request from AJAX with a system ID and marks that system
as scanned.
"""
if request.is_ajax():
map_system = get_object_or_404(MapSystem, pk=ms_id)
map_system.system.lastscanned = datetime.now(pytz.utc)
map_system.system.save()
return HttpResponse()
else:
raise PermissionDenied
# noinspection PyUnusedLocal
@login_required()
def manual_location(request, map_id, ms_id):
"""Takes a POST request form AJAX with a System ID and marks the user as
being active in that system.
"""
if request.is_ajax():
map_system = get_object_or_404(MapSystem, pk=ms_id)
map_system.system.add_active_pilot(request.user, "OOG Browser",
"Unknown", "Uknown")
return HttpResponse()
else:
raise PermissionDenied
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=2)
def set_interest(request, map_id, ms_id):
"""Takes a POST request from AJAX with an action and marks that system
as having either utcnow or None as interesttime. The action can be either
"set" or "remove".
"""
if request.is_ajax():
action = request.POST.get("action", "none")
if action == "none":
raise Http404
system = get_object_or_404(MapSystem, pk=ms_id)
if action == "set":
system.interesttime = datetime.now(pytz.utc)
system.save()
return HttpResponse()
if action == "remove":
system.interesttime = None
system.save()
return HttpResponse()
return HttpResponse(status=418)
else:
raise PermissionDenied
def _update_sig_from_tsv(signature, row):
COL_SIG = 0
COL_SIG_TYPE = 3
COL_SIG_GROUP = 2
COL_SIG_SCAN_GROUP = 1
COL_SIG_STRENGTH = 4
COL_DISTANCE = 5
info = row[COL_SIG_TYPE]
updated = False
sig_type = None
if (row[COL_SIG_SCAN_GROUP] == "Cosmic Signature"
or row[COL_SIG_SCAN_GROUP] == "Cosmic Anomaly"
):
try:
sig_type = SignatureType.objects.get(
longname=row[COL_SIG_GROUP])
        except ObjectDoesNotExist:
sig_type = None
else:
sig_type = None
if info and sig_type:
updated = True
if sig_type:
signature.sigtype = sig_type
signature.updated = updated or signature.updated
if info:
signature.info = info
return signature
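# Example (hypothetical) scanner-paste row, tab-separated, matching the
# column constants above:
#   ABC-123 | Cosmic Signature | Wormhole | Unstable Wormhole | 12.5% | 4.52 AU
# Only the scan group (1), group (2) and type (3) columns are consumed here;
# the group is matched against SignatureType.longname and the type becomes
# the signature's info text.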
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def bulk_sig_import(request, map_id, ms_id):
"""
GET gets a bulk signature import form. POST processes it, creating sigs
with blank info and type for each sig ID detected.
"""
if not request.is_ajax():
raise PermissionDenied
map_system = get_object_or_404(MapSystem, pk=ms_id)
k = 0
if request.method == 'POST':
reader = csv.reader(request.POST.get('paste', '').decode(
'utf-8').splitlines(), delimiter="\t")
COL_SIG = 0
COL_STRENGTH = 4
for row in reader:
# To prevent pasting of POSes into the sig importer, make sure
# the strength column is present
try:
test_var = row[COL_STRENGTH]
except IndexError:
return HttpResponse('A valid signature paste was not found',
status=400)
if k < 75:
sig_id = utils.convert_signature_id(row[COL_SIG])
sig = Signature.objects.get_or_create(sigid=sig_id,
modified_by=request.user,
system=map_system.system)[0]
sig = _update_sig_from_tsv(sig, row)
sig.modified_by = request.user
sig.save()
signals.signature_update.send_robust(sig, user=request.user,
map=map_system.map,
signal_strength=row[COL_STRENGTH])
k += 1
map_system.map.add_log(request.user,
"Imported %s signatures for %s(%s)."
% (k, map_system.system.name,
map_system.friendlyname), True)
map_system.system.lastscanned = datetime.now(pytz.utc)
map_system.system.save()
return HttpResponse()
else:
return TemplateResponse(request, "bulk_sig_form.html",
{'mapsys': map_system})
@login_required
@require_map_permission(permission=2)
def toggle_sig_owner(request, map_id, ms_id, sig_id=None):
if not request.is_ajax():
raise PermissionDenied
sig = get_object_or_404(Signature, pk=sig_id)
sig.toggle_ownership(request.user)
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def edit_signature(request, map_id, ms_id, sig_id=None):
"""
GET gets a pre-filled edit signature form.
POST updates the signature with the new information and returns a
blank add form.
"""
if not request.is_ajax():
raise PermissionDenied
map_system = get_object_or_404(MapSystem, pk=ms_id)
action = None
    if sig_id is not None:
signature = get_object_or_404(Signature, pk=sig_id)
created = False
if not signature.owned_by:
signature.toggle_ownership(request.user)
if request.method == 'POST':
form = SignatureForm(request.POST)
if form.is_valid():
ingame_id = utils.convert_signature_id(form.cleaned_data['sigid'])
            if sig_id is None:
signature, created = Signature.objects.get_or_create(
system=map_system.system, sigid=ingame_id)
signature.sigid = ingame_id
signature.updated = True
signature.info = form.cleaned_data['info']
if request.POST['sigtype'] != '':
sigtype = form.cleaned_data['sigtype']
else:
sigtype = None
signature.sigtype = sigtype
signature.modified_by = request.user
signature.save()
map_system.system.lastscanned = datetime.now(pytz.utc)
map_system.system.save()
if created:
action = 'Created'
else:
action = 'Updated'
if signature.owned_by:
signature.toggle_ownership(request.user)
map_system.map.add_log(request.user,
"%s signature %s in %s (%s)" %
(action, signature.sigid, map_system.system.name,
map_system.friendlyname))
signals.signature_update.send_robust(signature, user=request.user,
map=map_system.map)
else:
return TemplateResponse(request, "edit_sig_form.html",
{'form': form,
'system': map_system, 'sig': signature})
form = SignatureForm()
    if sig_id is None or action == 'Updated':
return TemplateResponse(request, "add_sig_form.html",
{'form': form, 'system': map_system})
else:
return TemplateResponse(request, "edit_sig_form.html",
{'form': SignatureForm(instance=signature),
'system': map_system, 'sig': signature})
# noinspection PyUnusedLocal
@login_required()
@require_map_permission(permission=1)
def get_signature_list(request, map_id, ms_id):
"""
Determines the proper escalationThreshold time and renders
system_signatures.html
"""
if not request.is_ajax():
raise PermissionDenied
system = get_object_or_404(MapSystem, pk=ms_id)
escalation_downtimes = int(get_config("MAP_ESCALATION_BURN",
request.user).value)
return TemplateResponse(request, "system_signatures.html",
{'system': system,
'downtimes': escalation_downtimes})
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def mark_signature_cleared(request, map_id, ms_id, sig_id):
"""
Marks a signature as having its NPCs cleared.
"""
if not request.is_ajax():
raise PermissionDenied
sig = get_object_or_404(Signature, pk=sig_id)
sig.clear_rats()
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def escalate_site(request, map_id, ms_id, sig_id):
"""
Marks a site as having been escalated.
"""
if not request.is_ajax():
raise PermissionDenied
sig = get_object_or_404(Signature, pk=sig_id)
sig.escalate()
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def activate_signature(request, map_id, ms_id, sig_id):
"""
Marks a site activated.
"""
if not request.is_ajax():
raise PermissionDenied
sig = get_object_or_404(Signature, pk=sig_id)
sig.activate()
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def delete_signature(request, map_id, ms_id, sig_id):
"""
Deletes a signature.
"""
if not request.is_ajax():
raise PermissionDenied
map_system = get_object_or_404(MapSystem, pk=ms_id)
sig = get_object_or_404(Signature, pk=sig_id)
sig.delete()
map_system.map.add_log(request.user, "Deleted signature %s in %s (%s)."
% (sig.sigid, map_system.system.name,
map_system.friendlyname))
return HttpResponse()
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def manual_add_system(request, map_id, ms_id):
"""
A GET request gets a blank add system form with the provided MapSystem
as top system. The form is then POSTed to the add_system view.
"""
top_map_system = get_object_or_404(MapSystem, pk=ms_id)
systems = System.objects.all()
wormholes = WormholeType.objects.all()
return render(request, 'add_system_box.html',
{'topMs': top_map_system, 'sysList': systems,
'whList': wormholes})
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def edit_system(request, map_id, ms_id):
"""
A GET request gets the edit system dialog pre-filled with current
information.
A POST request saves the posted data as the new information.
POST values are friendlyName, info, and occupied.
"""
if not request.is_ajax():
raise PermissionDenied
map_system = get_object_or_404(MapSystem, pk=ms_id)
if request.method == 'GET':
occupied = map_system.system.occupied.replace("<br />", "\n")
info = map_system.system.info.replace("<br />", "\n")
return TemplateResponse(request, 'edit_system.html',
{'mapsys': map_system,
'occupied': occupied, 'info': info}
)
if request.method == 'POST':
map_system.friendlyname = request.POST.get('friendlyName', '')
if (
(map_system.system.info != request.POST.get('info', '')) or
(map_system.system.occupied !=
request.POST.get('occupied', ''))
):
map_system.system.info = request.POST.get('info', '')
map_system.system.occupied = request.POST.get('occupied', '')
map_system.system.save()
map_system.save()
map_system.map.add_log(request.user, "Edited System: %s (%s)"
% (map_system.system.name,
map_system.friendlyname))
return HttpResponse()
raise PermissionDenied
# noinspection PyUnusedLocal
@login_required
@require_map_permission(permission=2)
def edit_wormhole(request, map_id, wh_id):
"""
A GET request gets the edit wormhole dialog pre-filled with current info.
A POST request saves the posted data as the new info.
POST values are topType, bottomType, massStatus, timeStatus, topBubbled,
and bottomBubbled.
"""
if not request.is_ajax():
raise PermissionDenied
wormhole = get_object_or_404(Wormhole, pk=wh_id)
if request.method == 'GET':
return TemplateResponse(request, 'edit_wormhole.html',
{'wormhole': wormhole}
)
if request.method == 'POST':
wormhole.mass_status = int(request.POST.get('massStatus', 0))
wormhole.time_status = int(request.POST.get('timeStatus', 0))
wormhole.top_type = get_object_or_404(
WormholeType,
name=request.POST.get('topType', 'K162')
)
wormhole.bottom_type = get_object_or_404(
WormholeType,
name=request.POST.get('bottomType', 'K162')
)
wormhole.top_bubbled = request.POST.get('topBubbled', '1') == '1'
wormhole.bottom_bubbled = request.POST.get('bottomBubbled', '1') == '1'
wormhole.save()
wormhole.map.add_log(request.user,
("Updated the wormhole between %s(%s) and %s(%s)."
% (wormhole.top.system.name,
wormhole.top.friendlyname,
wormhole.bottom.system.name,
wormhole.bottom.friendlyname)))
return HttpResponse()
    raise PermissionDenied
@permission_required('Map.add_map')
def create_map(request):
"""
This function creates a map and then redirects to the new map.
"""
if request.method == 'POST':
form = MapForm(request.POST)
if form.is_valid():
new_map = form.save()
new_map.add_log(request.user, "Created the %s map." % new_map.name)
new_map.add_system(request.user, new_map.root, "Root", None)
return HttpResponseRedirect(reverse('Map.views.get_map',
kwargs={'map_id': new_map.pk}))
else:
return TemplateResponse(request, 'new_map.html', {'form': form})
else:
        form = MapForm()
return TemplateResponse(request, 'new_map.html', {'form': form, })
def _sort_destinations(destinations):
"""
    Takes a list of destination tuples and returns a new list, sorted by jump count (tuple index 1).
"""
results = []
onVal = 0
for dest in destinations:
if len(results) == 0:
results.append(dest)
else:
while onVal <= len(results):
if onVal == len(results):
results.append(dest)
onVal = 0
break
else:
if dest[1] > results[onVal][1]:
onVal += 1
else:
results.insert(onVal, dest)
onVal = 0
break
return results
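# The hand-rolled insertion sort above orders destinations by jump count
# (tuple index 1). A minimal standard-library sketch with the same effect,
# up to the ordering of tied entries (shown only for comparison; the views
# below still call the original):
#
#   def _sort_destinations(destinations):
#       return sorted(destinations, key=lambda dest: dest[1])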
# noinspection PyUnusedLocal
@require_map_permission(permission=1)
def destination_list(request, map_id, ms_id):
"""
Returns the destinations of interest tuple for K-space systems and
a blank response for w-space systems.
"""
if not request.is_ajax():
raise PermissionDenied
destinations = Destination.objects.filter(Q(user=None) |
Q(user=request.user))
map_system = get_object_or_404(MapSystem, pk=ms_id)
try:
system = KSystem.objects.get(pk=map_system.system.pk)
rf = utils.RouteFinder()
result = []
for destination in destinations:
result.append((destination.system,
rf.route_length(system,
destination.system) - 1,
round(rf.ly_distance(system,
destination.system), 3)
))
except ObjectDoesNotExist:
return HttpResponse()
return render(request, 'system_destinations.html',
{'system': system, 'destinations': _sort_destinations(result)})
# noinspection PyUnusedLocal
def site_spawns(request, map_id, ms_id, sig_id):
"""
Returns the spawns for a given signature and system.
"""
sig = get_object_or_404(Signature, pk=sig_id)
spawns = SiteSpawn.objects.filter(sigtype=sig.sigtype).all()
    if spawns and spawns[0].sysclass != 0:
spawns = SiteSpawn.objects.filter(sigtype=sig.sigtype,
sysclass=sig.system.sysclass).all()
return render(request, 'site_spawns.html', {'spawns': spawns})
#########################
#Settings Views #
#########################
@permission_required('Map.map_admin')
def general_settings(request):
"""
Returns and processes the general settings section.
"""
npc_threshold = get_config("MAP_NPC_THRESHOLD", None)
pvp_threshold = get_config("MAP_PVP_THRESHOLD", None)
scan_threshold = get_config("MAP_SCAN_WARNING", None)
interest_time = get_config("MAP_INTEREST_TIME", None)
escalation_burn = get_config("MAP_ESCALATION_BURN", None)
if request.method == "POST":
scan_threshold.value = int(request.POST['scanwarn'])
interest_time.value = int(request.POST['interesttimeout'])
pvp_threshold.value = int(request.POST['pvpthreshold'])
npc_threshold.value = int(request.POST['npcthreshold'])
escalation_burn.value = int(request.POST['escdowntimes'])
scan_threshold.save()
interest_time.save()
pvp_threshold.save()
npc_threshold.save()
escalation_burn.save()
return HttpResponse()
return TemplateResponse(
request, 'general_settings.html',
{'npcthreshold': npc_threshold.value,
'pvpthreshold': pvp_threshold.value,
'scanwarn': scan_threshold.value,
'interesttimeout': interest_time.value,
'escdowntimes': escalation_burn.value}
)
@permission_required('Map.map_admin')
def sites_settings(request):
"""
Returns the site spawns section.
"""
return TemplateResponse(request, 'spawns_settings.html',
{'spawns': SiteSpawn.objects.all()})
@permission_required('Map.map_admin')
def add_spawns(request):
"""
Adds a site spawn.
"""
return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def delete_spawns(request, spawn_id):
"""
Deletes a site spawn.
"""
return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def edit_spawns(request, spawn_id):
"""
Alters a site spawn.
"""
return HttpResponse()
def destination_settings(request, user=None):
"""
Returns the destinations section.
"""
if not user:
dest_list = Destination.objects.filter(user=None)
else:
dest_list = Destination.objects.filter(Q(user=None) |
Q(user=request.user))
return TemplateResponse(request, 'dest_settings.html',
{'destinations': dest_list,
'user_context': user})
def add_destination(request, dest_user=None):
"""
Add a destination.
"""
if not dest_user and not request.user.has_perm('Map.map_admin'):
raise PermissionDenied
system = get_object_or_404(KSystem, name=request.POST['systemName'])
Destination(system=system, user=dest_user).save()
return HttpResponse()
def add_personal_destination(request):
"""
Add a personal destination.
"""
return add_destination(request, dest_user=request.user)
def delete_destination(request, dest_id):
"""
Deletes a destination.
"""
destination = get_object_or_404(Destination, pk=dest_id)
if not request.user.has_perm('Map.map_admin') and not destination.user:
raise PermissionDenied
if destination.user and not request.user == destination.user:
raise PermissionDenied
destination.delete()
return HttpResponse()
@permission_required('Map.map_admin')
def sigtype_settings(request):
"""
Returns the signature types section.
"""
return TemplateResponse(request, 'sigtype_settings.html',
{'sigtypes': SignatureType.objects.all()})
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def edit_sigtype(request, sigtype_id):
"""
Alters a signature type.
"""
return HttpResponse()
@permission_required('Map.map_admin')
def add_sigtype(request):
"""
Adds a signature type.
"""
return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def delete_sigtype(request, sigtype_id):
"""
Deletes a signature type.
"""
return HttpResponse()
@permission_required('Map.map_admin')
def map_settings(request, map_id):
"""
Returns and processes the settings section for a map.
"""
subject = get_object_or_404(Map, pk=map_id)
return TemplateResponse(request, 'map_settings_single.html',
{'map': subject})
@permission_required('Map.map_admin')
def delete_map(request, map_id):
"""
Deletes a map.
"""
subject = get_object_or_404(Map, pk=map_id)
subject.delete()
return HttpResponse()
# noinspection PyUnusedLocal
@permission_required('Map.map_admin')
def edit_map(request, map_id):
"""
Alters a map.
"""
return HttpResponse('[]')
@permission_required('Map.map_admin')
def global_permissions(request):
"""
Returns and processes the global permissions section.
"""
if not request.is_ajax():
raise PermissionDenied
group_list = []
admin_perm = Permission.objects.get(codename="map_admin")
unrestricted_perm = Permission.objects.get(codename="map_unrestricted")
add_map_perm = Permission.objects.get(codename="add_map")
if request.method == "POST":
for group in Group.objects.all():
if request.POST.get('%s_unrestricted' % group.pk, None):
if unrestricted_perm not in group.permissions.all():
group.permissions.add(unrestricted_perm)
else:
if unrestricted_perm in group.permissions.all():
group.permissions.remove(unrestricted_perm)
if request.POST.get('%s_add' % group.pk, None):
if add_map_perm not in group.permissions.all():
group.permissions.add(add_map_perm)
else:
if add_map_perm in group.permissions.all():
group.permissions.remove(add_map_perm)
if request.POST.get('%s_admin' % group.pk, None):
if admin_perm not in group.permissions.all():
group.permissions.add(admin_perm)
else:
if admin_perm in group.permissions.all():
group.permissions.remove(admin_perm)
return HttpResponse()
for group in Group.objects.all():
entry = {
'group': group, 'admin': admin_perm in group.permissions.all(),
'unrestricted': unrestricted_perm in group.permissions.all(),
'add_map': add_map_perm in group.permissions.all()
}
group_list.append(entry)
return TemplateResponse(request, 'global_perms.html',
{'groups': group_list})
| gpl-3.0 |
arshaver/django-photologue | urls.py | 23 | 2931 | from django.conf import settings
from django.conf.urls.defaults import *
from models import *
# Number of random images from the gallery to display.
SAMPLE_SIZE = ":%s" % getattr(settings, 'GALLERY_SAMPLE_SIZE', 5)
# galleries
gallery_args = {'date_field': 'date_added', 'allow_empty': True, 'queryset': Gallery.objects.filter(is_public=True), 'extra_context':{'sample_size':SAMPLE_SIZE}}
urlpatterns = patterns('django.views.generic.date_based',
url(r'^gallery/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/(?P<slug>[\-\d\w]+)/$', 'object_detail', {'date_field': 'date_added', 'slug_field': 'title_slug', 'queryset': Gallery.objects.filter(is_public=True), 'extra_context':{'sample_size':SAMPLE_SIZE}}, name='pl-gallery-detail'),
url(r'^gallery/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/$', 'archive_day', gallery_args, name='pl-gallery-archive-day'),
url(r'^gallery/(?P<year>\d{4})/(?P<month>[a-z]{3})/$', 'archive_month', gallery_args, name='pl-gallery-archive-month'),
url(r'^gallery/(?P<year>\d{4})/$', 'archive_year', gallery_args, name='pl-gallery-archive-year'),
url(r'^gallery/?$', 'archive_index', gallery_args, name='pl-gallery-archive'),
)
urlpatterns += patterns('django.views.generic.list_detail',
url(r'^gallery/(?P<slug>[\-\d\w]+)/$', 'object_detail', {'slug_field': 'title_slug', 'queryset': Gallery.objects.filter(is_public=True), 'extra_context':{'sample_size':SAMPLE_SIZE}}, name='pl-gallery'),
url(r'^gallery/page/(?P<page>[0-9]+)/$', 'object_list', {'queryset': Gallery.objects.filter(is_public=True), 'allow_empty': True, 'paginate_by': 5, 'extra_context':{'sample_size':SAMPLE_SIZE}}, name='pl-gallery-list'),
)
# photographs
photo_args = {'date_field': 'date_added', 'allow_empty': True, 'queryset': Photo.objects.filter(is_public=True)}
urlpatterns += patterns('django.views.generic.date_based',
url(r'^photo/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/(?P<slug>[\-\d\w]+)/$', 'object_detail', {'date_field': 'date_added', 'slug_field': 'title_slug', 'queryset': Photo.objects.filter(is_public=True)}, name='pl-photo-detail'),
url(r'^photo/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/$', 'archive_day', photo_args, name='pl-photo-archive-day'),
url(r'^photo/(?P<year>\d{4})/(?P<month>[a-z]{3})/$', 'archive_month', photo_args, name='pl-photo-archive-month'),
url(r'^photo/(?P<year>\d{4})/$', 'archive_year', photo_args, name='pl-photo-archive-year'),
url(r'^photo/$', 'archive_index', photo_args, name='pl-photo-archive'),
)
urlpatterns += patterns('django.views.generic.list_detail',
url(r'^photo/(?P<slug>[\-\d\w]+)/$', 'object_detail', {'slug_field': 'title_slug', 'queryset': Photo.objects.filter(is_public=True)}, name='pl-photo'),
url(r'^photo/page/(?P<page>[0-9]+)/$', 'object_list', {'queryset': Photo.objects.filter(is_public=True), 'allow_empty': True, 'paginate_by': 20}, name='pl-photo-list'),
)
| bsd-3-clause |
frouty/odoo_oph | addons/base_report_designer/plugin/openerp_report_designer/bin/script/ConvertFieldsToBraces.py | 384 | 2324 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import unohelper
import string
import re
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from LoginTest import *
database="test"
uid = 3
class ConvertFieldsToBraces( unohelper.Base, XJobExecutor ):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
self.aReportSyntex=[]
self.getFields()
def getFields(self):
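        """Replace every DropDown text field in the current Writer document
        with its literal text (Items[1]) and dispose of the field object.
        """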
desktop=getDesktop()
doc = desktop.getCurrentComponent()
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
oPar.getAnchor().Text.insertString(oPar.getAnchor(),oPar.Items[1],False)
oPar.dispose()
if __name__<>"package":
ConvertFieldsToBraces(None)
else:
g_ImplementationHelper.addImplementation( ConvertFieldsToBraces, "org.openoffice.openerp.report.convertFB", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jcftang/ansible-modules-extras | cloud/amazon/ec2_vpc_nat_gateway.py | 36 | 34426 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_nat_gateway
short_description: Manage AWS VPC NAT Gateways.
description:
- Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
version_added: "2.2"
requirements: [boto3, botocore]
options:
state:
description:
- Ensure NAT Gateway is present or absent.
required: false
default: "present"
choices: ["present", "absent"]
nat_gateway_id:
description:
- The id AWS dynamically allocates to the NAT Gateway on creation.
This is required when the absent option is present.
required: false
default: None
subnet_id:
description:
- The id of the subnet to create the NAT Gateway in. This is required
with the present option.
required: false
default: None
allocation_id:
description:
- The id of the elastic IP allocation. If this is not passed and the
        eip_address is not passed, an EIP is generated for this NAT Gateway.
required: false
default: None
eip_address:
description:
- The elastic IP address of the EIP you want attached to this NAT Gateway.
If this is not passed and the allocation_id is not passed,
an EIP is generated for this NAT Gateway.
required: false
if_exist_do_not_create:
description:
- if a NAT Gateway exists already in the subnet_id, then do not create a new one.
required: false
default: false
release_eip:
description:
- Deallocate the EIP from the VPC.
- Option is only valid with the absent state.
      - You should use this with the wait option, since you cannot release an address while a delete operation is happening.
required: false
default: true
wait:
description:
- Wait for operation to complete before returning.
required: false
default: false
wait_timeout:
description:
- How many seconds to wait for an operation to complete before timing out.
required: false
default: 300
client_token:
description:
- Optional unique token to be used during create to ensure idempotency.
When specifying this option, ensure you specify the eip_address parameter
        as well, otherwise any subsequent runs will fail.
required: false
author:
- "Allen Sanabria (@linuxdynasty)"
- "Jon Hadfield (@jonhadfield)"
- "Karen Cheng(@Etherdaemon)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create new nat gateway with client token.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
region: ap-southeast-2
client_token: abcd-12345678
register: new_nat_gateway
- name: Create new nat gateway using an allocation-id.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
allocation_id: eipalloc-12345678
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway, using an EIP address and wait for available status.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
wait: yes
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: yes
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: yes
region: ap-southeast-2
if_exist_do_not_create: true
register: new_nat_gateway
- name: Delete nat gateway using discovered nat gateways from facts module.
ec2_vpc_nat_gateway:
state: absent
region: ap-southeast-2
wait: yes
nat_gateway_id: "{{ item.NatGatewayId }}"
release_eip: yes
register: delete_nat_gateway_result
with_items: "{{ gateways_to_remove.result }}"
- name: Delete nat gateway and wait for deleted status.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
wait: yes
wait_timeout: 500
region: ap-southeast-2
- name: Delete nat gateway and release EIP.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
release_eip: yes
wait: yes
wait_timeout: 300
region: ap-southeast-2
'''
RETURN = '''
create_time:
    description: The ISO 8601 date time format in UTC.
returned: In all cases.
type: string
sample: "2016-03-05T05:19:20.282000+00:00'"
nat_gateway_id:
description: id of the VPC NAT Gateway
returned: In all cases.
type: string
sample: "nat-0d1e3a878585988f8"
subnet_id:
description: id of the Subnet
returned: In all cases.
type: string
sample: "subnet-12345"
state:
description: The current state of the NAT Gateway.
returned: In all cases.
type: string
sample: "available"
vpc_id:
description: id of the VPC.
returned: In all cases.
type: string
sample: "vpc-12345"
nat_gateway_addresses:
    description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id.
returned: In all cases.
type: string
sample: [
{
'public_ip': '52.52.52.52',
'network_interface_id': 'eni-12345',
'private_ip': '10.0.0.100',
'allocation_id': 'eipalloc-12345'
}
]
'''
try:
import botocore
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
import datetime
import random
import re
import time
from dateutil.tz import tzutc
DRY_RUN_GATEWAYS = [
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "available",
"create_time": "2016-03-05T05:19:20.282000+00:00",
"vpc_id": "vpc-12345678"
}
]
DRY_RUN_GATEWAY_UNCONVERTED = [
{
'VpcId': 'vpc-12345678',
'State': 'available',
'NatGatewayId': 'nat-123456789',
'SubnetId': 'subnet-123456789',
'NatGatewayAddresses': [
{
'PublicIp': '55.55.55.55',
'NetworkInterfaceId': 'eni-1234567',
'AllocationId': 'eipalloc-1234567',
'PrivateIp': '10.0.0.102'
}
],
'CreateTime': datetime.datetime(2016, 3, 5, 5, 19, 20, 282000, tzinfo=tzutc())
}
]
DRY_RUN_ALLOCATION_UNCONVERTED = {
'Addresses': [
{
'PublicIp': '55.55.55.55',
'Domain': 'vpc',
'AllocationId': 'eipalloc-1234567'
}
]
}
DRY_RUN_MSGS = 'DryRun Mode:'
def convert_to_lower(data):
"""Convert all uppercase keys in dict with lowercase_
Args:
data (dict): Dictionary with keys that have upper cases in them
Example.. FooBar == foo_bar
if a val is of type datetime.datetime, it will be converted to
the ISO 8601
Basic Usage:
>>> test = {'FooBar': []}
>>> test = convert_to_lower(test)
{
'foo_bar': []
}
Returns:
Dictionary
"""
results = dict()
if isinstance(data, dict):
for key, val in data.items():
key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower()
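            # e.g. 'NatGatewayId' -> '_Nat_Gateway_Id' -> 'nat_gateway_id'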
if key[0] == '_':
key = key[1:]
if isinstance(val, datetime.datetime):
results[key] = val.isoformat()
elif isinstance(val, dict):
results[key] = convert_to_lower(val)
elif isinstance(val, list):
converted = list()
for item in val:
converted.append(convert_to_lower(item))
results[key] = converted
else:
results[key] = val
return results
def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None,
states=None, check_mode=False):
"""Retrieve a list of NAT Gateways
Args:
client (botocore.client.EC2): Boto3 client
Kwargs:
subnet_id (str): The subnet_id the nat resides in.
nat_gateway_id (str): The Amazon nat id.
states (list): States available (pending, failed, available, deleting, and deleted)
default=None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-12345678'
>>> get_nat_gateways(client, subnet_id)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-12345678"
}
Returns:
Tuple (bool, str, list)
"""
params = dict()
err_msg = ""
gateways_retrieved = False
existing_gateways = list()
if not states:
states = ['available', 'pending']
if nat_gateway_id:
params['NatGatewayIds'] = [nat_gateway_id]
else:
params['Filter'] = [
{
'Name': 'subnet-id',
'Values': [subnet_id]
},
{
'Name': 'state',
'Values': states
}
]
try:
if not check_mode:
gateways = client.describe_nat_gateways(**params)['NatGateways']
if gateways:
for gw in gateways:
existing_gateways.append(convert_to_lower(gw))
gateways_retrieved = True
else:
gateways_retrieved = True
if nat_gateway_id:
if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id:
existing_gateways = DRY_RUN_GATEWAYS
elif subnet_id:
if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id:
existing_gateways = DRY_RUN_GATEWAYS
err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return gateways_retrieved, err_msg, existing_gateways
def wait_for_status(client, wait_timeout, nat_gateway_id, status,
check_mode=False):
"""Wait for the NAT Gateway to reach a status
Args:
client (botocore.client.EC2): Boto3 client
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
nat_gateway_id (str): The Amazon nat id.
status (str): The status to wait for.
examples. status=available, status=deleted
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-12345678'
>>> allocation_id = 'eipalloc-12345678'
>>> wait_for_status(client, subnet_id, allocation_id)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-1234567",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-12345678"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-12345677"
}
]
Returns:
Tuple (bool, str, dict)
"""
polling_increment_secs = 5
wait_timeout = time.time() + wait_timeout
status_achieved = False
nat_gateway = dict()
states = ['pending', 'failed', 'available', 'deleting', 'deleted']
err_msg = ""
while wait_timeout > time.time():
try:
gws_retrieved, err_msg, nat_gateways = (
get_nat_gateways(
client, nat_gateway_id=nat_gateway_id,
states=states, check_mode=check_mode
)
)
if gws_retrieved and nat_gateways:
nat_gateway = nat_gateways[0]
if check_mode:
nat_gateway['state'] = status
if nat_gateway.get('state') == status:
status_achieved = True
break
elif nat_gateway.get('state') == 'failed':
err_msg = nat_gateway.get('failure_message')
break
elif nat_gateway.get('state') == 'pending':
if 'failure_message' in nat_gateway:
err_msg = nat_gateway.get('failure_message')
status_achieved = False
break
else:
time.sleep(polling_increment_secs)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
if not status_achieved:
err_msg = "Wait time out reached, while waiting for results"
return status_achieved, err_msg, nat_gateway
def gateway_in_subnet_exists(client, subnet_id, allocation_id=None,
check_mode=False):
"""Retrieve all NAT Gateways for a subnet.
Args:
subnet_id (str): The subnet_id the nat resides in.
Kwargs:
allocation_id (str): The EIP Amazon identifier.
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-1234567'
>>> allocation_id = 'eipalloc-1234567'
>>> gateway_in_subnet_exists(client, subnet_id, allocation_id)
(
[
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-123456789",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-1234567"
}
],
False
)
Returns:
Tuple (list, bool)
"""
allocation_id_exists = False
gateways = []
states = ['available', 'pending']
gws_retrieved, _, gws = (
get_nat_gateways(
client, subnet_id, states=states, check_mode=check_mode
)
)
if not gws_retrieved:
return gateways, allocation_id_exists
for gw in gws:
for address in gw['nat_gateway_addresses']:
if allocation_id:
if address.get('allocation_id') == allocation_id:
allocation_id_exists = True
gateways.append(gw)
else:
gateways.append(gw)
return gateways, allocation_id_exists
def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
eip_address (str): The Elastic IP Address of the EIP.
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> eip_address = '52.87.29.36'
>>> get_eip_allocation_id_by_address(client, eip_address)
'eipalloc-36014da3'
Returns:
Tuple (str, str)
"""
params = {
'PublicIps': [eip_address],
}
allocation_id = None
err_msg = ""
try:
if not check_mode:
allocations = client.describe_addresses(**params)['Addresses']
if len(allocations) == 1:
allocation = allocations[0]
else:
allocation = None
else:
dry_run_eip = (
DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp']
)
if dry_run_eip == eip_address:
allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]
else:
allocation = None
if allocation:
if allocation.get('Domain') != 'vpc':
err_msg = (
"EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
.format(eip_address)
)
else:
allocation_id = allocation.get('AllocationId')
else:
err_msg = (
"EIP {0} does not exist".format(eip_address)
)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return allocation_id, err_msg
def allocate_eip_address(client, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> allocate_eip_address(client)
True
Returns:
Tuple (bool, str)
"""
ip_allocated = False
new_eip = None
err_msg = ''
params = {
'Domain': 'vpc',
}
try:
if check_mode:
ip_allocated = True
random_numbers = (
''.join(str(x) for x in random.sample(range(0, 9), 7))
)
new_eip = 'eipalloc-{0}'.format(random_numbers)
else:
new_eip = client.allocate_address(**params)['AllocationId']
ip_allocated = True
err_msg = 'eipalloc id {0} created'.format(new_eip)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return ip_allocated, err_msg, new_eip
def release_address(client, allocation_id, check_mode=False):
"""Release an EIP from your EIP Pool
Args:
client (botocore.client.EC2): Boto3 client
allocation_id (str): The eip Amazon identifier.
Kwargs:
check_mode (bool): if set to true, do not run anything and
falsify the results.
Basic Usage:
>>> client = boto3.client('ec2')
>>> allocation_id = "eipalloc-123456"
>>> release_address(client, allocation_id)
True
Returns:
Boolean, string
"""
err_msg = ''
if check_mode:
return True, ''
ip_released = False
params = {
'AllocationId': allocation_id,
}
try:
client.release_address(**params)
ip_released = True
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return ip_released, err_msg
def create(client, subnet_id, allocation_id, client_token=None,
wait=False, wait_timeout=0, if_exist_do_not_create=False,
check_mode=False):
"""Create an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
subnet_id (str): The subnet_id the nat resides in.
allocation_id (str): The eip Amazon identifier.
Kwargs:
if_exist_do_not_create (bool): if a nat gateway already exists in this
subnet, than do not create another one.
default = False
wait (bool): Wait for the nat to be in the deleted state before returning.
default = False
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
default = 0
client_token (str):
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-1234567'
>>> allocation_id = 'eipalloc-1234567'
>>> create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
[
true,
"",
{
"nat_gateway_id": "nat-123456789",
"subnet_id": "subnet-1234567",
"nat_gateway_addresses": [
{
"public_ip": "55.55.55.55",
"network_interface_id": "eni-1234567",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-1234567"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-1234567"
}
]
Returns:
Tuple (bool, str, list)
"""
params = {
'SubnetId': subnet_id,
'AllocationId': allocation_id
}
request_time = datetime.datetime.utcnow()
changed = False
success = False
token_provided = False
err_msg = ""
if client_token:
token_provided = True
params['ClientToken'] = client_token
try:
if not check_mode:
result = client.create_nat_gateway(**params)["NatGateway"]
else:
result = DRY_RUN_GATEWAY_UNCONVERTED[0]
result['CreateTime'] = datetime.datetime.utcnow()
result['NatGatewayAddresses'][0]['AllocationId'] = allocation_id
result['SubnetId'] = subnet_id
success = True
changed = True
create_time = result['CreateTime'].replace(tzinfo=None)
if token_provided and (request_time > create_time):
changed = False
elif wait:
success, err_msg, result = (
wait_for_status(
client, wait_timeout, result['NatGatewayId'], 'available',
check_mode=check_mode
)
)
if success:
err_msg = (
'NAT gateway {0} created'.format(result['nat_gateway_id'])
)
except botocore.exceptions.ClientError as e:
if "IdempotentParameterMismatch" in e.message:
err_msg = (
'NAT Gateway does not support update and token has already been provided'
)
else:
err_msg = str(e)
success = False
changed = False
result = None
return success, changed, err_msg, result
def pre_create(client, subnet_id, allocation_id=None, eip_address=None,
if_exist_do_not_create=False, wait=False, wait_timeout=0,
client_token=None, check_mode=False):
"""Create an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
subnet_id (str): The subnet_id the nat resides in.
Kwargs:
allocation_id (str): The EIP Amazon identifier.
default = None
eip_address (str): The Elastic IP Address of the EIP.
default = None
if_exist_do_not_create (bool): if a nat gateway already exists in this
subnet, than do not create another one.
default = False
wait (bool): Wait for the nat to be in the deleted state before returning.
default = False
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
default = 0
client_token (str):
default = None
Basic Usage:
>>> client = boto3.client('ec2')
>>> subnet_id = 'subnet-w4t12897'
>>> allocation_id = 'eipalloc-36014da3'
>>> pre_create(client, subnet_id, allocation_id, if_exist_do_not_create=True, wait=True, wait_timeout=500)
[
true,
"",
{
"nat_gateway_id": "nat-03835afb6e31df79b",
"subnet_id": "subnet-w4t12897",
"nat_gateway_addresses": [
{
"public_ip": "52.87.29.36",
"network_interface_id": "eni-5579742d",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-36014da3"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-w68571b5"
}
]
Returns:
Tuple (bool, bool, str, list)
"""
success = False
changed = False
err_msg = ""
results = list()
if not allocation_id and not eip_address:
existing_gateways, allocation_id_exists = (
gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)
)
if len(existing_gateways) > 0 and if_exist_do_not_create:
success = True
changed = False
results = existing_gateways[0]
err_msg = (
'NAT Gateway {0} already exists in subnet_id {1}'
.format(
existing_gateways[0]['nat_gateway_id'], subnet_id
)
)
return success, changed, err_msg, results
else:
success, err_msg, allocation_id = (
allocate_eip_address(client, check_mode=check_mode)
)
if not success:
                return success, False, err_msg, dict()
elif eip_address or allocation_id:
if eip_address and not allocation_id:
allocation_id, err_msg = (
get_eip_allocation_id_by_address(
client, eip_address, check_mode=check_mode
)
)
if not allocation_id:
success = False
changed = False
return success, changed, err_msg, dict()
existing_gateways, allocation_id_exists = (
gateway_in_subnet_exists(
client, subnet_id, allocation_id, check_mode=check_mode
)
)
if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
success = True
changed = False
results = existing_gateways[0]
err_msg = (
'NAT Gateway {0} already exists in subnet_id {1}'
.format(
existing_gateways[0]['nat_gateway_id'], subnet_id
)
)
return success, changed, err_msg, results
success, changed, err_msg, results = create(
client, subnet_id, allocation_id, client_token,
wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode
)
return success, changed, err_msg, results
def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
release_eip=False, check_mode=False):
"""Delete an Amazon NAT Gateway.
Args:
client (botocore.client.EC2): Boto3 client
nat_gateway_id (str): The Amazon nat id.
Kwargs:
wait (bool): Wait for the nat to be in the deleted state before returning.
wait_timeout (int): Number of seconds to wait, until this timeout is reached.
release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc.
Basic Usage:
>>> client = boto3.client('ec2')
>>> nat_gw_id = 'nat-03835afb6e31df79b'
>>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True)
[
true,
"",
{
"nat_gateway_id": "nat-03835afb6e31df79b",
"subnet_id": "subnet-w4t12897",
"nat_gateway_addresses": [
{
"public_ip": "52.87.29.36",
"network_interface_id": "eni-5579742d",
"private_ip": "10.0.0.102",
"allocation_id": "eipalloc-36014da3"
}
],
"state": "deleted",
"create_time": "2016-03-05T00:33:21.209000+00:00",
"delete_time": "2016-03-05T00:36:37.329000+00:00",
"vpc_id": "vpc-w68571b5"
}
]
Returns:
Tuple (bool, str, list)
"""
params = {
'NatGatewayId': nat_gateway_id
}
    success = False
    changed = False
    err_msg = ""
    results = list()
    allocation_id = None  # set once the gateway (and its EIP) is found
    states = ['pending', 'available']
try:
exist, _, gw = (
get_nat_gateways(
client, nat_gateway_id=nat_gateway_id,
states=states, check_mode=check_mode
)
)
if exist and len(gw) == 1:
results = gw[0]
if not check_mode:
client.delete_nat_gateway(**params)
allocation_id = (
results['nat_gateway_addresses'][0]['allocation_id']
)
changed = True
success = True
err_msg = (
                'NAT gateway {0} is in a deleting state. Delete was successful'
.format(nat_gateway_id)
)
if wait:
status_achieved, err_msg, results = (
wait_for_status(
client, wait_timeout, nat_gateway_id, 'deleted',
check_mode=check_mode
)
)
if status_achieved:
err_msg = (
'NAT gateway {0} was deleted successfully'
.format(nat_gateway_id)
)
except botocore.exceptions.ClientError as e:
err_msg = str(e)
    if release_eip and allocation_id:
eip_released, eip_err = (
release_address(client, allocation_id, check_mode)
)
if not eip_released:
err_msg = (
"{0}: Failed to release EIP {1}: {2}"
.format(err_msg, allocation_id, eip_err)
)
success = False
return success, changed, err_msg, results
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
subnet_id=dict(type='str'),
eip_address=dict(type='str'),
allocation_id=dict(type='str'),
if_exist_do_not_create=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=320, required=False),
release_eip=dict(type='bool', default=False),
nat_gateway_id=dict(type='str'),
client_token=dict(type='str'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['allocation_id', 'eip_address']
]
)
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='botocore/boto3 is required.')
state = module.params.get('state').lower()
check_mode = module.check_mode
subnet_id = module.params.get('subnet_id')
allocation_id = module.params.get('allocation_id')
eip_address = module.params.get('eip_address')
nat_gateway_id = module.params.get('nat_gateway_id')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
release_eip = module.params.get('release_eip')
client_token = module.params.get('client_token')
if_exist_do_not_create = module.params.get('if_exist_do_not_create')
try:
region, ec2_url, aws_connect_kwargs = (
get_aws_connection_info(module, boto3=True)
)
client = (
boto3_conn(
module, conn_type='client', resource='ec2',
region=region, endpoint=ec2_url, **aws_connect_kwargs
)
)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Boto3 Client Error - " + str(e.msg))
changed = False
err_msg = ''
if state == 'present':
if not subnet_id:
module.fail_json(msg='subnet_id is required for creation')
success, changed, err_msg, results = (
pre_create(
client, subnet_id, allocation_id, eip_address,
if_exist_do_not_create, wait, wait_timeout,
client_token, check_mode=check_mode
)
)
else:
if not nat_gateway_id:
module.fail_json(msg='nat_gateway_id is required for removal')
else:
success, changed, err_msg, results = (
remove(
client, nat_gateway_id, wait, wait_timeout, release_eip,
check_mode=check_mode
)
)
if not success:
module.fail_json(
msg=err_msg, success=success, changed=changed
)
else:
module.exit_json(
msg=err_msg, success=success, changed=changed, **results
)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
nirmeshk/mase | src/abstract.py | 4 | 13355 | from __future__ import print_function, division
from ok import *
import random,re,sys
sys.dont_write_bytecode = True
"""
# Abstraction (Advanced Python Coding)
Layers within layers within layers..
In the following, we will
divide the problem into layers of abstraction where `iterators`
separate out the various concerns. Easier to debug! Good
Zen Python coding.
## Background
From Wikipedia:
+ In computer science, abstraction is a technique for managing complexity of computer systems.
+ It works by establishing a level of complexity on which a person interacts with the system, suppressing the more complex details below the current level.
+ The programmer works with an idealized interface (usually well defined) and can add additional levels of functionality that would otherwise be too complex to handle.
+ For example, a programmer writing code that involves numerical operations may not be interested in the way numbers are represented in the underlying hardware (e.g. whether they're 16 bit or 32 bit integers), and where those details have been suppressed it can be said that they were abstracted away, leaving simply numbers with which the programmer can work.
+ In addition, a task of sending an email message across continents would be extremely complex if you start with a piece of optic cable and basic hardware components.
By using layers of complexity that have been created to abstract away the physical cables, network layout and presenting the programmer with a virtual data channel, this task is manageable.
Abstraction can apply to control or to data: Control abstraction is the abstraction of actions while data abstraction is that of data structures.
+ _Data abstraction_ allows handling data bits in meaningful ways. For example, it is the basic motivation behind datatype and object-oriented programming.
+ _Control abstraction_ involves the use of subprograms and related concepts control flows
The rest of this page is about control abstraction, as implemented by Python's iterators. If a function's `return` statement
is replaced with `yield` then that function becomes a _generator_, whose internal details can now be ignored.
For example, if you want to launch a rocket....
```python
def countdown(n):
while n >= 0:
yield n
n -= 1
print("We are go for launch")
for x in countdown(10):
print(x)
print("lift off!")
```
And here's my favorite iterator that descends recursive lists:
"""
def items(x, depth=-1):
if isinstance(x,(list,tuple)):
for y in x:
for z in items(y, depth+1):
yield z
else:
yield depth,x
"""
This lets me do things like (a) traverse a nested structure and (b) pretty-print that structure.
For example:
```python
for depth,x in items( [10,
[ 20,
30],
40,
[ ( 50,
60,
70),
[ 80,
90,
100],
110]]):
print(" |.. " *depth,x)
```
Output:
```
10
|.. 20
|.. 30
40
|.. |.. 50
|.. |.. 60
|.. |.. 70
|.. |.. 80
|.. |.. 90
|.. |.. 100
|.. 110
```
Anyway, lets apply this idea to a real problem.
## Problem
If parsing 1,000,000
records in a table of data, keep a small sample
of that large space, without blowing memory.
How?
+ Read a table of data and keep some sample of each column.
Specifically, keep up to `max` things (and if we see more than that,
delete older values).
"""
r = random.random
rseed = random.seed
class Some:
def __init__(i, max=8): # note, usually 256 or 128 or 64 (if brave)
i.n, i.any, i.max = 0,[],max
def __iadd__(i,x):
i.n += 1
now = len(i.any)
if now < i.max: # not full yet, so just keep it
i.any += [x]
elif r() <= now/i.n:
i.any[ int(r() * now) ]= x # zap some older value
#else: forget x
return i
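# A note added for clarity: this is classic reservoir sampling. Item
# number n is accepted with probability max/n, and an accepted item
# evicts a uniformly random slot, so every item seen so far survives
# with the same probability max/n -- a statistically fair sample of
# the whole stream in bounded memory.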
@ok
def _some():
rseed(1)
s = Some(16)
for i in xrange(100000):
s += i
assert sorted(s.any)== [ 5852, 24193, 28929, 38266,
41764, 42926, 51310, 52203,
54651, 56743, 59368, 60794,
61888, 82586, 83018, 88462]
"""
Turns out, we do not lose much (caveat: need to keep more than 16... 256 seems a reasonable default).
```
$ python -B abstract.py
diff to all diff to all
------------------- -------------------
all kept 10% 30% 50% 70% 90% kept 10% 30% 50% 70% 90%
--- ---- --- --- --- --- --- ---- --- --- --- --- ---
128 64 [4, 0, 5, 6, 1] 128 [0, 2, 1, 4, 3]
256 64 [1, 2, 4, 2, 1] 256 [2, 4, 5, 3, 1]
512 64 [1, 0, 2, 3, 7] 512 [0, 0, 5, 4, 1]
1024 64 [1, 5, 10, 4, 1] 1024 [0, 1, 1, 0, 0]
2048 64 [0, 1, 5, 3, 7] 2048 [0, 2, 3, 4, 2]
4096 64 [2, 9, 8, 4, 8] 4096 [0, 1, 0, 1, 0]
8192 64 [0, 4, 3, 2, 0] 8192 [0, 0, 0, 0, 0]
diff to all diff to all
------------------- -------------------
all kept 10% 30% 50% 70% 90% kept 10% 30% 50% 70% 90%
--- ---- --- --- --- --- --- ---- --- --- --- --- ---
256 128 [0, 0, 1, 0, 0] 256 [3, 0, 4, 2, 1]
512 128 [3, 2, 2, 6, 0] 512 [0, 1, 5, 6, 1]
1024 128 [6, 7, 5, 5, 1] 1024 [0, 2, 0, 3, 0]
2048 128 [1, 2, 2, 0, 2] 2048 [0, 1, 1, 0, 0]
4096 128 [1, 0, 3, 0, 1] 4096 [1, 2, 1, 0, 0]
8192 128 [5, 3, 1, 0, 0] 8192 [0, 0, 1, 0, 0]
16384 128 [5, 9, 3, 0, 0] 16384 [0, 0, 0, 0, 0]
diff to all diff to all
------------------- -------------------
all kept 10% 30% 50% 70% 90% kept 10% 30% 50% 70% 90%
--- ---- --- --- --- --- --- ---- --- --- --- --- ---
512 256 [0, 1, 1, 1, 0] 512 [1, 2, 0, 0, 1]
1024 256 [1, 1, 4, 2, 0] 1024 [0, 1, 2, 1, 0]
2048 256 [1, 2, 1, 2, 0] 2048 [0, 0, 0, 0, 1]
4096 256 [0, 1, 1, 2, 0] 4096 [0, 1, 0, 0, 0]
8192 256 [3, 2, 1, 5, 4] 8192 [0, 0, 0, 0, 0]
16384 256 [3, 0, 4, 3, 0] 16384 [0, 0, 0, 0, 0]
32768 256 [0, 2, 5, 5, 2] 32768 [0, 0, 0, 0, 0]
diff to all diff to all
------------------- -------------------
all kept 10% 30% 50% 70% 90% kept 10% 30% 50% 70% 90%
--- ---- --- --- --- --- --- ---- --- --- --- --- ---
1024 512 [2, 0, 0, 0, 0] 1024 [1, 1, 2, 3, 0]
2048 512 [0, 0, 1, 0, 0] 2048 [0, 1, 0, 0, 2]
4096 512 [0, 0, 1, 0, 1] 4096 [1, 0, 0, 0, 0]
8192 512 [1, 0, 0, 1, 2] 8192 [0, 0, 0, 0, 0]
16384 512 [0, 2, 1, 0, 0] 16384 [0, 0, 0, 0, 0]
32768 512 [1, 2, 1, 0, 2] 32768 [0, 0, 0, 0, 0]
65536 512 [1, 1, 0, 0, 0] 65536 [0, 0, 0, 0, 0]
```
## Example data
14 items: 9 examples of playing golf, 5 of not playing golf.
"""
weather="""
outlook,
temperature,
humidity,?windy,play
sunny , 85, 85, FALSE, no # an interesting case
sunny , 80, 90, TRUE , no
overcast , 83, 86, FALSE, yes
rainy , 70, 96, FALSE, yes
rainy , 68, 80, FALSE, yes
rainy , 65, 70, TRUE , no
overcast , 64, 65, TRUE ,
yes
sunny , 72, 95, FALSE, no
sunny , 69, 70, FALSE, yes
rainy , 75, 80, FALSE, yes
sunny , 75, 70, TRUE , yes
overcast , 72, 90, TRUE , yes
overcast , 81, 75, FALSE, yes
rainy , 71, 91, TRUE , no"""
"""
Note that the table is messy: blank lines, spaces, comments,
some lines split over multiple physical lines.
Also:
+ there are some columns we just want to ignore (see `?windy`)
+ when we read these strings, we need to coerce
these values to either strings, ints, or floats.
+ This string has rows belonging to different `klass`es
(see last column). We want our tables to keep counts
separately for each column.
Lets handle all that mess with iterators.
## Support code
### Standard Header
Load some standard tools.
"""
class o:
"""Emulate Javascript's uber simple objects.
Note my convention: I use "`i`" not "`this`."""
def __init__(i,**d) : i.__dict__.update(d)
def __setitem__(i,k,v) : i.__dict__[k] = v
def __getitem__(i,k) : return i.__dict__[k]
def __repr__(i) : return 'o'+str(i.__dict__)
@ok
def _o():
x = o(name='tim',shoesize=9)
assert x.name == 'tim'
assert x["name"] == 'tim'
x.shoesize += 1
assert x.shoesize == 10
assert str(x) == "o{'name': 'tim', 'shoesize': 10}"
"""
### Serious Python JuJu
Tricks to let us read from strings or files or zip files
or anything source at all.
Not for beginners.
"""
def STRING(str):
def wrapper():
for c in str: yield c
return wrapper
def FILE(filename, buffer_size=4096):
def chunks(filename):
with open(filename, "rb") as fp:
chunk = fp.read(buffer_size)
while chunk:
yield chunk
chunk = fp.read(buffer_size)
def wrapper():
for chunk in chunks(filename):
for char in chunk:
yield char
return wrapper
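"""
The same wrapper pattern extends to any character source. As a sketch (my
addition, not part of the original toolkit), a gzip-compressed file can be
adapted behind the identical interface:
```python
def GZIP(filename, buffer_size=4096):
  import gzip
  def wrapper():
    with gzip.open(filename, "rb") as fp:
      chunk = fp.read(buffer_size)
      while chunk:
        for char in chunk:
          yield char
        chunk = fp.read(buffer_size)
  return wrapper
```
Anything built this way can be handed straight to `lines()` below.
"""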
"""
## Iterators
### Lines
Yield each line in a string
"""
def lines(src):
tmp=''
for ch in src(): # sneaky... src can evaluate to different things
if ch == "\n":
yield tmp
tmp = ''
else:
tmp += ch # for a (slightly) faster method,
# in Python3, see http://goo.gl/LvgGx3
if tmp:
yield tmp
@ok
def _line():
for line in lines(STRING(weather)):
print("[",line,"]",sep="")
"""
### Rows
Yield all non-blank lines,
joining lines that end in ','.
"""
def rows(src):
b4 = ''
for line in lines(src):
line = re.sub(r"[\r\t ]*","",line)
line = re.sub(r"#.*","",line)
if not line: continue # skip blanks
if line[-1] == ',': # maybe, continue lines
b4 += line
else:
yield b4 + line
b4 = ''
@ok
def _row():
for row in rows(STRING(weather)):
print("[",row,"]",sep="")
"""
### Values
Coerce row values to floats, ints or strings.
Jump over any cols we are ignoring
"""
def values(src):
want = None
for row in rows(src):
lst = row.split(',')
want = want or [col for col in xrange(len(lst))
if lst[col][0] != "?" ]
yield [ make(lst[col]) for col in want ]
"""
Helper function.
"""
def make(x):
try : return int(x)
except:
try : return float(x)
except: return x
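"""
For example, `make("3")` returns the int `3`, `make("3.5")` returns the
float `3.5`, and `make("sunny")` falls through to the string unchanged.
"""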
"""
Test function.
"""
@ok
def _values():
for cells in values(STRING(weather)):
print(cells)
"""
## Tables
Finally!
Tables keep `Some` values for each column in a string.
Assumes that the string contains a `klass` column
and keeps separate counts for each `klass`.
"""
def table(src, klass= -1, keep= False):
t = None
for cells in values(src):
if t:
k = cells[klass]
for cell,some,all in zip(cells,t.klasses[k],t.all):
some += cell
all += cell
if keep:
t.rows += [cells]
else:
t = o(header = cells,
rows = [],
all = klass0(cells),
klasses= Default(lambda: klass0(t.header)))
return t
"""
Helper functions:
+ If we reach for a klass information and we have not
seen that klass before, create a list of `Some` counters
(one for each column).
"""
class Default(dict):
def __init__(i, default): i.default = default
def __getitem__(i, key):
if key in i: return i.get(key)
return i.setdefault(key, i.default())
def klass0(header):
tmp = [Some() for _ in header]
for n,header1 in enumerate(header):
tmp[n].pos = n
tmp[n].name = header1
return tmp
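"""
A quick sketch of `Default` in action (an illustrative example, not from
the original file): unseen keys are built on demand by the supplied factory.
```python
d = Default(lambda: [])
d["fresh"] += [1]
assert d["fresh"] == [1]
```
"""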
"""
Test functions: read from strings or files.
"""
@ok
def _tableFromString(src = STRING(weather)):
t = table(src)
for k,v in t.klasses.items():
for some in v:
print(":klass",k,":name",some.name,":col",some.pos,
":seen",some.n,"\n\t:kept",some.any)
@ok
def _tableFromFile():
_tableFromString(FILE("weather.csv"))
"""
## Sanity Check
How much do we lose if, from some sample `s1`, we keep only some of its items in a smaller sample `s2`?
And just to make this interesting, we'll compare this error to what happens
if I sample that distribution twice, once to `s1` and once to `s3`.
For the results of the following code, see the top of this file.
"""
def samples(m0=128,f=random.random):
print("\n \t diff to all \t \t diff to all")
print(" \t -------------------\t \t -------------------")
print("all kept \t 10% 30% 50% 70% 90%\t kept\t 10% 30% 50% 70% 90%")
print("--- ---- \t --- --- --- --- ---\t ----\t --- --- --- --- ---")
m = m0
for _ in xrange(7):
m = m * 2
n = min(m0,m)
s1,s2,s3 = Some(m), Some(n),Some(m)
for _ in xrange(m):
x,y = f(),f()
s1 += x
s2 += x
s3 += y
print(m,"",n, "\t",diff(s1,s2),"\t",m,"\t",diff(s1,s3))
def ntiles(lst, tiles=[0.1,0.3,0.5,0.7,0.9]):
"Return percentiles in a list"
at = lambda x: lst[ int(len(lst)*x) ]
return [ at(tile) for tile in tiles ]
def diff(s1,s2):
"Return difference in the percentiles"
return [ abs(int(100*(most-less)))
for most,less in
zip(ntiles(sorted(s1.any)),
ntiles(sorted(s2.any))) ]
@ok
def _samples():
rseed(1)
for x in [64,128,256,512]:
samples(x)
| unlicense |
mzizzi/ansible | lib/ansible/modules/network/aci/aci_rest.py | 9 | 13219 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015 Jason Edelman <jason@networktocode.com>, Network to Code, LLC
# Copyright 2017 Dag Wieers <dag@wieers.com>
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
module: aci_rest
short_description: Direct access to the Cisco APIC REST API
description:
- Enables the management of the Cisco ACI fabric through direct access to the Cisco APIC REST API.
- More information regarding the Cisco APIC REST API is available from
U(http://www.cisco.com/c/en/us/td/docs/switches/datacenter/aci/apic/sw/2-x/rest_cfg/2_1_x/b_Cisco_APIC_REST_API_Configuration_Guide.html).
author:
- Jason Edelman (@jedelman8)
- Dag Wieers (@dagwieers)
version_added: '2.4'
requirements:
- lxml (when using XML content)
- xmljson >= 0.1.8 (when using XML content)
- python 2.7+ (when using xmljson)
extends_documentation_fragment: aci
options:
method:
description:
- The HTTP method of the request.
- Using C(delete) is typically used for deleting objects.
- Using C(get) is typically used for querying objects.
- Using C(post) is typically used for modifying objects.
required: true
default: get
choices: [ delete, get, post ]
aliases: [ action ]
path:
description:
- URI being used to execute API calls.
- Must end in C(.xml) or C(.json).
required: true
aliases: [ uri ]
content:
description:
- When used instead of C(src), sets the content of the API request directly.
- This may be convenient to template simple requests, for anything complex use the M(template) module.
src:
description:
- Name of the absolute path of the filename that includes the body
of the HTTP request being sent to the ACI fabric.
aliases: [ config_file ]
notes:
- When using inline-JSON (using C(content)), YAML requires to start with a blank line.
Otherwise the JSON statement will be parsed as a YAML mapping (dictionary) and translated into invalid JSON as a result.
- XML payloads require the C(lxml) and C(xmljson) python libraries. For JSON payloads nothing special is needed.
'''
EXAMPLES = r'''
- name: Add a tenant
aci_rest:
hostname: '{{ inventory_hostname }}'
username: '{{ aci_username }}'
password: '{{ aci_password }}'
method: post
path: /api/mo/uni.xml
src: /home/cisco/ansible/aci/configs/aci_config.xml
delegate_to: localhost
- name: Get tenants
aci_rest:
hostname: '{{ inventory_hostname }}'
username: '{{ aci_username }}'
password: '{{ aci_password }}'
method: get
path: /api/node/class/fvTenant.json
delegate_to: localhost
- name: Configure contracts
aci_rest:
hostname: '{{ inventory_hostname }}'
username: '{{ aci_username }}'
password: '{{ aci_password }}'
method: post
path: /api/mo/uni.xml
src: /home/cisco/ansible/aci/configs/contract_config.xml
delegate_to: localhost
- name: Register leaves and spines
aci_rest:
hostname: '{{ inventory_hostname }}'
username: '{{ aci_username }}'
password: '{{ aci_password }}'
validate_certs: no
method: post
path: /api/mo/uni/controller/nodeidentpol.xml
content: |
<fabricNodeIdentPol>
<fabricNodeIdentP name="{{ item.name }}" nodeId="{{ item.nodeid }}" status="{{ item.status }}" serial="{{ item.serial }}"/>
</fabricNodeIdentPol>
with_items:
- '{{ apic_leavesspines }}'
delegate_to: localhost
- name: Wait for all controllers to become ready
aci_rest:
hostname: '{{ inventory_hostname }}'
username: '{{ aci_username }}'
password: '{{ aci_password }}'
validate_certs: no
path: /api/node/class/topSystem.json?query-target-filter=eq(topSystem.role,"controller")
register: apics
until: "'totalCount' in apics and apics.totalCount|int >= groups['apic']|count"
retries: 120
delay: 30
delegate_to: localhost
run_once: yes
'''
RETURN = r'''
error_code:
description: The REST ACI return code, useful for troubleshooting on failure
returned: always
type: int
sample: 122
error_text:
description: The REST ACI descriptive text, useful for troubleshooting on failure
returned: always
type: string
sample: unknown managed object class foo
imdata:
description: Converted output returned by the APIC REST (register this for post-processing)
returned: always
type: string
sample: [{"error": {"attributes": {"code": "122", "text": "unknown managed object class foo"}}}]
payload:
description: The (templated) payload send to the APIC REST API (xml or json)
returned: always
type: string
sample: '<foo bar="boo"/>'
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
response:
description: HTTP response string
returned: always
type: string
sample: 'HTTP Error 400: Bad Request'
status:
description: HTTP status code
returned: always
type: int
sample: 400
totalCount:
description: Number of items in the imdata array
returned: always
type: string
sample: '0'
'''
import json
import os
# Optional, only used for XML payload
try:
import lxml.etree
HAS_LXML_ETREE = True
except ImportError:
HAS_LXML_ETREE = False
# Optional, only used for XML payload
try:
from xmljson import cobra
HAS_XMLJSON_COBRA = True
except ImportError:
HAS_XMLJSON_COBRA = False
# from ansible.module_utils.aci import aci_login
from ansible.module_utils.basic import AnsibleModule, get_exception
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_bytes
aci_argument_spec = dict(
hostname=dict(type='str', required=True, aliases=['host']),
username=dict(type='str', default='admin', aliases=['user']),
password=dict(type='str', required=True, no_log=True),
protocol=dict(type='str'), # Deprecated in v2.8
timeout=dict(type='int', default=30),
use_ssl=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
)
def aci_login(module, result=dict()):
''' Log in to APIC '''
# Set protocol based on use_ssl parameter
if module.params['protocol'] is None:
module.params['protocol'] = 'https' if module.params.get('use_ssl', True) else 'http'
# Perform login request
url = '%(protocol)s://%(hostname)s/api/aaaLogin.json' % module.params
data = {'aaaUser': {'attributes': {'name': module.params['username'], 'pwd': module.params['password']}}}
resp, auth = fetch_url(module, url, data=json.dumps(data), method="POST", timeout=module.params['timeout'])
# Handle APIC response
if auth['status'] != 200:
try:
result.update(aci_response(auth['body'], 'json'))
result['msg'] = 'Authentication failed: %(error_code)s %(error_text)s' % result
except KeyError:
result['msg'] = '%(msg)s for %(url)s' % auth
result['response'] = auth['msg']
result['status'] = auth['status']
module.fail_json(**result)
return resp
def aci_response(rawoutput, rest_type='xml'):
''' Handle APIC response output '''
result = dict()
if rest_type == 'json':
# Use APIC response as module output
try:
result = json.loads(rawoutput)
except:
e = get_exception()
# Expose RAW output for troubleshooting
result['error_code'] = -1
result['error_text'] = "Unable to parse output as JSON, see 'raw' output. %s" % e
result['raw'] = rawoutput
return result
else:
# NOTE: The XML-to-JSON conversion is using the "Cobra" convention
xmldata = None
try:
xml = lxml.etree.fromstring(to_bytes(rawoutput))
xmldata = cobra.data(xml)
except:
e = get_exception()
# Expose RAW output for troubleshooting
result['error_code'] = -1
result['error_text'] = "Unable to parse output as XML, see 'raw' output. %s" % e
result['raw'] = rawoutput
return result
# Reformat as ACI does for JSON API output
if xmldata and 'imdata' in xmldata:
if 'children' in xmldata['imdata']:
result['imdata'] = xmldata['imdata']['children']
else:
result['imdata'] = dict()
result['totalCount'] = xmldata['imdata']['attributes']['totalCount']
# Handle possible APIC error information
try:
result['error_code'] = result['imdata'][0]['error']['attributes']['code']
result['error_text'] = result['imdata'][0]['error']['attributes']['text']
except KeyError:
result['error_code'] = 0
result['error_text'] = 'Success'
return result
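# Illustrative note (added for clarity): the dict built by this helper is
# what the RETURN documentation above describes -- parsed imdata and
# totalCount where available, error_code/error_text when the APIC reports
# a fault, and a 'raw' key only when the payload could not be parsed.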
def main():
argument_spec = dict(
path=dict(type='str', required=True, aliases=['uri']),
method=dict(type='str', default='get', choices=['delete', 'get', 'post'], aliases=['action']),
src=dict(type='path', aliases=['config_file']),
content=dict(type='str'),
)
argument_spec.update(aci_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['content', 'src']],
supports_check_mode=True,
)
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
path = module.params['path']
content = module.params['content']
src = module.params['src']
protocol = module.params['protocol']
use_ssl = module.params['use_ssl']
method = module.params['method']
timeout = module.params['timeout']
result = dict(
changed=False,
payload='',
)
# Report missing file
file_exists = False
if src:
if os.path.isfile(src):
file_exists = True
else:
module.fail_json(msg='Cannot find/access src:\n%s' % src)
# Find request type
if path.find('.xml') != -1:
rest_type = 'xml'
if not HAS_LXML_ETREE:
module.fail_json(msg='The lxml python library is missing, or lacks etree support.')
if not HAS_XMLJSON_COBRA:
module.fail_json(msg='The xmljson python library is missing, or lacks cobra support.')
elif path.find('.json') != -1:
rest_type = 'json'
else:
module.fail_json(msg='Failed to find REST API content type (neither .xml nor .json).')
# Set protocol for further use
if protocol is None:
protocol = 'https' if use_ssl else 'http'
else:
module.deprecate("Parameter 'protocol' is deprecated, please use 'use_ssl' instead.", 2.8)
# Perform login first
auth = aci_login(module, result)
# Prepare request data
if content:
# We include the payload as it may be templated
result['payload'] = content
elif file_exists:
with open(src, 'r') as config_object:
# TODO: Would be nice to template this, requires action-plugin
result['payload'] = config_object.read()
# Ensure changes are reported
if method in ('delete', 'post'):
# FIXME: Hardcoding changed is not idempotent
result['changed'] = True
# In check_mode we assume it works, but we don't actually perform the requested change
# TODO: Could we turn this request into a GET instead?
if module.check_mode:
module.exit_json(response='OK (Check mode)', status=200, **result)
else:
result['changed'] = False
# Perform actual request using auth cookie
url = '%s://%s/%s' % (protocol, hostname, path.lstrip('/'))
headers = dict(Cookie=auth.headers['Set-Cookie'])
resp, info = fetch_url(module, url, data=result['payload'], method=method.upper(), timeout=timeout, headers=headers)
result['response'] = info['msg']
result['status'] = info['status']
# Report failure
if info['status'] != 200:
try:
result.update(aci_response(info['body'], rest_type))
result['msg'] = 'Task failed: %(error_code)s %(error_text)s' % result
except KeyError:
result['msg'] = '%(msg)s for %(url)s' % info
module.fail_json(**result)
# Report success
result.update(aci_response(resp.read(), rest_type))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
scavarda/mysql-dbcompare | mysql-utilities-1.6.0/scripts/mysqlserverinfo.py | 3 | 5949 | #!/usr/bin/env python
#
# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the server information utility.
"""
from mysql.utilities.common.tools import check_python_version
# Check Python version compatibility
check_python_version()
import os
import sys
from mysql.utilities.exception import UtilError
from mysql.utilities.command.serverinfo import show_server_info
from mysql.utilities.common.tools import check_connector_python
from mysql.utilities.common.options import (add_basedir_option, add_verbosity,
add_format_option,
get_ssl_dict,
add_no_headers_option,
check_dir_option,
setup_common_options,
check_password_security)
# Constants
NAME = "MySQL Utilities - mysqlserverinfo "
DESCRIPTION = "mysqlserverinfo - show server information"
USAGE = "%prog --server=user:pass@host:port:socket --format=grid"
# Check for connector/python
if not check_connector_python():
sys.exit(1)
if __name__ == '__main__':
# Setup the command parser and setup server, help
parser = setup_common_options(os.path.basename(sys.argv[0]),
DESCRIPTION, USAGE, True)
# Setup utility-specific options:
# Input format
add_format_option(parser, "display the output in either grid (default), "
"tab, csv, or vertical format", "grid")
# No header option
add_no_headers_option(parser, restricted_formats=['grid', 'tab', 'csv'])
# Show my.cnf values
parser.add_option("-d", "--show-defaults", action="store_true",
dest="show_defaults", default=False,
help="show defaults from the config file per server")
# Add --start option
parser.add_option("-s", "--start", action="store_true", dest="start",
help="start server in read only mode if offline")
# Add --basedir option
add_basedir_option(parser)
# Add --datadir option
parser.add_option("--datadir", action="store", dest="datadir",
default=None, type="string",
help="the data directory for the server")
# Add --search-port
parser.add_option("--port-range", action="store", dest="ports",
default="3306:3333", type="string",
help="the port range to search for running mysql "
"servers on Windows systems")
# Add --show-servers option
parser.add_option("--show-servers", action="store_true",
dest="show_servers",
help="show any known MySQL servers running on this host")
# Add startup timeout
parser.add_option("--start-timeout", action="store",
dest="start_timeout", type=int, default=10,
help="Number of seconds to wait for the server to "
"start. Default = 10.")
# Add verbosity mode
add_verbosity(parser, False)
# Now we process the rest of the arguments.
opt, args = parser.parse_args()
# Check security settings
check_password_security(opt, args)
# The --basedir and --datadir options are only required if --start is used
# otherwise they are ignored.
if opt.start:
# Check the basedir option for errors (e.g., invalid path).
check_dir_option(parser, opt.basedir, '--basedir', check_access=True,
read_only=True)
# Check the datadir option for errors.
check_dir_option(parser, opt.datadir, '--datadir', check_access=True,
read_only=False)
# Check start timeout for minimal value
if int(opt.start_timeout) < 10:
opt.start_timeout = 10
print("# WARNING: --start-timeout must be >= 10 seconds. Using "
"default value 10.")
# Check port range
if os.name == 'nt':
parts = opt.ports.split(":")
if len(parts) != 2:
print("# WARNING : {0} is not a valid port range. "
"Using default." .format(opt.ports))
opt.ports = "3306:3333"
# Set options for database operations.
options = {
"format": opt.format,
"no_headers": opt.no_headers,
"verbosity": opt.verbosity,
"debug": opt.verbosity >= 3,
"show_defaults": opt.show_defaults,
"start": opt.start,
"basedir": opt.basedir,
"datadir": opt.datadir,
"ports": opt.ports,
"show_servers": opt.show_servers,
"start_timeout": opt.start_timeout,
}
if opt.server is None:
parser.error("You must specify at least one server.")
# add ssl options values.
options.update(get_ssl_dict(opt))
try:
show_server_info(opt.server, options)
except UtilError:
_, e, _ = sys.exc_info()
print("ERROR: {0}".format(e.errmsg))
sys.exit(1)
except Exception:
_, e, _ = sys.exc_info()
print("ERROR: {0}".format(e))
sys.exit(1)
print("#...done.")
sys.exit()
| apache-2.0 |
le9i0nx/ansible | lib/ansible/modules/cloud/ovirt/ovirt_tags_facts.py | 73 | 4844 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_tags_facts
short_description: Retrieve facts about one or more oVirt/RHV tags
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV tags."
notes:
- "This module creates a new top-level C(ovirt_tags) fact, which
contains a list of tags"
options:
name:
description:
- "Name of the tag which should be listed."
vm:
description:
- "Name of the VM, which tags should be listed."
host:
description:
- "Name of the host, which tags should be listed."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all tags, which names start with C(tag):
- ovirt_tags_facts:
name: tag*
- debug:
var: tags
# Gather facts about all tags, which are assigned to VM C(postgres):
- ovirt_tags_facts:
vm: postgres
- debug:
var: tags
# Gather facts about all tags, which are assigned to host C(west):
- ovirt_tags_facts:
host: west
- debug:
var: tags
'''
RETURN = '''
ovirt_tags:
description: "List of dictionaries describing the tags. Tags attribues are mapped to dictionary keys,
all tags attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/tag."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
search_by_name,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
name=dict(default=None),
host=dict(default=None),
vm=dict(default=None),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
tags_service = connection.system_service().tags_service()
tags = []
all_tags = tags_service.list()
if module.params['name']:
tags.extend([
t for t in all_tags
if fnmatch.fnmatch(t.name, module.params['name'])
])
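# Note (added for clarity): fnmatch gives shell-style wildcards, so
# e.g. name: tag* matches tag1 and tag_db, but not mytag.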
if module.params['host']:
hosts_service = connection.system_service().hosts_service()
host = search_by_name(hosts_service, module.params['host'])
if host is None:
raise Exception("Host '%s' was not found." % module.params['host'])
tags.extend([
tag for tag in hosts_service.host_service(host.id).tags_service().list()
])
if module.params['vm']:
vms_service = connection.system_service().vms_service()
vm = search_by_name(vms_service, module.params['vm'])
if vm is None:
raise Exception("Vm '%s' was not found." % module.params['vm'])
tags.extend([
tag for tag in vms_service.vm_service(vm.id).tags_service().list()
])
if not (module.params['vm'] or module.params['host'] or module.params['name']):
tags = all_tags
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_tags=[
get_dict_of_struct(
struct=t,
connection=connection,
fetch_nested=module.params['fetch_nested'],
attributes=module.params['nested_attributes'],
) for t in tags
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| gpl-3.0 |
rhli/hadoop-EAR | build/hadoop-0.20/contrib/hod/hodlib/Common/setup.py | 182 | 45814 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# $Id:setup.py 5158 2007-04-09 00:14:35Z zim $
# $Id:setup.py 5158 2007-04-09 00:14:35Z zim $
#
#------------------------------------------------------------------------------
"""'setup' provides for reading and verifing configuration files based on
Python's SafeConfigParser class."""
import sys, os, re, pprint
from ConfigParser import SafeConfigParser
from optparse import OptionParser, IndentedHelpFormatter, OptionGroup
from util import get_perms, replace_escapes
from types import typeValidator, typeValidatorInstance, is_valid_type, \
typeToString
from hodlib.Hod.hod import hodHelp
reEmailAddress = re.compile("^.*@.*$")
reEmailDelimit = re.compile("@")
reComma = re.compile("\s*,\s*")
reDot = re.compile("\.")
reCommentHack = re.compile("^.*?\s+#|;.*", flags=re.S)
reCommentNewline = re.compile("\n|\r$")
reKeyVal = r"(?<!\\)="
reKeyVal = re.compile(reKeyVal)
reKeyValList = r"(?<!\\),"
reKeyValList = re.compile(reKeyValList)
errorPrefix = 'error'
requiredPerms = '0660'
class definition:
def __init__(self):
"""Generates a configuration definition object."""
self.__def = {}
self.__defOrder = []
def __repr__(self):
return pprint.pformat(self.__def)
def __getitem__(self, section):
return self.__def[section]
def __iter__(self):
return iter(self.__def)
def sections(self):
"""Returns a list of sections/groups."""
if len(self.__defOrder):
return self.__defOrder
else:
return self.__def.keys()
def add_section(self, section):
"""Add a configuration section / option group."""
if self.__def.has_key(section):
raise Exception("Section already exists: '%s'" % section)
else:
self.__def[section] = {}
def add_def(self, section, var, type, desc, help = True, default = None,
req = True, validate = True, short = None):
""" Add a variable definition.
section - section name
var - variable name
type - valid hodlib.types
desc - description of variable
help - bool, display help for this variable?
default - default value
req - bool, required?
validate - bool, validate type value?
short - short symbol (1 character)"""
if self.__def.has_key(section):
if not is_valid_type(type):
raise Exception("Type (type) is invalid: %s.%s - '%s'" % (section, var,
type))
if not isinstance(desc, str):
raise Exception("Description (desc) must be a string: %s.%s - '%s'" % (
section, var, desc))
if not isinstance(req, bool):
raise Exception("Required (req) must be a bool: %s.%s - '%s'" % (section,
var,
req))
if not isinstance(validate, bool):
raise Exception("Validate (validate) must be a bool: %s.%s - '%s'" % (
section, var, validate))
if self.__def[section].has_key(var):
raise Exception("Variable name already defined: '%s'" % var)
else:
self.__def[section][var] = { 'type' : type,
'desc' : desc,
'help' : help,
'default' : default,
'req' : req,
'validate' : validate,
'short' : short }
else:
raise Exception("Section does not exist: '%s'" % section)
def add_defs(self, defList, defOrder=None):
""" Add a series of definitions.
defList = { section0 : ((name0,
type0,
desc0,
help0,
default0,
req0,
validate0,
short0),
....
(nameN,
typeN,
descN,
helpN,
defaultN,
reqN,
validateN,
shortN)),
....
sectionN : ... }
Where the short symbol is optional and can only be one char."""
for section in defList.keys():
self.add_section(section)
for defTuple in defList[section]:
if isinstance(defTuple, tuple):
if len(defTuple) < 7:
raise Exception(
"section %s is missing an element: %s" % (
section, pprint.pformat(defTuple)))
else:
raise Exception("section %s of defList is not a tuple" %
section)
if len(defTuple) == 7:
self.add_def(section, defTuple[0], defTuple[1],
defTuple[2], defTuple[3], defTuple[4],
defTuple[5], defTuple[6])
else:
self.add_def(section, defTuple[0], defTuple[1],
defTuple[2], defTuple[3], defTuple[4],
defTuple[5], defTuple[6], defTuple[7])
if defOrder:
for section in defOrder:
if section in self.__def:
self.__defOrder.append(section)
for section in self.__def:
if not section in defOrder:
raise Exception(
"section %s is missing from specified defOrder." %
section)
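# A minimal sketch (hypothetical section/option names, added for
# illustration) of driving this class:
#
#   defs = definition()
#   defs.add_defs({'hod': (('debug', 'pos_int', 'Debug level.',
#                           True, 3, True, True, 'b'),)},
#                 defOrder=['hod'])
#
# Each inner tuple supplies (name, type, desc, help, default, req,
# validate[, short]) exactly as add_def expects.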
class baseConfig:
def __init__(self, configDef, originalDir=None):
self.__toString = typeToString()
self.__validated = False
self._configDef = configDef
self._options = None
self._mySections = []
self._dict = {}
self.configFile = None
self.__originalDir = originalDir
if self._configDef:
self._mySections = configDef.sections()
def __repr__(self):
"""Returns a string representation of a config object including all
normalizations."""
print_string = '';
for section in self._mySections:
print_string = "%s[%s]\n" % (print_string, section)
options = self._dict[section].keys()
for option in options:
print_string = "%s%s = %s\n" % (print_string, option,
self._dict[section][option])
print_string = "%s\n" % (print_string)
print_string = re.sub("\n\n$", "", print_string)
return print_string
def __getitem__(self, section):
""" Returns a dictionary of configuration name and values by section.
"""
return self._dict[section]
def __setitem__(self, section, value):
self._dict[section] = value
def __iter__(self):
return iter(self._dict)
def has_key(self, section):
status = False
if section in self._dict:
status = True
return status
# Prints configuration error messages
def var_error(self, section, option, *addData):
errorStrings = []
if not self._dict[section].has_key(option):
self._dict[section][option] = None
errorStrings.append("%s: invalid '%s' specified in section %s (--%s.%s): %s" % (
errorPrefix, option, section, section, option, self._dict[section][option]))
if addData:
errorStrings.append("%s: additional info: %s\n" % (errorPrefix,
addData[0]))
return errorStrings
def var_error_suggest(self, errorStrings):
if self.configFile:
errorStrings.append("Check your command line options and/or " + \
"your configuration file %s" % self.configFile)
def __get_args(self, section):
def __dummyToString(type, value):
return value
toString = __dummyToString
if self.__validated:
toString = self.__toString
args = []
if isinstance(self._dict[section], dict):
for option in self._dict[section]:
if section in self._configDef and \
option in self._configDef[section]:
if self._configDef[section][option]['type'] == 'bool':
if self._dict[section][option] == 'True' or \
self._dict[section][option] == True:
args.append("--%s.%s" % (section, option))
else:
args.append("--%s.%s" % (section, option))
args.append(toString(
self._configDef[section][option]['type'],
self._dict[section][option]))
else:
if section in self._configDef:
if self._configDef[section][option]['type'] == 'bool':
if self._dict[section] == 'True' or \
self._dict[section] == True:
args.append("--%s" % section)
else:
if self._dict[section] != 'config':
args.append("--%s" % section)
args.append(toString(self._configDef[section]['type'],
self._dict[section]))
return args
def values(self):
return self._dict.values()
def keys(self):
return self._dict.keys()
def get_args(self, exclude=None, section=None):
"""Retrieve a tuple of config arguments."""
args = []
if section:
args = self.__get_args(section)
else:
for section in self._dict:
if exclude:
if not section in exclude:
args.extend(self.__get_args(section))
else:
args.extend(self.__get_args(section))
return tuple(args)
def verify(self):
"""Verifies each configuration variable, using the configValidator
class, based on its type as defined by the dictionary configDef.
Upon encountering a problem an error is printed to STDERR and
false is returned."""
oldDir = os.getcwd()
if self.__originalDir:
os.chdir(self.__originalDir)
status = True
statusMsgs = []
if self._configDef:
errorCount = 0
configValidator = typeValidator(self.__originalDir)
# foreach section and option by type string as defined in configDef
# add value to be validated to validator
for section in self._mySections:
for option in self._configDef[section].keys():
configVarName = "%s.%s" % (section, option)
if self._dict[section].has_key(option):
if self._configDef[section][option].has_key('validate'):
if self._configDef[section][option]['validate']:
# is the section.option needed to be validated?
configValidator.add(configVarName,
self._configDef[section][option]['type'],
self._dict[section][option])
else:
# If asked not to validate, just normalize
self[section][option] = \
configValidator.normalize(
self._configDef[section][option]['type'],
self._dict[section][option])
if self._configDef[section][option]['default'] != \
None:
self._configDef[section][option]['default'] = \
configValidator.normalize(
self._configDef[section][option]['type'],
self._configDef[section][option]['default']
)
self._configDef[section][option]['default'] = \
self.__toString(
self._configDef[section][option]['type'],
self._configDef[section][option]['default']
)
else:
# This should not happen. Just in case, take this as 'to be validated' case.
configValidator.add(configVarName,
self._configDef[section][option]['type'],
self._dict[section][option])
elif self._configDef[section][option]['req']:
statusMsgs.append("%s: %s.%s is not defined."
% (errorPrefix, section, option))
errorCount = errorCount + 1
configValidator.validate()
for valueInfo in configValidator.validatedInfo:
sectionsOptions = reDot.split(valueInfo['name'])
if valueInfo['isValid'] == 1:
self._dict[sectionsOptions[0]][sectionsOptions[1]] = \
valueInfo['normalized']
else:
if valueInfo['errorData']:
statusMsgs.extend(self.var_error(sectionsOptions[0],
sectionsOptions[1], valueInfo['errorData']))
else:
statusMsgs.extend(self.var_error(sectionsOptions[0],
sectionsOptions[1]))
errorCount = errorCount + 1
if errorCount > 1:
statusMsgs.append( "%s: %s problems found." % (
errorPrefix, errorCount))
self.var_error_suggest(statusMsgs)
status = False
elif errorCount > 0:
statusMsgs.append( "%s: %s problem found." % (
errorPrefix, errorCount))
self.var_error_suggest(statusMsgs)
status = False
self.__validated = True
if self.__originalDir:
os.chdir(oldDir)
return status,statusMsgs
def normalizeValue(self, section, option) :
return typeValidatorInstance.normalize(
self._configDef[section][option]['type'],
self[section][option])
def validateValue(self, section, option):
# Validates a section.option and exits on error
valueInfo = typeValidatorInstance.verify(
self._configDef[section][option]['type'],
self[section][option])
if valueInfo['isValid'] == 1:
return []
else:
if valueInfo['errorData']:
return self.var_error(section, option, valueInfo['errorData'])
else:
return self.var_error(section, option)
class config(SafeConfigParser, baseConfig):
def __init__(self, configFile, configDef=None, originalDir=None,
options=None, checkPerms=False):
"""Constructs config object.
configFile - configuration file to read
configDef - definition object
options - options object
checkPerms - check file permission on config file, 0660
sample configuration file:
[snis]
modules_dir = modules/ ; location of infoModules
md5_defs_dir = etc/md5_defs ; location of infoTree md5 defs
info_store = var/info ; location of nodeInfo store
cam_daemon = localhost:8200 ; cam daemon address"""
SafeConfigParser.__init__(self)
baseConfig.__init__(self, configDef, originalDir)
if(os.path.exists(configFile)):
self.configFile = configFile
else:
raise IOError
self._options = options
## UNUSED CODE : checkPerms is never True
## zim: this code is used if one instantiates config() with checkPerms set to
## True.
if checkPerms: self.__check_perms()
self.read(configFile)
self._configDef = configDef
if not self._configDef:
self._mySections = self.sections()
self.__initialize_config_dict()
def __initialize_config_dict(self):
""" build a dictionary of config vars keyed by section name defined in
configDef, if options defined override config"""
for section in self._mySections:
items = self.items(section)
self._dict[section] = {}
# First fill self._dict with whatever is given in hodrc.
# Going by this, options given at the command line either override
# options in hodrc, or get appended to the list, like for
# hod.client-params. Note that after this dict has _only_ hodrc
# params
for keyValuePair in items:
# stupid commenting bug in the ConfigParser class: lines without an
# option/value pair or section require that ; or # appear at the
# beginning of the line, :(
newValue = reCommentHack.sub("", keyValuePair[1])
newValue = reCommentNewline.sub("", newValue)
self._dict[section][keyValuePair[0]] = newValue
# end of filling with options given in hodrc
# now start filling in command line options
if self._options:
for option in self._configDef[section].keys():
if self._options[section].has_key(option):
# the user has given an option
compoundOpt = "%s.%s" %(section,option)
if ( compoundOpt == \
'gridservice-mapred.final-server-params' \
or compoundOpt == \
'gridservice-hdfs.final-server-params' \
or compoundOpt == \
'gridservice-mapred.server-params' \
or compoundOpt == \
'gridservice-hdfs.server-params' \
or compoundOpt == \
'hod.client-params' ):
if ( compoundOpt == \
'gridservice-mapred.final-server-params' \
or compoundOpt == \
'gridservice-hdfs.final-server-params' ):
overwrite = False
else: overwrite = True
# Append to the current list of values in self._dict
if not self._dict[section].has_key(option):
self._dict[section][option] = ""
dictOpts = reKeyValList.split(self._dict[section][option])
dictOptsKeyVals = {}
for opt in dictOpts:
if opt != '':
# when dict _has_ params from hodrc
if reKeyVal.search(opt):
(key, val) = reKeyVal.split(opt,1)
# we only consider the first '=' for splitting
# we do this to support passing params like
# mapred.child.java.opts=-Djava.library.path=some_dir
# Even in the case of an invalid entry like an unescaped '=',
# we don't want to fail here itself. We leave such errors
# to be caught during validation which happens after this
dictOptsKeyVals[key] = val
else:
# this means an invalid option. Leaving it
#for config.verify to catch
dictOptsKeyVals[opt] = None
cmdLineOpts = reKeyValList.split(self._options[section][option])
for opt in cmdLineOpts:
if reKeyVal.search(opt):
# Same as for hodrc options. only consider
# the first =
( key, val ) = reKeyVal.split(opt,1)
else:
key = opt
val = None
# whatever is given at cmdline overrides
# what is given in hodrc only for non-final params
if dictOptsKeyVals.has_key(key):
if overwrite:
dictOptsKeyVals[key] = val
else: dictOptsKeyVals[key] = val
self._dict[section][option] = ""
for key in dictOptsKeyVals:
if self._dict[section][option] == "":
if dictOptsKeyVals[key]:
self._dict[section][option] = key + "=" + \
dictOptsKeyVals[key]
else: #invalid option. let config.verify catch
self._dict[section][option] = key
else:
if dictOptsKeyVals[key]:
self._dict[section][option] = \
self._dict[section][option] + "," + key + \
"=" + dictOptsKeyVals[key]
else: #invalid option. let config.verify catch
self._dict[section][option] = \
self._dict[section][option] + "," + key
else:
# for rest of the options, that don't need
# appending business.
# options = cmdline opts + defaults
# dict = hodrc opts only
# only non default opts can overwrite any opt
# currently in dict
if not self._dict[section].has_key(option):
# options not mentioned in hodrc
self._dict[section][option] = \
self._options[section][option]
elif self._configDef[section][option]['default'] != \
self._options[section][option]:
# option mentioned in hodrc but user has given a
# non-default option
self._dict[section][option] = \
self._options[section][option]
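# Illustrative example of the precedence rules above (added note): with
# hod.client-params set to 'a=1,b=2' in hodrc and --hod.client-params
# 'b=3,c=4' on the command line, the merged value contains a=1, b=3 and
# c=4 (the command line wins for non-final params); for the
# *final-server-params options, values already present in hodrc are kept
# and only new keys from the command line are appended.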
## UNUSED METHOD
## zim: is too :)
def __check_perms(self):
perms = None
if self._options:
try:
perms = get_perms(self.configFile)
except OSError, data:
self._options.print_help()
raise Exception("*** could not find config file: %s" % data)
sys.exit(1)
else:
perms = get_perms(self.configFile)
if perms != requiredPerms:
error = "*** '%s' has invalid permission: %s should be %s\n" % \
(self.configFile, perms, requiredPerms)
raise Exception( error)
sys.exit(1)
def replace_escape_seqs(self):
""" replace any escaped characters """
replace_escapes(self)
class formatter(IndentedHelpFormatter):
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [sopt
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class options(OptionParser, baseConfig):
def __init__(self, optionDef, usage, version, originalDir=None,
withConfig=False, defaultConfig=None, defaultLocation=None,
name=None):
"""Constructs and options object.
optionDef - definition object
usage - usage statement
version - version string
withConfig - used in conjunction with a configuration file
defaultConfig - default configuration file
"""
OptionParser.__init__(self, usage=usage)
baseConfig.__init__(self, optionDef, originalDir)
self.formatter = formatter(4, max_help_position=100, width=180,
short_first=1)
self.__name = name
self.__version = version
self.__withConfig = withConfig
self.__defaultConfig = defaultConfig
self.__defaultLoc = defaultLocation
self.args = []
self.__optionList = []
self.__compoundOpts = []
self.__shortMap = {}
self.__alphaString = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
self.__alpha = []
self.__parsedOptions = {}
self.__reserved = [ 'h' ]
self.__orig_grps = []
self.__orig_grp_lists = {}
self.__orig_option_list = []
self.__display_grps = []
self.__display_grp_lists = {}
self.__display_option_list = []
self.config = None
if self.__withConfig:
self.__reserved.append('c')
self.__reserved.append('v')
self.__gen_alpha()
# build self.__optionList, so it contains all the options that are
# possible. the list elements are of the form section.option
for section in self._mySections:
if self.__withConfig and section == 'config':
raise Exception(
"withConfig set 'config' cannot be used as a section name")
for option in self._configDef[section].keys():
if '.' in option:
raise Exception("Options cannot contain: '.'")
elif self.__withConfig and option == 'config':
raise Exception(
"With config set, option config is not allowed.")
elif self.__withConfig and option == 'verbose-help':
raise Exception(
"With config set, option verbose-help is not allowed.")
self.__optionList.append(self.__splice_compound(section,
option))
self.__build_short_map()
self.__add_options()
self.__init_display_options()
(self.__parsedOptions, self.args) = self.parse_args()
# Now process the positional arguments only for the client side
if self.__name == 'hod':
hodhelp = hodHelp()
_operation = getattr(self.__parsedOptions,'hod.operation')
_script = getattr(self.__parsedOptions, 'hod.script')
nArgs = self.args.__len__()
if _operation:
# -o option is given
if nArgs != 0:
self.error('invalid syntax : command and operation(-o) cannot coexist')
elif nArgs == 0 and _script:
# for a script option, without subcommand: hod -s script ...
pass
elif nArgs == 0:
print "Usage: ",hodhelp.help()
sys.exit(0)
else:
# subcommand is given
cmdstr = self.args[0] # the subcommand itself
cmdlist = hodhelp.ops
if cmdstr not in cmdlist:
print "Usage: ", hodhelp.help()
sys.exit(2)
numNodes = None
clusterDir = None
# Check which subcommand. cmdstr = subcommand itself now.
if cmdstr == "allocate":
clusterDir = getattr(self.__parsedOptions, 'hod.clusterdir')
numNodes = getattr(self.__parsedOptions, 'hod.nodecount')
if not clusterDir or not numNodes:
print hodhelp.usage(cmdstr)
sys.exit(3)
cmdstr = cmdstr + ' ' + clusterDir + ' ' + numNodes
setattr(self.__parsedOptions,'hod.operation', cmdstr)
elif cmdstr == "deallocate" or cmdstr == "info":
clusterDir = getattr(self.__parsedOptions, 'hod.clusterdir')
if not clusterDir:
print hodhelp.usage(cmdstr)
sys.exit(3)
cmdstr = cmdstr + ' ' + clusterDir
setattr(self.__parsedOptions,'hod.operation', cmdstr)
elif cmdstr == "list":
setattr(self.__parsedOptions,'hod.operation', cmdstr)
pass
elif cmdstr == "script":
clusterDir = getattr(self.__parsedOptions, 'hod.clusterdir')
numNodes = getattr(self.__parsedOptions, 'hod.nodecount')
originalDir = getattr(self.__parsedOptions, 'hod.original-dir')
if originalDir and clusterDir:
self.remove_exit_code_file(originalDir, clusterDir)
if not _script or not clusterDir or not numNodes:
print hodhelp.usage(cmdstr)
sys.exit(3)
pass
elif cmdstr == "help":
if nArgs == 1:
self.print_help()
sys.exit(0)
elif nArgs != 2:
self.print_help()
sys.exit(3)
elif self.args[1] == 'options':
self.print_options()
sys.exit(0)
cmdstr = cmdstr + ' ' + self.args[1]
setattr(self.__parsedOptions,'hod.operation', cmdstr)
# end of processing for arguments on the client side
if self.__withConfig:
self.config = self.__parsedOptions.config
if not self.config:
self.error("configuration file must be specified")
if not os.path.isabs(self.config):
# A relative path. Append the original directory which would be the
# current directory at the time of launch
try:
origDir = getattr(self.__parsedOptions, 'hod.original-dir')
if origDir is not None:
self.config = os.path.join(origDir, self.config)
self.__parsedOptions.config = self.config
except AttributeError, e:
self.error("hod.original-dir is not defined.\
Cannot get current directory")
if not os.path.exists(self.config):
if self.__defaultLoc and not re.search("/", self.config):
self.__parsedOptions.config = os.path.join(
self.__defaultLoc, self.config)
self.__build_dict()
def norm_cluster_dir(self, orig_dir, directory):
directory = os.path.expanduser(directory)
if not os.path.isabs(directory):
directory = os.path.join(orig_dir, directory)
directory = os.path.abspath(directory)
return directory
def remove_exit_code_file(self, orig_dir, dir):
try:
dir = self.norm_cluster_dir(orig_dir, dir)
if os.path.exists(dir):
exit_code_file = os.path.join(dir, "script.exitcode")
if os.path.exists(exit_code_file):
os.remove(exit_code_file)
except:
print >>sys.stderr, "Could not remove the script.exitcode file."
def __init_display_options(self):
self.__orig_option_list = self.option_list[:]
optionListTitleMap = {}
for option in self.option_list:
optionListTitleMap[option._long_opts[0]] = option
self.__orig_grps = self.option_groups[:]
for group in self.option_groups:
self.__orig_grp_lists[group.title] = group.option_list[:]
groupTitleMap = {}
optionTitleMap = {}
for group in self.option_groups:
groupTitleMap[group.title] = group
optionTitleMap[group.title] = {}
for option in group.option_list:
(sectionName, optionName) = \
self.__split_compound(option._long_opts[0])
optionTitleMap[group.title][optionName] = option
for section in self._mySections:
for option in self._configDef[section]:
if self._configDef[section][option]['help']:
if groupTitleMap.has_key(section):
if not self.__display_grp_lists.has_key(section):
self.__display_grp_lists[section] = []
self.__display_grp_lists[section].append(
optionTitleMap[section][option])
try:
self.__display_option_list.append(
optionListTitleMap["--" + self.__splice_compound(
section, option)])
except KeyError:
pass
try:
self.__display_option_list.append(optionListTitleMap['--config'])
except KeyError:
pass
self.__display_option_list.append(optionListTitleMap['--help'])
self.__display_option_list.append(optionListTitleMap['--verbose-help'])
self.__display_option_list.append(optionListTitleMap['--version'])
self.__display_grps = self.option_groups[:]
for section in self._mySections:
if self.__display_grp_lists.has_key(section):
self.__orig_grp_lists[section] = \
groupTitleMap[section].option_list
else:
try:
self.__display_grps.remove(groupTitleMap[section])
except KeyError:
pass
def __gen_alpha(self):
assignedOptions = []
for section in self._configDef:
for option in self._configDef[section]:
if self._configDef[section][option]['short']:
assignedOptions.append(
self._configDef[section][option]['short'])
for symbol in self.__alphaString:
if not symbol in assignedOptions:
self.__alpha.append(symbol)
def __splice_compound(self, section, option):
return "%s.%s" % (section, option)
def __split_compound(self, compound):
return compound.split('.')
def __build_short_map(self):
""" build a short_map of parametername : short_option. This is done
only for those parameters that don't have short options already
defined in configDef.
If possible, the first letter in the option that is not already
used/reserved as a short option is allotted. Otherwise the first
letter in __alpha that isn't still used is allotted.
e.g. { 'hodring.java-home': 'T', 'resource_manager.batch-home': 'B' }
"""
optionsKey = {}
for compound in self.__optionList:
(section, option) = self.__split_compound(compound)
if not optionsKey.has_key(section):
optionsKey[section] = []
optionsKey[section].append(option)
for section in self._configDef.sections():
options = optionsKey[section]
options.sort()
for option in options:
if not self._configDef[section][option]['short']:
compound = self.__splice_compound(section, option)
shortOptions = self.__shortMap.values()
for i in range(0, len(option)):
letter = option[i]
letter = letter.lower()
if letter in self.__alpha:
if not letter in shortOptions and \
not letter in self.__reserved:
self.__shortMap[compound] = letter
break
if not self.__shortMap.has_key(compound):
for i in range(0, len(self.__alpha)):
letter = self.__alpha[i]
if not letter in shortOptions and \
not letter in self.__reserved:
self.__shortMap[compound] = letter
def __add_option(self, config, compoundOpt, section, option, group=None):
addMethod = self.add_option
if group: addMethod=group.add_option
self.__compoundOpts.append(compoundOpt)
    if compoundOpt in ('gridservice-mapred.final-server-params',
                       'gridservice-hdfs.final-server-params',
                       'gridservice-mapred.server-params',
                       'gridservice-hdfs.server-params',
                       'hod.client-params'):
_action = 'append'
elif config[section][option]['type'] == 'bool':
_action = 'store_true'
else:
_action = 'store'
if self.__shortMap.has_key(compoundOpt):
addMethod("-" + self.__shortMap[compoundOpt],
"--" + compoundOpt, dest=compoundOpt,
action= _action,
metavar=config[section][option]['type'],
default=config[section][option]['default'],
help=config[section][option]['desc'])
else:
if config[section][option]['short']:
addMethod("-" + config[section][option]['short'],
"--" + compoundOpt, dest=compoundOpt,
action= _action,
metavar=config[section][option]['type'],
default=config[section][option]['default'],
help=config[section][option]['desc'])
else:
addMethod('', "--" + compoundOpt, dest=compoundOpt,
action= _action,
metavar=config[section][option]['type'],
default=config[section][option]['default'],
help=config[section][option]['desc'])
def __add_options(self):
if self.__withConfig:
self.add_option("-c", "--config", dest='config',
action='store', default=self.__defaultConfig,
metavar='config_file',
help="Full path to configuration file.")
self.add_option("", "--verbose-help",
action='help', default=None,
metavar='flag',
help="Display verbose help information.")
self.add_option("-v", "--version",
action='version', default=None,
metavar='flag',
help="Display version information.")
self.version = self.__version
if len(self._mySections) > 1:
for section in self._mySections:
group = OptionGroup(self, section)
for option in self._configDef[section]:
compoundOpt = self.__splice_compound(section, option)
self.__add_option(self._configDef, compoundOpt, section,
option, group)
self.add_option_group(group)
else:
for section in self._mySections:
for option in self._configDef[section]:
compoundOpt = self.__splice_compound(section, option)
self.__add_option(self._configDef, compoundOpt, section,
option)
def __build_dict(self):
if self.__withConfig:
self._dict['config'] = str(getattr(self.__parsedOptions, 'config'))
for compoundOption in dir(self.__parsedOptions):
if compoundOption in self.__compoundOpts:
(section, option) = self.__split_compound(compoundOption)
if not self._dict.has_key(section):
self._dict[section] = {}
if getattr(self.__parsedOptions, compoundOption):
_attr = getattr(self.__parsedOptions, compoundOption)
# when we have multi-valued parameters passed separately
# from command line, python optparser pushes them into a
# list. So converting all such lists to strings
        if isinstance(_attr, list):
          _attr = ','.join(_attr)
self._dict[section][option] = _attr
for section in self._configDef:
for option in self._configDef[section]:
if self._configDef[section][option]['type'] == 'bool':
compoundOption = self.__splice_compound(section, option)
if not self._dict.has_key(section):
self._dict[section] = {}
if option not in self._dict[section]:
self._dict[section][option] = False
def __set_display_groups(self):
if not '--verbose-help' in sys.argv:
self.option_groups = self.__display_grps
self.option_list = self.__display_option_list
for group in self.option_groups:
group.option_list = self.__display_grp_lists[group.title]
def __unset_display_groups(self):
if not '--verbose-help' in sys.argv:
self.option_groups = self.__orig_grps
self.option_list = self.__orig_option_list
for group in self.option_groups:
group.option_list = self.__orig_grp_lists[group.title]
def print_help(self, file=None):
self.__set_display_groups()
OptionParser.print_help(self, file)
self.__unset_display_groups()
def print_options(self):
_usage = self.usage
self.set_usage('')
self.print_help()
self.set_usage(_usage)
def verify(self):
return baseConfig.verify(self)
def replace_escape_seqs(self):
replace_escapes(self)
| apache-2.0 |
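The __build_short_map docstring above describes how short options are allotted. A minimal standalone sketch of that rule follows; the function and its arguments ('taken', 'reserved', 'alpha') are hypothetical stand-ins for the parser's internal state, not part of the original module:

def allot_short_option(option, taken, reserved, alpha):
    # First pass: the first usable letter of the option name itself.
    for letter in option.lower():
        if letter in alpha and letter not in taken and letter not in reserved:
            return letter
    # Fallback: the first free letter of the allowed alphabet.
    for letter in alpha:
        if letter not in taken and letter not in reserved:
            return letter
    return None

# e.g. allot_short_option('java-home', taken=set('jc'), reserved=set('h'),
#                         alpha='abcdefgikl') -> 'a'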
tedlaz/pyted | tedutil/db_context_manager.py | 1 | 5076 | '''
Module db_context_manager.py
Connect to an SQLite database and perform CRUD operations
'''
import sqlite3
import os
PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
print(PATH)
def grup(txtv):
'''
    Transforms a string to uppercase, tailored for comparing Greek text
'''
ar1 = u"αάΆΑβγδεέΈζηήΉθιίϊΊκλμνξοόΌπρσςτυύΎφχψωώΏ"
ar2 = u"ΑΑΑΑΒΓΔΕΕΕΖΗΗΗΘΙΙΙΙΚΛΜΝΞΟΟΟΠΡΣΣΤΥΥΥΦΧΨΩΩΩ"
ftxt = u''
for letter in txtv:
if letter in ar1:
ftxt += ar2[ar1.index(letter)]
else:
ftxt += letter.upper()
return ftxt
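# Illustrative check, not part of the original module: grup() folds accents
# and the final sigma, so accented lowercase Greek compares equal to its
# plain uppercase form. The sample words are assumptions chosen for the demo.
def _grup_demo():
    assert grup('σοφός') == 'ΣΟΦΟΣ'   # ό -> Ο, ς -> Σ
    assert grup('abc') == 'ABC'       # non-Greek letters just get upper()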
class OpenSqlite:
'''
Context manager class
Use it as:
    with OpenSqlite(dbfilename) as db:
your code here ...
'''
def __init__(self, dbfile):
self.dbf = dbfile
self.active = False
self.con = None
self.cur = None
def __enter__(self):
self.con = sqlite3.connect(self.dbf)
self.con.create_function("grup", 1, grup)
self.cur = self.con.cursor()
self.active = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.active:
self.cur.close()
self.con.close()
def script(self, sqlscript):
"""Execute an sql script against self.dbf"""
self.con.executescript(sqlscript)
return True
def application_id(self):
'''Get application_id from database file'''
sql = 'PRAGMA application_id;'
try:
rws = self.select(sql)
return rws[0][0]
        except Exception:
return -9
def set_application_id(self, idv):
'''Set application_id to database file'''
self.script('PRAGMA application_id = %s;' % idv)
def user_version(self):
'''Get user_version from database file'''
sql = 'PRAGMA user_version;'
try:
rws = self.select(sql)
return rws[0][0]
        except Exception:
return -9
def set_user_version(self, version):
'''Set user_version to database file'''
self.script('PRAGMA user_version = %s;' % version)
def select(self, sql):
'''Get a list of tuples with data'''
self.cur.execute(sql)
rows = self.cur.fetchall()
return rows
def select_with_names(self, sql):
'''Get a tuple with column names and a list of tuples with data'''
self.cur.execute(sql)
column_names = tuple([t[0] for t in self.cur.description])
rows = self.cur.fetchall()
return column_names, rows
def select_as_dict(self, sql):
'''Get a list of dictionaries [{}, {}, ...]'''
self.cur.execute(sql)
column_names = [t[0] for t in self.cur.description]
rows = self.cur.fetchall()
diclist = []
for row in rows:
dic = {}
for i, col in enumerate(row):
dic[column_names[i]] = col
diclist.append(dic)
diclen = len(diclist)
if diclen > 0:
return diclist
return [{}]
def select_master_detail_as_dic(self,
idv,
tablemaster,
tabledetail=None,
id_at_end=True):
'''
Get a specific record from table tablemaster with id = idv
If we pass a tabledetail value it gets detail records too
idv : id value of record
tablemaster : Master table name
tabledetail : Detail table name
id_at_end : If True Foreign key is like <masterTable>_id
else is like id_<masterTable>
'''
if id_at_end:
fkeytemplate = '%s_id'
else:
fkeytemplate = 'id_%s'
id_field = fkeytemplate % tablemaster
sql1 = "SELECT * FROM %s WHERE id='%s'" % (tablemaster, idv)
sql2 = "SELECT * FROM %s WHERE %s='%s'" % (tabledetail, id_field, idv)
dic = self.select_as_dict(sql1)[0]
ldic = len(dic)
if ldic == 0:
return dic
if tabledetail:
dic['zlines'] = self.select_as_dict(sql2)
# Remove id_field key
for elm in dic['zlines']:
del elm[id_field]
return dic
if __name__ == '__main__':
DBPATH = '/home/tedlaz/tedfiles/prj/2017/2017a.sql3'
with OpenSqlite(DBPATH) as db:
print(db.select('select * from lmo limit 2;'))
print(db.select_as_dict('select * from vtr_trd limit 10;'))
print(db.select_with_names('select * from lmo limit 2;'))
# print(db.script('PRAGMA application_id = 20170313;'))
print(db.application_id())
print(db.user_version())
print(db.select_master_detail_as_dic(1, 'tr', 'trd', False))
print(db.select_master_detail_as_dic(20, 'tr'))
print(db.select_master_detail_as_dic(200000, 'tr'))
print(db.select_master_detail_as_dic(200000, 'tr', 'trd', False))
| gpl-3.0 |
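A minimal usage sketch for the OpenSqlite context manager above, assuming an in-memory database and a hypothetical table `t` (neither comes from the original project), and that the module is importable from its repo path as tedutil.db_context_manager:

from tedutil.db_context_manager import OpenSqlite

with OpenSqlite(':memory:') as db:
    db.script("CREATE TABLE t(id INTEGER PRIMARY KEY, name TEXT);"
              "INSERT INTO t(name) VALUES ('alpha'), ('beta');")
    print(db.select_with_names('SELECT * FROM t'))
    # -> (('id', 'name'), [(1, 'alpha'), (2, 'beta')])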
baroquebobcat/pants | src/python/pants/backend/jvm/targets/jvm_target.py | 2 | 9510 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from twitter.common.collections import OrderedSet
from pants.backend.jvm.subsystems.java import Java
from pants.backend.jvm.subsystems.jvm_platform import JvmPlatform
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jarable import Jarable
from pants.base.payload import Payload
from pants.base.payload_field import ExcludesField, PrimitiveField, SetOfPrimitivesField
from pants.build_graph.resources import Resources
from pants.build_graph.target import Target
from pants.java.jar.exclude import Exclude
from pants.util.memo import memoized_property
class JvmTarget(Target, Jarable):
"""A base class for all java module targets that provides path and dependency translation.
:API: public
"""
@classmethod
def subsystems(cls):
return super(JvmTarget, cls).subsystems() + (Java, JvmPlatform)
def __init__(self,
address=None,
payload=None,
sources=None,
provides=None,
excludes=None,
services=None,
platform=None,
strict_deps=None,
exports=None,
fatal_warnings=None,
zinc_file_manager=None,
# Some subclasses can have both .java and .scala sources
# (e.g., JUnitTests, JvmBinary, even ScalaLibrary), so it's convenient
# to have both plugins settings here, even though for other subclasses
# (e.g., JavaLibrary) only one will be relevant.
javac_plugins=None,
javac_plugin_args=None,
scalac_plugins=None,
scalac_plugin_args=None,
**kwargs):
"""
:API: public
:param excludes: List of `exclude <#exclude>`_\s to filter this target's
transitive dependencies against.
:param sources: Source code files to build. Paths are relative to the BUILD
file's directory.
:type sources: ``Fileset`` (from globs or rglobs) or list of strings
:param services: A dict mapping service interface names to the classes owned by this target
that implement them. Keys are fully qualified service class names, values are
lists of strings, each string the fully qualified class name of a class owned
by this target that implements the service interface and should be
discoverable by the jvm service provider discovery mechanism described here:
https://docs.oracle.com/javase/6/docs/api/java/util/ServiceLoader.html
:param str platform: The name of the platform (defined under the jvm-platform subsystem) to use
for compilation (that is, a key into the --jvm-platform-platforms
dictionary). If unspecified, the platform will default to the first one of
these that exist: (1) the default_platform specified for jvm-platform,
(2) a platform constructed from whatever java version is returned by
DistributionLocator.cached().version.
:param bool strict_deps: When True, only the directly declared deps of the target will be used
at compilation time. This enforces that all direct deps of the target
are declared, and can improve compilation speed due to smaller
classpaths. Transitive deps are always provided at runtime.
:param list exports: A list of exported targets, which will be accessible to dependents even
                         with strict_deps turned on. A common use case is for a library target
                         to export dependencies that it knows its dependents will need. Any
                         dependents of that library target will then have access to those
                         dependencies even when strict_deps is True. Note: exports is
                         transitive, which means dependents have access to the closure of
                         exports. For example, if A exports B, and B exports C, then any
                         target that depends on A will have access to both B and C.
:param bool fatal_warnings: Whether to turn warnings into errors for this target. If present,
takes priority over the language's fatal-warnings option.
:param bool zinc_file_manager: Whether to use zinc provided file manager that allows
transactional rollbacks, but in certain cases may conflict with
user libraries.
:param javac_plugins: names of compiler plugins to use when compiling this target with javac.
:param dict javac_plugin_args: Map from javac plugin name to list of arguments for that plugin.
:param scalac_plugins: names of compiler plugins to use when compiling this target with scalac.
:param dict scalac_plugin_args: Map from scalac plugin name to list of arguments for that plugin.
"""
self.address = address # Set in case a TargetDefinitionException is thrown early
payload = payload or Payload()
excludes = ExcludesField(self.assert_list(excludes, expected_type=Exclude, key_arg='excludes'))
payload.add_fields({
'sources': self.create_sources_field(sources, address.spec_path, key_arg='sources'),
'provides': provides,
'excludes': excludes,
'platform': PrimitiveField(platform),
'strict_deps': PrimitiveField(strict_deps),
'exports': SetOfPrimitivesField(exports),
'fatal_warnings': PrimitiveField(fatal_warnings),
'zinc_file_manager': PrimitiveField(zinc_file_manager),
'javac_plugins': SetOfPrimitivesField(javac_plugins),
'javac_plugin_args': PrimitiveField(javac_plugin_args),
'scalac_plugins': SetOfPrimitivesField(scalac_plugins),
'scalac_plugin_args': PrimitiveField(scalac_plugin_args),
})
super(JvmTarget, self).__init__(address=address, payload=payload, **kwargs)
# Service info is only used when generating resources, it should not affect, for example, a
    # compile fingerprint or javadoc fingerprint. As such, it's not a payload field.
self._services = services or {}
@property
def strict_deps(self):
"""If set, whether to limit compile time deps to those that are directly declared.
:return: See constructor.
:rtype: bool or None
"""
return self.payload.strict_deps
@property
def export_specs(self):
return self.payload.exports
@property
def fatal_warnings(self):
"""If set, overrides the platform's default fatal_warnings setting.
:return: See constructor.
:rtype: bool or None
"""
return self.payload.fatal_warnings
@property
def zinc_file_manager(self):
"""If false, the default file manager will be used instead of the zinc provided one.
:return: See constructor.
:rtype: bool or None
"""
return self.payload.zinc_file_manager
@property
def javac_plugins(self):
"""The names of compiler plugins to use when compiling this target with javac.
:return: See constructor.
:rtype: list of strings.
"""
return self.payload.javac_plugins
@property
def javac_plugin_args(self):
"""Map from javac plugin name to list of args for that plugin.
:return: See constructor.
:rtype: map from string to list of strings.
"""
return self.payload.javac_plugin_args
@property
def scalac_plugins(self):
"""The names of compiler plugins to use when compiling this target with scalac.
:return: See constructor.
:rtype: list of strings.
"""
return self.payload.scalac_plugins
@property
def scalac_plugin_args(self):
"""Map from scalac plugin name to list of args for that plugin.
:return: See constructor.
:rtype: map from string to list of strings.
"""
return self.payload.scalac_plugin_args
@property
def platform(self):
"""Platform associated with this target.
:return: The jvm platform object.
:rtype: JvmPlatformSettings
"""
return JvmPlatform.global_instance().get_platform_for_target(self)
@memoized_property
def jar_dependencies(self):
return OrderedSet(self.get_jar_dependencies())
def mark_extra_invalidation_hash_dirty(self):
del self.jar_dependencies
def get_jar_dependencies(self):
jar_deps = OrderedSet()
def collect_jar_deps(target):
if isinstance(target, JarLibrary):
jar_deps.update(target.payload.jars)
self.walk(work=collect_jar_deps)
return jar_deps
@property
def has_resources(self):
return len(self.resources) > 0
@property
def provides(self):
return self.payload.provides
@property
def resources(self):
# TODO: We should deprecate this method, but doing so will require changes to JVM publishing.
# see https://github.com/pantsbuild/pants/issues/4568
return [dependency for dependency in self.dependencies if isinstance(dependency, Resources)]
@property
def excludes(self):
return self.payload.excludes
@property
def services(self):
return self._services
| apache-2.0 |
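A hedged BUILD-file sketch of the strict_deps/exports semantics documented in the constructor above; the target names and sources are hypothetical, and java_library is used as a representative JvmTarget subclass:

# BUILD (hypothetical)
java_library(name='c', sources=['C.java'])
java_library(name='b', sources=['B.java'], dependencies=[':c'], exports=[':c'])
java_library(name='a', sources=['A.java'], dependencies=[':b'], exports=[':b'])

# Because exports are transitive, 'app' compiles against B and C even though
# strict_deps limits it to its directly declared dependency ':a'.
java_library(name='app', sources=['App.java'], dependencies=[':a'],
             strict_deps=True)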
Erotemic/local | build_scripts/custom_fletch.py | 1 | 5960 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from os.path import dirname # NOQA
import sys
def disable_packages(pkgname):
if pkgname == 'OpenBLAS':
"""
PKGNAME=OpenBLAS
PKGNAME=Zlib
find build/src/ -iname CMakeCache.txt -delete
rm -rf build/src/$PKGNAME*
rm -rf build/tmp/$PKGNAME*
rm -rf ${CMAKE_BUILD_DIR}/build/src/${PKGNAME}*
rm -rf ${CMAKE_BUILD_DIR}/build/tmp/${PKGNAME}*
REMOVE CMAKE VARS ${PKGNAME}_*
"""
        pass
pass
def kwiver():
import utool as ut
ut.codeblock(
r'''
# STARTBLOCK bash
git checkout master
cd ~/code/kwiver
rm -rf ~/code/kwiver/build-py2-nocuda
mkdir -p build-py2-nocuda
cd ~/code/kwiver/build-py2-nocuda
cmake -G "Unix Makefiles" \
-D KWIVER_ENABLE_ARROWS:BOOL=True \
-D KWIVER_ENABLE_C_BINDINGS:BOOL=True \
-D KWIVER_ENABLE_PYTHON:BOOL=True \
-D KWIVER_ENABLE_TESTS:BOOL=True \
-D PYTHON_VERSION=$(python -c "import sys; print(sys.version[0:3])") \
-D fletch_DIR:PATH=~/code/fletch/build-py2-nocuda/ \
~/code/kwiver
''')
def rebase_python3_support():
import utool as ut
ut.codeblock(
r'''
# STARTBLOCK bash
cd ~/code/fletch
git checkout master
# blow away old branch
git branch -D tmp/pre-python3-support
# Recreate the branch
git checkout -b tmp/pre-python3-support
# Merge all prereqs into this branch
git merge dev/find_numpy dev/update-openblas-0.2.20 dev/update-opencv dev/update-vtk dev/update-caffe --no-edit
# or could do it one at a time, but w/e
# git merge dev/find_numpy
# git merge dev/update-openblas-0.2.20 --no-edit
# git merge dev/update-opencv --no-edit
# git merge dev/update-vtk --no-edit
git checkout dev/python3-support
# Find the oldest merge branch after master
# This should be the old tmp/pre-python3-support
OLD_MERGE_POINT=$(python -c "import sys; print(sys.argv[-1])" $(git rev-list --min-parents=2 HEAD ^master))
# Check to make sure its the merge point
git log -n 1 $OLD_MERGE_POINT
echo "$OLD_MERGE_POINT"
# Find the most recent merge
# echo $(python -c "import sys; print(sys.argv[-1])" $(git rev-list --min-parents=1 HEAD ^master))
git checkout tmp/pre-python3-support
git checkout -b tmp/rebased-python3-support
# These should be the relevant python3 commits
git log $OLD_MERGE_POINT..dev/python3-support
# Move all the relevant python3-support commits onto the new pre-python3-support
git cherry-pick $OLD_MERGE_POINT..dev/python3-support
git rebase --onto tmp/rebased-python3-support $OLD_MERGE_POINT
git checkout dev/python3-support
git reset --hard tmp/rebased-python3-support
git push --force
git checkout tmp/pre-python3-support
git push --force
cd ~/code/fletch-expt
git checkout master
git branch -D dev/python3-support
git branch -D tmp/pre-python3-support
git checkout dev/python3-support
# git checkout dev/python3-support
# git checkout -b backup-py3-support
# git checkout dev/python3-support
# git merge --strategy-option=theirs tmp/pre-python3-support
# git rebase -i --strategy-option=theirs tmp/pre-python3-support
# ENDBLOCK bash
''')
pass
def cuda_fletch():
"""
# Find cuda version
nvcc --version
8.0
# Find cudnn version
cat /usr/include/cudnn.h | grep CUDNN_Major -A 2
6.0
ldconfig -p | grep libcuda
ldconfig -p | grep libcudnn
"""
def generate_and_make(repo_dpath, **kwargs):
import utool as ut
cmake_vars = {
# build with
'fletch_BUILD_WITH_PYTHON': True,
'fletch_BUILD_WITH_MATLAB': False,
'fletch_BUILD_WITH_CUDA': False,
'fletch_BUILD_WITH_CUDNN': False,
# select version
'OpenCV_SELECT_VERSION': '3.1.0',
'VTK_SELECT_VERSION': '6.2.0',
'fletch_PYTHON_VERSION': sys.version[0:3],
'PYTHON_EXECUTABLE': sys.executable,
}
ut.update_existing(cmake_vars, kwargs)
DISABLED_LIBS = [ # NOQA
'ITK',
]
VTK_LIBS = [
'VTK',
'TinyXML',
'libxml2',
'Qt',
]
ENABLED_LIBS = [
'Boost', 'Caffe', 'Ceres', 'Eigen', 'FFmpeg', 'GeographicLib',
'GFlags', 'GLog', 'HDF5', 'jom', 'LevelDB', 'libjpeg-turbo', 'libjson',
'libkml', 'libtiff', 'LMDB', 'log4cplus', 'OpenBLAS', 'OpenCV',
'OpenCV_contrib', 'PNG', 'PROJ4', 'Protobuf', 'shapelib', 'Snappy',
'SuiteSparse', 'VXL', 'yasm', 'ZLib',
] + VTK_LIBS
lines = ['cmake -G "Unix Makefiles" -D CMAKE_BUILD_TYPE=RELEASE']
lines += ['-D fletch_ENABLE_{}=True'.format(lib) for lib in ENABLED_LIBS]
lines += ['-D {}={}'.format(key, val) for key, val in cmake_vars.items()]
lines += [repo_dpath]
command = ' '.join(lines)
print(command)
if False:
# import utool as ut
# cmake_retcode = ut.cmd2(command, verbose=True)['ret']
cmake_retcode = os.system(command)
if cmake_retcode == 0:
os.system('make -j9')
if __name__ == '__main__':
r"""
CommandLine:
python ~/local/build_scripts/custom_fletch.py
"""
# repo_dpath = '~/code/fletch'
# repo_dpath = dirname(__file__)
repo_dpath = os.getcwd()
if repo_dpath.endswith('fletch-expt'):
kwargs = dict(
OpenCV_SELECT_VERSION='3.2.0',
VTK_SELECT_VERSION='8.0',
)
generate_and_make(repo_dpath, **kwargs)
elif repo_dpath.endswith('fletch'):
generate_and_make(repo_dpath)
| gpl-3.0 |
vidyar/testing-yml | setup.py | 1 | 1647 | # dockerpty.
#
# Copyright 2014 Chris Corbyn <chris@w3style.co.uk>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import os
def fopen(filename):
return open(os.path.join(os.path.dirname(__file__), filename))
def read(filename):
return fopen(filename).read()
setup(
name='dockerpty',
version='0.1.1',
description='Python library to use the pseudo-tty of a docker container',
long_description=read('README.md'),
url='https://github.com/d11wtq/dockerpty',
author='Chris Corbyn',
author_email='chris@w3style.co.uk',
license='Apache 2.0',
keywords='docker, tty, pty, terminal',
packages=['dockerpty'],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Environment :: Console',
'Intended Audience :: Developers',
'Topic :: Terminals',
'Topic :: Terminals :: Terminal Emulators/X Terminals',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| apache-2.0 |
apagac/cfme_tests | cfme/tests/networks/test_sdn_downloads.py | 2 | 2791 | import random
import pytest
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.utils.appliance.implementations.ui import navigate_to
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.provider([AzureProvider, EC2Provider, GCEProvider, OpenStackProvider],
scope="module")
]
extensions_mapping = {'txt': 'Text', 'csv': 'CSV', 'pdf': 'PDF'}
OBJECTCOLLECTIONS = [
'network_providers',
'balancers',
'cloud_networks',
'network_ports',
'network_security_groups',
'network_subnets',
'network_routers',
]
def download(objecttype, extension):
view = navigate_to(objecttype, 'All')
if view.browser.product_version >= '5.10' and extension == 'pdf':
view.toolbar.download.item_select("Print or export as PDF")
handle_extra_tabs(view)
else:
view.toolbar.download.item_select("Download as {}".format(extensions_mapping[extension]))
def download_summary(spec_object):
view = navigate_to(spec_object, 'Details')
view.toolbar.download.click()
if view.browser.product_version >= '5.10':
handle_extra_tabs(view)
def handle_extra_tabs(view):
tabs = view.browser.selenium.window_handles
while len(tabs) > 1:
view.browser.selenium.switch_to_window(tabs[-1])
view.browser.selenium.close()
tabs = view.browser.selenium.window_handles
view.browser.selenium.switch_to_window(tabs[0])
@pytest.mark.parametrize("filetype", list(extensions_mapping.keys()))
@pytest.mark.parametrize("collection_type", OBJECTCOLLECTIONS)
def test_download_lists_base(filetype, collection_type, appliance):
""" Download the items from base lists.
Metadata:
test_flag: sdn
Polarion:
assignee: mmojzis
initialEstimate: 1/10h
casecomponent: WebUI
caseimportance: medium
"""
collection = getattr(appliance.collections, collection_type)
download(collection, filetype)
@pytest.mark.parametrize("collection_type", OBJECTCOLLECTIONS)
def test_download_pdf_summary(appliance, collection_type, provider):
""" Download the summary details of specific object
Metadata:
test_flag: sdn
Polarion:
assignee: mmojzis
initialEstimate: 1/10h
casecomponent: WebUI
caseimportance: medium
"""
collection = getattr(appliance.collections, collection_type)
all_entities = collection.all()
if all_entities:
random_obj = random.choice(all_entities)
download_summary(random_obj)
else:
pytest.skip('{} entities not available'.format(collection_type))
| gpl-2.0 |
gnuradio/gnuradio | gr-blocks/python/blocks/qa_throttle.py | 7 | 1711 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import time
import pmt
from gnuradio import gr, gr_unittest, blocks
class test_throttle(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_throttling(self):
src_data = [1, 2, 3]
src = blocks.vector_source_c(src_data)
thr = blocks.throttle(gr.sizeof_gr_complex, 10)
dst = blocks.vector_sink_c()
self.tb.connect(src, thr, dst)
start_time = time.perf_counter()
self.tb.run()
end_time = time.perf_counter()
total_time = end_time - start_time
self.assertGreater(total_time, 0.3)
self.assertLess(total_time, 0.4)
dst_data = dst.data()
self.assertEqual(src_data, dst_data)
def test_rx_rate_tag(self):
src_data = [1, 2, 3, 4, 5, 6]
tag = gr.tag_t()
tag.key = pmt.string_to_symbol("rx_rate")
tag.value = pmt.to_pmt(20)
tag.offset = 0
src = blocks.vector_source_c(src_data, tags=(tag,))
thr = blocks.throttle(gr.sizeof_gr_complex, 10, ignore_tags=False)
dst = blocks.vector_sink_c()
self.tb.connect(src, thr, dst)
start_time = time.perf_counter()
self.tb.run()
end_time = time.perf_counter()
total_time = end_time - start_time
self.assertGreater(total_time, 0.3)
self.assertLess(total_time, 0.4)
dst_data = dst.data()
self.assertEqual(src_data, dst_data)
if __name__ == '__main__':
gr_unittest.run(test_throttle)
| gpl-3.0 |
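The timing bounds asserted in both tests follow from the throttle rate rather than from magic numbers — a sketch of the arithmetic (values taken from the tests above):

expected_1 = len([1, 2, 3]) / 10.0            # 0.3 s at the fixed 10 samples/s
expected_2 = len([1, 2, 3, 4, 5, 6]) / 20.0   # 0.3 s once the rx_rate=20 tag
                                              # overrides the configured rate
# Each test allows [0.3, 0.4] s to absorb flowgraph start/stop overhead.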
hgiemza/DIRAC | tests/System/dirac-test-production.py | 1 | 4565 | """ This script submits a test production with a filter
"""
import time
import os
import json
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s test directory' % Script.scriptName
] ) )
from DIRAC.Core.Base.Script import parseCommandLine
Script.registerSwitch( "", "UseFilter=", "e.g. True/False" )
parseCommandLine()
from DIRAC import gLogger
from DIRAC.Interfaces.API.Job import Job
from DIRAC.TransformationSystem.Client.Transformation import Transformation
### Needed to test transformations with Filters
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
# Parse the arguments
args = Script.getPositionalArgs()
if ( len( args ) != 1 ):
Script.showHelp()
directory = args[0]
UseFilter = False
for switch in Script.getUnprocessedSwitches():
if switch[0].lower() == "usefilter":
if switch[1] == 'True':
UseFilter = True
#Let's first create the production
prodJobType = 'Merge'
transName = 'testProduction_' + str(int(time.time()))
desc = 'just test'
prodJob = Job()
prodJob._addParameter( prodJob.workflow, 'PRODUCTION_ID', 'string', '00012345', 'ProductionID' )
prodJob._addParameter( prodJob.workflow, 'JOB_ID', 'string', '00006789', 'ProductionJobID' )
prodJob._addParameter( prodJob.workflow, 'eventType', 'string', 'TestEventType', 'Event Type of the production' )
prodJob._addParameter( prodJob.workflow, 'numberOfEvents', 'string', '-1', 'Number of events requested' )
prodJob._addParameter( prodJob.workflow, 'ProcessingType', 'JDL', str( 'Test' ), 'ProductionGroupOrType' )
prodJob._addParameter( prodJob.workflow, 'Priority', 'JDL', str( 9 ), 'UserPriority' )
prodJob.setType( prodJobType )
prodJob.workflow.setName(transName)
prodJob.workflow.setDescrShort( desc )
prodJob.workflow.setDescription( desc )
prodJob.setCPUTime( 86400 )
prodJob.setInputDataPolicy( 'Download' )
prodJob.setExecutable('/bin/ls', '-l')
#Let's submit the production now
#result = prodJob.create()
name = prodJob.workflow.getName()
name = name.replace( '/', '' ).replace( '\\', '' )
prodJob.workflow.toXMLFile( name )
print 'Workflow XML file name is: %s' % name
workflowBody = ''
if os.path.exists( name ):
with open( name, 'r' ) as fopen:
workflowBody = fopen.read()
else:
print 'Could not get workflow body'
# Standard parameters
transformation = Transformation()
transformation.setTransformationName( name )
transformation.setTransformationGroup( 'Test' )
transformation.setDescription( desc )
transformation.setLongDescription( desc )
transformation.setType( 'Merge' )
transformation.setBody( workflowBody )
transformation.setPlugin( 'Standard' )
transformation.setTransformationFamily( 'Test' )
transformation.setGroupSize( 2 )
transformation.setOutputDirectories([ '/dirac/outConfigName/configVersion/LOG/00000000',
'/dirac/outConfigName/configVersion/RAW/00000000',
'/dirac/outConfigName/configVersion/CORE/00000000'])
## Set directory meta data and create a transformation with a meta-data filter
if UseFilter:
fc = FileCatalog()
dm = DataManager()
metaCatalog = 'DIRACFileCatalog'
## Set meta data fields in the DFC
MDFieldDict = {'particle':'VARCHAR(128)', 'timestamp':'VARCHAR(128)'}
for MDField in MDFieldDict.keys():
MDFieldType = MDFieldDict[MDField]
res = fc.addMetadataField( MDField, MDFieldType )
if not res['OK']:
gLogger.error( "Failed to add metadata fields", res['Message'] )
exit( -1 )
## Set directory meta data
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
MDdict1 = {'particle':'gamma', 'timestamp':timestamp}
res = fc.setMetadata( directory, MDdict1 )
if not res['OK']:
gLogger.error( "Failed to set metadata", res['Message'] )
exit( -1 )
## Set the transformation meta data filter
MDdict1b = {'particle':'gamma', 'timestamp':timestamp}
mqJson1b = json.dumps( MDdict1b )
res = transformation.setFileMask( mqJson1b )
if not res['OK']:
gLogger.error( "Failed to set FileMask", res['Message'] )
exit( -1 )
## Create the transformation
result = transformation.addTransformation()
if not result['OK']:
print result
exit(1)
transID = result['Value']
with open('TransformationID', 'w') as fd:
fd.write(str(transID))
print "Created %s, stored in file 'TransformationID'" % transID
| gpl-3.0 |
DavidLP/home-assistant | homeassistant/components/logi_circle/sensor.py | 4 | 4867 | """Support for Logi Circle sensors."""
import logging
from homeassistant.const import (
ATTR_ATTRIBUTION, ATTR_BATTERY_CHARGING, CONF_MONITORED_CONDITIONS,
CONF_SENSORS, STATE_OFF, STATE_ON)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.util.dt import as_local
from .const import (
ATTRIBUTION, DEVICE_BRAND, DOMAIN as LOGI_CIRCLE_DOMAIN,
LOGI_SENSORS as SENSOR_TYPES)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up a sensor for a Logi Circle device. Obsolete."""
_LOGGER.warning(
'Logi Circle no longer works with sensor platform configuration')
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up a Logi Circle sensor based on a config entry."""
devices = await hass.data[LOGI_CIRCLE_DOMAIN].cameras
time_zone = str(hass.config.time_zone)
sensors = []
for sensor_type in (entry.data.get(CONF_SENSORS)
.get(CONF_MONITORED_CONDITIONS)):
for device in devices:
if device.supports_feature(sensor_type):
sensors.append(LogiSensor(device, time_zone, sensor_type))
async_add_entities(sensors, True)
class LogiSensor(Entity):
"""A sensor implementation for a Logi Circle camera."""
def __init__(self, camera, time_zone, sensor_type):
"""Initialize a sensor for Logi Circle camera."""
self._sensor_type = sensor_type
self._camera = camera
self._id = '{}-{}'.format(self._camera.mac_address, self._sensor_type)
self._icon = 'mdi:{}'.format(SENSOR_TYPES.get(self._sensor_type)[2])
self._name = "{0} {1}".format(
self._camera.name, SENSOR_TYPES.get(self._sensor_type)[0])
self._activity = {}
self._state = None
self._tz = time_zone
@property
def unique_id(self):
"""Return a unique ID."""
return self._id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_info(self):
"""Return information about the device."""
return {
'name': self._camera.name,
'identifiers': {
(LOGI_CIRCLE_DOMAIN, self._camera.id)
},
'model': self._camera.model_name,
'sw_version': self._camera.firmware,
'manufacturer': DEVICE_BRAND
}
@property
def device_state_attributes(self):
"""Return the state attributes."""
state = {
ATTR_ATTRIBUTION: ATTRIBUTION,
'battery_saving_mode': (
STATE_ON if self._camera.battery_saving else STATE_OFF),
'microphone_gain': self._camera.microphone_gain
}
if self._sensor_type == 'battery_level':
state[ATTR_BATTERY_CHARGING] = self._camera.charging
return state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if (self._sensor_type == 'battery_level' and
self._state is not None):
return icon_for_battery_level(battery_level=int(self._state),
charging=False)
if (self._sensor_type == 'recording_mode' and
self._state is not None):
return 'mdi:eye' if self._state == STATE_ON else 'mdi:eye-off'
if (self._sensor_type == 'streaming_mode' and
self._state is not None):
return (
'mdi:camera' if self._state == STATE_ON else 'mdi:camera-off')
return self._icon
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES.get(self._sensor_type)[1]
async def async_update(self):
"""Get the latest data and updates the state."""
_LOGGER.debug("Pulling data from %s sensor", self._name)
await self._camera.update()
if self._sensor_type == 'last_activity_time':
last_activity = (await self._camera.
get_last_activity(force_refresh=True))
if last_activity is not None:
last_activity_time = as_local(last_activity.end_time_utc)
self._state = '{0:0>2}:{1:0>2}'.format(
last_activity_time.hour, last_activity_time.minute)
else:
state = getattr(self._camera, self._sensor_type, None)
if isinstance(state, bool):
self._state = STATE_ON if state is True else STATE_OFF
else:
self._state = state
| apache-2.0 |
RKD314/yumstat | simple/apiclient/errors.py | 108 | 3516 | #!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from oauth2client import util
from oauth2client.anyjson import simplejson
class Error(Exception):
"""Base error for this module."""
pass
class HttpError(Error):
"""HTTP data was invalid or unexpected."""
@util.positional(3)
def __init__(self, resp, content, uri=None):
self.resp = resp
self.content = content
self.uri = uri
def _get_reason(self):
"""Calculate the reason for the error from the response content."""
reason = self.resp.reason
try:
data = simplejson.loads(self.content)
reason = data['error']['message']
except (ValueError, KeyError):
pass
if reason is None:
reason = ''
return reason
def __repr__(self):
if self.uri:
return '<HttpError %s when requesting %s returned "%s">' % (
self.resp.status, self.uri, self._get_reason().strip())
else:
return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
__str__ = __repr__
class InvalidJsonError(Error):
"""The JSON returned could not be parsed."""
pass
class UnknownFileType(Error):
"""File type unknown or unexpected."""
pass
class UnknownLinkType(Error):
"""Link type unknown or unexpected."""
pass
class UnknownApiNameOrVersion(Error):
"""No API with that name and version exists."""
pass
class UnacceptableMimeTypeError(Error):
"""That is an unacceptable mimetype for this operation."""
pass
class MediaUploadSizeError(Error):
"""Media is larger than the method can accept."""
pass
class ResumableUploadError(HttpError):
"""Error occured during resumable upload."""
pass
class InvalidChunkSizeError(Error):
"""The given chunksize is not valid."""
pass
class InvalidNotificationError(Error):
"""The channel Notification is invalid."""
pass
class BatchError(HttpError):
"""Error occured during batch operations."""
@util.positional(2)
def __init__(self, reason, resp=None, content=None):
self.resp = resp
self.content = content
self.reason = reason
def __repr__(self):
return '<BatchError %s "%s">' % (self.resp.status, self.reason)
__str__ = __repr__
class UnexpectedMethodError(Error):
"""Exception raised by RequestMockBuilder on unexpected calls."""
@util.positional(1)
def __init__(self, methodId=None):
"""Constructor for an UnexpectedMethodError."""
super(UnexpectedMethodError, self).__init__(
'Received unexpected call %s' % methodId)
class UnexpectedBodyError(Error):
"""Exception raised by RequestMockBuilder on unexpected bodies."""
def __init__(self, expected, provided):
"""Constructor for an UnexpectedMethodError."""
super(UnexpectedBodyError, self).__init__(
'Expected: [%s] - Provided: [%s]' % (expected, provided))
| mit |
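A small sketch of how HttpError composes its message from the response payload; _FakeResp stands in for an httplib2 response object, and the payload and URL below are hypothetical:

class _FakeResp(object):
    status = 403
    reason = 'Forbidden'

err = HttpError(_FakeResp(), '{"error": {"message": "Daily Limit Exceeded"}}',
                uri='https://example.invalid/api')
print(err)
# <HttpError 403 when requesting https://example.invalid/api returned "Daily Limit Exceeded">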
savanu/servo | tests/wpt/web-platform-tests/css/vendor-imports/mozilla/mozilla-central-reftests/text-decor-3/support/generate-text-emphasis-ruby-tests.py | 829 | 3042 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
This script generates tests text-emphasis-ruby-001 ~ 004 which tests
emphasis marks with ruby in four directions. It outputs a list of all
tests it generated in the format of Mozilla reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-ruby-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis and ruby, {wm}, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="emphasis marks are drawn outside the ruby">
<link rel="match" href="text-emphasis-ruby-{index:03}-ref.html">
<p>Pass if the emphasis marks are outside the ruby:</p>
<div style="line-height: 5; writing-mode: {wm}; ruby-position: {ruby_pos}; text-emphasis-position: {posval}">ルビ<span style="text-emphasis: circle">と<ruby>圏<rt>けん</rt>点<rt>てん</rt></ruby>を</span>同時</div>
'''
REF_FILE = 'text-emphasis-ruby-{:03}-ref.html'
REF_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis and ruby, {wm}, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rtc {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are outside the ruby:</p>
<div style="line-height: 5; writing-mode: {wm}; ruby-position: {posval}">ルビ<ruby>と<rtc>●</rtc>圏<rt>けん</rt><rtc>●</rtc>点<rt>てん</rt><rtc>●</rtc>を<rtc>●</rtc></ruby>同時</div>
'''
TEST_CASES = [
('top', 'horizontal-tb', 'over', [
('horizontal-tb', 'over right')]),
('bottom', 'horizontal-tb', 'under', [
('horizontal-tb', 'under right')]),
('right', 'vertical-rl', 'over', [
('vertical-rl', 'over right'),
('vertical-lr', 'over right')]),
('left', 'vertical-rl', 'under', [
('vertical-rl', 'over left'),
('vertical-lr', 'over left')]),
]
SUFFIXES = ['', 'a']
def write_file(filename, content):
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
print("# START tests from {}".format(__file__))
idx = 0
for pos, ref_wm, ruby_pos, subtests in TEST_CASES:
idx += 1
ref_file = REF_FILE.format(idx)
ref_content = REF_TEMPLATE.format(pos=pos, wm=ref_wm, posval=ruby_pos)
write_file(ref_file, ref_content)
suffix = iter(SUFFIXES)
for wm, posval in subtests:
test_file = TEST_FILE.format(idx, next(suffix))
test_content = TEST_TEMPLATE.format(
wm=wm, pos=pos, index=idx, ruby_pos=ruby_pos, posval=posval)
write_file(test_file, test_content)
print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
| mpl-2.0 |
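For reference, the reftest manifest this script prints to stdout looks like the lines below (illustrative, derived from TEST_CASES above; the two-subtest cases share one reference file and the second test gets the 'a' suffix):

== text-emphasis-ruby-001.html text-emphasis-ruby-001-ref.html
== text-emphasis-ruby-002.html text-emphasis-ruby-002-ref.html
== text-emphasis-ruby-003.html text-emphasis-ruby-003-ref.html
== text-emphasis-ruby-003a.html text-emphasis-ruby-003-ref.html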
cournape/Bento | bento/commands/build.py | 1 | 2331 | import os
import os.path as op
from bento.utils.utils \
import \
subst_vars
from bento.installed_package_description \
import \
BuildManifest, build_manifest_meta_from_pkg
from bento._config \
import \
BUILD_MANIFEST_PATH
from bento.commands.core \
import \
Option
from bento.commands.core \
import \
Command
from bento.utils \
import \
cpu_count
class SectionWriter(object):
def __init__(self):
self.sections = {}
def store(self, filename, pkg):
meta = build_manifest_meta_from_pkg(pkg)
p = BuildManifest(self.sections, meta, pkg.executables)
if not op.exists(op.dirname(filename)):
os.makedirs(op.dirname(filename))
p.write(filename)
def jobs_callback(option, opt, value, parser):
setattr(parser.values, option.dest, cpu_count())
class BuildCommand(Command):
long_descr = """\
Purpose: build the project
Usage: bentomaker build [OPTIONS]."""
short_descr = "build the project."
common_options = Command.common_options \
+ [Option("-i", "--inplace",
help="Build extensions in place", action="store_true"),
Option("-j", "--jobs",
help="Parallel builds (yaku build only - EXPERIMENTAL)",
dest="jobs", action="callback", callback=jobs_callback),
Option("-v", "--verbose",
help="Verbose output (yaku build only)",
action="store_true")]
def run(self, ctx):
p = ctx.options_context.parser
o, a = p.parse_args(ctx.command_argv)
if o.help:
p.print_help()
return
ctx.compile()
ctx.post_compile()
def finish(self, ctx):
super(BuildCommand, self).finish(ctx)
n = ctx.build_node.make_node(BUILD_MANIFEST_PATH)
ctx.section_writer.store(n.abspath(), ctx.pkg)
def _config_content(paths):
keys = sorted(paths.keys())
n = max([len(k) for k in keys]) + 2
content = []
for name, value in sorted(paths.items()):
content.append('%s = %r' % (name.upper().ljust(n), subst_vars(value, paths)))
return "\n".join(content)
| bsd-3-clause |
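An illustrative call to _config_content above, with a hypothetical paths dict containing no $-references so subst_vars acts as a pass-through:

paths = {'prefix': '/usr/local', 'bindir': '/usr/local/bin'}
print(_config_content(paths))
# BINDIR   = '/usr/local/bin'
# PREFIX   = '/usr/local'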
kenshay/ImageScript | ProgramData/Android/ADB/platform-tools/systrace/catapult/telemetry/telemetry/internal/backends/chrome/chrome_browser_backend_unittest.py | 7 | 3764 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
from telemetry.internal import forwarders
from telemetry.internal.backends.chrome import chrome_browser_backend
from telemetry.internal.browser import browser_options as browser_options_module
from telemetry.util import wpr_modes
class FakePlatformBackend(object):
def __init__(self, is_initialized, local_ts_proxy_port, remote_port,
is_host_platform):
self.is_host_platform = is_host_platform
self.forwarder_factory = mock.Mock()
self.network_controller_backend = mock.Mock()
self.network_controller_backend.is_initialized = is_initialized
if is_initialized:
self.network_controller_backend.forwarder.port_pair = forwarders.PortPair(
local_port=local_ts_proxy_port, remote_port=remote_port)
else:
self.network_controller_backend.forwarder = None
self.network_controller_backend.host_ip = '127.0.0.1'
self.network_controller_backend.is_test_ca_installed = False
class FakeBrowserOptions(browser_options_module.BrowserOptions):
def __init__(self, wpr_mode=wpr_modes.WPR_OFF):
super(FakeBrowserOptions, self).__init__()
self.wpr_mode = wpr_mode
self.browser_type = 'chrome'
self.browser_user_agent_type = 'desktop'
self.disable_background_networking = False
self.disable_component_extensions_with_background_pages = False
self.disable_default_apps = False
class TestChromeBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
# The test does not need to define the abstract methods.
# pylint: disable=abstract-method
def __init__(self, browser_options,
local_ts_proxy_port=None,
remote_port=None,
is_running_locally=False):
browser_options.extensions_to_load = []
browser_options.output_profile_path = None
super(TestChromeBrowserBackend, self).__init__(
platform_backend=FakePlatformBackend(
browser_options.wpr_mode != wpr_modes.WPR_OFF,
local_ts_proxy_port, remote_port, is_running_locally),
supports_tab_control=False,
supports_extensions=False,
browser_options=browser_options)
class ReplayStartupArgsTest(unittest.TestCase):
"""Test expected inputs for GetReplayBrowserStartupArgs."""
def testReplayOffGivesEmptyArgs(self):
browser_options = FakeBrowserOptions()
browser_backend = TestChromeBrowserBackend(browser_options)
self.assertEqual([], browser_backend.GetReplayBrowserStartupArgs())
def BasicArgsHelper(self, is_running_locally):
browser_options = FakeBrowserOptions(wpr_mode=wpr_modes.WPR_REPLAY)
browser_backend = TestChromeBrowserBackend(
browser_options,
local_ts_proxy_port=567,
remote_port=789,
is_running_locally=is_running_locally)
expected_args = [
'--ignore-certificate-errors',
'--proxy-server=socks://localhost:789',
]
self.assertEqual(
expected_args,
sorted(browser_backend.GetReplayBrowserStartupArgs()))
def testBasicArgs(self):
# The result is the same regardless of whether running locally.
self.BasicArgsHelper(is_running_locally=True)
self.BasicArgsHelper(is_running_locally=False)
def testReplayNotActive(self):
browser_options = FakeBrowserOptions(wpr_mode=wpr_modes.WPR_OFF)
browser_backend = TestChromeBrowserBackend(
browser_options,
local_ts_proxy_port=567,
remote_port=789,
is_running_locally=True)
expected_args = []
self.assertEqual(
expected_args,
sorted(browser_backend.GetReplayBrowserStartupArgs()))
| gpl-3.0 |
kohr-h/odl | examples/solvers/denoising_with_entropy_type_regularization.py | 2 | 2338 | """Denoising using PDHG with TV & entropy-type data term.
Solves the following optimization problem:
min_{x > 0} KL(x, g) + lam || |grad(x)| ||_1
where ``KL(x, g)`` is the Kullback-Leibler divergence, ``grad`` is the
spatial gradient, ``|| . ||_1`` is the 1 norm and lam is a regularization
constant.
For details see :ref:`PDHG`, :ref:`proximal_operators`, and
references therein.
"""
import numpy as np
import scipy.misc
import odl
# Read test image:
# convert integer values to float, and rotate to get the image upright
image = np.rot90(scipy.misc.ascent()[::2, ::2], 3).astype('float')
shape = image.shape
# Rescale
image *= 100 / image.max()
# Add noise
noisy_image = np.random.poisson(1 + image)
# Discretized spaces and elements
space = odl.uniform_discr([0, 0], shape, shape)
orig = space.element(image)
noisy = space.element(noisy_image)
# --- Set up the inverse problem --- #
# Gradient operator
gradient = odl.Gradient(space, method='forward')
# Matrix of operators
op = odl.BroadcastOperator(odl.IdentityOperator(space), gradient)
# Proximal operator related to the primal variable
# Non-negativity constraint
f = odl.solvers.IndicatorNonnegativity(op.domain)
# Functionals related to the dual variable
# Kullback-Leibler data matching
kl_divergence = odl.solvers.KullbackLeibler(space, prior=noisy)
# Isotropic TV-regularization: l1-norm of grad(x)
l1_norm = 0.1 * odl.solvers.L1Norm(gradient.range)
# Make separable sum of functionals, order must correspond to the operator K
g = odl.solvers.SeparableSum(kl_divergence, l1_norm)
# Optional: pass callback objects to solver
callback = (odl.solvers.CallbackPrintIteration() &
odl.solvers.CallbackShow(step=5))
# --- Select solver parameters and solve using PDHG --- #
# Estimated operator norm, add 10 percent to ensure ||K||_2^2 * sigma * tau < 1
op_norm = 1.1 * odl.power_method_opnorm(op)
tau = 10.0 / op_norm # Step size for the primal variable
sigma = 0.1 / op_norm # Step size for the dual variable
# Starting point
x = op.domain.zero()
# Run algorithm (and display intermediates)
odl.solvers.pdhg(x, f, g, op, niter=100, tau=tau, sigma=sigma,
callback=callback)
# Display images
orig.show(title='original image')
noisy.show(title='noisy image')
x.show(title='denoised', force_show=True) # show and hold
| mpl-2.0 |
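For reference, the data term above is the generalized Kullback-Leibler divergence; up to odl's exact sign/offset convention it reads

    KL(x, g) = \sum_i \left( x_i - g_i + g_i \ln\frac{g_i}{x_i} \right), \qquad x_i > 0,

and, ignoring the TV term, it is minimized at x = g — consistent with the Poisson noise model used to generate `noisy` above.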
gencer/sentry | src/sentry/south_migrations/0235_auto__add_projectbookmark__add_unique_projectbookmark_project_id_user_.py | 2 | 64945 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ProjectBookmark'
db.create_table(
'sentry_projectbookmark', (
(
'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
primary_key=True
)
), (
'project_id', self.gf('sentry.db.models.fields.bounded.BoundedBigIntegerField')(
null=True, blank=True
)
), (
'user', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
to=orm['sentry.User']
)
), (
'date_added', self.gf('django.db.models.fields.DateTimeField')(
default=datetime.datetime.now, null=True
)
),
)
)
db.send_create_signal('sentry', ['ProjectBookmark'])
# Adding unique constraint on 'ProjectBookmark', fields ['project_id', 'user']
db.create_unique('sentry_projectbookmark', ['project_id', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'ProjectBookmark', fields ['project_id', 'user']
db.delete_unique('sentry_projectbookmark', ['project_id', 'user_id'])
# Deleting model 'ProjectBookmark'
db.delete_table('sentry_projectbookmark')
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2016, 2, 23, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.group': {
'Meta': {
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'counter': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
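# Illustrative note (not part of the generated migration): South reads each
# frozen field above as a (field class path, positional args, keyword args)
# triple, with every value stored as a string. A minimal sketch of unpacking
# one entry, using names taken from the dict above:
#
#     path, args, kwargs = models['sentry.option']['key']
#     # path   == 'django.db.models.fields.CharField'
#     # kwargs == {'unique': 'True', 'max_length': '64'}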
| bsd-3-clause |
abartlet/samba | third_party/waf/wafadmin/Scripting.py | 32 | 15298 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005 (ita)
"Module called for configuring, compiling and installing targets"
import os, sys, shutil, traceback, datetime, inspect, errno
import Utils, Configure, Build, Logs, Options, Environment, Task
from Logs import error, warn, info
from Constants import *
g_gz = 'bz2'
commands = []
def prepare_impl(t, cwd, ver, wafdir):
Options.tooldir = [t]
Options.launch_dir = cwd
# some command-line options can be processed immediately
if '--version' in sys.argv:
opt_obj = Options.Handler()
opt_obj.curdir = cwd
opt_obj.parse_args()
sys.exit(0)
# now find the wscript file
msg1 = 'Waf: Please run waf from a directory containing a file named "%s" or run distclean' % WSCRIPT_FILE
# in theory projects can be configured in an autotool-like manner:
# mkdir build && cd build && ../waf configure && ../waf
build_dir_override = None
candidate = None
lst = os.listdir(cwd)
search_for_candidate = True
if WSCRIPT_FILE in lst:
candidate = cwd
elif 'configure' in sys.argv and not WSCRIPT_BUILD_FILE in lst:
# autotool-like configuration
calldir = os.path.abspath(os.path.dirname(sys.argv[0]))
if WSCRIPT_FILE in os.listdir(calldir):
candidate = calldir
search_for_candidate = False
else:
error('arg[0] directory does not contain a wscript file')
sys.exit(1)
build_dir_override = cwd
# climb up to find a script if it is not found
while search_for_candidate:
if len(cwd) <= 3:
break # stop at / or c:
dirlst = os.listdir(cwd)
if WSCRIPT_FILE in dirlst:
candidate = cwd
if 'configure' in sys.argv and candidate:
break
if Options.lockfile in dirlst:
env = Environment.Environment()
try:
env.load(os.path.join(cwd, Options.lockfile))
except:
error('could not load %r' % Options.lockfile)
try:
os.stat(env['cwd'])
except:
candidate = cwd
else:
candidate = env['cwd']
break
cwd = os.path.dirname(cwd) # climb up
if not candidate:
# check if the user only wanted to display the help
if '-h' in sys.argv or '--help' in sys.argv:
warn('No wscript file found: the help message may be incomplete')
opt_obj = Options.Handler()
opt_obj.curdir = cwd
opt_obj.parse_args()
else:
error(msg1)
sys.exit(0)
# We have found wscript, but there is no guarantee that it is valid
try:
os.chdir(candidate)
except OSError:
raise Utils.WafError("the folder %r is unreadable" % candidate)
# define the main module containing the functions init, shutdown, ..
Utils.set_main_module(os.path.join(candidate, WSCRIPT_FILE))
if build_dir_override:
d = getattr(Utils.g_module, BLDDIR, None)
if d:
# test if user has set the blddir in wscript.
msg = ' Overriding build directory %s with %s' % (d, build_dir_override)
warn(msg)
Utils.g_module.blddir = build_dir_override
# bind a few methods and classes by default
def set_def(obj, name=''):
n = name or obj.__name__
if not n in Utils.g_module.__dict__:
setattr(Utils.g_module, n, obj)
for k in [dist, distclean, distcheck, clean, install, uninstall]:
set_def(k)
set_def(Configure.ConfigurationContext, 'configure_context')
for k in ['build', 'clean', 'install', 'uninstall']:
set_def(Build.BuildContext, k + '_context')
# now parse the options from the user wscript file
opt_obj = Options.Handler(Utils.g_module)
opt_obj.curdir = candidate
try:
f = Utils.g_module.set_options
except AttributeError:
pass
else:
opt_obj.sub_options([''])
opt_obj.parse_args()
if not 'init' in Utils.g_module.__dict__:
Utils.g_module.init = Utils.nada
if not 'shutdown' in Utils.g_module.__dict__:
Utils.g_module.shutdown = Utils.nada
main()
def prepare(t, cwd, ver, wafdir):
if WAFVERSION != ver:
msg = 'Version mismatch: waf %s <> wafadmin %s (wafdir %s)' % (ver, WAFVERSION, wafdir)
print('\033[91mError: %s\033[0m' % msg)
sys.exit(1)
#"""
try:
prepare_impl(t, cwd, ver, wafdir)
except Utils.WafError, e:
error(str(e))
sys.exit(1)
except KeyboardInterrupt:
Utils.pprint('RED', 'Interrupted')
sys.exit(68)
"""
import cProfile, pstats
cProfile.runctx("import Scripting; Scripting.prepare_impl(t, cwd, ver, wafdir)", {},
{'t': t, 'cwd':cwd, 'ver':ver, 'wafdir':wafdir},
'profi.txt')
p = pstats.Stats('profi.txt')
p.sort_stats('time').print_stats(45)
#"""
def main():
global commands
commands = Options.arg_line[:]
while commands:
x = commands.pop(0)
ini = datetime.datetime.now()
if x == 'configure':
fun = configure
elif x == 'build':
fun = build
else:
fun = getattr(Utils.g_module, x, None)
if not fun:
raise Utils.WscriptError('No such command %r' % x)
ctx = getattr(Utils.g_module, x + '_context', Utils.Context)()
if x in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']:
# compatibility TODO remove in waf 1.6
try:
fun(ctx)
except TypeError:
fun()
else:
fun(ctx)
ela = ''
if not Options.options.progress_bar:
ela = ' (%s)' % Utils.get_elapsed_time(ini)
if x != 'init' and x != 'shutdown':
info('%r finished successfully%s' % (x, ela))
if not commands and x != 'shutdown':
commands.append('shutdown')
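# Illustrative example of the dispatch above, assuming a standard wscript:
# running `waf configure build` leaves ['configure', 'build'] in
# Options.arg_line, so main() calls configure(ctx) and then build(ctx),
# and finally appends the implicit 'shutdown' command.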
def configure(conf):
src = getattr(Options.options, SRCDIR, None)
if not src: src = getattr(Utils.g_module, SRCDIR, None)
if not src: src = getattr(Utils.g_module, 'top', None)
if not src:
src = '.'
incomplete_src = 1
src = os.path.abspath(src)
bld = getattr(Options.options, BLDDIR, None)
if not bld: bld = getattr(Utils.g_module, BLDDIR, None)
if not bld: bld = getattr(Utils.g_module, 'out', None)
if not bld:
bld = 'build'
incomplete_bld = 1
if bld == '.':
raise Utils.WafError('Setting blddir="." may cause distclean problems')
bld = os.path.abspath(bld)
try: os.makedirs(bld)
except OSError: pass
# It is not possible to compile specific targets during configuration;
# this may cause configuration errors if autoconfig is set
targets = Options.options.compile_targets
Options.options.compile_targets = None
Options.is_install = False
conf.srcdir = src
conf.blddir = bld
conf.post_init()
if 'incomplete_src' in vars():
conf.check_message_1('Setting srcdir to')
conf.check_message_2(src)
if 'incomplete_bld' in vars():
conf.check_message_1('Setting blddir to')
conf.check_message_2(bld)
# calling to main wscript's configure()
conf.sub_config([''])
conf.store()
# this will write a configure lock so that subsequent builds will
# consider the current path as the root directory (see prepare_impl).
# to remove: use 'waf distclean'
env = Environment.Environment()
env[BLDDIR] = bld
env[SRCDIR] = src
env['argv'] = sys.argv
env['commands'] = Options.commands
env['options'] = Options.options.__dict__
# conf.hash & conf.files hold wscript files paths and hash
# (used only by Configure.autoconfig)
env['hash'] = conf.hash
env['files'] = conf.files
env['environ'] = dict(conf.environ)
env['cwd'] = os.path.split(Utils.g_module.root_path)[0]
if Utils.g_module.root_path != src:
# in case the source dir is somewhere else
env.store(os.path.join(src, Options.lockfile))
env.store(Options.lockfile)
Options.options.compile_targets = targets
def clean(bld):
'''removes the build files'''
try:
proj = Environment.Environment(Options.lockfile)
except IOError:
raise Utils.WafError('Nothing to clean (project not configured)')
bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
bld.load_envs()
bld.is_install = 0 # False
# read the scripts - and set the path to the wscript path (useful for srcdir='/foo/bar')
bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]])
try:
bld.clean()
finally:
bld.save()
def check_configured(bld):
if not Configure.autoconfig:
return bld
conf_cls = getattr(Utils.g_module, 'configure_context', Utils.Context)
bld_cls = getattr(Utils.g_module, 'build_context', Utils.Context)
def reconf(proj):
back = (Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose)
Options.commands = proj['commands']
Options.options.__dict__ = proj['options']
conf = conf_cls()
conf.environ = proj['environ']
configure(conf)
(Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose) = back
try:
proj = Environment.Environment(Options.lockfile)
except IOError:
conf = conf_cls()
configure(conf)
else:
try:
bld = bld_cls()
bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
bld.load_envs()
except Utils.WafError:
reconf(proj)
return bld_cls()
try:
proj = Environment.Environment(Options.lockfile)
except IOError:
raise Utils.WafError('Auto-config: project does not configure (bug)')
h = 0
try:
for file in proj['files']:
if file.endswith('configure'):
h = hash((h, Utils.readf(file)))
else:
mod = Utils.load_module(file)
h = hash((h, mod.waf_hash_val))
except (OSError, IOError):
warn('Reconfiguring the project: a file is unavailable')
reconf(proj)
else:
if (h != proj['hash']):
warn('Reconfiguring the project: the configuration has changed')
reconf(proj)
return bld_cls()
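# Illustrative consequence of the logic above: with Configure.autoconfig
# enabled, editing any wscript changes the stored file hash, so the next
# `waf build` transparently re-runs configure() with the previously saved
# options before building.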
def install(bld):
'''installs the build files'''
bld = check_configured(bld)
Options.commands['install'] = True
Options.commands['uninstall'] = False
Options.is_install = True
bld.is_install = INSTALL
build_impl(bld)
bld.install()
def uninstall(bld):
'''removes the installed files'''
Options.commands['install'] = False
Options.commands['uninstall'] = True
Options.is_install = True
bld.is_install = UNINSTALL
try:
def runnable_status(self):
return SKIP_ME
setattr(Task.Task, 'runnable_status_back', Task.Task.runnable_status)
setattr(Task.Task, 'runnable_status', runnable_status)
build_impl(bld)
bld.install()
finally:
setattr(Task.Task, 'runnable_status', Task.Task.runnable_status_back)
def build(bld):
bld = check_configured(bld)
Options.commands['install'] = False
Options.commands['uninstall'] = False
Options.is_install = False
bld.is_install = 0 # False
return build_impl(bld)
def build_impl(bld):
# compile the project and/or install the files
try:
proj = Environment.Environment(Options.lockfile)
except IOError:
raise Utils.WafError("Project not configured (run 'waf configure' first)")
bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
bld.load_envs()
info("Waf: Entering directory `%s'" % bld.bldnode.abspath())
bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]])
# execute something immediately before the build starts
bld.pre_build()
try:
bld.compile()
finally:
if Options.options.progress_bar: print('')
info("Waf: Leaving directory `%s'" % bld.bldnode.abspath())
# execute something immediately after a successful build
bld.post_build()
bld.install()
excludes = '.bzr .bzrignore .git .gitignore .svn CVS .cvsignore .arch-ids {arch} SCCS BitKeeper .hg _MTN _darcs Makefile Makefile.in config.log .gitattributes .hgignore .hgtags'.split()
dist_exts = '~ .rej .orig .pyc .pyo .bak .tar.bz2 tar.gz .zip .swp'.split()
def dont_dist(name, src, build_dir):
global excludes, dist_exts
if (name.startswith(',,')
or name.startswith('++')
or name.startswith('.waf')
or (src == '.' and name == Options.lockfile)
or name in excludes
or name == build_dir
):
return True
for ext in dist_exts:
if name.endswith(ext):
return True
return False
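# Illustrative examples of the filter above (default Options.lockfile assumed):
#     dont_dist('foo.pyc', 'src', 'build')  # True, matches a dist_exts suffix
#     dont_dist('build', '.', 'build')      # True, the build directory itself
#     dont_dist('wscript', '.', 'build')    # False, shipped in the tarball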
# like shutil.copytree, but excludes unwanted files (via dont_dist)
# and raises exceptions immediately
def copytree(src, dst, build_dir):
names = os.listdir(src)
os.makedirs(dst)
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if dont_dist(name, src, build_dir):
continue
if os.path.isdir(srcname):
copytree(srcname, dstname, build_dir)
else:
shutil.copy2(srcname, dstname)
# TODO in waf 1.6, change this method if "srcdir == blddir" is allowed
def distclean(ctx=None):
'''removes the build directory'''
global commands
lst = os.listdir('.')
for f in lst:
if f == Options.lockfile:
try:
proj = Environment.Environment(f)
except:
Logs.warn('could not read %r' % f)
continue
try:
shutil.rmtree(proj[BLDDIR])
except IOError:
pass
except OSError, e:
if e.errno != errno.ENOENT:
Logs.warn('project %r cannot be removed' % proj[BLDDIR])
try:
os.remove(f)
except OSError, e:
if e.errno != errno.ENOENT:
Logs.warn('file %r cannot be removed' % f)
# remove the local waf cache
if not commands and f.startswith('.waf'):
shutil.rmtree(f, ignore_errors=True)
# FIXME waf 1.6: take a unique ctx parameter, and remove the optional appname and version
def dist(appname='', version=''):
'''makes a tarball for redistributing the sources'''
# returns the name of the archive that was created
import tarfile
if not appname: appname = Utils.g_module.APPNAME
if not version: version = Utils.g_module.VERSION
tmp_folder = appname + '-' + version
if g_gz in ['gz', 'bz2']:
arch_name = tmp_folder + '.tar.' + g_gz
else:
arch_name = tmp_folder + '.' + 'zip'
# remove the previous dir
try:
shutil.rmtree(tmp_folder)
except (OSError, IOError):
pass
# remove the previous archive
try:
os.remove(arch_name)
except (OSError, IOError):
pass
# copy the files into the temporary folder
blddir = getattr(Utils.g_module, BLDDIR, None)
if not blddir:
blddir = getattr(Utils.g_module, 'out', None)
copytree('.', tmp_folder, blddir)
# undocumented hook for additional cleanup
dist_hook = getattr(Utils.g_module, 'dist_hook', None)
if dist_hook:
back = os.getcwd()
os.chdir(tmp_folder)
try:
dist_hook()
finally:
# go back to the root directory
os.chdir(back)
if g_gz in ['gz', 'bz2']:
tar = tarfile.open(arch_name, 'w:' + g_gz)
tar.add(tmp_folder)
tar.close()
else:
Utils.zip_folder(tmp_folder, arch_name, tmp_folder)
try: from hashlib import sha1 as sha
except ImportError: from sha import sha
try:
digest = " (sha=%r)" % sha(Utils.readf(arch_name)).hexdigest()
except:
digest = ''
info('New archive created: %s%s' % (arch_name, digest))
if os.path.exists(tmp_folder): shutil.rmtree(tmp_folder)
return arch_name
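# Illustrative usage (APPNAME/VERSION are normally read from the wscript):
# calling dist('myapp', '1.0') with g_gz == 'bz2' copies the tree into
# 'myapp-1.0/', writes 'myapp-1.0.tar.bz2', logs its sha1 digest and
# returns the archive name.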
# FIXME waf 1.6: take a unique ctx parameter, and remove the optional appname and version
def distcheck(appname='', version='', subdir=''):
'''checks if the sources compile (tarball from 'dist')'''
import tempfile, tarfile
if not appname: appname = Utils.g_module.APPNAME
if not version: version = Utils.g_module.VERSION
waf = os.path.abspath(sys.argv[0])
tarball = dist(appname, version)
path = appname + '-' + version
# remove any previous instance
if os.path.exists(path):
shutil.rmtree(path)
t = tarfile.open(tarball)
for x in t: t.extract(x)
t.close()
# build_path is the directory for the waf invocation
if subdir:
build_path = os.path.join(path, subdir)
else:
build_path = path
instdir = tempfile.mkdtemp('.inst', '%s-%s' % (appname, version))
ret = Utils.pproc.Popen([waf, 'configure', 'build', 'install', 'uninstall', '--destdir=' + instdir], cwd=build_path).wait()
if ret:
raise Utils.WafError('distcheck failed with code %i' % ret)
if os.path.exists(instdir):
raise Utils.WafError('distcheck succeeded, but files were left in %s' % instdir)
shutil.rmtree(path)
# FIXME remove in Waf 1.6 (kept for compatibility)
def add_subdir(dir, bld):
bld.recurse(dir, 'build')
| gpl-3.0 |
Endika/edx-platform | lms/djangoapps/instructor/features/bulk_email.py | 33 | 6776 | """
Define steps for bulk email acceptance test.
"""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from lettuce.django import mail
from nose.tools import assert_in, assert_equal
from django.core.management import call_command
from django.conf import settings
from courseware.tests.factories import StaffFactory, InstructorFactory
@step(u'Given there is a course with a staff, instructor and student')
def make_populated_course(step): # pylint: disable=unused-argument
## This is different from the function defined in common.py because it enrolls
## a staff member, an instructor, and a student regardless of what `role` is,
## then logs `role` in. This ensures we have 3 class participants to email.
# Clear existing courses to avoid conflicts
world.clear_courses()
# Create a new course
course = world.CourseFactory.create(
org='edx',
number='888',
display_name='Bulk Email Test Course'
)
world.bulk_email_course_key = course.id
try:
# See if we've defined the instructor & staff user yet
world.bulk_email_instructor
except AttributeError:
# Make & register an instructor for the course
world.bulk_email_instructor = InstructorFactory(course_key=world.bulk_email_course_key)
world.enroll_user(world.bulk_email_instructor, world.bulk_email_course_key)
# Make & register a staff member
world.bulk_email_staff = StaffFactory(course_key=course.id)
world.enroll_user(world.bulk_email_staff, world.bulk_email_course_key)
# Make & register a student
world.register_by_course_key(
course.id,
username='student',
password='test',
is_staff=False
)
# Store the expected recipients
# given each "send to" option
staff_emails = [world.bulk_email_staff.email, world.bulk_email_instructor.email]
world.expected_addresses = {
'course staff': staff_emails,
'students, staff, and instructors': staff_emails + ['student@edx.org']
}
# Dictionary mapping a description of the email recipient
# to the corresponding <option> value in the UI.
SEND_TO_OPTIONS = {
'myself': 'myself',
'course staff': 'staff',
'students, staff, and instructors': 'all'
}
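# For example, SEND_TO_OPTIONS['course staff'] yields 'staff', the <option>
# value that gets selected in the dashboard's "send_to" dropdown below.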
@step(u'I am logged in to the course as "([^"]*)"')
def log_into_the_course(step, role): # pylint: disable=unused-argument
# Store the role
assert_in(role, ['instructor', 'staff'])
# Log in as an instructor or staff member for the course
my_email = world.bulk_email_instructor.email
if role == 'instructor':
world.log_in(
username=world.bulk_email_instructor.username,
password='test',
email=my_email,
name=world.bulk_email_instructor.profile.name
)
else:
my_email = world.bulk_email_staff.email
world.log_in(
username=world.bulk_email_staff.username,
password='test',
email=my_email,
name=world.bulk_email_staff.profile.name
)
# Store the "myself" send-to option
world.expected_addresses['myself'] = [my_email]
@step(u'I send email to "([^"]*)"')
def when_i_send_an_email(step, recipient): # pylint: disable=unused-argument
# Check that the recipient is valid
assert_in(
recipient, SEND_TO_OPTIONS,
msg="Invalid recipient: {}".format(recipient)
)
# Clear the queue of existing emails
while not mail.queue.empty(): # pylint: disable=no-member
mail.queue.get() # pylint: disable=no-member
# Because we flush the database before each run,
# we need to ensure that the email template fixture
# is re-loaded into the database
call_command('loaddata', 'course_email_template.json')
# Go to the email section of the instructor dash
url = '/courses/{}'.format(world.bulk_email_course_key)
world.visit(url)
world.css_click('a[href="{}/instructor"]'.format(url))
world.css_click('a[data-section="send_email"]')
# Select the recipient
world.select_option('send_to', SEND_TO_OPTIONS[recipient])
# Enter subject and message
world.css_fill('input#id_subject', 'Hello')
with world.browser.get_iframe('mce_0_ifr') as iframe:
editor = iframe.find_by_id('tinymce')[0]
editor.fill('test message')
# Click send
world.css_click('input[name="send"]', dismiss_alert=True)
# Expect to see a message that the email was sent
expected_msg = "Your email was successfully queued for sending."
world.wait_for_visible('#request-response')
assert_in(
expected_msg, world.css_text('#request-response'),
msg="Could not find email success message."
)
UNSUBSCRIBE_MSG = 'To stop receiving email like this'
@step(u'Email is sent to "([^"]*)"')
def then_the_email_is_sent(step, recipient): # pylint: disable=unused-argument
# Check that the recipient is valid
assert_in(
recipient, SEND_TO_OPTIONS,
msg="Invalid recipient: {}".format(recipient)
)
# Retrieve messages. Because we are using celery in "always eager"
# mode, we expect all messages to be sent by this point.
messages = []
while not mail.queue.empty(): # pylint: disable=no-member
messages.append(mail.queue.get()) # pylint: disable=no-member
# Check that we got the right number of messages
assert_equal(
len(messages), len(world.expected_addresses[recipient]),
msg="Received {0} instead of {1} messages for {2}".format(
len(messages), len(world.expected_addresses[recipient]), recipient
)
)
# Check that the message properties were correct
recipients = []
for msg in messages:
assert_in('Hello', msg.subject)
assert_in(settings.BULK_EMAIL_DEFAULT_FROM_EMAIL, msg.from_email)
# Message body should have the message we sent
# and an unsubscribe message
assert_in('test message', msg.body)
assert_in(UNSUBSCRIBE_MSG, msg.body)
# Should have alternative HTML form
assert_equal(len(msg.alternatives), 1)
content, mime_type = msg.alternatives[0]
assert_equal(mime_type, 'text/html')
assert_in('test message', content)
assert_in(UNSUBSCRIBE_MSG, content)
# Store the recipient address so we can verify later
recipients.extend(msg.recipients())
# Check that the messages were sent to the right people
# Because "myself" can vary based on who sent the message,
# we use the world.expected_addresses dict we configured
# in an earlier step.
for addr in world.expected_addresses[recipient]:
assert_in(addr, recipients)
| agpl-3.0 |
aimas/TuniErp-8.0 | addons/website_mail/__openerp__.py | 379 | 1623 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Website Mail',
'category': 'Hidden',
'summary': 'Website Module for Mail',
'version': '0.1',
'description': """Glue module holding mail improvements for website.""",
'author': 'OpenERP SA',
'depends': ['website', 'mail', 'email_template'],
'data': [
'views/snippets.xml',
'views/website_mail.xml',
'views/website_email_designer.xml',
'views/email_template_view.xml',
'data/mail_groups.xml',
'security/website_mail.xml',
],
'qweb': [
'static/src/xml/website_mail.xml'
],
'installable': True,
'auto_install': True,
}
| agpl-3.0 |
ghchinoy/tensorflow | tensorflow/contrib/copy_graph/__init__.py | 55 | 1187 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to copy elements between graphs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.copy_graph.python.util import copy_elements
# pylint: disable=wildcard-import
from tensorflow.contrib.copy_graph.python.util.copy_elements import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__, doc_string_modules=[copy_elements])
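# A minimal usage sketch (illustrative; assumes the TF 1.x contrib API and
# user-defined `org_var`, `org_op` and `to_graph` objects):
#
#     from tensorflow.contrib.copy_graph import (
#         copy_op_to_graph, copy_variable_to_graph)
#     copied_var = copy_variable_to_graph(org_var, to_graph)
#     copied_op = copy_op_to_graph(org_op, to_graph, [copied_var])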
| apache-2.0 |
MoKee/android_kernel_sony_msm8974pro | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
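	# syscalls is an autodict: the first lookup of a new id yields an
	# empty autodict, so the increment raises TypeError, which is caught
	# below to seed the count at 1.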
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
joowani/dtags | dtags/commands/tags.py | 1 | 5372 | import json
import sys
from pathlib import Path
from typing import List, Optional, Set, Tuple
from dtags import style
from dtags.commons import (
dtags_command,
get_argparser,
normalize_tags,
prompt_user,
reverse_map,
)
from dtags.files import get_new_config, load_config_file, save_config_file
USAGE = "tags [-j] [-r] [-y] [-c] [-p] [-t TAG [TAG ...]]"
DESCRIPTION = f"""
Manage directory tags.
examples:
# show all tags
{style.command("tags")}
# show tags in JSON format with -j/--json
{style.command("tags --json")}
# show reverse mapping with -r/--reverse
{style.command("tags --reverse")}
# filter specific tags with -t
{style.command("tags -t foo bar baz")}
# clean invalid directories with -c/--clean
{style.command("tags --clean")}
# purge all tags with -p/--purge
{style.command("tags --purge")}
# skip confirmation prompts with -y/--yes
{style.command("tags --clean --yes")}
"""
@dtags_command
def execute(args: Optional[List[str]] = None) -> None:
parser = get_argparser(prog="tags", desc=DESCRIPTION, usage=USAGE)
arg_group = parser.add_mutually_exclusive_group()
parser.add_argument(
"-j",
"--json",
action="store_true",
dest="json",
help="show tags in JSON format",
)
parser.add_argument(
"-r",
"--reverse",
action="store_true",
dest="reverse",
help="show tag to directories relationship",
)
parser.add_argument(
"-y",
"--yes",
action="store_true",
dest="yes",
help="assume yes to prompts",
)
arg_group.add_argument(
"-c",
"--clean",
action="store_true",
dest="clean",
help="clean invalid directories",
)
arg_group.add_argument(
"-p",
"--purge",
action="store_true",
dest="purge",
help="purge all tags",
)
arg_group.add_argument(
"-t",
metavar="TAG",
nargs="+",
dest="tags",
help="tag names to filter",
)
parsed_args = parser.parse_args(sys.argv[1:] if args is None else args)
if parsed_args.reverse and parsed_args.clean:
parser.error("argument -r/--reverse: not allowed with argument -c/--clean")
elif parsed_args.reverse and parsed_args.purge:
parser.error("argument -r/--reverse: not allowed with argument -p/--purge")
elif parsed_args.json and parsed_args.clean:
parser.error("argument -j/--json: not allowed with argument -c/--clean")
elif parsed_args.json and parsed_args.purge:
parser.error("argument -j/--json: not allowed with argument -p/--purge")
elif parsed_args.clean:
clean_tags(skip_prompts=parsed_args.yes)
elif parsed_args.purge:
purge_tags(skip_prompts=parsed_args.yes)
else:
show_tags(
filters=parsed_args.tags,
in_json=parsed_args.json,
in_reverse=parsed_args.reverse,
)
def show_tags(
filters: Optional[List[str]] = None,
in_json: bool = False,
in_reverse: bool = False,
) -> None:
config = load_config_file()
tag_config = config["tags"]
tag_filters = None if filters is None else normalize_tags(filters)
if in_json and in_reverse:
raw_data = {
tag: sorted(dirpath.as_posix() for dirpath in dirpaths)
for tag, dirpaths in reverse_map(tag_config).items()
if not tag_filters or tag in tag_filters
}
print(json.dumps(raw_data, indent=2, sort_keys=True))
elif in_json and not in_reverse:
raw_data = {
dirpath.as_posix(): sorted(tags)
for dirpath, tags in tag_config.items()
if not tag_filters or tags.intersection(tag_filters)
}
print(json.dumps(raw_data, indent=2, sort_keys=True))
elif not in_json and in_reverse:
tag_to_dirpaths = reverse_map(tag_config)
for tag in sorted(tag_to_dirpaths):
if not tag_filters or tag in tag_filters:
print(style.tag(tag))
for dirpath in sorted(tag_to_dirpaths[tag]):
print(" " + style.path(dirpath))
else:
for dirpath, tags in tag_config.items():
if not tag_filters or tags.intersection(tag_filters):
print(style.mapping(dirpath, tags))
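# Illustrative output (hypothetical tag data, not from this module): with a
# config mapping /home/user/proj to {"python", "work"}, `tags --json` prints
# {"/home/user/proj": ["python", "work"]}, while `tags --json --reverse`
# prints {"python": ["/home/user/proj"], "work": ["/home/user/proj"]}.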
def clean_tags(skip_prompts: bool = True) -> None:
config = load_config_file()
tag_config = config["tags"]
diffs: List[Tuple[Path, Set[str]]] = [
(dirpath, tags) for dirpath, tags in tag_config.items() if not dirpath.is_dir()
]
if not diffs:
print("Nothing to clean")
else:
for dirpath, tags in diffs:
print(style.diff(dirpath, del_tags=tags))
del tag_config[dirpath]
if skip_prompts or prompt_user():
save_config_file(config)
print("Tags cleaned successfully")
def purge_tags(skip_prompts: bool = True) -> None:
config = load_config_file()
tag_config = config["tags"]
if not tag_config:
print("Nothing to purge")
else:
for dirpath, tags in tag_config.items():
print(style.diff(dirpath, del_tags=tags))
if skip_prompts or prompt_user():
save_config_file(get_new_config())
print("Tags purged successfully")
| mit |
muntasirsyed/intellij-community | python/helpers/epydoc/docwriter/dotgraph.py | 91 | 53233 | # epydoc -- Graph generation
#
# Copyright (C) 2005 Edward Loper
# Author: Edward Loper <edloper@loper.org>
# URL: <http://epydoc.sf.net>
#
# $Id: dotgraph.py 1663 2007-11-07 15:29:47Z dvarrazzo $
"""
Render Graphviz directed graphs as images. Below are some examples.
.. importgraph::
.. classtree:: epydoc.apidoc.APIDoc
.. packagetree:: epydoc
:see: `The Graphviz Homepage
<http://www.research.att.com/sw/tools/graphviz/>`__
"""
__docformat__ = 'restructuredtext'
import re
import sys
from epydoc import log
from epydoc.apidoc import *
from epydoc.util import *
from epydoc.compat import * # Backwards compatibility
# colors for graphs of APIDocs
MODULE_BG = '#d8e8ff'
CLASS_BG = '#d8ffe8'
SELECTED_BG = '#ffd0d0'
BASECLASS_BG = '#e0b0a0'
SUBCLASS_BG = '#e0b0a0'
ROUTINE_BG = '#e8d0b0' # maybe?
INH_LINK_COLOR = '#800000'
######################################################################
#{ Dot Graphs
######################################################################
DOT_COMMAND = 'dot'
"""The command that should be used to spawn dot"""
class DotGraph:
"""
A ``dot`` directed graph. The contents of the graph are
constructed from the following instance variables:
- `nodes`: A list of `DotGraphNode`\\s, encoding the nodes
that are present in the graph. Each node is characterized
a set of attributes, including an optional label.
- `edges`: A list of `DotGraphEdge`\\s, encoding the edges
that are present in the graph. Each edge is characterized
by a set of attributes, including an optional label.
- `node_defaults`: Default attributes for nodes.
- `edge_defaults`: Default attributes for edges.
- `body`: A string that is appended as-is in the body of
the graph. This can be used to build more complex dot
graphs.
The `link()` method can be used to resolve crossreference links
within the graph. In particular, if the 'href' attribute of any
node or edge is assigned a value of the form ``<name>``, then it
will be replaced by the URL of the object with that name. This
applies to the `body` as well as the `nodes` and `edges`.
To render the graph, use the methods `write()` and `render()`.
Usually, you should call `link()` before you render the graph.
"""
_uids = set()
"""A set of all uids that that have been generated, used to ensure
that each new graph has a unique uid."""
DEFAULT_NODE_DEFAULTS={'fontsize':10, 'fontname': 'Helvetica'}
DEFAULT_EDGE_DEFAULTS={'fontsize':10, 'fontname': 'Helvetica'}
def __init__(self, title, body='', node_defaults=None,
edge_defaults=None, caption=None):
"""
Create a new `DotGraph`.
"""
self.title = title
"""The title of the graph."""
self.caption = caption
"""A caption for the graph."""
self.nodes = []
"""A list of the nodes that are present in the graph.
:type: ``list`` of `DotGraphNode`"""
self.edges = []
"""A list of the edges that are present in the graph.
:type: ``list`` of `DotGraphEdge`"""
self.body = body
"""A string that should be included as-is in the body of the
graph.
:type: ``str``"""
self.node_defaults = node_defaults or self.DEFAULT_NODE_DEFAULTS
"""Default attribute values for nodes."""
self.edge_defaults = edge_defaults or self.DEFAULT_EDGE_DEFAULTS
"""Default attribute values for edges."""
self.uid = re.sub(r'\W', '_', title).lower()
"""A unique identifier for this graph. This can be used as a
filename when rendering the graph. No two `DotGraph`\s will
have the same uid."""
# Encode the title, if necessary.
if isinstance(self.title, unicode):
self.title = self.title.encode('ascii', 'xmlcharrefreplace')
# Make sure the UID isn't too long.
self.uid = self.uid[:30]
# Make sure the UID is unique
if self.uid in self._uids:
n = 2
while ('%s_%s' % (self.uid, n)) in self._uids: n += 1
self.uid = '%s_%s' % (self.uid, n)
self._uids.add(self.uid)
def to_html(self, image_file, image_url, center=True):
"""
        Return the HTML code that should be used to display this graph
(including a client-side image map).
:param image_url: The URL of the image file for this graph;
this should be generated separately with the `write()` method.
"""
# If dotversion >1.8.10, then we can generate the image and
# the cmapx with a single call to dot. Otherwise, we need to
# run dot twice.
if get_dot_version() > [1,8,10]:
cmapx = self._run_dot('-Tgif', '-o%s' % image_file, '-Tcmapx')
if cmapx is None: return '' # failed to render
else:
if not self.write(image_file):
return '' # failed to render
cmapx = self.render('cmapx') or ''
# Decode the cmapx (dot uses utf-8)
try:
cmapx = cmapx.decode('utf-8')
except UnicodeDecodeError:
log.debug('%s: unable to decode cmapx from dot; graph will '
'not have clickable regions' % image_file)
cmapx = ''
title = plaintext_to_html(self.title or '')
caption = plaintext_to_html(self.caption or '')
if title or caption:
css_class = 'graph-with-title'
else:
css_class = 'graph-without-title'
if len(title)+len(caption) > 80:
title_align = 'left'
table_width = ' width="600"'
else:
title_align = 'center'
table_width = ''
        if center: s = '<center>'
        else: s = ''
if title or caption:
s += ('<table border="0" cellpadding="0" cellspacing="0" '
'class="graph"%s>\n <tr><td align="center">\n' %
table_width)
s += (' %s\n <img src="%s" alt=%r usemap="#%s" '
'ismap="ismap" class="%s" />\n' %
(cmapx.strip(), image_url, title, self.uid, css_class))
if title or caption:
s += ' </td></tr>\n <tr><td align=%r>\n' % title_align
if title:
s += '<span class="graph-title">%s</span>' % title
if title and caption:
s += ' -- '
if caption:
s += '<span class="graph-caption">%s</span>' % caption
s += '\n </td></tr>\n</table><br />'
if center: s += '</center>'
return s
def link(self, docstring_linker):
"""
Replace any href attributes whose value is ``<name>`` with
the url of the object whose name is ``<name>``.
"""
# Link xrefs in nodes
self._link_href(self.node_defaults, docstring_linker)
for node in self.nodes:
            self._link_href(node._attribs, docstring_linker)
        # Link xrefs in edges
        self._link_href(self.edge_defaults, docstring_linker)
        for edge in self.edges:
            self._link_href(edge._attribs, docstring_linker)
# Link xrefs in body
def subfunc(m):
url = docstring_linker.url_for(m.group(1))
if url: return 'href="%s"%s' % (url, m.group(2))
else: return ''
self.body = re.sub("href\s*=\s*['\"]?<([\w\.]+)>['\"]?\s*(,?)",
subfunc, self.body)
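    # Illustrative (not in the original source): after link(), an attribute
    # written as href="<epydoc.apidoc.APIDoc>" is replaced by whatever URL
    # the docstring linker returns for that dotted name, and is dropped
    # entirely when the name cannot be resolved.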
def _link_href(self, attribs, docstring_linker):
"""Helper for `link()`"""
if 'href' in attribs:
m = re.match(r'^<([\w\.]+)>$', attribs['href'])
if m:
url = docstring_linker.url_for(m.group(1))
if url: attribs['href'] = url
else: del attribs['href']
def write(self, filename, language='gif'):
"""
Render the graph using the output format `language`, and write
the result to `filename`.
:return: True if rendering was successful.
"""
result = self._run_dot('-T%s' % language,
'-o%s' % filename)
# Decode into unicode, if necessary.
if language == 'cmapx' and result is not None:
result = result.decode('utf-8')
return (result is not None)
def render(self, language='gif'):
"""
Use the ``dot`` command to render this graph, using the output
format `language`. Return the result as a string, or ``None``
if the rendering failed.
"""
return self._run_dot('-T%s' % language)
def _run_dot(self, *options):
try:
result, err = run_subprocess((DOT_COMMAND,)+options,
self.to_dotfile())
if err: log.warning("Graphviz dot warning(s):\n%s" % err)
except OSError, e:
log.warning("Unable to render Graphviz dot graph:\n%s" % e)
#log.debug(self.to_dotfile())
return None
return result
def to_dotfile(self):
"""
Return the string contents of the dot file that should be used
to render this graph.
"""
lines = ['digraph %s {' % self.uid,
'node [%s]' % ','.join(['%s="%s"' % (k,v) for (k,v)
in self.node_defaults.items()]),
'edge [%s]' % ','.join(['%s="%s"' % (k,v) for (k,v)
in self.edge_defaults.items()])]
if self.body:
lines.append(self.body)
lines.append('/* Nodes */')
for node in self.nodes:
lines.append(node.to_dotfile())
lines.append('/* Edges */')
for edge in self.edges:
lines.append(edge.to_dotfile())
lines.append('}')
# Default dot input encoding is UTF-8
return u'\n'.join(lines).encode('utf-8')
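# A minimal sketch (not in the original source) of the dot input produced by
# to_dotfile() for a graph with two default-styled nodes and one edge
# (attribute order within [...] may vary):
#
#   digraph example_graph {
#   node [fontsize="10",fontname="Helvetica"]
#   edge [fontsize="10",fontname="Helvetica"]
#   /* Nodes */
#   node0 [label="A"]
#   node1 [label="B"]
#   /* Edges */
#   node0 -> node1
#   }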
class DotGraphNode:
_next_id = 0
def __init__(self, label=None, html_label=None, **attribs):
if label is not None and html_label is not None:
raise ValueError('Use label or html_label, not both.')
if label is not None: attribs['label'] = label
self._html_label = html_label
self._attribs = attribs
self.id = self.__class__._next_id
self.__class__._next_id += 1
self.port = None
def __getitem__(self, attr):
return self._attribs[attr]
def __setitem__(self, attr, val):
if attr == 'html_label':
self._attribs.pop('label')
self._html_label = val
else:
if attr == 'label': self._html_label = None
self._attribs[attr] = val
def to_dotfile(self):
"""
Return the dot commands that should be used to render this node.
"""
attribs = ['%s="%s"' % (k,v) for (k,v) in self._attribs.items()
if v is not None]
if self._html_label:
attribs.insert(0, 'label=<%s>' % (self._html_label,))
if attribs: attribs = ' [%s]' % (','.join(attribs))
return 'node%d%s' % (self.id, attribs)
class DotGraphEdge:
def __init__(self, start, end, label=None, **attribs):
"""
:type start: `DotGraphNode`
:type end: `DotGraphNode`
"""
assert isinstance(start, DotGraphNode)
assert isinstance(end, DotGraphNode)
if label is not None: attribs['label'] = label
self.start = start #: :type: `DotGraphNode`
self.end = end #: :type: `DotGraphNode`
self._attribs = attribs
def __getitem__(self, attr):
return self._attribs[attr]
def __setitem__(self, attr, val):
self._attribs[attr] = val
def to_dotfile(self):
"""
Return the dot commands that should be used to render this edge.
"""
# Set head & tail ports, if the nodes have preferred ports.
attribs = self._attribs.copy()
if (self.start.port is not None and 'headport' not in attribs):
attribs['headport'] = self.start.port
if (self.end.port is not None and 'tailport' not in attribs):
attribs['tailport'] = self.end.port
# Convert attribs to a string
attribs = ','.join(['%s="%s"' % (k,v) for (k,v) in attribs.items()
if v is not None])
if attribs: attribs = ' [%s]' % attribs
# Return the dotfile edge.
return 'node%d -> node%d%s' % (self.start.id, self.end.id, attribs)
######################################################################
#{ Specialized Nodes for UML Graphs
######################################################################
class DotGraphUmlClassNode(DotGraphNode):
"""
A specialized dot graph node used to display `ClassDoc`\s using
UML notation. The node is rendered as a table with three cells:
the top cell contains the class name; the middle cell contains a
list of attributes; and the bottom cell contains a list of
operations::
+-------------+
| ClassName |
+-------------+
| x: int |
| ... |
+-------------+
| f(self, x) |
| ... |
+-------------+
`DotGraphUmlClassNode`\s may be *collapsed*, in which case they are
drawn as a simple box containing the class name::
+-------------+
| ClassName |
+-------------+
Attributes with types corresponding to documented classes can
optionally be converted into edges, using `link_attributes()`.
:todo: Add more options?
- show/hide operation signature
- show/hide operation signature types
- show/hide operation signature return type
- show/hide attribute types
- use qualifiers
"""
def __init__(self, class_doc, linker, context, collapsed=False,
bgcolor=CLASS_BG, **options):
"""
Create a new `DotGraphUmlClassNode` based on the class
`class_doc`.
:Parameters:
`linker` : `markup.DocstringLinker`
Used to look up URLs for classes.
`context` : `APIDoc`
The context in which this node will be drawn; dotted
names will be contextualized to this context.
`collapsed` : ``bool``
If true, then display this node as a simple box.
`bgcolor` : ```str```
The background color for this node.
`options` : ``dict``
A set of options used to control how the node should
be displayed.
:Keywords:
- `show_private_vars`: If false, then private variables
are filtered out of the attributes & operations lists.
(Default: *False*)
- `show_magic_vars`: If false, then magic variables
(such as ``__init__`` and ``__add__``) are filtered out of
the attributes & operations lists. (Default: *True*)
- `show_inherited_vars`: If false, then inherited variables
are filtered out of the attributes & operations lists.
(Default: *False*)
- `max_attributes`: The maximum number of attributes that
should be listed in the attribute box. If the class has
more than this number of attributes, some will be
            elided. Ellipsis is marked with ``'...'``.
- `max_operations`: The maximum number of operations that
should be listed in the operation box.
- `add_nodes_for_linked_attributes`: If true, then
`link_attributes()` will create new a collapsed node for
the types of a linked attributes if no node yet exists for
that type.
"""
if not isinstance(class_doc, ClassDoc):
raise TypeError('Expected a ClassDoc as 1st argument')
self.class_doc = class_doc
"""The class represented by this node."""
self.linker = linker
"""Used to look up URLs for classes."""
self.context = context
"""The context in which the node will be drawn."""
self.bgcolor = bgcolor
"""The background color of the node."""
self.options = options
"""Options used to control how the node is displayed."""
self.collapsed = collapsed
"""If true, then draw this node as a simple box."""
self.attributes = []
"""The list of VariableDocs for attributes"""
self.operations = []
"""The list of VariableDocs for operations"""
self.qualifiers = []
"""List of (key_label, port) tuples."""
self.edges = []
"""List of edges used to represent this node's attributes.
These should not be added to the `DotGraph`; this node will
generate their dotfile code directly."""
# Initialize operations & attributes lists.
show_private = options.get('show_private_vars', False)
show_magic = options.get('show_magic_vars', True)
show_inherited = options.get('show_inherited_vars', False)
for var in class_doc.sorted_variables:
name = var.canonical_name[-1]
if ((not show_private and var.is_public == False) or
(not show_magic and re.match('__\w+__$', name)) or
(not show_inherited and var.container != class_doc)):
pass
elif isinstance(var.value, RoutineDoc):
self.operations.append(var)
else:
self.attributes.append(var)
# Initialize our dot node settings.
tooltip = self._summary(class_doc)
if tooltip:
# dot chokes on a \n in the attribute...
tooltip = " ".join(tooltip.split())
else:
tooltip = class_doc.canonical_name
DotGraphNode.__init__(self, tooltip=tooltip,
width=0, height=0, shape='plaintext',
href=linker.url_for(class_doc) or NOOP_URL)
#/////////////////////////////////////////////////////////////////
#{ Attribute Linking
#/////////////////////////////////////////////////////////////////
SIMPLE_TYPE_RE = re.compile(
r'^([\w\.]+)$')
"""A regular expression that matches descriptions of simple types."""
COLLECTION_TYPE_RE = re.compile(
r'^(list|set|sequence|tuple|collection) of ([\w\.]+)$')
"""A regular expression that matches descriptions of collection types."""
MAPPING_TYPE_RE = re.compile(
r'^(dict|dictionary|map|mapping) from ([\w\.]+) to ([\w\.]+)$')
"""A regular expression that matches descriptions of mapping types."""
MAPPING_TO_COLLECTION_TYPE_RE = re.compile(
r'^(dict|dictionary|map|mapping) from ([\w\.]+) to '
r'(list|set|sequence|tuple|collection) of ([\w\.]+)$')
"""A regular expression that matches descriptions of mapping types
whose value type is a collection."""
OPTIONAL_TYPE_RE = re.compile(
r'^(None or|optional) ([\w\.]+)$|^([\w\.]+) or None$')
"""A regular expression that matches descriptions of optional types."""
def link_attributes(self, nodes):
"""
Convert any attributes with type descriptions corresponding to
documented classes to edges. The following type descriptions
are currently handled:
- Dotted names: Create an attribute edge to the named type,
labelled with the variable name.
- Collections: Create an attribute edge to the named type,
labelled with the variable name, and marked with '*' at the
type end of the edge.
- Mappings: Create an attribute edge to the named type,
labelled with the variable name, connected to the class by
a qualifier box that contains the key type description.
- Optional: Create an attribute edge to the named type,
labelled with the variable name, and marked with '0..1' at
the type end of the edge.
The edges created by `link_attributes()` are handled internally
by `DotGraphUmlClassNode`; they should *not* be added directly
to the `DotGraph`.
:param nodes: A dictionary mapping from `ClassDoc`\s to
`DotGraphUmlClassNode`\s, used to look up the nodes for
attribute types. If the ``add_nodes_for_linked_attributes``
option is used, then new nodes will be added to this
dictionary for any types that are not already listed.
These added nodes must be added to the `DotGraph`.
"""
# Try to convert each attribute var into a graph edge. If
# _link_attribute returns true, then it succeeded, so remove
# that var from our attribute list; otherwise, leave that var
# in our attribute list.
self.attributes = [var for var in self.attributes
if not self._link_attribute(var, nodes)]
def _link_attribute(self, var, nodes):
"""
Helper for `link_attributes()`: try to convert the attribute
variable `var` into an edge, and add that edge to
`self.edges`. Return ``True`` iff the variable was
successfully converted to an edge (in which case, it should be
removed from the attributes list).
"""
type_descr = self._type_descr(var) or self._type_descr(var.value)
# Simple type.
m = self.SIMPLE_TYPE_RE.match(type_descr)
if m and self._add_attribute_edge(var, nodes, m.group(1)):
return True
# Collection type.
m = self.COLLECTION_TYPE_RE.match(type_descr)
if m and self._add_attribute_edge(var, nodes, m.group(2),
headlabel='*'):
return True
# Optional type.
m = self.OPTIONAL_TYPE_RE.match(type_descr)
if m and self._add_attribute_edge(var, nodes, m.group(2) or m.group(3),
headlabel='0..1'):
return True
# Mapping type.
m = self.MAPPING_TYPE_RE.match(type_descr)
if m:
port = 'qualifier_%s' % var.name
if self._add_attribute_edge(var, nodes, m.group(3),
tailport='%s:e' % port):
self.qualifiers.append( (m.group(2), port) )
return True
# Mapping to collection type.
m = self.MAPPING_TO_COLLECTION_TYPE_RE.match(type_descr)
if m:
port = 'qualifier_%s' % var.name
if self._add_attribute_edge(var, nodes, m.group(4), headlabel='*',
tailport='%s:e' % port):
self.qualifiers.append( (m.group(2), port) )
return True
# We were unable to link this attribute.
return False
def _add_attribute_edge(self, var, nodes, type_str, **attribs):
"""
Helper for `link_attributes()`: try to add an edge for the
given attribute variable `var`. Return ``True`` if
successful.
"""
# Use the type string to look up a corresponding ValueDoc.
type_doc = self.linker.docindex.find(type_str, var)
if not type_doc: return False
# Make sure the type is a class.
if not isinstance(type_doc, ClassDoc): return False
# Get the type ValueDoc's node. If it doesn't have one (and
# add_nodes_for_linked_attributes=True), then create it.
type_node = nodes.get(type_doc)
if not type_node:
if self.options.get('add_nodes_for_linked_attributes', True):
type_node = DotGraphUmlClassNode(type_doc, self.linker,
self.context, collapsed=True)
nodes[type_doc] = type_node
else:
return False
# Add an edge from self to the target type node.
# [xx] should I set constraint=false here?
attribs.setdefault('headport', 'body')
attribs.setdefault('tailport', 'body')
url = self.linker.url_for(var) or NOOP_URL
self.edges.append(DotGraphEdge(self, type_node, label=var.name,
arrowhead='open', href=url,
tooltip=var.canonical_name, labeldistance=1.5,
**attribs))
return True
#/////////////////////////////////////////////////////////////////
#{ Helper Methods
#/////////////////////////////////////////////////////////////////
def _summary(self, api_doc):
"""Return a plaintext summary for `api_doc`"""
if not isinstance(api_doc, APIDoc): return ''
if api_doc.summary in (None, UNKNOWN): return ''
summary = api_doc.summary.to_plaintext(None).strip()
return plaintext_to_html(summary)
_summary = classmethod(_summary)
def _type_descr(self, api_doc):
"""Return a plaintext type description for `api_doc`"""
if not hasattr(api_doc, 'type_descr'): return ''
if api_doc.type_descr in (None, UNKNOWN): return ''
type_descr = api_doc.type_descr.to_plaintext(self.linker).strip()
return plaintext_to_html(type_descr)
def _tooltip(self, var_doc):
"""Return a tooltip for `var_doc`."""
return (self._summary(var_doc) or
self._summary(var_doc.value) or
var_doc.canonical_name)
#/////////////////////////////////////////////////////////////////
#{ Rendering
#/////////////////////////////////////////////////////////////////
def _attribute_cell(self, var_doc):
# Construct the label
label = var_doc.name
type_descr = (self._type_descr(var_doc) or
self._type_descr(var_doc.value))
if type_descr: label += ': %s' % type_descr
# Get the URL
url = self.linker.url_for(var_doc) or NOOP_URL
# Construct & return the pseudo-html code
return self._ATTRIBUTE_CELL % (url, self._tooltip(var_doc), label)
def _operation_cell(self, var_doc):
"""
:todo: do 'word wrapping' on the signature, by starting a new
row in the table, if necessary. How to indent the new
            line?  Maybe use align=right?  I don't think dot has a
            <br/>-style line break, though.
:todo: Optionally add return type info?
"""
# Construct the label (aka function signature)
func_doc = var_doc.value
args = [self._operation_arg(n, d, func_doc) for (n, d)
in zip(func_doc.posargs, func_doc.posarg_defaults)]
args = [plaintext_to_html(arg) for arg in args]
if func_doc.vararg: args.append('*'+func_doc.vararg)
if func_doc.kwarg: args.append('**'+func_doc.kwarg)
label = '%s(%s)' % (var_doc.name, ', '.join(args))
# Get the URL
url = self.linker.url_for(var_doc) or NOOP_URL
# Construct & return the pseudo-html code
return self._OPERATION_CELL % (url, self._tooltip(var_doc), label)
def _operation_arg(self, name, default, func_doc):
"""
:todo: Handle tuple args better
:todo: Optionally add type info?
"""
if default is None:
return '%s' % name
else:
pyval_repr = default.summary_pyval_repr().to_plaintext(None)
return '%s=%s' % (name, pyval_repr)
def _qualifier_cell(self, key_label, port):
return self._QUALIFIER_CELL % (port, self.bgcolor, key_label)
#: args: (url, tooltip, label)
_ATTRIBUTE_CELL = '''
<TR><TD ALIGN="LEFT" HREF="%s" TOOLTIP="%s">%s</TD></TR>
'''
#: args: (url, tooltip, label)
_OPERATION_CELL = '''
<TR><TD ALIGN="LEFT" HREF="%s" TOOLTIP="%s">%s</TD></TR>
'''
#: args: (port, bgcolor, label)
_QUALIFIER_CELL = '''
<TR><TD VALIGN="BOTTOM" PORT="%s" BGCOLOR="%s" BORDER="1">%s</TD></TR>
'''
_QUALIFIER_DIV = '''
<TR><TD VALIGN="BOTTOM" HEIGHT="10" WIDTH="10" FIXEDSIZE="TRUE"></TD></TR>
'''
#: Args: (rowspan, bgcolor, classname, attributes, operations, qualifiers)
_LABEL = '''
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0" CELLPADDING="0">
<TR><TD ROWSPAN="%s">
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"
CELLPADDING="0" PORT="body" BGCOLOR="%s">
<TR><TD>%s</TD></TR>
<TR><TD><TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
%s</TABLE></TD></TR>
<TR><TD><TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
%s</TABLE></TD></TR>
</TABLE>
</TD></TR>
%s
</TABLE>'''
_COLLAPSED_LABEL = '''
<TABLE CELLBORDER="0" BGCOLOR="%s" PORT="body">
<TR><TD>%s</TD></TR>
</TABLE>'''
def _get_html_label(self):
# Get the class name & contextualize it.
classname = self.class_doc.canonical_name
classname = classname.contextualize(self.context.canonical_name)
# If we're collapsed, display the node as a single box.
if self.collapsed:
return self._COLLAPSED_LABEL % (self.bgcolor, classname)
# Construct the attribute list. (If it's too long, truncate)
attrib_cells = [self._attribute_cell(a) for a in self.attributes]
max_attributes = self.options.get('max_attributes', 15)
if len(attrib_cells) == 0:
attrib_cells = ['<TR><TD></TD></TR>']
elif len(attrib_cells) > max_attributes:
attrib_cells[max_attributes-2:-1] = ['<TR><TD>...</TD></TR>']
attributes = ''.join(attrib_cells)
# Construct the operation list. (If it's too long, truncate)
oper_cells = [self._operation_cell(a) for a in self.operations]
max_operations = self.options.get('max_operations', 15)
if len(oper_cells) == 0:
oper_cells = ['<TR><TD></TD></TR>']
elif len(oper_cells) > max_operations:
oper_cells[max_operations-2:-1] = ['<TR><TD>...</TD></TR>']
operations = ''.join(oper_cells)
# Construct the qualifier list & determine the rowspan.
if self.qualifiers:
rowspan = len(self.qualifiers)*2+2
div = self._QUALIFIER_DIV
qualifiers = div+div.join([self._qualifier_cell(l,p) for
(l,p) in self.qualifiers])+div
else:
rowspan = 1
qualifiers = ''
# Put it all together.
return self._LABEL % (rowspan, self.bgcolor, classname,
attributes, operations, qualifiers)
def to_dotfile(self):
attribs = ['%s="%s"' % (k,v) for (k,v) in self._attribs.items()]
attribs.append('label=<%s>' % self._get_html_label())
s = 'node%d%s' % (self.id, ' [%s]' % (','.join(attribs)))
if not self.collapsed:
for edge in self.edges:
s += '\n' + edge.to_dotfile()
return s
class DotGraphUmlModuleNode(DotGraphNode):
"""
    A specialized dot graph node used to display `ModuleDoc`\s using
UML notation. Simple module nodes look like::
.----.
+------------+
| modulename |
+------------+
Packages nodes are drawn with their modules & subpackages nested
inside::
.----.
+----------------------------------------+
| packagename |
| |
| .----. .----. .----. |
| +---------+ +---------+ +---------+ |
| | module1 | | module2 | | module3 | |
| +---------+ +---------+ +---------+ |
| |
+----------------------------------------+
"""
def __init__(self, module_doc, linker, context, collapsed=False,
excluded_submodules=(), **options):
self.module_doc = module_doc
self.linker = linker
self.context = context
self.collapsed = collapsed
self.options = options
self.excluded_submodules = excluded_submodules
DotGraphNode.__init__(self, shape='plaintext',
href=linker.url_for(module_doc) or NOOP_URL,
tooltip=module_doc.canonical_name)
#: Expects: (color, color, url, tooltip, body)
_MODULE_LABEL = '''
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0" ALIGN="LEFT">
<TR><TD ALIGN="LEFT" VALIGN="BOTTOM" HEIGHT="8" WIDTH="16"
FIXEDSIZE="true" BGCOLOR="%s" BORDER="1" PORT="tab"></TD></TR>
<TR><TD ALIGN="LEFT" VALIGN="TOP" BGCOLOR="%s" BORDER="1" WIDTH="20"
PORT="body" HREF="%s" TOOLTIP="%s">%s</TD></TR>
</TABLE>'''
#: Expects: (name, body_rows)
_NESTED_BODY = '''
<TABLE BORDER="0" CELLBORDER="0" CELLPADDING="0" CELLSPACING="0">
<TR><TD ALIGN="LEFT">%s</TD></TR>
%s
</TABLE>'''
#: Expects: (cells,)
_NESTED_BODY_ROW = '''
<TR><TD>
<TABLE BORDER="0" CELLBORDER="0"><TR>%s</TR></TABLE>
</TD></TR>'''
def _get_html_label(self, package):
"""
:Return: (label, depth, width) where:
- ``label`` is the HTML label
- ``depth`` is the depth of the package tree (for coloring)
- ``width`` is the max width of the HTML label, roughly in
units of characters.
"""
MAX_ROW_WIDTH = 80 # unit is roughly characters.
pkg_name = package.canonical_name
pkg_url = self.linker.url_for(package) or NOOP_URL
if (not package.is_package or len(package.submodules) == 0 or
self.collapsed):
pkg_color = self._color(package, 1)
label = self._MODULE_LABEL % (pkg_color, pkg_color,
pkg_url, pkg_name, pkg_name[-1])
return (label, 1, len(pkg_name[-1])+3)
# Get the label for each submodule, and divide them into rows.
row_list = ['']
row_width = 0
max_depth = 0
max_row_width = len(pkg_name[-1])+3
for submodule in package.submodules:
if submodule in self.excluded_submodules: continue
# Get the submodule's label.
label, depth, width = self._get_html_label(submodule)
# Check if we should start a new row.
if row_width > 0 and width+row_width > MAX_ROW_WIDTH:
row_list.append('')
row_width = 0
# Add the submodule's label to the row.
row_width += width
row_list[-1] += '<TD ALIGN="LEFT">%s</TD>' % label
# Update our max's.
max_depth = max(depth, max_depth)
max_row_width = max(row_width, max_row_width)
# Figure out which color to use.
pkg_color = self._color(package, depth+1)
# Assemble & return the label.
rows = ''.join([self._NESTED_BODY_ROW % r for r in row_list])
body = self._NESTED_BODY % (pkg_name, rows)
label = self._MODULE_LABEL % (pkg_color, pkg_color,
pkg_url, pkg_name, body)
return label, max_depth+1, max_row_width
_COLOR_DIFF = 24
def _color(self, package, depth):
if package == self.context: return SELECTED_BG
else:
# Parse the base color.
            if re.match(r'^#[0-9a-fA-F]{6}$', MODULE_BG):
base = int(MODULE_BG[1:], 16)
else:
base = int('d8e8ff', 16)
red = (base & 0xff0000) >> 16
green = (base & 0x00ff00) >> 8
blue = (base & 0x0000ff)
# Make it darker with each level of depth. (but not *too*
# dark -- package name needs to be readable)
red = max(64, red-(depth-1)*self._COLOR_DIFF)
green = max(64, green-(depth-1)*self._COLOR_DIFF)
blue = max(64, blue-(depth-1)*self._COLOR_DIFF)
# Convert it back to a color string
return '#%06x' % ((red<<16)+(green<<8)+blue)
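    # Worked example (illustrative): with the default MODULE_BG '#d8e8ff' and
    # _COLOR_DIFF 24, depth=2 subtracts 24 from each channel:
    # 0xd8 -> 0xc0, 0xe8 -> 0xd0, 0xff -> 0xe7, i.e. '#c0d0e7'.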
def to_dotfile(self):
attribs = ['%s="%s"' % (k,v) for (k,v) in self._attribs.items()]
label, depth, width = self._get_html_label(self.module_doc)
attribs.append('label=<%s>' % label)
return 'node%d%s' % (self.id, ' [%s]' % (','.join(attribs)))
######################################################################
#{ Graph Generation Functions
######################################################################
def package_tree_graph(packages, linker, context=None, **options):
"""
Return a `DotGraph` that graphically displays the package
hierarchies for the given packages.
"""
if options.get('style', 'uml') == 'uml': # default to uml style?
if get_dot_version() >= [2]:
return uml_package_tree_graph(packages, linker, context,
**options)
elif 'style' in options:
log.warning('UML style package trees require dot version 2.0+')
graph = DotGraph('Package Tree for %s' % name_list(packages, context),
body='ranksep=.3\n;nodesep=.1\n',
edge_defaults={'dir':'none'})
# Options
if options.get('dir', 'TB') != 'TB': # default: top-to-bottom
graph.body += 'rankdir=%s\n' % options.get('dir', 'TB')
# Get a list of all modules in the package.
queue = list(packages)
modules = set(packages)
for module in queue:
queue.extend(module.submodules)
modules.update(module.submodules)
# Add a node for each module.
nodes = add_valdoc_nodes(graph, modules, linker, context)
# Add an edge for each package/submodule relationship.
for module in modules:
for submodule in module.submodules:
graph.edges.append(DotGraphEdge(nodes[module], nodes[submodule],
headport='tab'))
return graph
def uml_package_tree_graph(packages, linker, context=None, **options):
"""
Return a `DotGraph` that graphically displays the package
hierarchies for the given packages as a nested set of UML
symbols.
"""
graph = DotGraph('Package Tree for %s' % name_list(packages, context))
# Remove any packages whose containers are also in the list.
root_packages = []
for package1 in packages:
for package2 in packages:
if (package1 is not package2 and
package2.canonical_name.dominates(package1.canonical_name)):
break
else:
root_packages.append(package1)
# If the context is a variable, then get its value.
if isinstance(context, VariableDoc) and context.value is not UNKNOWN:
context = context.value
# Return a graph with one node for each root package.
for package in root_packages:
graph.nodes.append(DotGraphUmlModuleNode(package, linker, context))
return graph
######################################################################
def class_tree_graph(bases, linker, context=None, **options):
"""
Return a `DotGraph` that graphically displays the class
hierarchy for the given classes. Options:
- exclude
      - dir: LR|RL|BT requests a left-to-right, right-to-left, or
        bottom-to-top drawing (corresponds to the dot option
        'rankdir').
"""
if isinstance(bases, ClassDoc): bases = [bases]
graph = DotGraph('Class Hierarchy for %s' % name_list(bases, context),
body='ranksep=0.3\n',
edge_defaults={'sametail':True, 'dir':'none'})
# Options
if options.get('dir', 'TB') != 'TB': # default: top-down
graph.body += 'rankdir=%s\n' % options.get('dir', 'TB')
exclude = options.get('exclude', ())
# Find all superclasses & subclasses of the given classes.
classes = set(bases)
queue = list(bases)
for cls in queue:
if isinstance(cls, ClassDoc):
if cls.subclasses not in (None, UNKNOWN):
subclasses = cls.subclasses
if exclude:
subclasses = [d for d in subclasses if d not in exclude]
queue.extend(subclasses)
classes.update(subclasses)
queue = list(bases)
for cls in queue:
if isinstance(cls, ClassDoc):
if cls.bases not in (None, UNKNOWN):
bases = cls.bases
if exclude:
bases = [d for d in bases if d not in exclude]
queue.extend(bases)
classes.update(bases)
# Add a node for each cls.
classes = [d for d in classes if isinstance(d, ClassDoc)
if d.pyval is not object]
nodes = add_valdoc_nodes(graph, classes, linker, context)
# Add an edge for each package/subclass relationship.
edges = set()
for cls in classes:
for subcls in cls.subclasses:
if cls in nodes and subcls in nodes:
edges.add((nodes[cls], nodes[subcls]))
graph.edges = [DotGraphEdge(src,dst) for (src,dst) in edges]
return graph
######################################################################
def uml_class_tree_graph(class_doc, linker, context=None, **options):
"""
Return a `DotGraph` that graphically displays the class hierarchy
for the given class, using UML notation. Options:
- max_attributes
- max_operations
- show_private_vars
- show_magic_vars
- link_attributes
"""
nodes = {} # ClassDoc -> DotGraphUmlClassNode
exclude = options.get('exclude', ())
# Create nodes for class_doc and all its bases.
for cls in class_doc.mro():
if cls.pyval is object: continue # don't include `object`.
if cls in exclude: break # stop if we get to an excluded class.
if cls == class_doc: color = SELECTED_BG
else: color = BASECLASS_BG
nodes[cls] = DotGraphUmlClassNode(cls, linker, context,
show_inherited_vars=False,
collapsed=False, bgcolor=color)
# Create nodes for all class_doc's subclasses.
queue = [class_doc]
for cls in queue:
if (isinstance(cls, ClassDoc) and
cls.subclasses not in (None, UNKNOWN)):
for subcls in cls.subclasses:
subcls_name = subcls.canonical_name[-1]
if subcls not in nodes and subcls not in exclude:
queue.append(subcls)
nodes[subcls] = DotGraphUmlClassNode(
subcls, linker, context, collapsed=True,
bgcolor=SUBCLASS_BG)
# Only show variables in the class where they're defined for
# *class_doc*.
mro = class_doc.mro()
for name, var in class_doc.variables.items():
i = mro.index(var.container)
for base in mro[i+1:]:
if base.pyval is object: continue # don't include `object`.
overridden_var = base.variables.get(name)
if overridden_var and overridden_var.container == base:
try:
if isinstance(overridden_var.value, RoutineDoc):
nodes[base].operations.remove(overridden_var)
else:
nodes[base].attributes.remove(overridden_var)
except ValueError:
pass # var is filtered (eg private or magic)
# Keep track of which nodes are part of the inheritance graph
# (since link_attributes might add new nodes)
inheritance_nodes = set(nodes.values())
# Turn attributes into links.
if options.get('link_attributes', True):
for node in nodes.values():
node.link_attributes(nodes)
# Make sure that none of the new attribute edges break the
# rank ordering assigned by inheritance.
for edge in node.edges:
if edge.end in inheritance_nodes:
edge['constraint'] = 'False'
# Construct the graph.
graph = DotGraph('UML class diagram for %s' % class_doc.canonical_name,
body='ranksep=.2\n;nodesep=.3\n')
graph.nodes = nodes.values()
# Add inheritance edges.
for node in inheritance_nodes:
for base in node.class_doc.bases:
if base in nodes:
graph.edges.append(DotGraphEdge(nodes[base], node,
dir='back', arrowtail='empty',
headport='body', tailport='body',
color=INH_LINK_COLOR, weight=100,
style='bold'))
# And we're done!
return graph
######################################################################
def import_graph(modules, docindex, linker, context=None, **options):
graph = DotGraph('Import Graph', body='ranksep=.3\n;nodesep=.3\n')
# Options
if options.get('dir', 'RL') != 'TB': # default: right-to-left.
graph.body += 'rankdir=%s\n' % options.get('dir', 'RL')
# Add a node for each module.
nodes = add_valdoc_nodes(graph, modules, linker, context)
# Edges.
edges = set()
for dst in modules:
if dst.imports in (None, UNKNOWN): continue
for var_name in dst.imports:
for i in range(len(var_name), 0, -1):
val_doc = docindex.find(var_name[:i], context)
if isinstance(val_doc, ModuleDoc):
if val_doc in nodes and dst in nodes:
edges.add((nodes[val_doc], nodes[dst]))
break
graph.edges = [DotGraphEdge(src,dst) for (src,dst) in edges]
return graph
######################################################################
def call_graph(api_docs, docindex, linker, context=None, **options):
"""
:param options:
- ``dir``: rankdir for the graph. (default=LR)
- ``add_callers``: also include callers for any of the
routines in ``api_docs``. (default=False)
- ``add_callees``: also include callees for any of the
routines in ``api_docs``. (default=False)
:todo: Add an ``exclude`` option?
"""
if docindex.callers is None:
log.warning("No profiling information for call graph!")
return DotGraph('Call Graph') # return None instead?
if isinstance(context, VariableDoc):
context = context.value
# Get the set of requested functions.
functions = []
for api_doc in api_docs:
# If it's a variable, get its value.
if isinstance(api_doc, VariableDoc):
api_doc = api_doc.value
# Add the value to the functions list.
if isinstance(api_doc, RoutineDoc):
functions.append(api_doc)
elif isinstance(api_doc, NamespaceDoc):
for vardoc in api_doc.variables.values():
if isinstance(vardoc.value, RoutineDoc):
functions.append(vardoc.value)
# Filter out functions with no callers/callees?
    # [xx] this isn't quite right, esp. if the add_callers or add_callees
    # options are false.
functions = [f for f in functions if
(f in docindex.callers) or (f in docindex.callees)]
# Add any callers/callees of the selected functions
func_set = set(functions)
if options.get('add_callers', False) or options.get('add_callees', False):
for func_doc in functions:
if options.get('add_callers', False):
func_set.update(docindex.callers.get(func_doc, ()))
if options.get('add_callees', False):
func_set.update(docindex.callees.get(func_doc, ()))
graph = DotGraph('Call Graph for %s' % name_list(api_docs, context),
node_defaults={'shape':'box', 'width': 0, 'height': 0})
# Options
if options.get('dir', 'LR') != 'TB': # default: left-to-right
graph.body += 'rankdir=%s\n' % options.get('dir', 'LR')
nodes = add_valdoc_nodes(graph, func_set, linker, context)
# Find the edges.
edges = set()
for func_doc in functions:
for caller in docindex.callers.get(func_doc, ()):
if caller in nodes:
edges.add( (nodes[caller], nodes[func_doc]) )
for callee in docindex.callees.get(func_doc, ()):
if callee in nodes:
edges.add( (nodes[func_doc], nodes[callee]) )
graph.edges = [DotGraphEdge(src,dst) for (src,dst) in edges]
return graph
######################################################################
#{ Dot Version
######################################################################
_dot_version = None
_DOT_VERSION_RE = re.compile(r'dot version ([\d\.]+)')
def get_dot_version():
global _dot_version
if _dot_version is None:
try:
out, err = run_subprocess([DOT_COMMAND, '-V'])
version_info = err or out
m = _DOT_VERSION_RE.match(version_info)
if m:
_dot_version = [int(x) for x in m.group(1).split('.')]
else:
_dot_version = (0,)
except OSError, e:
_dot_version = (0,)
log.info('Detected dot version %s' % _dot_version)
return _dot_version
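# Illustrative (not in the original source): a banner such as
# 'dot version 2.38.0' parses to [2, 38, 0], so list comparisons like
# get_dot_version() >= [2] select dot 2.x and later.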
######################################################################
#{ Helper Functions
######################################################################
def add_valdoc_nodes(graph, val_docs, linker, context):
"""
:todo: Use different node styles for different subclasses of APIDoc
"""
nodes = {}
val_docs = sorted(val_docs, key=lambda d:d.canonical_name)
for i, val_doc in enumerate(val_docs):
label = val_doc.canonical_name.contextualize(context.canonical_name)
node = nodes[val_doc] = DotGraphNode(label)
graph.nodes.append(node)
specialize_valdoc_node(node, val_doc, context, linker.url_for(val_doc))
return nodes
NOOP_URL = 'javascript:void(0);'
MODULE_NODE_HTML = '''
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0"
CELLPADDING="0" PORT="table" ALIGN="LEFT">
<TR><TD ALIGN="LEFT" VALIGN="BOTTOM" HEIGHT="8" WIDTH="16" FIXEDSIZE="true"
BGCOLOR="%s" BORDER="1" PORT="tab"></TD></TR>
<TR><TD ALIGN="LEFT" VALIGN="TOP" BGCOLOR="%s" BORDER="1"
PORT="body" HREF="%s" TOOLTIP="%s">%s</TD></TR>
</TABLE>'''.strip()
def specialize_valdoc_node(node, val_doc, context, url):
"""
    Update the style attributes of `node` to reflect its type
and context.
"""
    # We can only use html-style nodes if dot_version >= 2.
dot_version = get_dot_version()
# If val_doc or context is a variable, get its value.
if isinstance(val_doc, VariableDoc) and val_doc.value is not UNKNOWN:
val_doc = val_doc.value
if isinstance(context, VariableDoc) and context.value is not UNKNOWN:
context = context.value
# Set the URL. (Do this even if it points to the page we're
# currently on; otherwise, the tooltip is ignored.)
node['href'] = url or NOOP_URL
if isinstance(val_doc, ModuleDoc) and dot_version >= [2]:
node['shape'] = 'plaintext'
if val_doc == context: color = SELECTED_BG
else: color = MODULE_BG
node['tooltip'] = node['label']
node['html_label'] = MODULE_NODE_HTML % (color, color, url,
val_doc.canonical_name,
node['label'])
node['width'] = node['height'] = 0
node.port = 'body'
elif isinstance(val_doc, RoutineDoc):
node['shape'] = 'box'
node['style'] = 'rounded'
node['width'] = 0
node['height'] = 0
node['label'] = '%s()' % node['label']
node['tooltip'] = node['label']
if val_doc == context:
node['fillcolor'] = SELECTED_BG
node['style'] = 'filled,rounded,bold'
else:
node['shape'] = 'box'
node['width'] = 0
node['height'] = 0
node['tooltip'] = node['label']
if val_doc == context:
node['fillcolor'] = SELECTED_BG
node['style'] = 'filled,bold'
def name_list(api_docs, context=None):
if context is not None:
context = context.canonical_name
names = [str(d.canonical_name.contextualize(context)) for d in api_docs]
if len(names) == 0: return ''
if len(names) == 1: return '%s' % names[0]
elif len(names) == 2: return '%s and %s' % (names[0], names[1])
else:
return '%s, and %s' % (', '.join(names[:-1]), names[-1])
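# For example (illustrative): name_list renders one name as 'a', two names as
# 'a and b', and three names as 'a, b, and c'.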
| apache-2.0 |
orisi/orisi | src/oracle/handlers/handlers.py | 2 | 1334 | from timelock_contract.timelock_create_handler import TimelockCreateHandler
from bounty_contract.bounty_create_handler import BountyCreateHandler
from bounty_contract.bounty_redeem_handler import GuessPasswordHandler
from transactionsigner import TransactionSigner
from safe_timelock_contract.timelock_mark_release_handler import TimelockMarkReleaseHandler
from safe_timelock_contract.safe_timelock_create_handler import SafeTimelockCreateHandler
op_handlers = {
'sign': TransactionSigner,
'timelock_create': TimelockCreateHandler,
'bounty_create': BountyCreateHandler,
'bounty_redeem': GuessPasswordHandler,
'safe_timelock_create': SafeTimelockCreateHandler,
'timelock_mark_release': TimelockMarkReleaseHandler,
}
OPERATION_REQUIRED_FIELDS = {
'timelock_create': ['message_id', 'sum_satoshi', 'prevtxs', 'outputs', 'miners_fee_satoshi', 'return_address', 'locktime', 'pubkey_list', 'req_sigs'],
'bounty_create': ['prevtx', 'locktime', 'message_id', 'sum_amount', 'miners_fee', 'oracle_fees', 'pubkey_list', 'req_sigs', 'password_hash', 'return_address'],
'bounty_redeem': ['pwtxid', 'passwords'],
'timelock_mark_release': [],
'safe_timelock_create': ['message_id', 'oracle_fees', 'miners_fee_satoshi','return_address', 'locktime', 'pubkey_list', 'req_sigs'],
}
PROTOCOL_VERSION = '0.12'
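# A minimal dispatch sketch (not part of the original module; get_handler,
# operation and message are hypothetical names):
#
#   def get_handler(operation, message):
#       missing = [f for f in OPERATION_REQUIRED_FIELDS.get(operation, [])
#                  if f not in message]
#       if missing:
#           raise ValueError('missing fields: %s' % ', '.join(missing))
#       return op_handlers[operation]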
| mit |
NL66278/OCB | addons/account/report/__init__.py | 381 | 1513 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_central_journal
import account_general_journal
import account_journal
import account_balance
import account_partner_balance
import account_general_ledger
import account_partner_ledger
import account_print_overdue
import account_aged_partner_balance
import report_vat
import account_invoice_report
import account_report
import account_entries_report
import account_analytic_entries_report
import account_treasury_report
import account_financial_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Alphadelta14/ansible | lib/ansible/cli/__init__.py | 3 | 22439 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import operator
import optparse
import os
import sys
import time
import yaml
import re
import getpass
import subprocess
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.utils.unicode import to_bytes
from ansible.utils.display import Display
from ansible.utils.path import is_executable
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
#FIXME: epilog parsing: OptionParser.format_epilog = lambda self, formatter: self.epilog
def format_help(self, formatter=None, epilog=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
class CLI(object):
''' code behind bin/ansible* programs '''
VALID_ACTIONS = ['No Actions']
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
def __init__(self, args, display=None):
"""
Base init method for all command line programs
"""
self.args = args
self.options = None
self.parser = None
self.action = None
if display is None:
self.display = Display()
else:
self.display = display
def set_action(self):
"""
Get the action the user wants to execute from the sys argv list.
"""
for i in range(0,len(self.args)):
arg = self.args[i]
if arg in self.VALID_ACTIONS:
self.action = arg
del self.args[i]
break
if not self.action:
raise AnsibleOptionsError("Missing required action")
def execute(self):
"""
Actually runs a child defined method using the execute_<action> pattern
"""
fn = getattr(self, "execute_%s" % self.action)
fn()
def parse(self):
raise Exception("Need to implement!")
def run(self):
if self.options.verbosity > 0:
if C.CONFIG_FILE:
self.display.display("Using %s as config file" % C.CONFIG_FILE)
else:
self.display.display("No config file found; using defaults")
@staticmethod
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
''' prompt for vault password and/or password change '''
vault_pass = None
new_vault_pass = None
try:
if ask_vault_pass:
vault_pass = getpass.getpass(prompt="Vault password: ")
if ask_vault_pass and confirm_vault:
vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
if vault_pass != vault_pass2:
                    raise AnsibleError("Passwords do not match")
if ask_new_vault_pass:
new_vault_pass = getpass.getpass(prompt="New Vault password: ")
if ask_new_vault_pass and confirm_new:
new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
if new_vault_pass != new_vault_pass2:
                    raise AnsibleError("Passwords do not match")
except EOFError:
pass
# enforce no newline chars at the end of passwords
if vault_pass:
vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
if new_vault_pass:
new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
return vault_pass, new_vault_pass
def ask_passwords(self):
''' prompt for connection and become passwords if needed '''
op = self.options
sshpass = None
becomepass = None
become_prompt = ''
try:
if op.ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper()
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % op.become_method.upper()
if op.become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if op.ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
except EOFError:
pass
return (sshpass, becomepass)
def normalize_become_options(self):
''' this keeps backwards compatibility with sudo/su self.options '''
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER
if self.options.become:
pass
elif self.options.sudo:
self.options.become = True
self.options.become_method = 'sudo'
elif self.options.su:
self.options.become = True
self.options.become_method = 'su'
def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False):
''' check for conflicting options '''
op = self.options
if vault_opts:
# Check for vault related conflicts
if (op.ask_vault_pass and op.vault_password_file):
self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
if runas_opts:
# Check for privilege escalation conflicts
if (op.su or op.su_user or op.ask_su_pass) and \
(op.sudo or op.sudo_user or op.ask_sudo_pass) or \
(op.su or op.su_user or op.ask_su_pass) and \
(op.become or op.become_user or op.become_ask_pass) or \
(op.sudo or op.sudo_user or op.ask_sudo_pass) and \
(op.become or op.become_user or op.become_ask_pass):
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
"and su arguments ('-su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass')"
" are exclusive of each other")
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
@staticmethod
def expand_tilde(option, opt, value, parser):
setattr(parser.values, option.dest, os.path.expanduser(value))
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False, epilog=None, fork_opts=False):
''' create an options parser for most ansible scripts '''
#FIXME: implement epilog parsing
#OptionParser.format_epilog = lambda self, formatter: self.epilog
# base opts
parser = SortedOptParser(usage, version=CLI.version("%prog"))
parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
if runtask_opts:
parser.add_option('-i', '--inventory-file', dest='inventory',
help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST,
default=C.DEFAULT_HOST_LIST, action="callback", callback=CLI.expand_tilde, type=str)
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-M', '--module-path', dest='module_path',
help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None,
action="callback", callback=CLI.expand_tilde, type=str)
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON", default=[])
if fork_opts:
parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
if vault_opts:
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE,
dest='vault_password_file', help="vault password file", action="callback",
callback=CLI.expand_tilde, type=str)
if subset_opts:
parser.add_option('-t', '--tags', dest='tags', default='all',
help="only run plays and tasks tagged with these values")
parser.add_option('--skip-tags', dest='skip_tags',
help="only run plays and tasks whose tags do not match these values")
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if runas_opts:
# the privileged user defaults to root later on, so leaving this option unset can be detected here
parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password (deprecated, use become)')
parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
parser.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
parser.add_option('-R', '--su-user', default=None,
help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (nopasswd implied)")
parser.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='string',
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
parser.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if connect_opts:
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for connection password')
parser.add_option('--private-key','--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
parser.add_option('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
if diff_opts:
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check"
)
if meta_opts:
parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache")
return parser
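# Illustrative usage sketch (not part of the original class): a CLI
# subclass would typically build and use the parser like this; the flag
# values below are assumptions for the example.
#
#     parser = CLI.base_parser(usage="%prog <host-pattern> [options]",
#                              runas_opts=True, connect_opts=True,
#                              vault_opts=True, fork_opts=True)
#     options, args = parser.parse_args(['-u', 'deploy', '-f', '10'])
#     # options.remote_user == 'deploy', options.forks == 10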
@staticmethod
def version(prog):
''' return ansible version '''
result = "{0} {1}".format(prog, __version__)
gitinfo = CLI._gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result += "\n config file = %s" % C.CONFIG_FILE
result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
return result
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
# expensive call, use with care
ansible_version_string = CLI.version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except ValueError:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
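# Illustrative sketch (not part of the original class): for a plain
# version string "2.0.1" the method above returns (values assumed for
# the example):
#
#     {'string': '2.0.1', 'full': '2.0.1',
#      'major': 2, 'minor': 0, 'revision': 1}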
@staticmethod
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# The .git file may contain an absolute path to the real git directory.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
branch = f.readline().split('/')[-1].rstrip("\n")
f.close()
branch_path = os.path.join(repo_path, "refs", "heads", branch)
if os.path.exists(branch_path):
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = branch[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
@staticmethod
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = CLI._git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def pager(self, text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
self.display.display(text)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
self.display.display(text)
else:
self.pager_pipe(text, os.environ['PAGER'])
elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0:
self.pager_pipe(text, 'less')
else:
self.display.display(text)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=text.encode(sys.stdout.encoding))
except IOError:
pass
except KeyboardInterrupt:
pass
@classmethod
def tty_ify(cls, text):
t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = cls._URL.sub(r"\1", t) # U(word) => word
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
@staticmethod
def read_vault_password_file(vault_password_file):
"""
Read a vault password from a file, or, if the file is executable,
execute the script and retrieve the password from STDOUT.
"""
this_path = os.path.realpath(os.path.expanduser(vault_password_file))
if not os.path.exists(this_path):
raise AnsibleError("The vault password file %s was not found" % this_path)
if is_executable(this_path):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
except OSError as e:
raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e))
stdout, stderr = p.communicate()
vault_pass = stdout.strip('\r\n')
else:
try:
f = open(this_path, "rb")
vault_pass = f.read().strip()
f.close()
except (OSError, IOError) as e:
raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))
return vault_pass
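# Illustrative usage sketch (not part of the original class): both plain
# password files and executable scripts are supported; the paths below
# are assumptions for the example.
#
#     pw = CLI.read_vault_password_file('~/.vault_pass.txt')   # read as file
#     pw = CLI.read_vault_password_file('~/bin/vault-pass.sh') # run, read STDOUT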
| gpl-3.0 |
sahutd/youtube-dl | youtube_dl/extractor/libsyn.py | 39 | 1996 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import unified_strdate
class LibsynIE(InfoExtractor):
_VALID_URL = r'https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://html5-player.libsyn.com/embed/episode/id/3377616/',
'md5': '443360ee1b58007bc3dcf09b41d093bb',
'info_dict': {
'id': '3377616',
'ext': 'mp3',
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
formats = [{
'url': media_url,
} for media_url in set(re.findall(r'var\s+mediaURL(?:Libsyn)?\s*=\s*"([^"]+)"', webpage))]
podcast_title = self._search_regex(
r'<h2>([^<]+)</h2>', webpage, 'title')
episode_title = self._search_regex(
r'<h3>([^<]+)</h3>', webpage, 'title', default=None)
title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
description = self._html_search_regex(
r'<div id="info_text_body">(.+?)</div>', webpage,
'description', fatal=False)
thumbnail = self._search_regex(
r'<img[^>]+class="info-show-icon"[^>]+src="([^"]+)"',
webpage, 'thumbnail', fatal=False)
release_date = unified_strdate(self._search_regex(
r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': release_date,
'formats': formats,
}
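# Illustrative sketch (not part of the original extractor): _VALID_URL
# captures the numeric episode id from an embed URL; the URL below reuses
# the one from _TEST above.
#
#     import re
#     m = re.match(LibsynIE._VALID_URL,
#                  'http://html5-player.libsyn.com/embed/episode/id/3377616/')
#     assert m.group('id') == '3377616'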
| unlicense |
yangdw/repo.python | src/annotation/WeRoBot/werobot/session/saekvstorage.py | 14 | 1134 | # -*- coding: utf-8 -*-
from . import SessionStorage
class SaeKVDBStorage(SessionStorage):
"""
SaeKVDBStorage uses SAE's KVDB to store your sessions ::
import werobot
from werobot.session.saekvstorage import SaeKVDBStorage
session_storage = SaeKVDBStorage()
robot = werobot.WeRoBot(token="token", enable_session=True,
session_storage=session_storage)
KVDB support must be enabled in the SAE control panel first.
:param prefix: prefix of session data keys in KVDB. Defaults to ``ws_``
"""
def __init__(self, prefix='ws_'):
try:
import sae.kvdb
except ImportError:
raise RuntimeError("SaeKVDBStorage requires SAE environment")
self.kv = sae.kvdb.KVClient()
self.prefix = prefix
def key_name(self, s):
return '{prefix}{s}'.format(prefix=self.prefix, s=s)
def get(self, id):
return self.kv.get(self.key_name(id)) or {}
def set(self, id, value):
return self.kv.set(self.key_name(id), value)
def delete(self, id):
return self.kv.delete(self.key_name(id))
| mit |
mmardini/django | django/views/static.py | 45 | 5174 | """
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import stat
import posixpath
import re
from django.http import (Http404, HttpResponse, HttpResponseRedirect,
HttpResponseNotModified, StreamingHttpResponse)
from django.template import loader, Template, Context, TemplateDoesNotExist
from django.utils.http import http_date, parse_http_date
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext as _, ugettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
(r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(fullpath)
content_type = content_type or 'application/octet-stream'
response = StreamingHttpResponse(open(fullpath, 'rb'),
content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
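# Illustrative sketch (not part of the original module): the loop above
# sanitises the requested path before joining it with document_root;
# the paths below are assumptions for the example.
#
#     '/a//b/../c' -> normalised to 'a/c' by normpath before the loop runs
#     '../../etc/passwd' -> '..' parts stripped; redirected to 'etc/passwd'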
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% ifnotequal directory "/" %}
<li><a href="../">../</a></li>
{% endifnotequal %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template(['static/directory_index.html',
'static/directory_index'])
except TemplateDoesNotExist:
t = Template(DEFAULT_DIRECTORY_INDEX_TEMPLATE, name='Default directory index template')
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
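# Illustrative sketch (not part of the original module): the header value,
# timestamps and sizes below are assumptions for the example.
#
#     hdr = 'Sat, 29 Oct 1994 19:43:31 GMT; length=3156'
#     was_modified_since(hdr, mtime=720000000, size=3156)   # False: not newer
#     was_modified_since(hdr, mtime=2000000000, size=3156)  # True: modified
#     was_modified_since(None, mtime=0, size=0)             # True: no header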
| bsd-3-clause |
tonnrueter/pymca_devel | PyMca/TiffStack.py | 1 | 14610 | #/*##########################################################################
# Copyright (C) 2004-2012 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
#############################################################################*/
__author__ = "V.A. Sole - ESRF Data Analysis"
import sys
import os
import numpy
from PyMca import DataObject
from PyMca import TiffIO
if sys.version > '2.9':
long = int
SOURCE_TYPE = "TiffStack"
class TiffArray(object):
def __init__(self, filelist, shape, dtype, imagestack=True):
self.__fileList = filelist
self.__shape = shape
self.__dtype = dtype
self.__imageStack = imagestack
if imagestack:
self.__nImagesPerFile = int(shape[0]/len(filelist))
else:
self.__nImagesPerFile = int(shape[-1]/len(filelist))
self.__oldFileNumber = -1
def __getitem__(self, args0):
standardSlice = True
indices = []
outputShape = []
scalarArgs = []
args = []
if not hasattr(args0, "__len__"):
args0 = [args0]
for i in range(len(self.__shape)):
if i < len(args0):
args.append(args0[i])
else:
args.append(slice(None, None, None))
for i in range(len(args)):
if isinstance(args[i], slice):
start = args[i].start
stop = args[i].stop
step = args[i].step
if start is None:
start = 0
if stop is None:
stop = self.__shape[i]
if step is None:
step = 1
if step < 1:
raise ValueError("Step must be >= 1 (got %d)" % step)
if start < 0:
start = self.__shape[i] + start
if stop < 0:
stop = self.__shape[i] + stop
if stop == start:
raise ValueError("Zero-length selections are not allowed")
indices.append(list(range(start, stop, step)))
elif type(args[i]) == type([]):
if len(args[i]):
indices.append([int(x) for x in args[i]])
else:
standardSlice = False
elif type(args[i]) in [type(1), type(long(1))]:
start = args[i]
if start < 0:
start = self.__shape[i] + start
stop = start + 1
step = 1
args[i] = slice(start, stop, step)
indices.append(list(range(start, stop, step)))
scalarArgs.append(i)
else:
standardSlice = False
if not standardSlice:
print("args = ", args)
raise NotImplemented("__getitem__(self, args) only works on slices")
if len(indices) < 3:
print("input args = ", args0)
print("working args = ", args)
print("indices = ", indices)
raise NotImplementedError("__getitem__(self, args) only works on slices")
outputShape = [len(indices[0]), len(indices[1]), len(indices[2])]
outputArray = numpy.zeros(outputShape, dtype=self.__dtype)
# nbFiles = len(self.__fileList)
nImagesPerFile = self.__nImagesPerFile
if self.__imageStack:
i = 0
rowMin = min(indices[1])
rowMax = max(indices[1])
for imageIndex in indices[0]:
fileNumber = int(imageIndex/nImagesPerFile)
if fileNumber != self.__oldFileNumber:
self.__tmpInstance = TiffIO.TiffIO(self.__fileList[fileNumber],
mode='rb+')
self.__oldFileNumber = fileNumber
imageNumber = imageIndex % nImagesPerFile
imageData = self.__tmpInstance.getData(imageNumber,
rowMin=rowMin,
rowMax=rowMax)
try:
outputArray[i,:,:] = imageData[args[1],args[2]]
except:
print("outputArray[i,:,:].shape =",outputArray[i,:,:].shape)
print("imageData[args[1],args[2]].shape = " , imageData[args[1],args[2]].shape)
print("input args = ", args0)
print("working args = ", args)
print("indices = ", indices)
print("scalarArgs = ", scalarArgs)
raise
i += 1
else:
i = 0
rowMin = min(indices[0])
rowMax = max(indices[0])
for imageIndex in indices[-1]:
fileNumber = int(imageIndex/nImagesPerFile)
if fileNumber != self.__oldFileNumber:
self.__tmpInstance = TiffIO.TiffIO(self.__fileList[fileNumber],
mode='rb+')
self.__oldFileNumber = fileNumber
imageNumber = imageIndex % nImagesPerFile
imageData = self.__tmpInstance.getData(imageNumber,
rowMin=rowMin,
rowMax=rowMax)
outputArray[:,:, i] = imageData[args[0],args[1]]
i += 1
if len(scalarArgs):
finalShape = []
for i in range(len(outputShape)):
if i in scalarArgs:
continue
finalShape.append(outputShape[i])
outputArray.shape = finalShape
return outputArray
def getShape(self):
return self.__shape
shape = property(getShape)
def getDtype(self):
return self.__dtype
dtype = property(getDtype)
def getSize(self):
s = 1
for item in self.__shape:
s *= item
return s
size = property(getSize)
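# Illustrative usage sketch (not part of the original class): a TiffArray
# behaves like a read-only 3D array whose data is fetched lazily from
# disk; the file name and shape below are assumptions for the example.
#
#     arr = TiffArray(['stack.tif'], shape=(100, 512, 512),
#                     dtype=numpy.float32, imagestack=True)
#     roi = arr[0:10, 100:200, 100:200] # reads only rows 100-199 of 10 images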
class TiffStack(DataObject.DataObject):
def __init__(self, filelist=None, imagestack=None, dtype=None):
DataObject.DataObject.__init__(self)
self.sourceType = SOURCE_TYPE
if imagestack is None:
self.__imageStack = True
else:
self.__imageStack = imagestack
self.__dtype = dtype
if filelist is not None:
if type(filelist) != type([]):
filelist = [filelist]
if len(filelist) == 1:
self.loadIndexedStack(filelist)
else:
self.loadFileList(filelist)
def loadFileList(self, filelist, dynamic=False, fileindex=0):
if type(filelist) != type([]):
filelist = [filelist]
#retain the file list
self.sourceName = filelist
#the number of files
nbFiles=len(filelist)
#the instance used to access the first file
fileInstance = TiffIO.TiffIO(filelist[0])
#the number of images per file
nImagesPerFile = fileInstance.getNumberOfImages()
#get the dimensions from the image itself
tmpImage = fileInstance.getImage(0)
if self.__dtype is None:
self.__dtype = tmpImage.dtype
nRows, nCols = tmpImage.shape
#stack shape
if self.__imageStack:
shape = (nbFiles * nImagesPerFile, nRows, nCols)
else:
shape = (nRows, nCols, nbFiles * nImagesPerFile)
#we can create the stack
if not dynamic:
try:
data = numpy.zeros(shape,
self.__dtype)
except (MemoryError, ValueError):
dynamic = True
if not dynamic:
imageIndex = 0
self.onBegin(nbFiles * nImagesPerFile)
for i in range(nbFiles):
tmpInstance =TiffIO.TiffIO(filelist[i])
for j in range(nImagesPerFile):
tmpImage = tmpInstance.getImage(j)
if self.__imageStack:
data[imageIndex,:,:] = tmpImage
else:
data[:,:,imageIndex] = tmpImage
imageIndex += 1
self.incrProgressBar = imageIndex
self.onProgress(imageIndex)
self.onEnd()
if dynamic:
data = TiffArray(filelist,
shape,
self.__dtype,
imagestack=self.__imageStack)
self.info = {}
self.data = data
shape = self.data.shape
for i in range(len(shape)):
key = 'Dim_%d' % (i+1,)
self.info[key] = shape[i]
if self.__imageStack:
self.info["McaIndex"] = 0
self.info["FileIndex"] = 1
else:
self.info["McaIndex"] = 2
self.info["FileIndex"] = 0
self.info["SourceType"] = SOURCE_TYPE
self.info["SourceName"] = self.sourceName
def loadIndexedStack(self, filename, begin=None, end=None, skip=None, fileindex=0):
#if begin is None: begin = 0
if type(filename) == type([]):
filename = filename[0]
if not os.path.exists(filename):
raise IOError("File %s does not exists" % filename)
name = os.path.basename(filename)
n = len(name)
i = 1
numbers = ['0', '1', '2', '3', '4', '5',
'6', '7', '8','9']
while (i <= n):
c = name[n-i:n-i+1]
if c in numbers:
break
i += 1
suffix = name[n-i+1:]
if len(name) == len(suffix):
#just one file, one should use standard widget
#and not this one.
self.loadFileList(filename, fileindex=fileindex)
else:
nchain = []
while (i<=n):
c = name[n-i:n-i+1]
if c not in numbers:
break
else:
nchain.append(c)
i += 1
number = ""
nchain.reverse()
for c in nchain:
number += c
fformat = "%" + "0%dd" % len(number)
if (len(number) + len(suffix)) == len(name):
prefix = ""
else:
prefix = name[0:n-i+1]
prefix = os.path.join(os.path.dirname(filename),prefix)
if not os.path.exists(prefix + number + suffix):
print("Internal error in TIFFStack")
print("file should exist: %s " % (prefix + number + suffix))
return
i = 0
if begin is None:
begin = 0
testname = prefix+fformat % begin+suffix
while not os.path.exists(prefix+fformat % begin+suffix):
begin += 1
testname = prefix+fformat % begin+suffix
if len(testname) > len(filename): break
i = begin
else:
i = begin
if not os.path.exists(prefix+fformat % i+suffix):
raise ValueError("Invalid start index file = %s" % \
(prefix+fformat % i+suffix))
f = prefix+fformat % i+suffix
filelist = []
while os.path.exists(f):
filelist.append(f)
i += 1
if end is not None:
if i > end:
break
f = prefix+fformat % i+suffix
self.loadFileList(filelist, fileindex=fileindex)
def onBegin(self, n):
pass
def onProgress(self, n):
pass
def onEnd(self):
pass
def test():
from PyMca import StackBase
testFileName = "TiffTest.tif"
nrows = 2000
ncols = 2000
#create a dummy stack with 100 images
nImages = 100
imagestack = True
a = numpy.ones((nrows, ncols), numpy.float32)
if not os.path.exists(testFileName):
print("Creating test filename %s" % testFileName)
tif = TiffIO.TiffIO(testFileName, mode = 'wb+')
for i in range(nImages):
data = (a * i).astype(numpy.float32)
if i == 1:
tif = TiffIO.TiffIO(testFileName, mode = 'rb+')
tif.writeImage(data,
info={'Title':'Image %d of %d' % (i+1, nImages)})
tif = None
stackData = TiffStack(imagestack=imagestack)
stackData.loadFileList([testFileName], dynamic=True)
if 0:
stack = StackBase.StackBase()
stack.setStack(stackData)
print("This should be 0 = %f" % stack.calculateROIImages(0, 0)['ROI'].sum())
print("This should be %f = %f" %\
(a.sum(),stack.calculateROIImages(1, 2)['ROI'].sum()))
if imagestack:
print("%f should be = %f" %\
(stackData.data[0:10,:,:].sum(),
stack.calculateROIImages(0, 10)['ROI'].sum()))
print("Test small ROI 10 should be = %f" %\
stackData.data[10:11,[10],11].sum())
print("Test small ROI 40 should be = %f" %\
stackData.data[10:11,[10,12,14,16],11].sum())
else:
print("%f should be = %f" %\
(stackData.data[:,:, 0:10].sum(),
stack.calculateROIImages(0, 10)['ROI'].sum()))
print("Test small ROI %f" %\
stackData.data[10:11,[29],:].sum())
else:
from PyMca import PyMcaQt as qt
from PyMca import QStackWidget
app = qt.QApplication([])
w = QStackWidget.QStackWidget()
print("Setting stack")
w.setStack(stackData)
w.show()
app.exec_()
if __name__ == "__main__":
test()
| gpl-2.0 |
actuaryzhang/spark | python/pyspark/conf.py | 20 | 7601 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.conf import SparkConf
>>> from pyspark.context import SparkContext
>>> conf = SparkConf()
>>> conf.setMaster("local").setAppName("My app")
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.master")
u'local'
>>> conf.get("spark.app.name")
u'My app'
>>> sc = SparkContext(conf=conf)
>>> sc.master
u'local'
>>> sc.appName
u'My app'
>>> sc.sparkHome is None
True
>>> conf = SparkConf(loadDefaults=False)
>>> conf.setSparkHome("/path")
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.home")
u'/path'
>>> conf.setExecutorEnv("VAR1", "value1")
<pyspark.conf.SparkConf object at ...>
>>> conf.setExecutorEnv(pairs = [("VAR3", "value3"), ("VAR4", "value4")])
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.executorEnv.VAR1")
u'value1'
>>> print(conf.toDebugString())
spark.executorEnv.VAR1=value1
spark.executorEnv.VAR3=value3
spark.executorEnv.VAR4=value4
spark.home=/path
>>> sorted(conf.getAll(), key=lambda p: p[0])
[(u'spark.executorEnv.VAR1', u'value1'), (u'spark.executorEnv.VAR3', u'value3'), \
(u'spark.executorEnv.VAR4', u'value4'), (u'spark.home', u'/path')]
>>> conf._jconf.setExecutorEnv("VAR5", "value5")
JavaObject id...
>>> print(conf.toDebugString())
spark.executorEnv.VAR1=value1
spark.executorEnv.VAR3=value3
spark.executorEnv.VAR4=value4
spark.executorEnv.VAR5=value5
spark.home=/path
"""
__all__ = ['SparkConf']
import sys
import re
if sys.version > '3':
unicode = str
__doc__ = re.sub(r"(\W|^)[uU](['])", r'\1\2', __doc__)
class SparkConf(object):
"""
Configuration for a Spark application. Used to set various Spark
parameters as key-value pairs.
Most of the time, you would create a SparkConf object with
``SparkConf()``, which will load values from `spark.*` Java system
properties as well. In this case, any parameters you set directly on
the :class:`SparkConf` object take priority over system properties.
For unit tests, you can also call ``SparkConf(false)`` to skip
loading external settings and get the same configuration no matter
what the system properties are.
All setter methods in this class support chaining. For example,
you can write ``conf.setMaster("local").setAppName("My app")``.
.. note:: Once a SparkConf object is passed to Spark, it is cloned
and can no longer be modified by the user.
"""
def __init__(self, loadDefaults=True, _jvm=None, _jconf=None):
"""
Create a new Spark configuration.
:param loadDefaults: whether to load values from Java system
properties (True by default)
:param _jvm: internal parameter used to pass a handle to the
Java VM; does not need to be set by users
:param _jconf: Optionally pass in an existing SparkConf handle
to use its parameters
"""
if _jconf:
self._jconf = _jconf
else:
from pyspark.context import SparkContext
_jvm = _jvm or SparkContext._jvm
if _jvm is not None:
# JVM is created, so create self._jconf directly through JVM
self._jconf = _jvm.SparkConf(loadDefaults)
self._conf = None
else:
# JVM is not created, so store data in self._conf first
self._jconf = None
self._conf = {}
def set(self, key, value):
"""Set a configuration property."""
# Try to set self._jconf first if JVM is created, set self._conf if JVM is not created yet.
if self._jconf is not None:
self._jconf.set(key, unicode(value))
else:
self._conf[key] = unicode(value)
return self
def setIfMissing(self, key, value):
"""Set a configuration property, if not already set."""
if self.get(key) is None:
self.set(key, value)
return self
def setMaster(self, value):
"""Set master URL to connect to."""
self.set("spark.master", value)
return self
def setAppName(self, value):
"""Set application name."""
self.set("spark.app.name", value)
return self
def setSparkHome(self, value):
"""Set path where Spark is installed on worker nodes."""
self.set("spark.home", value)
return self
def setExecutorEnv(self, key=None, value=None, pairs=None):
"""Set an environment variable to be passed to executors."""
if (key is not None and pairs is not None) or (key is None and pairs is None):
raise Exception("Either pass one key-value pair or a list of pairs")
elif key is not None:
self.set("spark.executorEnv." + key, value)
elif pairs is not None:
for (k, v) in pairs:
self.set("spark.executorEnv." + k, v)
return self
def setAll(self, pairs):
"""
Set multiple parameters, passed as a list of key-value pairs.
:param pairs: list of key-value pairs to set
"""
for (k, v) in pairs:
self.set(k, v)
return self
def get(self, key, defaultValue=None):
"""Get the configured value for some key, or return a default otherwise."""
if defaultValue is None: # Py4J doesn't call the right get() if we pass None
if self._jconf is not None:
if not self._jconf.contains(key):
return None
return self._jconf.get(key)
else:
if key not in self._conf:
return None
return self._conf[key]
else:
if self._jconf is not None:
return self._jconf.get(key, defaultValue)
else:
return self._conf.get(key, defaultValue)
def getAll(self):
"""Get all values as a list of key-value pairs."""
if self._jconf is not None:
return [(elem._1(), elem._2()) for elem in self._jconf.getAll()]
else:
return self._conf.items()
def contains(self, key):
"""Does this configuration contain a given key?"""
if self._jconf is not None:
return self._jconf.contains(key)
else:
return key in self._conf
def toDebugString(self):
"""
Returns a printable version of the configuration, as a list of
key=value pairs, one per line.
"""
if self._jconf is not None:
return self._jconf.toDebugString()
else:
return '\n'.join('%s=%s' % (k, v) for k, v in self._conf.items())
def _test():
import doctest
(failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
rickyHong/Tensorflow_modi | tensorflow/python/ops/sparse_ops_test.py | 5 | 7482 | """Tests for Python ops defined in sparse_ops."""
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self, dtype):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, types.int64))
def _SparseTensor_2x3x4(self, dtype):
ind = np.array([
[0, 0, 1],
[0, 1, 0], [0, 1, 2],
[1, 0, 3],
[1, 1, 1], [1, 1, 3],
[1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, types.int64))
def testInt32(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(types.int32)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33))
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testInt64(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(types.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testHigherRank(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_2x3x4(types.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 200).eval()
expected_output = np.zeros((2, 3, 200), dtype=np.bool)
expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12),
(1, 0, 103), (1, 1, 111), (1, 1, 113), (1, 2, 122)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
class SparseRetainTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.int32),
constant_op.constant(shape, types.int64))
def testBasic(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]])
self.assertAllEqual(output.values, [0, 14, 32])
self.assertAllEqual(output.shape, [5, 6])
def testRetainNone(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
to_retain = np.zeros((6,), dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, np.array([]).reshape((0, 2)))
self.assertAllEqual(output.values, [])
self.assertAllEqual(output.shape, [5, 6])
def testMismatchedRetainShape(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6()
to_retain = np.array([1, 0, 0, 1, 0], dtype=np.bool)
with self.assertRaises(ValueError):
sparse_ops.sparse_retain(sp_input, to_retain)
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.int32),
constant_op.constant(shape, types.int64))
def _SparseTensor_String5x6(self):
ind = np.array([
[0, 0],
[1, 0], [1, 3], [1, 4],
[3, 2], [3, 3]])
val = np.array(["a", "b", "c", "d", "e", "f"])
shape = np.array([5, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.string),
constant_op.constant(shape, types.int64))
def _SparseTensor_2x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
val = np.array([0, 10, 13, 14])
shape = np.array([2, 6])
return ops.SparseTensor(
constant_op.constant(ind, types.int64),
constant_op.constant(val, types.int32),
constant_op.constant(shape, types.int64))
def testFillNumber(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
self.assertAllEqual(output.shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testFillString(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_String5x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, ""))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values, ["a", "b", "c", "d", "", "e", "f", ""])
self.assertAllEqual(output.shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testNoEmptyRows(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
self.assertAllEqual(output.values, [0, 10, 13, 14])
self.assertAllEqual(output.shape, [2, 6])
self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(np.bool))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
wilson208/MapMaker | MapMaker/MapMaker/env/Lib/encodings/mac_iceland.py | 593 | 13754 | """ Python Character Mapping Codec mac_iceland generated from 'MAPPINGS/VENDORS/APPLE/ICELAND.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-iceland',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xdd' # 0xA0 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\xd0' # 0xDC -> LATIN CAPITAL LETTER ETH
u'\xf0' # 0xDD -> LATIN SMALL LETTER ETH
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xfe' # 0xDF -> LATIN SMALL LETTER THORN
u'\xfd' # 0xE0 -> LATIN SMALL LETTER Y WITH ACUTE
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
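# Illustrative sketch (not part of the generated module): round-tripping a
# character through the two tables.
#
#     codecs.charmap_encode(u'\xde', 'strict', encoding_table)  # THORN -> 0xDE
#     codecs.charmap_decode(b'\xde', 'strict', decoding_table)  # 0xDE -> THORN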
| apache-2.0 |
shzygmyx/Adaboost | boosting.py | 1 | 13043 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 14 14:39:38 2016
@author: Meng Yuxian
This is an implementation of <Improved boosting algorithms using
confidence-rated predictions>, Schapire, 1999.
"""
from math import e, log
import numpy as np
from sklearn.tree import DecisionTreeClassifier
class Adaboost():
"""
Adaboost(X, y, estimator = DecisionTreeClassifier, itern = 20, mode = "sign")
Basic Adaboost to solve two-class problem
Parameters
----------
X: numpy 2d array (m samples * n features)
y: numpy 1d array (m samples' label)
estimator: base_estimator of boosting
itern: number of iterations
mode: "sign" mode outputs the label directly, while "num" mode outputs a
confidence rate x. The more positive x is, the more likely the label is
Adaboost.cls0; the more negative x is, the more likely the label is not
Adaboost.cls0
e.g.
>>> x = np.array([[1,2,3,4],[2,3,4,5],[6,7,8,9],[2,5,7,8]])
>>> y = np.array([1,2,2,1])
>>> clf = Adaboost(x, y, mode = "num")
>>> clf.predict(np.array([[1,7,2,8],[2,5,6,9]]))
array([ 27.5707191 , 32.16583895])
>>> clf.cls0
1
>>> clf = Adaboost(x, y, mode = "sign")
>>> clf.predict(np.array([[1,7,2,8],[2,5,6,9]]))
array([ 1., 1.])
Note that the outputs of clf.predict in "num" mode are positive, so the
outputs of clf.predict in "sign" mode are both clf.cls0, which is label 1.
Methods
-------
predict
score
See also
--------
Adaboost
References
----------
<Improved boosting algorithms using confidence-rated predictions>, Schapire
, 1999
"""
def __init__(self, X, y, estimator = DecisionTreeClassifier, itern = 20, mode = "sign"):
self.X = X
self.y = y.copy()
self.estimator = estimator
self.mode = mode
self.itern = itern
self.estimators = [] # estimators produced by boosting algorithm
self.alphas = np.array([]) # weights of each boost estimator
self.m = self.X.shape[0] # number of samples
self.w = np.array([1/self.m] * self.m) # weights of samples
self.cls_list = [] # list used to store classes' name and numbers
self.cls0 = y[0]
for i in range(self.m):
if y[i] not in self.cls_list:
self.cls_list.append(y[i])
if y[i] == self.cls0:
self.y[i] = 1
else:
self.y[i] = -1
if len(self.cls_list) != 2:
raise TypeError(
'''This Adaboost only supports two-class problems; for multiclass
problems, please use AdaboostMH.''')
self.train()
def train(self):
m = self.m
for k in range(self.itern):
cls = self.estimator(max_depth = 3, presort = True)
cls.fit(self.X, self.y, sample_weight = self.w)
self.estimators.append(cls)
y_predict = cls.predict(self.X)
error = 0 # weighted error of wrong predictions
for i in range(m):
if y_predict[i] != self.y[i]:
error += self.w[i]
if error == 0:
error += 0.01 # smoothing to avoid log(0) when there is no error
alpha = 0.5*log((1-error)/error) # estimator weight
self.alphas = np.append(self.alphas, alpha)
for i in range(m): # update sample weights
if y_predict[i] != self.y[i]:
self.w[i] *= e**alpha
else:
self.w[i] /= e**alpha
self.w /= sum(self.w)
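# Worked example of the update above (illustrative, numbers assumed):
# with weighted error 0.25, alpha = 0.5*log(0.75/0.25) ~ 0.549, so a
# misclassified sample's weight is multiplied by e**0.549 ~ 1.732 and a
# correct one's divided by it, before renormalising the weights to sum to 1.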
def predict(self, X):
y_predict = np.array([])
if self.mode == "sign":
for i in range(X.shape[0]):
predict_i = (sum(self.alphas *
np.array([int(self.estimators[k].predict(X[i].reshape(1,-1))) for k in range(len(self.alphas))])))
y_predict = np.append(y_predict, self.transfer(np.sign(predict_i)))
else:
for i in range(X.shape[0]):
predict_i = (sum(self.alphas *
np.array([int(self.estimators[k].predict(X[i].reshape(1,-1))) for k in range(len(self.alphas))])))
y_predict = np.append(y_predict, predict_i)
return y_predict
def transfer(self, l):
"""turn -1/+1 to previous initial label name"""
if l == 1:
return self.cls0
else:
return self.cls_list[1]
def score(self, X_test, y_test):
"""return precision of trained estimator on x_test and y_test"""
y_predict = self.predict(X_test)
error = 0 # number of wrong predictions
for i in range(X_test.shape[0]):
if y_predict[i] != y_test[i]:
error += 1
error /= X_test.shape[0]
return 1 - error
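# Editor's sketch (not part of the original module): a worked example of the
# estimator weight alpha = 0.5 * log((1 - error) / error) computed in train().
# Misclassified samples are then scaled by e**alpha and correct ones by
# e**-alpha before renormalisation, so low-error estimators reshape the
# sample weights more aggressively.
def _demo_alpha():
    """Print alpha for a few weighted error rates (illustration only)."""
    for err in (0.4, 0.2, 0.05):
        print('error=%.2f -> alpha=%.3f' % (err, 0.5 * log((1 - err) / err)))
    # error=0.40 -> alpha=0.203
    # error=0.20 -> alpha=0.693
    # error=0.05 -> alpha=1.472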
class AdaboostMH():
"""
AdaboostMH(X, y, estimator = DecisionTreeClassifier, itern = 20, mode = "sign")
Adaboost that can solve multiclass and multilabel problems.
Parameters
----------
X: numpy 2d array (m samples * n features)
y: numpy 1d array (m samples' label)
estimator: base_estimator of boosting
itern: number of iterations
mode: "sign" mode will return label directly when you use predict method,
while "num" mode will return an array of confidence rates x which reflects
how likely the labels i belongs to corresbonding sample j.
the more positive x is, the more likely the label i belongs to sample j;
the more negative x is, the more likely the label i doesn't belong to j.
e.g.
>>> x = np.array([[1,2,3,4],[2,3,4,5],[6,7,8,9],[2,5,7,8]])
>>> y = np.array([[1,2],[2],[3,1],[2,3]])
>>> clf = AdaboostMH(x, y, mode = "num")
>>> clf.predict(np.array([[1,7,2,8],[2,5,6,9]]))
array([[ 3.89458577, 3.89458577, 1.14677695],
[-1.45489964, 1.51029301, 7.75042082]])
Methods
-------
predict
score
See also
--------
Adaboost
References
----------
<Improved boosting algorithms using confidence-rated predictions>,
Schapire, 1999
"""
def __init__(self, X, y, estimator = DecisionTreeClassifier, itern = 20, mode = "sign"):
self.X = X
self.y = y
self.estimator = estimator
self.itern = itern
self.mode = mode
self.m = self.X.shape[0] # number of samples
self.cls_list = [] # list of the distinct class names
# if type(y[0]) != np.ndarray:
# self.y = y.reshape(len(y),-1)
for i in range(self.m):
for cls in self.y[i]:
if cls not in self.cls_list:
self.cls_list.append(cls)
self.k = len(self.cls_list) # number of classes
self.boost = self.train()
def train(self):
X = self.X
new_X = [] # expand the initial problem into (sample, class) pairs
new_y = []
for i in range(self.m):
for cls in self.cls_list:
new_X.append(list(X[i])+[cls])
if cls in self.y[i]:
new_y.append(1)
else:
new_y.append(-1)
new_X = np.array(new_X)
new_y = np.array(new_y)
boost = Adaboost(new_X, new_y, estimator = self.estimator, itern = self.itern, mode = self.mode)
return boost
def predict(self, X):
"""Use trained model to predict new X
clf.predict(x)
"""
y_predict = []
if self.mode == "sign":
for i in range(X.shape[0]):
y = []
for cls in self.cls_list:
new_X = np.append(X[i], cls).reshape(1,-1)
predict = int(self.boost.predict(new_X))
if predict == 1:
y.append(cls)
y_predict.append(y)
else:
for i in range(X.shape[0]):
y = []
for cls in self.cls_list:
new_X = np.append(X[i], cls).reshape(1,-1)
predict = self.boost.predict(new_X)[0]
y.append(predict)
y_predict.append(y)
y_predict = np.array(y_predict)
return y_predict
def score(self, X_test, y_test):
"""return precision of trained estimator on test dataset X and y"""
if self.mode != "sign":
raise TypeError("score only support sign mode")
y_predict = self.predict(X_test)
error = 0 # number of wrong (sample, label) predictions
for i in range(X_test.shape[0]):
for cls in self.cls_list:
if cls in y_test[i]:
if cls not in y_predict[i]:
error += 1
else:
if cls in y_predict[i]:
error += 1
error /= (X_test.shape[0] * self.k)
return 1 - error
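# Editor's sketch (not part of the original module): the reduction performed
# by AdaboostMH.train. Each (sample, class) pair becomes one binary-labelled
# row with the class id appended as an extra feature. All names below are
# hypothetical.
def _demo_mh_expansion():
    X = [[1, 2], [3, 4]]
    y = [[0], [0, 1]]  # sample 0 has label 0; sample 1 has labels 0 and 1
    cls_list = [0, 1]
    new_X, new_y = [], []
    for i in range(len(X)):
        for cls in cls_list:
            new_X.append(list(X[i]) + [cls])
            new_y.append(1 if cls in y[i] else -1)
    print(new_X)  # [[1, 2, 0], [1, 2, 1], [3, 4, 0], [3, 4, 1]]
    print(new_y)  # [1, -1, 1, 1]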
class AdaboostMO():
"""
AdaboostMO(X, y, code_dic = None, estimator = DecisionTreeClassifier, itern = 20)
A multiclass version of Adaboost based on output codes to solve single-label
problems.
Parameters
----------
X: numpy 2d array (m samples * n features)
y: numpy 1d array (m samples' label)
code_dic: dictionary (key:label, value: numpy array of -1/+1)
estimator: base_estimator of boosting
itern: number of iterations
e.g.
>>> x = np.array([[1,2,3,4],[2,3,4,5],[6,7,8,9],[2,5,7,8]])
>>> y = np.array([1,2,3,1])
>>> clf = AdaboostMO(x, y, code_dic = {1: np.array([1,-1,-1]), 2: np.array([-1,1,-1]), 3: np.array([-1,-1,1])}, itern = 15)
>>> clf.predict(np.array([[1,7,2,8],[2,5,6,9]]))
array([1,1])
Methods
-------
predict
score
See also
--------
AdaboostMH
References
----------
<Improved boosting algorithms using confidence-rated predictions>,
Schapire, 1999
"""
def __init__(self, X, y, code_dic = None, estimator = DecisionTreeClassifier, itern = 20):
self.X = X
self.y = y
self.estimator = estimator
self.itern = itern
self.m = self.X.shape[0] # number of samples
self.cls_list = [] # list of the distinct class names
for i in range(self.m):
if y[i] not in self.cls_list:
self.cls_list.append(y[i])
if code_dic is not None:
self.k = len(code_dic[self.cls_list[0]]) # dimension of the encoding space
else:
self.k = len(self.cls_list)
if code_dic is None: # generate the default encoding dictionary
code_dic = {}
for i in range(self.k):
code = np.array([-1] * self.k)
code[i] = 1
code_dic[self.cls_list[i]] = code
self.code_dic = code_dic #store {label: array-like code}
self.boost = self.train()
def train(self):
codes = self.encode(self.y) # encode y, then train it as AdaboostMH in num mode
y = [[k for k in range(self.k) if codes[i][k] == 1] for i in range(self.m)]
boost = AdaboostMH(self.X, y, estimator = self.estimator, itern = self.itern, mode = "num")
return boost
def encode(self, y):
if not isinstance(y, np.ndarray):
return self.code_dic[y]
return np.array([self.code_dic[i] for i in y])
def decode(self, y):
"""decode an array_like labels"""
decode_y = []
for i in range(len(y)):
for cls in self.code_dic:
if np.array_equal(self.code_dic[cls], y[i]):
decode_y.append(cls)
break
return np.array(decode_y)
def predict(self, X):
"""Use trained model to predict on new X"""
y_predict = []
for i in range(X.shape[0]):
confidences = self.boost.predict(X[i].reshape(1,-1))[0]
cls_score = [sum(self.encode(cls) * confidences) for cls in self.cls_list]
cls = self.cls_list[cls_score.index(max(cls_score))]
y_predict.append(cls)
return np.array(y_predict)
def score(self, x_test, y_test):
"""return precision of trained estimator on x_test and y_test"""
error = 0
y_predict = self.predict(x_test)
for i in range(len(y_test)):
if y_predict[i] != y_test[i]:
error += 1
return 1 - error/len(y_test)
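# Editor's sketch (not part of the original module): the default one-vs-all
# output code built in __init__ when no code_dic is supplied. Each label gets
# +1 in its own coordinate and -1 everywhere else.
def _demo_default_codes():
    cls_list = [1, 2, 3]
    k = len(cls_list)
    code_dic = {}
    for i in range(k):
        code = np.array([-1] * k)
        code[i] = 1
        code_dic[cls_list[i]] = code
    print(code_dic)
    # {1: array([ 1, -1, -1]), 2: array([-1,  1, -1]), 3: array([-1, -1,  1])}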
| gpl-3.0 |
dionhaefner/veros | test/pyom_consistency/eke_test.py | 1 | 3344 | from collections import OrderedDict
import numpy as np
from test_base import VerosPyOMUnitTest
from veros.core import eke
class EKETest(VerosPyOMUnitTest):
nx, ny, nz = 70, 60, 50
extra_settings = {
'enable_cyclic_x': True,
'enable_eke_leewave_dissipation': True,
'enable_eke': True,
'enable_TEM_friction': True,
'enable_eke_isopycnal_diffusion': True,
'enable_store_cabbeling_heat': True,
'enable_eke_superbee_advection': True,
'enable_eke_upwind_advection': True
}
def initialize(self):
for a in ('eke_hrms_k0_min', 'eke_k_max', 'eke_c_k', 'eke_crhin', 'eke_cross',
'eke_lmin', 'K_gm_0', 'K_iso_0', 'c_lee0', 'eke_Ri0', 'eke_Ri1', 'eke_int_diss0',
'kappa_EKE0', 'eke_r_bot', 'eke_c_eps', 'alpha_eke', 'dt_tracer', 'AB_eps'):
self.set_attribute(a, np.random.rand())
for a in ('dxt', 'dxu'):
self.set_attribute(a, np.random.randint(1, 100, size=self.nx + 4).astype(np.float))
for a in ('dyt', 'dyu'):
self.set_attribute(a, np.random.randint(1, 100, size=self.ny + 4).astype(np.float))
for a in ('cosu', 'cost'):
self.set_attribute(a, 2 * np.random.rand(self.ny + 4) - 1.)
for a in ('dzt', 'dzw', 'zw'):
self.set_attribute(a, 100 * np.random.rand(self.nz))
for a in ('eke_topo_hrms', 'eke_topo_lam', 'hrms_k0', 'coriolis_t', 'beta',
'eke_lee_flux', 'eke_bot_flux', 'L_rossby'):
self.set_attribute(a, np.random.randn(self.nx + 4, self.ny + 4))
for a in ('eke_len', 'K_diss_h', 'K_diss_gm', 'P_diss_skew', 'P_diss_hmix', 'P_diss_iso',
'kappaM', 'eke_diss_iw', 'eke_diss_tke', 'K_gm', 'flux_east', 'flux_north', 'flux_top',
'L_rhines'):
self.set_attribute(a, np.random.randn(self.nx + 4, self.ny + 4, self.nz))
for a in ('eke', 'deke', 'Nsqr', 'u', 'v'):
self.set_attribute(a, np.random.randn(self.nx + 4, self.ny + 4, self.nz, 3))
for a in ('maskU', 'maskV', 'maskW', 'maskT'):
self.set_attribute(a, np.random.randint(0, 2, size=(self.nx + 4, self.ny + 4, self.nz)).astype(np.float))
kbot = np.random.randint(1, self.nz, size=(self.nx + 4, self.ny + 4))
# add some islands, but avoid boundaries
kbot[3:-3, 3:-3].flat[np.random.randint(0, (self.nx - 2) * (self.ny - 2), size=10)] = 0
self.set_attribute('kbot', kbot)
self.test_module = eke
veros_args = (self.veros_new.state, )
veros_legacy_args = dict()
self.test_routines = OrderedDict()
self.test_routines['init_eke'] = (veros_args, veros_legacy_args)
self.test_routines['set_eke_diffusivities'] = (veros_args, veros_legacy_args)
self.test_routines['integrate_eke'] = (veros_args, veros_legacy_args)
def test_passed(self, routine):
for f in ('flux_east', 'flux_north', 'flux_top', 'eke', 'deke', 'hrms_k0', 'L_rossby',
'L_rhines', 'eke_len', 'K_gm', 'kappa_gm', 'K_iso', 'sqrteke', 'c_lee', 'c_Ri_diss',
'eke_diss_iw', 'eke_diss_tke', 'eke_lee_flux', 'eke_bot_flux'):
self.check_variable(f)
def test_eke(pyom2_lib, backend):
EKETest(fortran=pyom2_lib, backend=backend).run()
| mit |
rohitwaghchaure/erpnext_develop | erpnext/setup/setup_wizard/data/industry_type.py | 76 | 1174 | from frappe import _
def get_industry_types():
return [
_('Accounting'),
_('Advertising'),
_('Aerospace'),
_('Agriculture'),
_('Airline'),
_('Apparel & Accessories'),
_('Automotive'),
_('Banking'),
_('Biotechnology'),
_('Broadcasting'),
_('Brokerage'),
_('Chemical'),
_('Computer'),
_('Consulting'),
_('Consumer Products'),
_('Cosmetics'),
_('Defense'),
_('Department Stores'),
_('Education'),
_('Electronics'),
_('Energy'),
_('Entertainment & Leisure'),
_('Executive Search'),
_('Financial Services'),
_('Food, Beverage & Tobacco'),
_('Grocery'),
_('Health Care'),
_('Internet Publishing'),
_('Investment Banking'),
_('Legal'),
_('Manufacturing'),
_('Motion Picture & Video'),
_('Music'),
_('Newspaper Publishers'),
_('Online Auctions'),
_('Pension Funds'),
_('Pharmaceuticals'),
_('Private Equity'),
_('Publishing'),
_('Real Estate'),
_('Retail & Wholesale'),
_('Securities & Commodity Exchanges'),
_('Service'),
_('Soap & Detergent'),
_('Software'),
_('Sports'),
_('Technology'),
_('Telecommunications'),
_('Television'),
_('Transportation'),
_('Venture Capital')
]
| gpl-3.0 |
PulsePod/evepod | lib/python2.7/site-packages/newrelic-2.12.0.10/newrelic/hooks/external_feedparser.py | 4 | 2136 | import sys
import types
import newrelic.packages.six as six
import newrelic.api.transaction
import newrelic.api.object_wrapper
import newrelic.api.external_trace
class capture_external_trace(object):
def __init__(self, wrapped):
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __call__(self, url, *args, **kwargs):
# The URL may be a string or a file-like object. Pass the call
# through unchanged if it is not a string.
if not isinstance(url, six.string_types):
return self._nr_next_object(url, *args, **kwargs)
# Only wrap the call if the string looks like a URL. To work
# that out, first convert the accepted 'feed' formats to a
# proper URL format.
parsed_url = url
if parsed_url.startswith('feed:http'):
parsed_url = parsed_url[5:]
elif parsed_url.startswith('feed:'):
parsed_url = 'http:' + url[5:]
if parsed_url.split(':')[0].lower() in ['http', 'https', 'ftp']:
current_transaction = newrelic.api.transaction.current_transaction()
if current_transaction:
trace = newrelic.api.external_trace.ExternalTrace(
current_transaction, 'feedparser', parsed_url, 'GET')
context_manager = trace.__enter__()
try:
result = self._nr_next_object(url, *args, **kwargs)
except: # Catch all
context_manager.__exit__(*sys.exc_info())
raise
context_manager.__exit__(None, None, None)
return result
else:
return self._nr_next_object(url, *args, **kwargs)
else:
return self._nr_next_object(url, *args, **kwargs)
def __getattr__(self, name):
return getattr(self._nr_next_object, name)
def instrument(module):
newrelic.api.object_wrapper.wrap_object(
module, 'parse', capture_external_trace)
| apache-2.0 |
miraculixx/heroku-buildpack-python | vendor/pip-pop/pip/_vendor/requests/packages/chardet/sjisprober.py | 1777 | 3764 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
| mit |
google-research/simclr | tf2/data_util.py | 1 | 18220 | # coding=utf-8
# Copyright 2020 The SimCLR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data preprocessing and augmentation."""
import functools
from absl import flags
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
CROP_PROPORTION = 0.875 # Standard for ImageNet.
def random_apply(func, p, x):
"""Randomly apply function func to x with probability p."""
return tf.cond(
tf.less(
tf.random.uniform([], minval=0, maxval=1, dtype=tf.float32),
tf.cast(p, tf.float32)), lambda: func(x), lambda: x)
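# Editor's sketch (not part of the original file): random_apply lifts any
# deterministic image op into one that fires with probability p. A minimal
# usage example, assuming TF2 eager execution:
def _demo_random_apply():
    image = tf.random.uniform([8, 8, 3])
    maybe_flipped = random_apply(tf.image.flip_left_right, p=0.5, x=image)
    return maybe_flipped.shape  # TensorShape([8, 8, 3])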
def random_brightness(image, max_delta, impl='simclrv2'):
"""A multiplicative vs additive change of brightness."""
if impl == 'simclrv2':
factor = tf.random.uniform([], tf.maximum(1.0 - max_delta, 0),
1.0 + max_delta)
image = image * factor
elif impl == 'simclrv1':
image = tf.image.random_brightness(image, max_delta=max_delta)
else:
raise ValueError('Unknown impl {} for random brightness.'.format(impl))
return image
def to_grayscale(image, keep_channels=True):
image = tf.image.rgb_to_grayscale(image)
if keep_channels:
image = tf.tile(image, [1, 1, 3])
return image
def color_jitter(image, strength, random_order=True, impl='simclrv2'):
"""Distorts the color of the image.
Args:
image: The input image tensor.
strength: the floating number for the strength of the color augmentation.
random_order: A bool, specifying whether to randomize the jittering order.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
brightness = 0.8 * strength
contrast = 0.8 * strength
saturation = 0.8 * strength
hue = 0.2 * strength
if random_order:
return color_jitter_rand(
image, brightness, contrast, saturation, hue, impl=impl)
else:
return color_jitter_nonrand(
image, brightness, contrast, saturation, hue, impl=impl)
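# Editor's sketch (not part of the original file): with strength=1.0 the
# scaling above yields brightness/contrast/saturation deltas of 0.8 and a hue
# delta of 0.2, the jitter configuration commonly used with SimCLR.
def _demo_color_jitter():
    image = tf.random.uniform([8, 8, 3])
    jittered = color_jitter(image, strength=1.0)
    return jittered.shape  # TensorShape([8, 8, 3])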
def color_jitter_nonrand(image,
brightness=0,
contrast=0,
saturation=0,
hue=0,
impl='simclrv2'):
"""Distorts the color of the image (jittering order is fixed).
Args:
image: The input image tensor.
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x, brightness, contrast, saturation, hue):
"""Apply the i-th transformation."""
if brightness != 0 and i == 0:
x = random_brightness(x, max_delta=brightness, impl=impl)
elif contrast != 0 and i == 1:
x = tf.image.random_contrast(
x, lower=1-contrast, upper=1+contrast)
elif saturation != 0 and i == 2:
x = tf.image.random_saturation(
x, lower=1-saturation, upper=1+saturation)
elif hue != 0:
x = tf.image.random_hue(x, max_delta=hue)
return x
for i in range(4):
image = apply_transform(i, image, brightness, contrast, saturation, hue)
image = tf.clip_by_value(image, 0., 1.)
return image
def color_jitter_rand(image,
brightness=0,
contrast=0,
saturation=0,
hue=0,
impl='simclrv2'):
"""Distorts the color of the image (jittering order is random).
Args:
image: The input image tensor.
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x):
"""Apply the i-th transformation."""
def brightness_foo():
if brightness == 0:
return x
else:
return random_brightness(x, max_delta=brightness, impl=impl)
def contrast_foo():
if contrast == 0:
return x
else:
return tf.image.random_contrast(x, lower=1-contrast, upper=1+contrast)
def saturation_foo():
if saturation == 0:
return x
else:
return tf.image.random_saturation(
x, lower=1-saturation, upper=1+saturation)
def hue_foo():
if hue == 0:
return x
else:
return tf.image.random_hue(x, max_delta=hue)
x = tf.cond(tf.less(i, 2),
lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),
lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))
return x
perm = tf.random.shuffle(tf.range(4))
for i in range(4):
image = apply_transform(perm[i], image)
image = tf.clip_by_value(image, 0., 1.)
return image
def _compute_crop_shape(
image_height, image_width, aspect_ratio, crop_proportion):
"""Compute aspect ratio-preserving shape for central crop.
The resulting shape retains `crop_proportion` along one side and a proportion
less than or equal to `crop_proportion` along the other side.
Args:
image_height: Height of image to be cropped.
image_width: Width of image to be cropped.
aspect_ratio: Desired aspect ratio (width / height) of output.
crop_proportion: Proportion of image to retain along the less-cropped side.
Returns:
crop_height: Height of image after cropping.
crop_width: Width of image after cropping.
"""
image_width_float = tf.cast(image_width, tf.float32)
image_height_float = tf.cast(image_height, tf.float32)
def _requested_aspect_ratio_wider_than_image():
crop_height = tf.cast(
tf.math.rint(crop_proportion / aspect_ratio * image_width_float),
tf.int32)
crop_width = tf.cast(
tf.math.rint(crop_proportion * image_width_float), tf.int32)
return crop_height, crop_width
def _image_wider_than_requested_aspect_ratio():
crop_height = tf.cast(
tf.math.rint(crop_proportion * image_height_float), tf.int32)
crop_width = tf.cast(
tf.math.rint(crop_proportion * aspect_ratio * image_height_float),
tf.int32)
return crop_height, crop_width
return tf.cond(
aspect_ratio > image_width_float / image_height_float,
_requested_aspect_ratio_wider_than_image,
_image_wider_than_requested_aspect_ratio)
def center_crop(image, height, width, crop_proportion):
"""Crops to center of image and rescales to desired size.
Args:
image: Image Tensor to crop.
height: Height of image to be cropped.
width: Width of image to be cropped.
crop_proportion: Proportion of image to retain along the less-cropped side.
Returns:
A `height` x `width` x channels Tensor holding a central crop of `image`.
"""
shape = tf.shape(image)
image_height = shape[0]
image_width = shape[1]
crop_height, crop_width = _compute_crop_shape(
image_height, image_width, height / width, crop_proportion)
offset_height = ((image_height - crop_height) + 1) // 2
offset_width = ((image_width - crop_width) + 1) // 2
image = tf.image.crop_to_bounding_box(
image, offset_height, offset_width, crop_height, crop_width)
image = tf.image.resize([image], [height, width],
method=tf.image.ResizeMethod.BICUBIC)[0]
return image
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: `Tensor` of image data.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
(cropped image `Tensor`, distorted bbox `Tensor`).
"""
with tf.name_scope(scope or 'distorted_bounding_box_crop'):
shape = tf.shape(image)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
image = tf.image.crop_to_bounding_box(
image, offset_y, offset_x, target_height, target_width)
return image
def crop_and_resize(image, height, width):
"""Make a random crop and resize it to height `height` and width `width`.
Args:
image: Tensor representing the image.
height: Desired image height.
width: Desired image width.
Returns:
A `height` x `width` x channels Tensor holding a random crop of `image`.
"""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
aspect_ratio = width / height
image = distorted_bounding_box_crop(
image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4 * aspect_ratio, 4. / 3. * aspect_ratio),
area_range=(0.08, 1.0),
max_attempts=100,
scope=None)
return tf.image.resize([image], [height, width],
method=tf.image.ResizeMethod.BICUBIC)[0]
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
"""Blurs the given image with separable convolution.
Args:
image: Tensor of shape [height, width, channels] and dtype float to blur.
kernel_size: Integer Tensor for the size of the blur kernel. This should
be an odd number. If it is an even number, the actual kernel size will be
size + 1.
sigma: Sigma value for gaussian operator.
padding: Padding to use for the convolution. Typically 'SAME' or 'VALID'.
Returns:
A Tensor representing the blurred image.
"""
radius = tf.cast(kernel_size / 2, dtype=tf.int32)
kernel_size = radius * 2 + 1
x = tf.cast(tf.range(-radius, radius + 1), dtype=tf.float32)
blur_filter = tf.exp(-tf.pow(x, 2.0) /
(2.0 * tf.pow(tf.cast(sigma, dtype=tf.float32), 2.0)))
blur_filter /= tf.reduce_sum(blur_filter)
# One vertical and one horizontal filter.
blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
num_channels = tf.shape(image)[-1]
blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
expand_batch_dim = image.shape.ndims == 3
if expand_batch_dim:
# Tensorflow requires batched input to convolutions, which we can fake with
# an extra dimension.
image = tf.expand_dims(image, axis=0)
blurred = tf.nn.depthwise_conv2d(
image, blur_h, strides=[1, 1, 1, 1], padding=padding)
blurred = tf.nn.depthwise_conv2d(
blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
if expand_batch_dim:
blurred = tf.squeeze(blurred, axis=0)
return blurred
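# Editor's sketch (not part of the original file): the blur above is
# separable: a 1-D Gaussian is applied vertically and then horizontally,
# which matches a full 2-D Gaussian convolution at a fraction of the cost.
def _demo_gaussian_blur():
    image = tf.random.uniform([32, 32, 3])
    blurred = gaussian_blur(image, kernel_size=3, sigma=1.0)
    return blurred.shape  # TensorShape([32, 32, 3])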
def random_crop_with_resize(image, height, width, p=1.0):
"""Randomly crop and resize an image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
p: Probability of applying this transformation.
Returns:
A preprocessed image `Tensor`.
"""
def _transform(image): # pylint: disable=missing-docstring
image = crop_and_resize(image, height, width)
return image
return random_apply(_transform, p=p, x=image)
def random_color_jitter(image, p=1.0, strength=1.0,
impl='simclrv2'):
def _transform(image):
color_jitter_t = functools.partial(
color_jitter, strength=strength, impl=impl)
image = random_apply(color_jitter_t, p=0.8, x=image)
return random_apply(to_grayscale, p=0.2, x=image)
return random_apply(_transform, p=p, x=image)
def random_blur(image, height, width, p=1.0):
"""Randomly blur an image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
p: probability of applying this transformation.
Returns:
A preprocessed image `Tensor`.
"""
del width
def _transform(image):
sigma = tf.random.uniform([], 0.1, 2.0, dtype=tf.float32)
return gaussian_blur(
image, kernel_size=height//10, sigma=sigma, padding='SAME')
return random_apply(_transform, p=p, x=image)
def batch_random_blur(images_list, height, width, blur_probability=0.5):
"""Apply efficient batch data transformations.
Args:
images_list: a list of image tensors.
height: the height of image.
width: the width of image.
blur_probability: the probability of applying the blur operator.
Returns:
Preprocessed feature list.
"""
def generate_selector(p, bsz):
shape = [bsz, 1, 1, 1]
selector = tf.cast(
tf.less(tf.random.uniform(shape, 0, 1, dtype=tf.float32), p),
tf.float32)
return selector
new_images_list = []
for images in images_list:
images_new = random_blur(images, height, width, p=1.)
selector = generate_selector(blur_probability, tf.shape(images)[0])
images = images_new * selector + images * (1 - selector)
images = tf.clip_by_value(images, 0., 1.)
new_images_list.append(images)
return new_images_list
def preprocess_for_train(image,
height,
width,
color_distort=True,
crop=True,
flip=True,
impl='simclrv2'):
"""Preprocesses the given image for training.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
color_distort: Whether to apply the color distortion.
crop: Whether to crop the image.
flip: Whether or not to randomly flip the image horizontally.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
A preprocessed image `Tensor`.
"""
if crop:
image = random_crop_with_resize(image, height, width)
if flip:
image = tf.image.random_flip_left_right(image)
if color_distort:
image = random_color_jitter(image, strength=FLAGS.color_jitter_strength,
impl=impl)
image = tf.reshape(image, [height, width, 3])
image = tf.clip_by_value(image, 0., 1.)
return image
def preprocess_for_eval(image, height, width, crop=True):
"""Preprocesses the given image for evaluation.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
crop: Whether or not to (center) crop the test images.
Returns:
A preprocessed image `Tensor`.
"""
if crop:
image = center_crop(image, height, width, crop_proportion=CROP_PROPORTION)
image = tf.reshape(image, [height, width, 3])
image = tf.clip_by_value(image, 0., 1.)
return image
def preprocess_image(image, height, width, is_training=False,
color_distort=True, test_crop=True):
"""Preprocesses the given image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
is_training: `bool` for whether the preprocessing is for training.
color_distort: whether to apply the color distortion.
test_crop: whether or not to extract a central crop of the images
(as for standard ImageNet evaluation) during the evaluation.
Returns:
A preprocessed image `Tensor` of range [0, 1].
"""
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if is_training:
return preprocess_for_train(image, height, width, color_distort)
else:
return preprocess_for_eval(image, height, width, test_crop)
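# Editor's sketch (not part of the original file): typical end-to-end usage.
# Note preprocess_for_train reads FLAGS.color_jitter_strength, so absl flags
# must be parsed before calling with is_training=True and color_distort=True.
def _demo_preprocess():
    raw = tf.random.uniform([300, 400, 3], maxval=256, dtype=tf.int32)
    image = tf.cast(raw, tf.uint8)
    eval_image = preprocess_image(image, 224, 224, is_training=False)
    return eval_image.shape  # TensorShape([224, 224, 3])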
| apache-2.0 |
heeraj123/oh-mainline | vendor/packages/Django/django/http/cookie.py | 110 | 3535 | from __future__ import absolute_import, unicode_literals
from django.utils.encoding import force_str
from django.utils import six
from django.utils.six.moves import http_cookies
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = http_cookies.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = http_cookies.SimpleCookie()
try:
_tc.load(str('foo:bar=1'))
_cookie_allows_colon_in_names = True
except http_cookies.CookieError:
_cookie_allows_colon_in_names = False
if _cookie_encodes_correctly and _cookie_allows_colon_in_names:
SimpleCookie = http_cookies.SimpleCookie
else:
Morsel = http_cookies.Morsel
class SimpleCookie(http_cookies.SimpleCookie):
if not _cookie_encodes_correctly:
def value_encode(self, val):
# Some browsers do not support quoted-string from RFC 2109,
# including some versions of Safari and Internet Explorer.
# These browsers split on ';', and some versions of Safari
# are known to split on ', '. Therefore, we encode ';' and ','
# SimpleCookie already does the hard work of encoding and decoding.
# It uses octal sequences like '\\012' for newline etc.
# and non-ASCII chars. We just make use of this mechanism, to
# avoid introducing two encoding schemes which would be confusing
# and especially awkward for javascript.
# NB, contrary to Python docs, value_encode returns a tuple containing
# (real val, encoded_val)
val, encoded = super(SimpleCookie, self).value_encode(val)
encoded = encoded.replace(";", "\\073").replace(",","\\054")
# If encoded now contains any quoted chars, we need double quotes
# around the whole string.
if "\\" in encoded and not encoded.startswith('"'):
encoded = '"' + encoded + '"'
return val, encoded
if not _cookie_allows_colon_in_names:
def load(self, rawdata):
self.bad_cookies = set()
if not six.PY3 and isinstance(rawdata, six.text_type):
rawdata = force_str(rawdata)
super(SimpleCookie, self).load(rawdata)
for key in self.bad_cookies:
del self[key]
# override private __set() method:
# (needed for using our Morsel, and for laxness with CookieError)
def _BaseCookie__set(self, key, real_value, coded_value):
key = force_str(key)
try:
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
except http_cookies.CookieError:
self.bad_cookies.add(key)
dict.__setitem__(self, key, http_cookies.Morsel())
def parse_cookie(cookie):
if cookie == '':
return {}
if not isinstance(cookie, http_cookies.BaseCookie):
try:
c = SimpleCookie()
c.load(cookie)
except http_cookies.CookieError:
# Invalid cookie
return {}
else:
c = cookie
cookiedict = {}
for key in c.keys():
cookiedict[key] = c.get(key).value
return cookiedict
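# Editor's sketch (not part of the original module): parse_cookie flattens a
# raw Cookie header into a plain dict and returns {} for malformed input
# instead of raising. For example:
#
#   parse_cookie('sessionid=abc123; csrftoken=xyz')
#   # -> {'sessionid': 'abc123', 'csrftoken': 'xyz'}
#   parse_cookie('')
#   # -> {}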
| agpl-3.0 |
benhamner/GEFlightQuest | PythonModule/geflight/fq2/basic_filter.py | 3 | 3642 | import csv
from geflight.transform import flighthistory
from geflight.transform import utilities
import gzip
import os
import pandas as pd
def get_us_airport_icao_codes(codes_file):
df = pd.read_csv(codes_file)
return set(df["icao_code"])
def is_flight_in_or_out_of_us(row, us_icao_codes):
if ((row["arrival_airport_icao_code"] not in us_icao_codes) or
(row["departure_airport_icao_code"] not in us_icao_codes)):
return False
return True
def filter_flight_history(input_path, output_path):
codes_file = os.path.join(os.environ["DataPath"], "GEFlight", "Reference", "usairporticaocodes.txt")
us_icao_codes = get_us_airport_icao_codes(codes_file)
reader = utilities.HeaderCsvReader(gzip.open(input_path, 'rb'))
out_handle = gzip.open(output_path, 'wb')
writer = csv.writer(out_handle, dialect=utilities.CsvDialect())
header_out = reader.get_header()
for col in flighthistory.get_flight_history_columns_to_delete():
header_out.remove(col)
writer.writerow(header_out)
row_buffer = []
flight_history_ids = set()
i_row_mod = 0
cnt = 0
i = 0
for row in reader:
i_row_mod += 1
if not is_flight_in_or_out_of_us(row, us_icao_codes):
continue
cnt += 1
row_buffer.append([row[col] for col in header_out])
flight_history_ids.add(row["flight_history_id"])
if i_row_mod < 100000:
continue
i += 1
print("%s: %d00k records processed, %d with relevant flights in this chunk" % ("flighthistory", i, cnt))
cnt = 0
i_row_mod = 0
writer.writerows(row_buffer)
out_handle.flush()
row_buffer = []
out_handle.close()
return flight_history_ids
def get_input_path(input_dir, table):
return os.path.join(input_dir, "flightstats_%s.csv.gz" % table)
def get_output_path(output_dir, table):
return os.path.join(output_dir, "%s.csv.gz" % table)
def filter_file_based_on_ids_streaming(
input_dir,
output_dir,
table,
id_column_name,
valid_ids):
"""
Takes in one file, outputs one file
"""
input_path = get_input_path(input_dir, table)
output_path = get_output_path(output_dir, table)
f_in = gzip.open(input_path, "rb")
reader = utilities.HeaderCsvReader(f_in)
f_out = gzip.open(output_path, "wb")
writer = csv.writer(f_out, dialect=utilities.CsvDialect())
writer.writerow(reader.header)
i_total = 0
i_keep = 0
for row in reader:
i_total += 1
if row[id_column_name] not in valid_ids:
continue
i_keep += 1
writer.writerow([row[col_name] for col_name in reader.header])
if i_total % 100000 == 0:
print("%s, %d00k lines processed, %d00k lines kept" % (table, int(i_total/1000), int(i_keep/1000)))
remainder, file_name = os.path.split(input_path)
day_str = os.path.split(remainder)[1]
print("%s, %s: %d lines remaining out of %d original lines" % (day_str, file_name, i_keep, i_total))
f_out.close()
def main(input_dir, output_dir):
tables = [ "asdiposition"
, "flighthistory"
]
flight_history_ids = filter_flight_history(get_input_path(input_dir, "flighthistory"), get_output_path(output_dir, "flighthistory"))
filter_file_based_on_ids_streaming(input_dir, output_dir, "asdiposition", "flighthistoryid", flight_history_ids)
if __name__=="__main__":
input_dir = os.path.join(os.environ["DataPath"], "GEFlight2", "DataSet1", "Raw")
output_dir = os.path.join(os.environ["DataPath"], "GEFlight2", "DataSet1", "Filtered")
main(input_dir, output_dir) | bsd-2-clause |
diego-plan9/beets | test/test_ui_init.py | 10 | 3579 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Test module for file ui/__init__.py
"""
from __future__ import division, absolute_import, print_function
import unittest
from test import _common
from beets import ui
class InputMethodsTest(_common.TestCase):
def setUp(self):
super(InputMethodsTest, self).setUp()
self.io.install()
def _print_helper(self, s):
print(s)
def _print_helper2(self, s, prefix):
print(prefix, s)
def test_input_select_objects(self):
full_items = ['1', '2', '3', '4', '5']
# Test no
self.io.addinput('n')
items = ui.input_select_objects(
"Prompt", full_items, self._print_helper)
self.assertEqual(items, [])
# Test yes
self.io.addinput('y')
items = ui.input_select_objects(
"Prompt", full_items, self._print_helper)
self.assertEqual(items, full_items)
# Test selective 1
self.io.addinput('s')
self.io.addinput('n')
self.io.addinput('y')
self.io.addinput('n')
self.io.addinput('y')
self.io.addinput('n')
items = ui.input_select_objects(
"Prompt", full_items, self._print_helper)
self.assertEqual(items, ['2', '4'])
# Test selective 2
self.io.addinput('s')
self.io.addinput('y')
self.io.addinput('y')
self.io.addinput('n')
self.io.addinput('y')
self.io.addinput('n')
items = ui.input_select_objects(
"Prompt", full_items,
lambda s: self._print_helper2(s, "Prefix"))
self.assertEqual(items, ['1', '2', '4'])
class InitTest(_common.LibTestCase):
def setUp(self):
super(InitTest, self).setUp()
def test_human_bytes(self):
tests = [
(0, '0.0 B'),
(30, '30.0 B'),
(pow(2, 10), '1.0 KiB'),
(pow(2, 20), '1.0 MiB'),
(pow(2, 30), '1.0 GiB'),
(pow(2, 40), '1.0 TiB'),
(pow(2, 50), '1.0 PiB'),
(pow(2, 60), '1.0 EiB'),
(pow(2, 70), '1.0 ZiB'),
(pow(2, 80), '1.0 YiB'),
(pow(2, 90), '1.0 HiB'),
(pow(2, 100), 'big'),
]
for i, h in tests:
self.assertEqual(h, ui.human_bytes(i))
def test_human_seconds(self):
tests = [
(0, '0.0 seconds'),
(30, '30.0 seconds'),
(60, '1.0 minutes'),
(90, '1.5 minutes'),
(125, '2.1 minutes'),
(3600, '1.0 hours'),
(86400, '1.0 days'),
(604800, '1.0 weeks'),
(31449600, '1.0 years'),
(314496000, '1.0 decades'),
]
for i, h in tests:
self.assertEqual(h, ui.human_seconds(i))
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| mit |
Daniel-CA/odoo-addons | stock_quant_expiry/models/stock_quant.py | 1 | 1792 | # -*- coding: utf-8 -*-
# Copyright 2017 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
class StockQuant(models.Model):
_inherit = 'stock.quant'
@api.multi
@api.depends('lot_id.life_date', 'lot_id.mrp_date')
def _compute_lifespan(self):
for record in self.filtered(lambda x: x.lot_id and
x.lot_id.life_date and x.lot_id.mrp_date):
life_date = fields.Date.from_string(record.lot_id.life_date)
mrp_date = fields.Date.from_string(record.lot_id.mrp_date)
record.lifespan = (life_date - mrp_date).days
def _compute_lifespan_progress(self):
for record in self.filtered(lambda x: x.lot_id and
x.lot_id.life_date and x.lot_id.mrp_date):
life_date = fields.Date.from_string(record.lot_id.life_date)
mrp_date = fields.Date.from_string(record.lot_id.mrp_date)
today = fields.Date.from_string(fields.Date.today())
lifespan = (life_date - mrp_date).days
todayspan = (today - mrp_date).days
if not lifespan:
continue
record.lifespan_progress = float(todayspan) / float(lifespan) * 100
mrp_date = fields.Date(string='Mrp Date', store=True,
related='lot_id.mrp_date')
life_date = fields.Datetime(string='Expiry Date',
related='lot_id.life_date')
lifespan = fields.Integer(string='Lifespan', store=True,
compute='_compute_lifespan')
lifespan_progress = fields.Float(string='Lifespan Progress',
compute='_compute_lifespan_progress')
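# Editor's sketch (not part of the original module): a worked example of the
# progress formula above, with plain dates instead of ORM records.
#
#   mrp_date = 2017-01-01, life_date = 2017-01-21 -> lifespan = 20 days
#   today = 2017-01-06 -> todayspan = 5 days
#   lifespan_progress = 5 / 20 * 100 = 25.0 (percent of shelf life elapsed)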
| agpl-3.0 |
carsongee/edx-platform | common/djangoapps/external_auth/tests/test_openid_provider.py | 46 | 16144 | #-*- encoding=utf-8 -*-
'''
Created on Jan 18, 2013
@author: brian
'''
import openid
import json
from openid.fetchers import HTTPFetcher, HTTPResponse
from urlparse import parse_qs, urlparse
from django.conf import settings
from django.test import TestCase, LiveServerTestCase
from django.core.cache import cache
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from unittest import skipUnless
from student.tests.factories import UserFactory
from external_auth.views import provider_login
class MyFetcher(HTTPFetcher):
"""A fetcher that uses server-internal calls for performing HTTP
requests.
"""
def __init__(self, client):
"""@param client: A test client object"""
super(MyFetcher, self).__init__()
self.client = client
def fetch(self, url, body=None, headers=None):
"""Perform an HTTP request
@raises Exception: Any exception that can be raised by Django
@see: C{L{HTTPFetcher.fetch}}
"""
if body:
# method = 'POST'
# undo the URL encoding of the POST arguments
data = parse_qs(body)
response = self.client.post(url, data)
else:
# method = 'GET'
data = {}
if headers and 'Accept' in headers:
data['CONTENT_TYPE'] = headers['Accept']
response = self.client.get(url, data)
# Translate the test client response to the fetcher's HTTP response abstraction
content = response.content
final_url = url
response_headers = {}
if 'Content-Type' in response:
response_headers['content-type'] = response['Content-Type']
if 'X-XRDS-Location' in response:
response_headers['x-xrds-location'] = response['X-XRDS-Location']
status = response.status_code
return HTTPResponse(
body=content,
final_url=final_url,
headers=response_headers,
status=status,
)
class OpenIdProviderTest(TestCase):
"""
Tests of the OpenId login
"""
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_begin_login_with_xrds_url(self):
# the provider URL must be converted to an absolute URL in order to be
# used as an openid provider.
provider_url = reverse('openid-provider-xrds')
factory = RequestFactory()
request = factory.request()
abs_provider_url = request.build_absolute_uri(location=provider_url)
# In order for this absolute URL to work (i.e. to get xrds, then authentication)
# in the test environment, we either need a live server that works with the default
# fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
# Here we do the latter:
fetcher = MyFetcher(self.client)
openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
# now we can begin the login process by invoking a local openid client,
# with a pointer to the (also-local) openid provider:
with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
url = reverse('openid-login')
resp = self.client.post(url)
code = 200
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_begin_login_with_login_url(self):
# the provider URL must be converted to an absolute URL in order to be
# used as an openid provider.
provider_url = reverse('openid-provider-login')
factory = RequestFactory()
request = factory.request()
abs_provider_url = request.build_absolute_uri(location=provider_url)
# In order for this absolute URL to work (i.e. to get xrds, then authentication)
# in the test environment, we either need a live server that works with the default
# fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
# Here we do the latter:
fetcher = MyFetcher(self.client)
openid.fetchers.setDefaultFetcher(fetcher, wrap_exceptions=False)
# now we can begin the login process by invoking a local openid client,
# with a pointer to the (also-local) openid provider:
with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
url = reverse('openid-login')
resp = self.client.post(url)
code = 200
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
self.assertContains(resp, '<input name="openid.mode" type="hidden" value="checkid_setup" />', html=True)
self.assertContains(resp, '<input name="openid.ns" type="hidden" value="http://specs.openid.net/auth/2.0" />', html=True)
self.assertContains(resp, '<input name="openid.identity" type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />', html=True)
self.assertContains(resp, '<input name="openid.claimed_id" type="hidden" value="http://specs.openid.net/auth/2.0/identifier_select" />', html=True)
self.assertContains(resp, '<input name="openid.ns.ax" type="hidden" value="http://openid.net/srv/ax/1.0" />', html=True)
self.assertContains(resp, '<input name="openid.ax.mode" type="hidden" value="fetch_request" />', html=True)
self.assertContains(resp, '<input name="openid.ax.required" type="hidden" value="email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname" />', html=True)
self.assertContains(resp, '<input name="openid.ax.type.fullname" type="hidden" value="http://axschema.org/namePerson" />', html=True)
self.assertContains(resp, '<input name="openid.ax.type.lastname" type="hidden" value="http://axschema.org/namePerson/last" />', html=True)
self.assertContains(resp, '<input name="openid.ax.type.firstname" type="hidden" value="http://axschema.org/namePerson/first" />', html=True)
self.assertContains(resp, '<input name="openid.ax.type.nickname" type="hidden" value="http://axschema.org/namePerson/friendly" />', html=True)
self.assertContains(resp, '<input name="openid.ax.type.email" type="hidden" value="http://axschema.org/contact/email" />', html=True)
self.assertContains(resp, '<input name="openid.ax.type.old_email" type="hidden" value="http://schema.openid.net/contact/email" />', html=True)
self.assertContains(resp, '<input name="openid.ax.type.old_nickname" type="hidden" value="http://schema.openid.net/namePerson/friendly" />', html=True)
self.assertContains(resp, '<input name="openid.ax.type.old_fullname" type="hidden" value="http://schema.openid.net/namePerson" />', html=True)
self.assertContains(resp, '<input type="submit" value="Continue" />', html=True)
# this should work on the server:
self.assertContains(resp, '<input name="openid.realm" type="hidden" value="http://testserver/" />', html=True)
# not included here are elements that will vary from run to run:
# <input name="openid.return_to" type="hidden" value="http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H" />
# <input name="openid.assoc_handle" type="hidden" value="{HMAC-SHA1}{50ff8120}{rh87+Q==}" />
def attempt_login(self, expected_code, **kwargs):
""" Attempt to log in through the open id provider login """
url = reverse('openid-provider-login')
post_args = {
"openid.mode": "checkid_setup",
"openid.return_to": "http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H",
"openid.assoc_handle": "{HMAC-SHA1}{50ff8120}{rh87+Q==}",
"openid.claimed_id": "http://specs.openid.net/auth/2.0/identifier_select",
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.realm": "http://testserver/",
"openid.identity": "http://specs.openid.net/auth/2.0/identifier_select",
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
"openid.ax.required": "email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname",
"openid.ax.type.fullname": "http://axschema.org/namePerson",
"openid.ax.type.lastname": "http://axschema.org/namePerson/last",
"openid.ax.type.firstname": "http://axschema.org/namePerson/first",
"openid.ax.type.nickname": "http://axschema.org/namePerson/friendly",
"openid.ax.type.email": "http://axschema.org/contact/email",
"openid.ax.type.old_email": "http://schema.openid.net/contact/email",
"openid.ax.type.old_nickname": "http://schema.openid.net/namePerson/friendly",
"openid.ax.type.old_fullname": "http://schema.openid.net/namePerson",
}
# override the default args with any given arguments
for key in kwargs:
post_args["openid." + key] = kwargs[key]
resp = self.client.post(url, post_args)
code = expected_code
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_open_id_setup(self):
""" Attempt a standard successful login """
self.attempt_login(200)
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_invalid_namespace(self):
""" Test for 403 error code when the namespace of the request is invalid"""
self.attempt_login(403, ns="http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0")
@override_settings(OPENID_PROVIDER_TRUSTED_ROOTS=['http://apps.cs50.edx.org'])
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_invalid_return_url(self):
""" Test for 403 error code when the url"""
self.attempt_login(403, return_to="http://apps.cs50.edx.or")
def _send_bad_redirection_login(self):
"""
Attempt to log in to the provider with setup parameters
Intentionally fail the login to force a redirect
"""
user = UserFactory()
factory = RequestFactory()
post_params = {'email': user.email, 'password': 'password'}
fake_url = 'fake url'
request = factory.post(reverse('openid-provider-login'), post_params)
openid_setup = {
'request': factory.request(),
'url': fake_url
}
request.session = {
'openid_setup': openid_setup
}
response = provider_login(request)
return response
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_login_openid_handle_redirection(self):
""" Test to see that we can handle login redirection properly"""
response = self._send_bad_redirection_login()
self.assertEquals(response.status_code, 302)
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_login_openid_handle_redirection_ratelimited(self):
# try logging in 30 times, the default limit in the number of failed
# log in attempts before the rate gets limited
for _ in xrange(30):
self._send_bad_redirection_login()
response = self._send_bad_redirection_login()
# verify that we are not returning the default 403
self.assertEquals(response.status_code, 302)
# clear the ratelimit cache so that we don't fail other logins
cache.clear()
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_openid_final_response(self):
url = reverse('openid-provider-login')
user = UserFactory()
# login to the client so that we can persist session information
for name in ['Robot 33', '☃']:
user.profile.name = name
user.profile.save()
self.client.login(username=user.username, password='test')
# login once to get the right session information
self.attempt_login(200)
post_args = {
'email': user.email,
'password': 'test',
}
# call url again, this time with username and password
resp = self.client.post(url, post_args)
# all information is embedded in the redirect url
location = resp['Location']
# parse the url
parsed_url = urlparse(location)
parsed_qs = parse_qs(parsed_url.query)
self.assertEquals(parsed_qs['openid.ax.type.ext1'][0], 'http://axschema.org/contact/email')
self.assertEquals(parsed_qs['openid.ax.type.ext0'][0], 'http://axschema.org/namePerson')
self.assertEquals(parsed_qs['openid.ax.value.ext1.1'][0], user.email)
self.assertEquals(parsed_qs['openid.ax.value.ext0.1'][0], user.profile.name)
class OpenIdProviderLiveServerTest(LiveServerTestCase):
"""
In order for this absolute URL to work (i.e. to get xrds, then authentication)
in the test environment, we either need a live server that works with the default
fetcher (i.e. urlopen2), or a test server that is reached through a custom fetcher.
Here we do the former.
"""
@skipUnless(settings.FEATURES.get('AUTH_USE_OPENID') and
settings.FEATURES.get('AUTH_USE_OPENID_PROVIDER'),
'OpenID not enabled')
def test_begin_login(self):
# the provider URL must be converted to an absolute URL in order to be
# used as an openid provider.
provider_url = reverse('openid-provider-xrds')
factory = RequestFactory()
request = factory.request()
abs_provider_url = request.build_absolute_uri(location=provider_url)
# now we can begin the login process by invoking a local openid client,
# with a pointer to the (also-local) openid provider:
with self.settings(OPENID_SSO_SERVER_URL=abs_provider_url):
url = reverse('openid-login')
resp = self.client.post(url)
code = 200
self.assertEqual(resp.status_code, code,
"got code {0} for url '{1}'. Expected code {2}"
.format(resp.status_code, url, code))
@classmethod
def tearDownClass(cls):
"""
Workaround for a runtime error that occurs
intermittently when the server thread doesn't shut down
within 2 seconds.
Since the server is running in a Django thread and will
be terminated when the test suite terminates,
this shouldn't cause a resource allocation issue.
"""
try:
super(OpenIdProviderLiveServerTest, cls).tearDownClass()
except RuntimeError:
print "Warning: Could not shut down test server."
| agpl-3.0 |
hastexo/edx-platform | common/djangoapps/terrain/stubs/xqueue.py | 24 | 8740 | """
Stub implementation of XQueue for acceptance tests.
Configuration values:
"default" (dict): Default response to be sent to LMS as a grade for a submission
"<submission>" (dict): Grade response to return for submissions containing the text <submission>
"register_submission_url" (str): URL to send grader payloads when we receive a submission
If no grade response is configured, a default response will be returned.
"""
import copy
import json
from threading import Timer
from requests import post
from .http import StubHttpRequestHandler, StubHttpService, require_params
class StubXQueueHandler(StubHttpRequestHandler):
"""
A handler for XQueue POST requests.
"""
DEFAULT_RESPONSE_DELAY = 2
DEFAULT_GRADE_RESPONSE = {'correct': True, 'score': 1, 'msg': ''}
@require_params('POST', 'xqueue_body', 'xqueue_header')
def do_POST(self):
"""
Handle a POST request from the client
Sends back an immediate success/failure response.
It then POSTS back to the client with grading results.
"""
msg = "XQueue received POST request {0} to path {1}".format(self.post_dict, self.path)
self.log_message(msg)
# Respond only to grading requests
if self._is_grade_request():
# If configured, send the grader payload to other services.
# TODO TNL-3906
# self._register_submission(self.post_dict['xqueue_body'])
try:
xqueue_header = json.loads(self.post_dict['xqueue_header'])
callback_url = xqueue_header['lms_callback_url']
except KeyError:
# If the message doesn't have a header or body,
# then it's malformed. Respond with failure
error_msg = "XQueue received invalid grade request"
self._send_immediate_response(False, message=error_msg)
except ValueError:
# If we could not decode the body or header,
# respond with failure
error_msg = "XQueue could not decode grade request"
self._send_immediate_response(False, message=error_msg)
else:
# Send an immediate response of success
# The grade request is formed correctly
self._send_immediate_response(True)
# Wait a bit before POSTing back to the callback url with the
# grade result configured by the server
# Otherwise, the problem will not realize it's
# queued and it will keep waiting for a response indefinitely
delayed_grade_func = lambda: self._send_grade_response(
callback_url, xqueue_header, self.post_dict['xqueue_body']
)
delay = self.server.config.get('response_delay', self.DEFAULT_RESPONSE_DELAY)
Timer(delay, delayed_grade_func).start()
# If we get a request that's not to the grading submission
# URL, return an error
else:
self._send_immediate_response(False, message="Invalid request URL")
def _send_immediate_response(self, success, message=""):
"""
Send an immediate success/failure message
back to the client
"""
# Send the response indicating success/failure
response_str = json.dumps(
{'return_code': 0 if success else 1, 'content': message}
)
if self._is_grade_request():
self.send_response(
200, content=response_str, headers={'Content-type': 'text/plain'}
)
self.log_message("XQueue: sent response {0}".format(response_str))
else:
self.send_response(500)
def _send_grade_response(self, postback_url, xqueue_header, xqueue_body_json):
"""
POST the grade response back to the client
using the response provided by the server configuration.
Uses the server configuration to determine what response to send:
1) Specific response for submissions containing matching text in `xqueue_body`
2) Default response configured by the client (the 'default' config key)
3) Built-in default response
`postback_url` is the URL the client told us to post back to
`xqueue_header` (dict) is the full header the client sent us, which we will send back
to the client so it can authenticate us.
`xqueue_body_json` (json-encoded string) is the body of the submission the client sent us.
"""
# First check if we have a configured response that matches the submission body
grade_response = None
# This matches the pattern against the JSON-encoded xqueue_body
# This is very simplistic, but sufficient to associate a student response
# with a grading response.
# There is a danger here that a submission will match multiple response patterns.
# Rather than fail silently (which could cause unpredictable behavior in tests)
# we abort and log a debugging message.
for pattern, response in self.server.queue_responses:
if pattern in xqueue_body_json:
if grade_response is None:
grade_response = response
# Multiple matches, so abort and log an error
else:
self.log_error(
"Multiple response patterns matched '{0}'".format(xqueue_body_json),
)
return
# Fall back to the default grade response configured for this queue,
# then to the default response.
if grade_response is None:
grade_response = self.server.config.get(
'default', copy.deepcopy(self.DEFAULT_GRADE_RESPONSE)
)
# Wrap the message in <div> tags to ensure that it is valid XML
if isinstance(grade_response, dict) and 'msg' in grade_response:
grade_response['msg'] = "<div>{0}</div>".format(grade_response['msg'])
data = {
'xqueue_header': json.dumps(xqueue_header),
'xqueue_body': json.dumps(grade_response)
}
post(postback_url, data=data)
self.log_message("XQueue: sent grading response {0} to {1}".format(data, postback_url))
def _register_submission(self, xqueue_body_json):
"""
If configured, send the submission's grader payload to another service.
"""
url = self.server.config.get('register_submission_url')
# If not configured, do not need to send anything
if url is not None:
try:
xqueue_body = json.loads(xqueue_body_json)
except ValueError:
self.log_error(
"Could not decode XQueue body as JSON: '{0}'".format(xqueue_body_json))
else:
# Retrieve the grader payload, which should be a JSON-encoded dict.
# We pass the payload directly to the service we are notifying, without
# inspecting the contents.
grader_payload = xqueue_body.get('grader_payload')
if grader_payload is not None:
response = post(url, data={'grader_payload': grader_payload})
if not response.ok:
self.log_error(
"Could register submission at URL '{0}'. Status was {1}".format(
url, response.status_code))
else:
self.log_message(
"XQueue body is missing 'grader_payload' key: '{0}'".format(xqueue_body)
)
def _is_grade_request(self):
"""
Return a boolean indicating whether the requested URL indicates a submission.
"""
return 'xqueue/submit' in self.path
class StubXQueueService(StubHttpService):
"""
A stub XQueue grading server that responds to POST requests to localhost.
"""
HANDLER_CLASS = StubXQueueHandler
NON_QUEUE_CONFIG_KEYS = ['default', 'register_submission_url']
@property
def queue_responses(self):
"""
Returns a list of (pattern, response) tuples, where `pattern` is a pattern
to match in the XQueue body, and `response` is a dictionary to return
as the response from the grader.
Every configuration key other than 'default' and 'register_submission_url'
(which have special meaning) is treated as a pattern to match against the
submission body.
"""
return {
key: value
for key, value in self.config.iteritems()
if key not in self.NON_QUEUE_CONFIG_KEYS
}.items()
| agpl-3.0 |
harpribot/deep-summarization | train_scripts/train_script_lstm_stacked_bidirectional_attention.py | 1 | 1043 | import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
from models import lstm_stacked_bidirectional
from helpers import checkpoint
# Get the review summary file
review_summary_file = 'extracted_data/review_summary.csv'
# Initialize Checkpointer to ensure checkpointing
checkpointer = checkpoint.Checkpointer('stackedBidirectional', 'lstm', 'Attention')
checkpointer.steps_per_checkpoint(1000)
checkpointer.steps_per_prediction(1000)
# Train using a stacked bidirectional LSTM cell - with attention mechanism
out_file = 'result/stacked_bidirectional/lstm/attention.csv'
checkpointer.set_result_location(out_file)
lstm_net = lstm_stacked_bidirectional.LstmStackedBidirectional(review_summary_file,
checkpointer, attention=True, num_layers=2)
lstm_net.set_parameters(train_batch_size=128, test_batch_size=128, memory_dim=128, learning_rate=0.05)
lstm_net.begin_session()
lstm_net.form_model_graph()
lstm_net.fit()
lstm_net.predict()
lstm_net.store_test_predictions()
| mit |
ScanOC/trunk-player | radio/receivers.py | 1 | 1882 | # receivers.py
import json
import logging
import datetime
from django.dispatch import receiver
from django.contrib.auth.models import User
from pinax.stripe.signals import WEBHOOK_SIGNALS
from radio.models import Plan, StripePlanMatrix, Profile
from pinax.stripe.models import Plan as pinax_Plan
# Get an instance of a logger
logger = logging.getLogger(__name__)
@receiver(WEBHOOK_SIGNALS["invoice.payment_succeeded"])
def handle_payment_succeeded(sender, event, **kwargs):
logger.error('----------------------------------------')
logger.error('Stripe Payment Posted')
logger.error(event.customer)
#logger.error(event.webhook_message)
@receiver(WEBHOOK_SIGNALS["customer.subscription.created"])
def handle_subscription_created(sender, event, **kwargs):
hook_message = event.webhook_message
customer = event.customer
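# The Stripe webhook payload nests the subscription under data.object:
# current_period_end is a unix timestamp, and items.data[0].plan.id is
# the Stripe plan identifier used below to find the local plan matrix.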
stripe_subscription_end = hook_message['data']['object']['current_period_end']
stripe_subscription_plan_id = hook_message['data']['object']['items']['data'][0]['plan']['id']
user = User.objects.get(username=customer)
user_profile = Profile.objects.get(user=user)
stripe_plan = pinax_Plan.objects.get(stripe_id=stripe_subscription_plan_id)
plan_matrix = StripePlanMatrix.objects.get(stripe_plan=stripe_plan)
user_profile.plan = plan_matrix.radio_plan
user_profile.save()
logger.error('Moving Customer {} to plan {}'.format(user, plan_matrix.radio_plan))
logger.error('Stripe customer.subscription.created {}'.format(event.customer))
end_date = datetime.datetime.fromtimestamp(hook_message['data']['object']['current_period_end']).strftime('%c')
logger.error('END TS {}'.format(end_date))
#logger.error('TESTING {}'.format(hook_message['data']['object']['data'][0]))
logger.error('TESTING ID {}'.format(hook_message['data']['object']['items']['data'][0]['plan']['id']))
| mit |
GitHublong/hue | desktop/core/ext-py/South-1.0.2/south/tests/autodetection.py | 93 | 12026 | from south.tests import unittest
from south.creator.changes import AutoChanges, InitialChanges
from south.migration.base import Migrations
from south.tests import Monkeypatcher
from south.creator import freezer
from south.orm import FakeORM
from south.v2 import SchemaMigration
try:
from django.utils.six.moves import reload_module
except ImportError:
# Older django, no python3 support
reload_module = reload
class TestComparison(unittest.TestCase):
"""
Tests the comparison methods of startmigration.
"""
def test_no_change(self):
"Test with a completely unchanged definition."
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['southdemo.Lizard']"}),
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['southdemo.Lizard']"}),
),
False,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.related.ForeignKey', ['ohhai', 'there'], {'to': "somewhere", "from": "there"}),
('django.db.models.fields.related.ForeignKey', ['ohhai', 'there'], {"from": "there", 'to': "somewhere"}),
),
False,
)
def test_pos_change(self):
"Test with a changed positional argument."
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', ['hi'], {'to': "foo"}),
('django.db.models.fields.CharField', [], {'to': "foo"}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', [], {'to': "foo"}),
('django.db.models.fields.CharField', ['bye'], {'to': "foo"}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', ['pi'], {'to': "foo"}),
('django.db.models.fields.CharField', ['pi'], {'to': "foo"}),
),
False,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', ['pisdadad'], {'to': "foo"}),
('django.db.models.fields.CharField', ['pi'], {'to': "foo"}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', ['hi'], {}),
('django.db.models.fields.CharField', [], {}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', [], {}),
('django.db.models.fields.CharField', ['bye'], {}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', ['pi'], {}),
('django.db.models.fields.CharField', ['pi'], {}),
),
False,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', ['pi'], {}),
('django.db.models.fields.CharField', ['45fdfdf'], {}),
),
True,
)
def test_kwd_change(self):
"Test a changed keyword argument"
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', ['pi'], {'to': "foo"}),
('django.db.models.fields.CharField', ['pi'], {'to': "blue"}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', [], {'to': "foo"}),
('django.db.models.fields.CharField', [], {'to': "blue"}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', ['b'], {'to': "foo"}),
('django.db.models.fields.CharField', ['b'], {'to': "blue"}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', [], {'to': "foo"}),
('django.db.models.fields.CharField', [], {}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', ['a'], {'to': "foo"}),
('django.db.models.fields.CharField', ['a'], {}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', [], {}),
('django.db.models.fields.CharField', [], {'to': "foo"}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('django.db.models.fields.CharField', ['a'], {}),
('django.db.models.fields.CharField', ['a'], {'to': "foo"}),
),
True,
)
def test_backcompat_nochange(self):
"Test that the backwards-compatable comparison is working"
self.assertEqual(
AutoChanges.different_attributes(
('models.CharField', [], {}),
('django.db.models.fields.CharField', [], {}),
),
False,
)
self.assertEqual(
AutoChanges.different_attributes(
('models.CharField', ['ack'], {}),
('django.db.models.fields.CharField', ['ack'], {}),
),
False,
)
self.assertEqual(
AutoChanges.different_attributes(
('models.CharField', [], {'to':'b'}),
('django.db.models.fields.CharField', [], {'to':'b'}),
),
False,
)
self.assertEqual(
AutoChanges.different_attributes(
('models.CharField', ['hah'], {'to':'you'}),
('django.db.models.fields.CharField', ['hah'], {'to':'you'}),
),
False,
)
self.assertEqual(
AutoChanges.different_attributes(
('models.CharField', ['hah'], {'to':'you'}),
('django.db.models.fields.CharField', ['hah'], {'to':'heh'}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('models.CharField', ['hah'], {}),
('django.db.models.fields.CharField', [], {'to':"orm['appname.hah']"}),
),
False,
)
self.assertEqual(
AutoChanges.different_attributes(
('models.CharField', ['hah'], {}),
('django.db.models.fields.CharField', [], {'to':'hah'}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('models.CharField', ['hah'], {}),
('django.db.models.fields.CharField', [], {'to':'rrr'}),
),
True,
)
self.assertEqual(
AutoChanges.different_attributes(
('models.CharField', ['hah'], {}),
('django.db.models.fields.IntField', [], {'to':'hah'}),
),
True,
)
class TestNonManagedIgnored(Monkeypatcher):
installed_apps = ["non_managed"]
full_defs = {
'non_managed.legacy': {
'Meta': {'object_name': 'Legacy', 'db_table': "'legacy_table'", 'managed': 'False'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {})
}
}
def test_not_added_init(self):
migrations = Migrations("non_managed")
changes = InitialChanges(migrations)
change_list = changes.get_changes()
if list(change_list):
self.fail("Initial migration creates table for non-managed model")
def test_not_added_auto(self):
empty_defs = { }
class EmptyMigration(SchemaMigration):
"Serves as fake previous migration"
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = empty_defs
complete_apps = ['non_managed']
migrations = Migrations("non_managed")
empty_orm = FakeORM(EmptyMigration, "non_managed")
changes = AutoChanges(
migrations = migrations,
old_defs = empty_defs,
old_orm = empty_orm,
new_defs = self.full_defs,
)
change_list = changes.get_changes()
if list(change_list):
self.fail("Auto migration creates table for non-managed model")
def test_not_deleted_auto(self):
empty_defs = { }
old_defs = freezer.freeze_apps(["non_managed"])
class InitialMigration(SchemaMigration):
"Serves as fake previous migration"
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = self.full_defs
complete_apps = ['non_managed']
migrations = Migrations("non_managed")
initial_orm = FakeORM(InitialMigration, "non_managed")
changes = AutoChanges(
migrations = migrations,
old_defs = self.full_defs,
old_orm = initial_orm,
new_defs = empty_defs,
)
change_list = changes.get_changes()
if list(change_list):
self.fail("Auto migration deletes table for non-managed model")
def test_not_modified_auto(self):
fake_defs = {
'non_managed.legacy': {
'Meta': {'object_name': 'Legacy', 'db_table': "'legacy_table'", 'managed': 'False'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
#'size': ('django.db.models.fields.IntegerField', [], {}) # The "change" is the addition of this field
}
}
class InitialMigration(SchemaMigration):
"Serves as fake previous migration"
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = fake_defs
complete_apps = ['non_managed']
from non_managed import models as dummy_import_to_force_loading_models # TODO: Does needing this indicate a bug in Monkeypatcher?
reload_module(dummy_import_to_force_loading_models) # really force...
migrations = Migrations("non_managed")
initial_orm = FakeORM(InitialMigration, "non_managed")
changes = AutoChanges(
migrations = migrations,
old_defs = fake_defs,
old_orm = initial_orm,
new_defs = self.full_defs
)
change_list = changes.get_changes()
if list(change_list):
self.fail("Auto migration changes table for non-managed model")
| apache-2.0 |
Grumbel/rfactorlcd | rfactorlcd/state.py | 1 | 13232 | # rFactor Remote LCD
# Copyright (C) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import rfactorlcd
class LapTime:
def __init__(self):
self.sector1 = 0
self.sector2 = 0
self.sector3 = 0
@property
def total(self):
return self.sector1 + self.sector2 + self.sector3
class LapTimes(object):
"""Lap time history for a vehicle in a single session"""
def __init__(self):
self.laps = {}
self.current_sector = None
@property
def best_time(self):
# self.laps is a dict keyed by lap number; guard against the empty
# case and take the minimum total over the stored LapTime values.
if not self.laps:
return 0
else:
return min(lap.total for lap in self.laps.values())
def last_lap(self):
last_lap = -1
last_times = None
for lap, times in self.laps.items():
if lap > last_lap:
last_lap = lap
last_times = times
return last_times
def update(self, state):
"""Update current LapTime history with info from VehicleState"""
if state.sector == 0 and state.total_laps == 0:
pass
elif self.current_sector != state.sector:
self.current_sector = state.sector
if state.sector == 0:
lap = state.total_laps - 1
else:
lap = state.total_laps
if lap in self.laps:
lap_time = self.laps[lap]
else:
lap_time = LapTime()
self.laps[lap] = lap_time
# set the sector time in the LapTime object
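# cur_sector1 and cur_sector2 are cumulative times from the start of
# the lap, so sector 2 and sector 3 durations fall out by subtraction.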
if state.sector == 1:
lap_time.sector1 = state.cur_sector1
elif state.sector == 2:
lap_time.sector2 = state.cur_sector2 - state.cur_sector1
elif state.sector == 0:
lap_time.sector3 = state.last_lap_time - state.cur_sector2
else:
logging.error("unknown sector: %d" % state.sector)
class WheelState(object):
def __init__(self):
self.rotation = 0.0
self.suspension_deflection = 0.0
self.ride_height = 0.0
self.tire_load = 0.0
self.lateral_force = 0.0
self.grip_fract = 0.0
self.brake_temp = 0.0
self.pressure = 0.0
self.temperature = [0.0, 0.0, 0.0]
self.wear = 0.0
self.surface_type = 0
self.flat = 0
self.detached = 0
class VehicleState(object):
def __init__(self):
self.is_player = 0
self.control = 0
self.driver_name = ""
self.vehicle_name = ""
self.vehicle_class = ""
self.total_laps = 0
self.sector = 0
self.finish_status = 0
self.lap_dist = 0
self.path_lateral = 0.0
self.track_edge = 0.0
self.in_pits = 0
self.place = 0
self.time_behind_next = 0.0
self.laps_behind_next = 0
self.time_behind_leader = 0.0
self.laps_behind_leader = 0
self.best_sector1 = 0.0
self.best_sector2 = 0.0
self.best_lap_time = 0.0
self.last_sector1 = 0.0
self.last_sector2 = 0.0
self.last_lap_time = 0.0
self.cur_sector1 = 0.0
self.cur_sector2 = 0.0
self.num_pitstops = 0
self.num_penalties = 0
self.lap_start_et = 0.0
self.lap_times = LapTimes()
class rFactorState(object):
def __init__(self):
self.session_id = 0
# telemetry defaults
self.lap_number = 0
self.lap_start_et = 0.0
self.pos = (0.0, 0.0, 0.0)
self.local_vel = (0.0, 0.0, 0.0)
self.local_accel = (0.0, 0.0, 0.0)
self.ori_x = (0.0, 0.0, 0.0)
self.ori_y = (0.0, 0.0, 0.0)
self.ori_z = (0.0, 0.0, 0.0)
self.local_rot = (0.0, 0.0, 0.0)
self.local_rot_accel = (0.0, 0.0, 0.0)
self.gear = 0
self.rpm = 0.0
self.max_rpm = 0.0
self.clutch_rpm = 0.0
self.fuel = 0.0
self.water_temp = 0.0
self.oil_temp = 0.0
self.throttle = 0.0
self.brake = 0.0
self.steering = 0.0
self.clutch = 0.0
self.steering_arm_force = 0.0
self.scheduled_stops = 0
self.overheating = 0
self.detached = 0
self.dent_severity = [0, 0, 0, 0, 0, 0, 0, 0]
self.wheels = [WheelState(), WheelState(), WheelState(), WheelState()]
self.num_vehicles = 0
self.player = None
self.vehicles = []
# info
self.track_name = ""
self.player_name = ""
self.plr_file_name = ""
self.end_e_t = 0.0
self.max_laps = 0
self.lap_dist = 1.0
# score
self.game_phase = 0
self.yellow_flag_state = 0
self.sector_flag = [0, 0, 0]
self.start_light = 0
self.num_red_lights = 0
self.session = 0
self.current_e_t = 0.0
self.ambient_temp = 0.0
self.track_temp = 0.0
# Backward compatibility hacks:
self.speed = 0
self.laptime = "1:23:45"
self.best_lap_driver = ""
@property
def best_lap_time(self):
if self.vehicles != []:
best = self.vehicles[0].best_lap_time
for veh in self.vehicles[1:]:
if veh.best_lap_time < best:
best = veh.best_lap_time
self.best_lap_driver = veh.driver_name # FIXME: hack
return best
else:
return 0
def on_telemetry(self, msg):
self.delta_time = msg.read_float()
self.lap_number = msg.read_int()
self.lap_start_et = msg.read_float()
# missing: mVehicleName[64]
# missing: mTrackName[64]
self.pos = msg.read_vect()
self.local_vel = msg.read_vect()
self.local_accel = msg.read_vect()
self.ori_x = msg.read_vect()
self.ori_y = msg.read_vect()
self.ori_z = msg.read_vect()
self.local_rot = msg.read_vect()
self.local_rot_accel = msg.read_vect()
self.gear = msg.read_int()
self.rpm = msg.read_float()
self.max_rpm = msg.read_float()
self.clutch_rpm = msg.read_float()
self.fuel = msg.read_float()
self.water_temp = msg.read_float()
self.oil_temp = msg.read_float()
self.throttle = msg.read_float()
self.brake = msg.read_float()
self.steering = msg.read_float()
self.clutch = msg.read_float()
self.steering_arm_force = msg.read_float()
self.scheduled_stops = msg.read_char()
self.overheating = msg.read_char()
self.detached = msg.read_char()
self.dent_severity = msg.read_multi_char(8)
self.last_impact_e_t = msg.read_float()
self.last_impact_magnitude = msg.read_float()
self.last_impact_pos = msg.read_vect()
# give speed in km/h
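# (local_vel is in m/s; the negated z component is taken as the forward
# speed and the factor 3.6 converts m/s to km/h)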
self.speed = -self.local_vel[2] * 3.6
for i in range(0, 4):
self.wheels[i].rotation = msg.read_float()
self.wheels[i].suspension_deflection = msg.read_float()
self.wheels[i].ride_height = msg.read_float()
self.wheels[i].tire_load = msg.read_float()
self.wheels[i].lateral_force = msg.read_float()
self.wheels[i].grip_fract = msg.read_float()
self.wheels[i].brake_temp = msg.read_float()
self.wheels[i].pressure = msg.read_float()
self.wheels[i].temperature = [msg.read_float(),
msg.read_float(),
msg.read_float()]
self.wheels[i].wear = msg.read_float()
# missing: mTerrainName[16]
self.wheels[i].surface_type = msg.read_char()
self.wheels[i].flat = msg.read_char()
self.wheels[i].detached = msg.read_char()
def on_vehicle(self, msg):
self.num_vehicles = msg.read_int()
if self.num_vehicles != len(self.vehicles):
self.vehicles = []
for i in range(self.num_vehicles):
self.vehicles.append(VehicleState())
for i in range(0, self.num_vehicles):
self.vehicles[i].is_player = msg.read_char()
self.vehicles[i].control = msg.read_char()
self.vehicles[i].driver_name = msg.read_string()
self.vehicles[i].vehicle_name = msg.read_string()
self.vehicles[i].vehicle_class = msg.read_string()
self.vehicles[i].total_laps = msg.read_short()
# rFactor numbers sectors 1, 2, 0, convert them to 0, 1, 2
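# e.g. raw sector 1 -> (1 + 2) % 3 == 0, raw 2 -> 1, raw 0 -> 2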
self.vehicles[i].sector = (msg.read_char() + 2) % 3
self.vehicles[i].finish_status = msg.read_char()
self.vehicles[i].lap_dist = msg.read_float()
self.vehicles[i].path_lateral = msg.read_float()
self.vehicles[i].track_edge = msg.read_float()
self.vehicles[i].in_pits = msg.read_char()
self.vehicles[i].place = msg.read_char()
self.vehicles[i].time_behind_next = msg.read_float()
self.vehicles[i].laps_behind_next = msg.read_int()
self.vehicles[i].time_behind_leader = msg.read_float()
self.vehicles[i].laps_behind_leader = msg.read_int()
self.vehicles[i].best_sector1 = msg.read_float()
self.vehicles[i].best_sector2 = msg.read_float()
self.vehicles[i].best_lap_time = msg.read_float()
# these times are only updated going into a new lap
self.vehicles[i].last_sector1 = msg.read_float()
self.vehicles[i].last_sector2 = msg.read_float()
self.vehicles[i].last_lap_time = msg.read_float()
self.vehicles[i].cur_sector1 = msg.read_float()
self.vehicles[i].cur_sector2 = msg.read_float()
self.vehicles[i].num_pitstops = msg.read_short()
self.vehicles[i].num_penalties = msg.read_short()
self.vehicles[i].lap_start_et = msg.read_float()
self.vehicles[i].pos = msg.read_vect()
self.vehicles[i].local_vel = msg.read_vect()
self.vehicles[i].local_accel = msg.read_vect()
self.vehicles[i].ori_x = msg.read_vect()
self.vehicles[i].ori_y = msg.read_vect()
self.vehicles[i].ori_z = msg.read_vect()
self.vehicles[i].local_rot = msg.read_vect()
self.vehicles[i].local_rot_accel = msg.read_vect()
if self.vehicles[i].is_player:
self.player = self.vehicles[i]
self.vehicles[i].lap_times.update(self.vehicles[i])
def on_score(self, msg):
self.game_phase = msg.read_char()
self.yellow_flag_state = msg.read_char()
self.sector_flag = msg.read_multi_char(3)
self.start_light = msg.read_char()
self.num_red_lights = msg.read_char()
self.in_realtime = msg.read_char()
self.session = msg.read_int()
self.current_e_t = msg.read_float()
self.ambient_temp = msg.read_float()
self.track_temp = msg.read_float()
self.dark_cloud = msg.read_float()
self.raining = msg.read_float()
self.wind = msg.read_vect()
self.on_path_wetness = msg.read_float()
self.off_path_wetness = msg.read_float()
def on_info(self, msg):
self.track_name = msg.read_string()
self.player_name = msg.read_string()
self.plr_file_name = msg.read_string()
self.end_e_t = msg.read_float()
self.max_laps = msg.read_int()
self.lap_dist = msg.read_float()
# missing mResultsStream
def on_start_realtime(self, msg):
pass
def on_end_realtime(self, msg):
pass
def on_start_session(self, msg):
self.session_id += 1
self.vehicles = []
logging.info("on_start_session")
def on_end_session(self, msg):
logging.info("on_end_session")
def dispatch_message(self, tag, payload):
msg = rfactorlcd.BinaryDecoder(payload)
if tag == "STSS":
self.on_start_session(msg)
elif tag == "EDSS":
self.on_end_session(msg)
elif tag == "STRT":
self.on_start_realtime(msg)
elif tag == "EDRT":
self.on_end_realtime(msg)
elif tag == "VHCL":
self.on_vehicle(msg)
elif tag == "TLMT":
self.on_telemetry(msg)
elif tag == "SCOR":
self.on_score(msg)
elif tag == "INFO":
self.on_info(msg)
else:
print "error: unhandled tag: %s" % tag
# EOF #
| gpl-3.0 |
bagcoin/bagcoin | qa/rpc-tests/listtransactions.py | 164 | 4718 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in an array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs. Asserts that at least one object matches, and that every
matching object also carries the expected key/value pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
class ListTransactionsTest(BitcoinTestFramework):
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
check_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].setgenerate(True, 1)
self.sync_all()
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
check_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
check_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node0:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
check_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
check_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
check_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
if __name__ == '__main__':
ListTransactionsTest().main()
| apache-2.0 |
lacrazyboy/scrapy | scrapy/linkextractors/htmlparser.py | 90 | 2883 | """
HTMLParser-based link extractor
"""
import warnings
from six.moves.html_parser import HTMLParser
from six.moves.urllib.parse import urljoin
from w3lib.url import safe_url_string
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
from scrapy.exceptions import ScrapyDeprecationWarning
class HtmlParserLinkExtractor(HTMLParser):
def __init__(self, tag="a", attr="href", process=None, unique=False):
HTMLParser.__init__(self)
warnings.warn(
"HtmlParserLinkExtractor is deprecated and will be removed in "
"future releases. Please use scrapy.linkextractors.LinkExtractor",
ScrapyDeprecationWarning, stacklevel=2,
)
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
def _extract_links(self, response_text, response_url, response_encoding):
self.reset()
self.feed(response_text)
self.close()
links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links
ret = []
base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
for link in links:
if isinstance(link.url, unicode):
link.url = link.url.encode(response_encoding)
try:
link.url = urljoin(base_url, link.url)
except ValueError:
continue
link.url = safe_url_string(link.url, response_encoding)
link.text = link.text.decode(response_encoding)
ret.append(link)
return ret
def extract_links(self, response):
# wrapper needed to allow working directly with text
return self._extract_links(response.body, response.url, response.encoding)
def reset(self):
HTMLParser.reset(self)
self.base_url = None
self.current_link = None
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'base':
self.base_url = dict(attrs).get('href')
if self.scan_tag(tag):
for attr, value in attrs:
if self.scan_attr(attr):
url = self.process_attr(value)
link = Link(url=url)
self.links.append(link)
self.current_link = link
def handle_endtag(self, tag):
if self.scan_tag(tag):
self.current_link = None
def handle_data(self, data):
if self.current_link:
self.current_link.text = self.current_link.text + data
def matches(self, url):
"""This extractor matches with any url, since
it doesn't contain any patterns"""
return True
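# A minimal usage sketch (HTML, URL and encoding are illustrative
# assumptions, not taken from the test suite):
#
# lx = HtmlParserLinkExtractor(tag='a', attr='href', unique=True)
# links = lx._extract_links('<a href="/about">About</a>',
#                           'http://example.com', 'utf-8')
# # -> [Link(url='http://example.com/about', text=u'About')]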
| bsd-3-clause |
azverkan/scons | src/engine/SCons/Node/AliasTests.py | 3 | 3939 | #
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import unittest
import SCons.Errors
import SCons.Node.Alias
class AliasTestCase(unittest.TestCase):
def test_AliasNameSpace(self):
"""Test creating an Alias name space
"""
ans = SCons.Node.Alias.AliasNameSpace()
assert ans is not None, ans
def test_ANS_Alias(self):
"""Test the Alias() factory
"""
ans = SCons.Node.Alias.AliasNameSpace()
a1 = ans.Alias('a1')
assert a1.name == 'a1', a1.name
a2 = ans.Alias('a1')
assert a1 is a2, (a1, a2)
def test_get_contents(self):
"""Test the get_contents() method
"""
class DummyNode(object):
def __init__(self, contents):
self.contents = contents
def get_csig(self):
return self.contents
def get_contents(self):
return self.contents
ans = SCons.Node.Alias.AliasNameSpace()
ans.Alias('a1')
a = ans.lookup('a1')
a.sources = [ DummyNode('one'), DummyNode('two'), DummyNode('three') ]
c = a.get_contents()
assert c == 'onetwothree', c
def test_lookup(self):
"""Test the lookup() method
"""
ans = SCons.Node.Alias.AliasNameSpace()
ans.Alias('a1')
a = ans.lookup('a1')
assert a.name == 'a1', a.name
a1 = ans.lookup('a1')
assert a is a1, a1
a = ans.lookup('a2')
assert a is None, a
def test_Alias(self):
"""Test creating an Alias() object
"""
a1 = SCons.Node.Alias.Alias('a')
assert a1.name == 'a', a1.name
a2 = SCons.Node.Alias.Alias('a')
assert a2.name == 'a', a2.name
assert a1 is not a2
assert a1.name == a2.name
class AliasNodeInfoTestCase(unittest.TestCase):
def test___init__(self):
"""Test AliasNodeInfo initialization"""
ans = SCons.Node.Alias.AliasNameSpace()
aaa = ans.Alias('aaa')
ni = SCons.Node.Alias.AliasNodeInfo(aaa)
class AliasBuildInfoTestCase(unittest.TestCase):
def test___init__(self):
"""Test AliasBuildInfo initialization"""
ans = SCons.Node.Alias.AliasNameSpace()
aaa = ans.Alias('aaa')
bi = SCons.Node.Alias.AliasBuildInfo(aaa)
if __name__ == "__main__":
suite = unittest.TestSuite()
tclasses = [
AliasTestCase,
AliasBuildInfoTestCase,
AliasNodeInfoTestCase,
]
for tclass in tclasses:
names = unittest.getTestCaseNames(tclass, 'test_')
suite.addTests(list(map(tclass, names)))
if not unittest.TextTestRunner().run(suite).wasSuccessful():
sys.exit(1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
sarantapichos/faircoop-market | addons/base_import_module/models/base_import_module.py | 344 | 2066 | import base64
from io import BytesIO
from openerp.osv import osv, fields
class base_import_module(osv.TransientModel):
""" Import Module """
_name = "base.import.module"
_description = "Import Module"
_columns = {
'module_file': fields.binary('Module .ZIP file', required=True),
'state':fields.selection([('init','init'),('done','done')], 'Status', readonly=True),
'import_message': fields.char('Import message'),
'force': fields.boolean('Force init', help="Force init mode even if installed. (will update `noupdate='1'` records)"),
}
_defaults = {
'state': 'init',
'force': False,
}
def import_module(self, cr, uid, ids, context=None):
module_obj = self.pool.get('ir.module.module')
data = self.browse(cr, uid, ids[0] , context=context)
zip_data = base64.decodestring(data.module_file)
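# Wrap the decoded archive in an in-memory buffer so that
# ir.module.module.import_zipfile can read it like an uploaded file.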
fp = BytesIO()
fp.write(zip_data)
res = module_obj.import_zipfile(cr, uid, fp, force=data.force, context=context)
self.write(cr, uid, ids, {'state': 'done', 'import_message': res[0]}, context=context)
context = dict(context, module_name=res[1])
# Return the wizard action; otherwise the wizard would close and the result message would not be shown to the user.
return {
'name': 'Import Module',
'view_type': 'form',
'view_mode': 'form',
'target': 'new',
'res_id': ids[0],
'res_model': 'base.import.module',
'type': 'ir.actions.act_window',
'context': context,
}
def action_module_open(self, cr, uid, ids, context):
return {
'domain': [('name', 'in', context.get('module_name',[]))],
'name': 'Modules',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'ir.module.module',
'view_id': False,
'type': 'ir.actions.act_window',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andyfaff/scipy | scipy/signal/tests/test_bsplines.py | 12 | 13021 | # pylint: disable=missing-docstring
import numpy as np
from numpy import array
from numpy.testing import (assert_equal,
assert_allclose, assert_array_equal,
assert_almost_equal)
import pytest
from pytest import raises
import scipy.signal.bsplines as bsp
from scipy import signal
class TestBSplines:
"""Test behaviors of B-splines. The values tested against were returned as of
SciPy 1.1.0 and are included for regression testing purposes"""
def test_spline_filter(self):
np.random.seed(12457)
# Test the type-error branch
raises(TypeError, bsp.spline_filter, array([0]), 0)
# Test the complex branch
data_array_complex = np.random.rand(7, 7) + np.random.rand(7, 7)*1j
# make the magnitude exceed 1, and make some negative
data_array_complex = 10*(1+1j-2*data_array_complex)
result_array_complex = array(
[[-4.61489230e-01-1.92994022j, 8.33332443+6.25519943j,
6.96300745e-01-9.05576038j, 5.28294849+3.97541356j,
5.92165565+7.68240595j, 6.59493160-1.04542804j,
9.84503460-5.85946894j],
[-8.78262329-8.4295969j, 7.20675516+5.47528982j,
-8.17223072+2.06330729j, -4.38633347-8.65968037j,
9.89916801-8.91720295j, 2.67755103+8.8706522j,
6.24192142+3.76879835j],
[-3.15627527+2.56303072j, 9.87658501-0.82838702j,
-9.96930313+8.72288895j, 3.17193985+6.42474651j,
-4.50919819-6.84576082j, 5.75423431+9.94723988j,
9.65979767+6.90665293j],
[-8.28993416-6.61064005j, 9.71416473e-01-9.44907284j,
-2.38331890+9.25196648j, -7.08868170-0.77403212j,
4.89887714+7.05371094j, -1.37062311-2.73505688j,
7.70705748+2.5395329j],
[2.51528406-1.82964492j, 3.65885472+2.95454836j,
5.16786575-1.66362023j, -8.77737999e-03+5.72478867j,
4.10533333-3.10287571j, 9.04761887+1.54017115j,
-5.77960968e-01-7.87758923j],
[9.86398506-3.98528528j, -4.71444130-2.44316983j,
-1.68038976-1.12708664j, 2.84695053+1.01725709j,
1.14315915-8.89294529j, -3.17127085-5.42145538j,
1.91830420-6.16370344j],
[7.13875294+2.91851187j, -5.35737514+9.64132309j,
-9.66586399+0.70250005j, -9.87717438-2.0262239j,
9.93160629+1.5630846j, 4.71948051-2.22050714j,
9.49550819+7.8995142j]])
# FIXME: for complex types, the computations are done in
# single precision (reason unclear). When this is changed,
# this test needs updating.
assert_allclose(bsp.spline_filter(data_array_complex, 0),
result_array_complex, rtol=1e-6)
# Test the real branch
np.random.seed(12457)
data_array_real = np.random.rand(12, 12)
# make the magnitude exceed 1, and make some negative
data_array_real = 10*(1-2*data_array_real)
result_array_real = array(
[[-.463312621, 8.33391222, .697290949, 5.28390836,
5.92066474, 6.59452137, 9.84406950, -8.78324188,
7.20675750, -8.17222994, -4.38633345, 9.89917069],
[2.67755154, 6.24192170, -3.15730578, 9.87658581,
-9.96930425, 3.17194115, -4.50919947, 5.75423446,
9.65979824, -8.29066885, .971416087, -2.38331897],
[-7.08868346, 4.89887705, -1.37062289, 7.70705838,
2.51526461, 3.65885497, 5.16786604, -8.77715342e-03,
4.10533325, 9.04761993, -.577960351, 9.86382519],
[-4.71444301, -1.68038985, 2.84695116, 1.14315938,
-3.17127091, 1.91830461, 7.13779687, -5.35737482,
-9.66586425, -9.87717456, 9.93160672, 4.71948144],
[9.49551194, -1.92958436, 6.25427993, -9.05582911,
3.97562282, 7.68232426, -1.04514824, -5.86021443,
-8.43007451, 5.47528997, 2.06330736, -8.65968112],
[-8.91720100, 8.87065356, 3.76879937, 2.56222894,
-.828387146, 8.72288903, 6.42474741, -6.84576083,
9.94724115, 6.90665380, -6.61084494, -9.44907391],
[9.25196790, -.774032030, 7.05371046, -2.73505725,
2.53953305, -1.82889155, 2.95454824, -1.66362046,
5.72478916, -3.10287679, 1.54017123, -7.87759020],
[-3.98464539, -2.44316992, -1.12708657, 1.01725672,
-8.89294671, -5.42145629, -6.16370321, 2.91775492,
9.64132208, .702499998, -2.02622392, 1.56308431],
[-2.22050773, 7.89951554, 5.98970713, -7.35861835,
5.45459283, -7.76427957, 3.67280490, -4.05521315,
4.51967507, -3.22738749, -3.65080177, 3.05630155],
[-6.21240584, -.296796126, -8.34800163, 9.21564563,
-3.61958784, -4.77120006, -3.99454057, 1.05021988e-03,
-6.95982829, 6.04380797, 8.43181250, -2.71653339],
[1.19638037, 6.99718842e-02, 6.72020394, -2.13963198,
3.75309875, -5.70076744, 5.92143551, -7.22150575,
-3.77114594, -1.11903194, -5.39151466, 3.06620093],
[9.86326886, 1.05134482, -7.75950607, -3.64429655,
7.81848957, -9.02270373, 3.73399754, -4.71962549,
-7.71144306, 3.78263161, 6.46034818, -4.43444731]])
assert_allclose(bsp.spline_filter(data_array_real, 0),
result_array_real)
def test_bspline(self):
np.random.seed(12458)
assert_allclose(bsp.bspline(np.random.rand(1, 1), 2),
array([[0.73694695]]))
data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
data_array_complex = 0.1*data_array_complex
result_array_complex = array(
[[0.40882362, 0.41021151, 0.40886708, 0.40905103],
[0.40829477, 0.41021230, 0.40966097, 0.40939871],
[0.41036803, 0.40901724, 0.40965331, 0.40879513],
[0.41032862, 0.40925287, 0.41037754, 0.41027477]])
assert_allclose(bsp.bspline(data_array_complex, 10),
result_array_complex)
def test_gauss_spline(self):
np.random.seed(12459)
assert_almost_equal(bsp.gauss_spline(0, 0), 1.381976597885342)
assert_allclose(bsp.gauss_spline(array([1.]), 1), array([0.04865217]))
def test_gauss_spline_list(self):
# regression test for gh-12152 (accept array_like)
knots = [-1.0, 0.0, -1.0]
assert_almost_equal(bsp.gauss_spline(knots, 3),
array([0.15418033, 0.6909883, 0.15418033]))
def test_cubic(self):
np.random.seed(12460)
assert_array_equal(bsp.cubic([0]), array([0]))
data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
data_array_complex = 1+1j-2*data_array_complex
# scaling the magnitude by 10 makes the results close enough to zero,
# that the assertion fails, so just make the elements have a mix of
# positive and negative imaginary components...
result_array_complex = array(
[[0.23056563, 0.38414406, 0.08342987, 0.06904847],
[0.17240848, 0.47055447, 0.63896278, 0.39756424],
[0.12672571, 0.65862632, 0.1116695, 0.09700386],
[0.3544116, 0.17856518, 0.1528841, 0.17285762]])
assert_allclose(bsp.cubic(data_array_complex), result_array_complex)
def test_quadratic(self):
np.random.seed(12461)
assert_array_equal(bsp.quadratic([0]), array([0]))
data_array_complex = np.random.rand(4, 4) + np.random.rand(4, 4)*1j
# scaling the magnitude by 10 makes the results all zero,
# so just make the elements have a mix of positive and negative
# imaginary components...
data_array_complex = (1+1j-2*data_array_complex)
result_array_complex = array(
[[0.23062746, 0.06338176, 0.34902312, 0.31944105],
[0.14701256, 0.13277773, 0.29428615, 0.09814697],
[0.52873842, 0.06484157, 0.09517566, 0.46420389],
[0.09286829, 0.09371954, 0.1422526, 0.16007024]])
assert_allclose(bsp.quadratic(data_array_complex),
result_array_complex)
def test_cspline1d(self):
np.random.seed(12462)
assert_array_equal(bsp.cspline1d(array([0])), [0.])
c1d = array([1.21037185, 1.86293902, 2.98834059, 4.11660378,
4.78893826])
# test lambda != 0
assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5]), 1), c1d)
c1d0 = array([0.78683946, 2.05333735, 2.99981113, 3.94741812,
5.21051638])
assert_allclose(bsp.cspline1d(array([1., 2, 3, 4, 5])), c1d0)
def test_qspline1d(self):
np.random.seed(12463)
assert_array_equal(bsp.qspline1d(array([0])), [0.])
# test lambda != 0
raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), 1.)
raises(ValueError, bsp.qspline1d, array([1., 2, 3, 4, 5]), -1.)
q1d0 = array([0.85350007, 2.02441743, 2.99999534, 3.97561055,
5.14634135])
assert_allclose(bsp.qspline1d(array([1., 2, 3, 4, 5])), q1d0)
def test_cspline1d_eval(self):
np.random.seed(12464)
assert_allclose(bsp.cspline1d_eval(array([0., 0]), [0.]), array([0.]))
assert_array_equal(bsp.cspline1d_eval(array([1., 0, 1]), []),
array([]))
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
dx = x[1]-x[0]
newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
-0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
12.5]
y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
1.396, 4.094])
cj = bsp.cspline1d(y)
newy = array([6.203, 4.41570658, 3.514, 5.16924703, 6.864, 6.04643068,
4.21600281, 6.04643068, 6.864, 5.16924703, 3.514,
4.41570658, 6.203, 6.80717667, 6.759, 6.98971173, 7.433,
7.79560142, 7.874, 7.41525761, 5.879, 3.18686814, 1.396,
2.24889482, 4.094, 2.24889482, 1.396, 3.18686814, 5.879,
7.41525761, 7.874, 7.79560142, 7.433, 6.98971173, 6.759,
6.80717667, 6.203, 4.41570658])
assert_allclose(bsp.cspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy)
def test_qspline1d_eval(self):
np.random.seed(12465)
assert_allclose(bsp.qspline1d_eval(array([0., 0]), [0.]), array([0.]))
assert_array_equal(bsp.qspline1d_eval(array([1., 0, 1]), []),
array([]))
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
dx = x[1]-x[0]
newx = [-6., -5.5, -5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1.,
-0.5, 0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5., 5.5, 6.,
6.5, 7., 7.5, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12.,
12.5]
y = array([4.216, 6.864, 3.514, 6.203, 6.759, 7.433, 7.874, 5.879,
1.396, 4.094])
cj = bsp.qspline1d(y)
newy = array([6.203, 4.49418159, 3.514, 5.18390821, 6.864, 5.91436915,
4.21600002, 5.91436915, 6.864, 5.18390821, 3.514,
4.49418159, 6.203, 6.71900226, 6.759, 7.03980488, 7.433,
7.81016848, 7.874, 7.32718426, 5.879, 3.23872593, 1.396,
2.34046013, 4.094, 2.34046013, 1.396, 3.23872593, 5.879,
7.32718426, 7.874, 7.81016848, 7.433, 7.03980488, 6.759,
6.71900226, 6.203, 4.49418159])
assert_allclose(bsp.qspline1d_eval(cj, newx, dx=dx, x0=x[0]), newy)
def test_sepfir2d_invalid_filter():
filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0])
image = np.random.rand(7, 9)
# No error for odd lengths
signal.sepfir2d(image, filt, filt[2:])
# Row or column filter must be odd
with pytest.raises(ValueError, match="odd length"):
signal.sepfir2d(image, filt, filt[1:])
with pytest.raises(ValueError, match="odd length"):
signal.sepfir2d(image, filt[1:], filt)
# Filters must be 1-dimensional
with pytest.raises(ValueError, match="object too deep"):
signal.sepfir2d(image, filt.reshape(1, -1), filt)
with pytest.raises(ValueError, match="object too deep"):
signal.sepfir2d(image, filt, filt.reshape(1, -1))
def test_sepfir2d_invalid_image():
filt = np.array([1.0, 2.0, 4.0, 2.0, 1.0])
image = np.random.rand(8, 8)
# Image must be 2 dimensional
with pytest.raises(ValueError, match="object too deep"):
signal.sepfir2d(image.reshape(4, 4, 4), filt, filt)
with pytest.raises(ValueError, match="object of too small depth"):
signal.sepfir2d(image[0], filt, filt)
| bsd-3-clause |
ocelot-collab/ocelot | unit_tests/ebeam_test/acc_utils/acc_utils_test.py | 1 | 4045 | """Tests of accelerator utilities: lattice transfer map, chicane R56/T566, and the rf2beam/beam2rf round trip."""
import os
import sys
import copy
import time
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
REF_RES_DIR = FILE_DIR + '/ref_results/'
from unit_tests.params import *
from acc_utils_conf import *
def test_lattice_transfer_map(lattice, p_array, parameter=None, update_ref_values=False):
"""R matrix calculation test"""
r_matrix = lattice_transfer_map(lattice, 0.0)
if update_ref_values:
return numpy2json(r_matrix)
r_matrix_ref = json2numpy(json_read(REF_RES_DIR + sys._getframe().f_code.co_name + '.json'))
result = check_matrix(r_matrix, r_matrix_ref, TOL, assert_info=' r_matrix - ')
assert check_result(result)
@pytest.mark.parametrize('parameter', [0, 1])
def test_lattice_transfer_map_RT(lattice, p_array, parameter, update_ref_values=False):
"""test R56 and T566 of the chicane"""
r56, t566, u5666, Sref = chicane_RTU(yoke_len=b1.l/b1.angle*np.sin(b1.angle), dip_dist=d1.l*np.cos(b1.angle), r=b1.l/b1.angle, type='c')
lattice = copy.deepcopy(lattice)
if parameter == 1:
for elem in lattice.sequence:
if elem.__class__ == Bend:
elem.tilt = np.pi / 2
lattice.update_transfer_maps()
r_matrix = lattice_transfer_map(lattice, 0.0)
result1 = check_value(r_matrix[4, 5], r56, tolerance=1.0e-14, assert_info=" R56 ")
result2 = check_value(lattice.T[4, 5, 5], t566, tolerance=1.0e-14, assert_info=" T566 ")
assert check_result([result1, result2])
def test_rf2beam(lattice, p_array, parameter=None, update_ref_values=False):
"""
track function test without CSR
0 - normal tracking
1 - tilt bending magnets and tilt back electron beam then untilt beam and compare with ref beam (twiss not checked)
"""
v1 = 0.14746291505994155
phi1 = -11.105280079934298
vh = 0.030763428944485114
phih = 132.9179951484828 - 360
E1, chirp, curvature, skewness = rf2beam(v1, phi1, vh, phih, n=3, freq=1.3e9, E0=0.00675, zeta1=0., zeta2=0.,
zeta3=0.)
v1_r, phi1_r, vh_r, phih_r = beam2rf(E1, chirp, curvature, skewness, n=3, freq=1.3e9, E0=0.00675, zeta1=0., zeta2=0.,
zeta3=0.)
r1 = check_value(v1_r, v1, tolerance=1.0e-8, tolerance_type='relative', assert_info='v1')
r2 = check_value(phi1_r, phi1, tolerance=1.0e-8, tolerance_type='relative', assert_info='phi1')
r3 = check_value(vh_r, vh, tolerance=1.0e-8, tolerance_type='relative', assert_info='vh')
r4 = check_value(phih_r, phih, tolerance=1.0e-8, tolerance_type='relative', assert_info='phih')
assert check_result([r1, r2, r3, r4])
def setup_module(module):
f = open(pytest.TEST_RESULTS_FILE, 'a')
f.write('### ACC_UTILS START ###\n\n')
f.close()
def teardown_module(module):
f = open(pytest.TEST_RESULTS_FILE, 'a')
f.write('### ACC_UTILS END ###\n\n\n')
f.close()
def setup_function(function):
f = open(pytest.TEST_RESULTS_FILE, 'a')
f.write(function.__name__)
f.close()
pytest.t_start = time.time()
def teardown_function(function):
f = open(pytest.TEST_RESULTS_FILE, 'a')
f.write(' execution time is ' + '{:.3f}'.format(time.time() - pytest.t_start) + ' sec\n\n')
f.close()
@pytest.mark.update
def test_update_ref_values(lattice, p_array, cmdopt):
update_functions = []
update_functions.append('test_lattice_transfer_map')
update_function_parameters = {}
parameter = update_function_parameters[cmdopt] if cmdopt in update_function_parameters.keys() else ['']
if cmdopt in update_functions:
for p in parameter:
p_arr = copy.deepcopy(p_array)
result = eval(cmdopt)(lattice, p_arr, p, True)
if os.path.isfile(REF_RES_DIR + cmdopt + str(p) + '.json'):
os.rename(REF_RES_DIR + cmdopt + str(p) + '.json', REF_RES_DIR + cmdopt + str(p) + '.old')
json_save(result, REF_RES_DIR + cmdopt + str(p) + '.json')
| gpl-3.0 |
magvugr/AT | EntVirtual/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/constants.py | 3008 | 1335 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
| gpl-3.0 |
jelly/arch-security-tracker | test/test_group.py | 2 | 19809 | from flask import url_for
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import NotFound
from config import TRACKER_BUGTRACKER_URL
from tracker.model.cve import CVE
from tracker.model.cve import issue_types
from tracker.model.cvegroup import CVEGroup
from tracker.model.enum import Affected
from tracker.model.enum import Status
from tracker.model.enum import UserRole
from tracker.model.enum import affected_to_status
from tracker.view.add import ERROR_GROUP_WITH_ISSUE_EXISTS
from tracker.view.show import get_bug_project
from .conftest import DEFAULT_ADVISORY_ID
from .conftest import DEFAULT_GROUP_ID
from .conftest import DEFAULT_GROUP_NAME
from .conftest import DEFAULT_ISSUE_ID
from .conftest import ERROR_LOGIN_REQUIRED
from .conftest import create_advisory
from .conftest import create_group
from .conftest import create_package
from .conftest import default_group_dict
from .conftest import logged_in
from .util import AssertionHTMLParser
def set_and_assert_group_data(db, client, route, pkgnames=['foo'], issues=['CVE-1234-1234', 'CVE-2222-2222'],
affected='1.2.3-4', fixed='1.2.3-5', status=Affected.affected, bug_ticket='1234',
reference='https://security.archlinux.org', notes='the cake\nis\na\nlie',
advisory_qualified=False, database='core'):
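# Helper: POSTs the given group form data to `route` and verifies that
# the stored CVEGroup and the rendered page reflect every submitted field.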
data = default_group_dict(dict(
cve='\n'.join(issues),
pkgnames='\n'.join(pkgnames),
affected=affected,
fixed=fixed,
status=status.name,
bug_ticket=bug_ticket,
reference=reference,
notes=notes,
advisory_qualified='true' if advisory_qualified else None))
resp = client.post(route, follow_redirects=True, data=data)
assert 200 == resp.status_code
group = CVEGroup.query.get(DEFAULT_GROUP_ID)
assert DEFAULT_GROUP_ID == group.id
assert affected == group.affected
assert fixed == group.fixed
assert Status.vulnerable == group.status
assert bug_ticket == group.bug_ticket
assert reference == group.reference
assert notes == group.notes
assert advisory_qualified == group.advisory_qualified
assert list(sorted(issues)) == list(sorted([issue.cve.id for issue in group.issues]))
assert list(sorted(pkgnames)) == list(sorted([pkg.pkgname for pkg in group.packages]))
if bug_ticket:
assert TRACKER_BUGTRACKER_URL.format(bug_ticket) in resp.data.decode('utf-8')
else:
# Assert project and product category
project = get_bug_project([database])
assert 'project={}&product_category=13'.format(project) in resp.data.decode('utf-8')
@create_package(name='foo')
@logged_in(role=UserRole.reporter)
def test_reporter_can_add(db, client):
resp = client.post(url_for('tracker.add_group'), follow_redirects=True,
data=default_group_dict(dict(pkgnames='foo')))
assert 200 == resp.status_code
group = CVEGroup.query.get(DEFAULT_GROUP_ID)
assert DEFAULT_GROUP_ID == group.id
@create_package(name='foo')
@create_group(packages=['foo'])
@logged_in(role=UserRole.reporter)
def test_reporter_can_copy(db, client):
resp = client.get(url_for('tracker.copy_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True)
assert 200 == resp.status_code
assert ERROR_LOGIN_REQUIRED not in resp.data.decode()
@create_package(name='foo')
@logged_in
def test_add_implicit_issue_creation(db, client):
issue_id = 'CVE-4242-4242'
resp = client.post(url_for('tracker.add_group'), follow_redirects=True,
data=default_group_dict(dict(pkgnames='foo', cve=issue_id)))
assert 200 == resp.status_code
cve = CVE.query.get(issue_id)
assert issue_id == cve.id
@create_package(name='foo', version='1.2.3-4')
@logged_in
def test_add_group(db, client):
set_and_assert_group_data(db, client, url_for('tracker.add_group'))
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foo'])
@logged_in
def test_edit_group(db, client):
set_and_assert_group_data(db, client, url_for('tracker.edit_group', avg=DEFAULT_GROUP_NAME))
@create_package(name='foo', version='1.2.3-4')
@logged_in
def test_edit_group_bug_url_core(db, client):
set_and_assert_group_data(db, client, url_for('tracker.add_group'), bug_ticket='')
@create_package(name='foo', version='1.2.3-4', database='community')
@logged_in
def test_edit_group_bug_url_community(db, client):
set_and_assert_group_data(db, client, url_for('tracker.add_group'), bug_ticket='', database='community')
def test_add_needs_login(db, client):
resp = client.get(url_for('tracker.add_group'), follow_redirects=True)
assert ERROR_LOGIN_REQUIRED in resp.data.decode()
@create_group
def test_edit_needs_login(db, client):
resp = client.get(url_for('tracker.edit_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True)
assert ERROR_LOGIN_REQUIRED in resp.data.decode()
@create_group
def test_copy_needs_login(db, client):
resp = client.get(url_for('tracker.copy_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True)
assert ERROR_LOGIN_REQUIRED in resp.data.decode()
@logged_in
def test_show_group_not_found(db, client):
resp = client.get(url_for('tracker.show_group', avg='AVG-42'), follow_redirects=True)
assert resp.status_code == NotFound.code
@logged_in
def test_edit_group_not_found(db, client):
resp = client.get(url_for('tracker.edit_group', avg='AVG-42'), follow_redirects=True)
assert resp.status_code == NotFound.code
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4',
issues=['CVE-1111-1234', 'CVE-1234-12345', 'CVE-1111-12345',
'CVE-1234-11112', 'CVE-1234-111111', 'CVE-1234-11111'])
@logged_in
def test_edit_sort_cve_entries(db, client):
resp = client.get(url_for('tracker.edit_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True)
assert 200 == resp.status_code
html = AssertionHTMLParser()
html.feed(resp.data.decode())
assert ['CVE-1111-1234',
'CVE-1111-12345',
'CVE-1234-11111',
'CVE-1234-11112',
'CVE-1234-12345',
'CVE-1234-111111'] == html.get_element_by_id('cve').data.split()
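# The expected order above is a natural sort: the CVE year is compared first,
# then the numeric suffix as an integer rather than as a string. A minimal
# sketch of a key function reproducing it (hypothetical helper, not the
# tracker's own implementation):
def _cve_natural_key(cve_id):
    # 'CVE-1234-11112' -> (1234, 11112); integer comparison gives natural order.
    _, year, number = cve_id.split('-')
    return int(year), int(number)

assert sorted(['CVE-1234-12345', 'CVE-1111-1234', 'CVE-1234-111111',
               'CVE-1111-12345', 'CVE-1234-11112', 'CVE-1234-11111'],
              key=_cve_natural_key) == ['CVE-1111-1234', 'CVE-1111-12345',
                                        'CVE-1234-11111', 'CVE-1234-11112',
                                        'CVE-1234-12345', 'CVE-1234-111111']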
@logged_in
def test_copy_group_not_found(db, client):
resp = client.get(url_for('tracker.copy_group', avg='AVG-42'), follow_redirects=True)
assert resp.status_code == NotFound.code
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foo'])
@logged_in
def test_group_package_dropped_from_repo(db, client):
resp = client.get(url_for('tracker.show_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True)
assert 200 == resp.status_code
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foo'])
@logged_in
def test_warn_on_add_group_with_existing_issue(db, client):
pkgnames = ['foo']
issues = ['CVE-1234-1234', 'CVE-2222-2222', DEFAULT_ISSUE_ID]
data = default_group_dict(dict(
cve='\n'.join(issues),
pkgnames='\n'.join(pkgnames)))
resp = client.post(url_for('tracker.add_group'), follow_redirects=True, data=data)
assert 200 == resp.status_code
assert ERROR_GROUP_WITH_ISSUE_EXISTS.format(DEFAULT_GROUP_ID, DEFAULT_ISSUE_ID, pkgnames[0]) in resp.data.decode()
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foo'])
@logged_in
def test_dont_warn_on_add_group_without_existing_issue(db, client):
pkgnames = ['foo']
issues = ['CVE-1234-1234', 'CVE-2222-2222']
data = default_group_dict(dict(
cve='\n'.join(issues),
pkgnames='\n'.join(pkgnames)))
resp = client.post(url_for('tracker.add_group'), follow_redirects=True, data=data)
assert 200 == resp.status_code
assert ERROR_GROUP_WITH_ISSUE_EXISTS.format(DEFAULT_GROUP_ID, DEFAULT_ISSUE_ID, pkgnames[0]) not in resp.data.decode()
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foo'])
@logged_in
def test_warn_on_add_group_with_package_already_having_open_group(db, client):
pkgnames = ['foo']
issues = ['CVE-1234-1234', 'CVE-2222-2222', DEFAULT_ISSUE_ID]
data = default_group_dict(dict(
cve='\n'.join(issues),
pkgnames='\n'.join(pkgnames)))
resp = client.post(url_for('tracker.add_group'), follow_redirects=True, data=data)
assert 200 == resp.status_code
assert ERROR_GROUP_WITH_ISSUE_EXISTS.format(DEFAULT_GROUP_ID, DEFAULT_ISSUE_ID, pkgnames[0]) in resp.data.decode()
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foo'], affected='1.0-1')
@logged_in
def test_add_group_fixed_version_older_than_affected(db, client):
pkgnames = ['foo']
issues = ['CVE-1234-1234', 'CVE-2222-2222']
data = default_group_dict(dict(
cve='\n'.join(issues),
pkgnames='\n'.join(pkgnames),
fixed='0.8-1'))
resp = client.post(url_for('tracker.add_group'), follow_redirects=True, data=data)
assert 200 == resp.status_code
assert 'Version must be newer.' in resp.data.decode()
@create_package(name='foo')
@logged_in
def test_add_group_with_dot_in_pkgrel(db, client):
set_and_assert_group_data(db, client, url_for('tracker.add_group'), affected='1.2-3.4')
@create_package(name='foo')
@logged_in
def test_dont_add_group_with_dot_at_beginning_of_pkgrel(db, client):
pkgnames = ['foo']
issues = [DEFAULT_ISSUE_ID]
affected = '1.3-.37'
data = default_group_dict(dict(
cve='\n'.join(issues),
pkgnames='\n'.join(pkgnames),
affected=affected))
resp = client.post(url_for('tracker.add_group'), follow_redirects=True, data=data)
assert 'Invalid input.' in resp.data.decode()
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foo'])
@logged_in(role=UserRole.reporter)
def test_reporter_can_delete(db, client):
resp = client.post(url_for('tracker.delete_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True,
data=dict(confirm=True))
assert 200 == resp.status_code
avg = CVEGroup.query.get(DEFAULT_GROUP_ID)
assert avg is None
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foo'])
@logged_in(role=UserRole.reporter)
def test_abort_delete(db, client):
resp = client.post(url_for('tracker.delete_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True,
data=dict(abort=True))
assert 200 == resp.status_code
avg = CVEGroup.query.get(DEFAULT_GROUP_ID)
assert DEFAULT_GROUP_ID == avg.id
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foo'])
def test_delete_needs_login(db, client):
resp = client.post(url_for('tracker.delete_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True)
assert ERROR_LOGIN_REQUIRED in resp.data.decode()
@logged_in
def test_delete_issue_not_found(db, client):
resp = client.post(url_for('tracker.delete_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True)
assert resp.status_code == NotFound.code
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
@create_advisory(id=DEFAULT_ADVISORY_ID, group_package_id=DEFAULT_GROUP_ID, advisory_type=issue_types[1])
@logged_in
def test_forbid_delete_with_advisory(db, client):
resp = client.post(url_for('tracker.delete_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True)
assert Forbidden.code == resp.status_code
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4',
issues=['CVE-1111-1234', 'CVE-1234-12345', 'CVE-1111-12345',
'CVE-1234-11112', 'CVE-1234-111111', 'CVE-1234-11111'])
@logged_in
def test_show_group_sort_cve_entries(db, client):
resp = client.get(url_for('tracker.show_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True)
assert 200 == resp.status_code
html = AssertionHTMLParser()
html.feed(resp.data.decode())
cves = []
for e in html.get_elements_by_tag('a'):
if len(e.attrs) == 1 and e.attrs[0][0] == 'href':
if e.data.startswith("CVE") and e.attrs[0][1].startswith("/CVE"):
cves.append(e.data.strip())
assert ['CVE-1234-111111',
'CVE-1234-12345',
'CVE-1234-11112',
'CVE-1234-11111',
'CVE-1111-12345',
'CVE-1111-1234'] == cves
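# The show view lists the same issues newest-first, i.e. the natural sort
# sketched above applied in reverse (key=_cve_natural_key, reverse=True).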
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
@create_advisory(id=DEFAULT_ADVISORY_ID, group_package_id=DEFAULT_GROUP_ID, advisory_type=issue_types[1])
def test_show_group_json(db, client):
resp = client.get(url_for('tracker.show_group_json', avg=DEFAULT_GROUP_NAME, postfix='/json'), follow_redirects=True)
assert 200 == resp.status_code
data = resp.get_json()
assert data['name'] == DEFAULT_GROUP_NAME
assert data['issues'] == [DEFAULT_ISSUE_ID]
assert data['packages'] == ['foo']
assert data['affected'] == '1.2.3-3'
assert data['fixed'] == '1.2.3-4'
def test_show_group_json_not_found(db, client):
resp = client.get(url_for('tracker.show_group_json', avg=DEFAULT_GROUP_NAME, postfix='/json'), follow_redirects=True)
assert NotFound.code == resp.status_code
@create_package(name='foo', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
def test_affected_to_status_fixed(db, client):
avg = CVEGroup.query.get(DEFAULT_GROUP_ID)
status = affected_to_status(Affected.affected, 'foo', avg.fixed)
assert status == Status.fixed
@create_package(name='foo', version='1.2.3-3')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
def test_affected_to_status_vulnerable(db, client):
avg = CVEGroup.query.get(DEFAULT_GROUP_ID)
status = affected_to_status(Affected.affected, 'foo', avg.fixed)
assert status == Status.vulnerable
@create_package(name='foo', version='1.2.3-3', database='testing')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
def test_affected_to_status_vulnerable_testing(db, client):
avg = CVEGroup.query.get(DEFAULT_GROUP_ID)
status = affected_to_status(Affected.affected, 'foo', avg.fixed)
assert status == Status.vulnerable
@create_package(name='foo', version='1.2.3-3')
@create_package(name='foo', version='1.2.3-4', database='testing')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
def test_affected_to_status_testing(db, client):
avg = CVEGroup.query.get(DEFAULT_GROUP_ID)
status = affected_to_status(Affected.affected, 'foo', avg.fixed)
assert status == Status.testing
@create_package(name='foo', version='1.2.3-4', database='testing')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
def test_affected_to_status_testing_only(db, client):
avg = CVEGroup.query.get(DEFAULT_GROUP_ID)
status = affected_to_status(Affected.affected, 'foo', avg.fixed)
assert status == Status.testing
@create_package(name='foo', version='1.2.3-3', database='community')
@create_package(name='foo', version='1.2.3-4', database='community-testing')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
def test_affected_to_status_community_testing(db, client):
avg = CVEGroup.query.get(DEFAULT_GROUP_ID)
status = affected_to_status(Affected.affected, 'foo', avg.fixed)
assert status == Status.testing
@create_package(name='foo', version='1.2.3-3')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
def test_affected_to_status_unknown(db, client):
avg = CVEGroup.query.get(DEFAULT_GROUP_ID)
status = affected_to_status(Affected.unknown, 'foo', avg.fixed)
assert status == Status.unknown
@create_package(name='foo', version='1.2.3-3')
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
def test_affected_to_status_not_affected(db, client):
avg = CVEGroup.query.get(DEFAULT_GROUP_ID)
status = affected_to_status(Affected.not_affected, 'foo', avg.fixed)
assert status == Status.not_affected
@create_group(id=DEFAULT_GROUP_ID, packages=['foo'], affected='1.2.3-3', fixed='1.2.3-4')
def test_affected_to_status_unknown_package(db, client):
avg = CVEGroup.query.get(DEFAULT_GROUP_ID)
status = affected_to_status(Affected.affected, 'foo', avg.fixed)
assert status == Status.unknown
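# Taken together, these tests pin down affected_to_status: a package is 'fixed'
# once its repo version reaches the fixed version, 'testing' when the fix exists
# only in a *testing repository, 'vulnerable' otherwise, and Affected.unknown,
# Affected.not_affected, or an unknown package map straight through.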
@create_package(name='foopkg', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foopkg'])
@logged_in
def test_edit_group_non_relational_field_updates_changed_date(db, client):
group_changed_old = CVEGroup.query.get(DEFAULT_GROUP_ID).changed
data = default_group_dict(dict(notes='regular field change'))
resp = client.post(url_for('tracker.edit_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True, data=data)
assert 200 == resp.status_code
assert f'Edited {DEFAULT_GROUP_NAME}' in resp.data.decode()
group = CVEGroup.query.get(DEFAULT_GROUP_ID)
assert group.changed > group_changed_old
@create_package(name='foopkg', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foopkg'])
@logged_in
def test_edit_group_relational_field_issues_updates_changed_date(db, client):
group_changed_old = CVEGroup.query.get(DEFAULT_GROUP_ID).changed
data = default_group_dict(dict(cve=' '.join(['CVE-1234-1111', 'CVE-1234-2222'])))
resp = client.post(url_for('tracker.edit_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True, data=data)
assert 200 == resp.status_code
assert f'Edited {DEFAULT_GROUP_NAME}' in resp.data.decode()
group = CVEGroup.query.get(DEFAULT_GROUP_ID)
assert group.changed > group_changed_old
@create_package(name='foopkg', version='1.2.3-4', base='foo')
@create_package(name='foopkg2', version='1.2.3-4', base='foo')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foopkg'])
@logged_in
def test_edit_group_relational_field_packages_updates_changed_date(db, client):
group_changed_old = CVEGroup.query.get(DEFAULT_GROUP_ID).changed
data = default_group_dict(dict(pkgnames=' '.join(['foopkg', 'foopkg2'])))
resp = client.post(url_for('tracker.edit_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True, data=data)
assert 200 == resp.status_code
assert f'Edited {DEFAULT_GROUP_NAME}' in resp.data.decode()
group = CVEGroup.query.get(DEFAULT_GROUP_ID)
assert group.changed > group_changed_old
@create_package(name='foopkg', version='1.2.3-4')
@create_group(id=DEFAULT_GROUP_ID, issues=[DEFAULT_ISSUE_ID], packages=['foopkg'])
@logged_in
def test_edit_group_does_nothing_when_data_is_same(db, client):
group_old = CVEGroup.query.get(DEFAULT_GROUP_ID)
group_changed_old = group_old.changed
data = default_group_dict(dict(status=Affected.affected.name))
resp = client.post(url_for('tracker.edit_group', avg=DEFAULT_GROUP_NAME), follow_redirects=True, data=data)
assert 200 == resp.status_code
assert f'Edited {DEFAULT_GROUP_NAME}' not in resp.data.decode()
group = CVEGroup.query.get(DEFAULT_GROUP_ID)
assert group.changed == group_changed_old
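# To run just this module (assumes a configured test environment with the
# fixtures from conftest.py available):
#   python -m pytest test/test_group.py -k 'group'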
| mit |
ellisztamas/faps | faps/pr_unsampled.py | 1 | 2716 | import numpy as np
def pr_unsampled(offspring_diploid, maternal_diploid, allele_freqs, offspring_genotype, maternal_genotype, male_genotype, mu):
"""
Calculate the transitions probability for a given set of parental and offspring
alleles.
Transitipn probabilities are then weight by the probability of drawing the allele
from the population, and the probability that this allele is the true allele, given
observed genotype data and the error rate mu.
ARGUMENTS:
offspring_diploid, maternal_diploid, male_diploid: arrays of diploid genotypes for
the offspring, mothers and fathers.
allele_freqs = vector of population allele frequencies.
offspring_genotype, maternal_genotype, male_genotype: a two-element list of zeroes
and ones indicating the diploid genotype of males, mothers and offspring to be
considered.
mu: point estimate of the genotyping error rate.
RETURNS:
A 3-dimensional array of probabilities indexing offspring, candidate males, and loci.
These are given in linear, rather than log space.
"""
# an array of all possible transition probabilities indexed as [offspring, mother, father].
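# Genotypes are coded as allele counts (0, 1 or 2). For example, the entry
# [0, 0, 0] = 1: two parents homozygous for the same allele always produce
# that homozygote, while [1, 0, 0] = 0 since two such parents can never
# produce a heterozygote.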
trans_prob_array = np.array([[[1, 0.5, 0 ],
[0.5,0.25,0 ],
[0, 0, 0 ]],
[[0, 0.5, 1 ],
[0.5,0.5, 0.5],
[1, 0.5, 0 ]],
[[0, 0, 0 ],
[0, 0.25,0.5],
[0, 0.5, 1 ]]])
# the transition probability for the given genotypes.
trans_prob = trans_prob_array[offspring_genotype, maternal_genotype, male_genotype]
# Probabilities that the observed offspring marker data match observed data.
pr_offs = np.zeros([offspring_diploid.shape[0], offspring_diploid.shape[1]])
pr_offs[offspring_diploid == offspring_genotype] = 1-mu
pr_offs[offspring_diploid != offspring_genotype] = mu
# Probabilities that the observed maternal marker data match observed data.
pr_mothers = np.zeros([maternal_diploid.shape[0], maternal_diploid.shape[1]])
pr_mothers[maternal_diploid == maternal_genotype] = 1-mu
pr_mothers[maternal_diploid != maternal_genotype] = mu
# The father's genotype probability is drawn from population allele frequencies.
if male_genotype == 0: pr_males = allele_freqs**2
if male_genotype == 1: pr_males = allele_freqs*(1-allele_freqs)
if male_genotype == 2: pr_males = (1-allele_freqs)**2
return trans_prob * pr_males * pr_mothers * pr_offs
| mit |
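# A minimal usage sketch for pr_unsampled. Assumptions are taken from the
# function body rather than stated API docs: genotypes are coded 0/1/2 and the
# diploid arrays are shaped (n_offspring, n_loci); the toy values below are
# hypothetical.
import numpy as np
from faps.pr_unsampled import pr_unsampled

offspring = np.array([[0, 1, 2],
                      [1, 1, 0]])  # 2 offspring x 3 loci
mothers = np.array([[0, 1, 1],
                    [1, 2, 0]])
allele_freqs = np.array([0.1, 0.5, 0.3])

# Per-offspring, per-locus probability of the data given an unsampled
# heterozygous father and a 1% genotyping error rate.
probs = pr_unsampled(offspring, mothers, allele_freqs,
                     offspring_genotype=1, maternal_genotype=1,
                     male_genotype=1, mu=0.01)
print(probs.shape)  # (2, 3)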